//===---- CGOpenMPRuntimeNVPTX.cpp - Interface to OpenMP NVPTX Runtimes ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation specialized to NVPTX
// targets.
//
//===----------------------------------------------------------------------===//
#include "CGOpenMPRuntimeNVPTX.h"
#include "CodeGenFunction.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace clang;
using namespace CodeGen;

namespace {
enum OpenMPRTLFunctionNVPTX {
  /// Call to void __kmpc_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime);
  OMPRTL_NVPTX__kmpc_kernel_init,
  /// Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
  OMPRTL_NVPTX__kmpc_kernel_deinit,
  /// Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
  OMPRTL_NVPTX__kmpc_spmd_kernel_init,
  /// Call to void __kmpc_spmd_kernel_deinit();
  OMPRTL_NVPTX__kmpc_spmd_kernel_deinit,
  /// Call to void __kmpc_kernel_prepare_parallel(void
  /// *outlined_function, int16_t
  /// IsOMPRuntimeInitialized);
  OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
  /// Call to bool __kmpc_kernel_parallel(void **outlined_function,
  /// int16_t IsOMPRuntimeInitialized);
  OMPRTL_NVPTX__kmpc_kernel_parallel,
  /// Call to void __kmpc_kernel_end_parallel();
  OMPRTL_NVPTX__kmpc_kernel_end_parallel,
  /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_serialized_parallel,
  /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_end_serialized_parallel,
  /// Call to int32_t __kmpc_shuffle_int32(int32_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int32,
  /// Call to int64_t __kmpc_shuffle_int64(int64_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int64,
  /// Call to __kmpc_nvptx_parallel_reduce_nowait(kmp_int32
  /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
  OMPRTL_NVPTX__kmpc_parallel_reduce_nowait,
  /// Call to __kmpc_nvptx_simd_reduce_nowait(kmp_int32
  /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
  OMPRTL_NVPTX__kmpc_simd_reduce_nowait,
  /// Call to __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
  /// int32_t num_vars, size_t reduce_size, void *reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhs, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num),
  /// void (*kmp_CopyToScratchpadFctPtr)(void *reduce_data, void * scratchpad,
  /// int32_t index, int32_t width),
  /// void (*kmp_LoadReduceFctPtr)(void *reduce_data, void * scratchpad, int32_t
  /// index, int32_t width, int32_t reduce))
  OMPRTL_NVPTX__kmpc_teams_reduce_nowait,
  /// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
  OMPRTL_NVPTX__kmpc_end_reduce_nowait,
  /// Call to void __kmpc_data_sharing_init_stack();
  OMPRTL_NVPTX__kmpc_data_sharing_init_stack,
  /// Call to void __kmpc_data_sharing_init_stack_spmd();
  OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd,
  /// Call to void* __kmpc_data_sharing_push_stack(size_t size,
  /// int16_t UseSharedMemory);
  OMPRTL_NVPTX__kmpc_data_sharing_push_stack,
  /// Call to void __kmpc_data_sharing_pop_stack(void *a);
  OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
  /// Call to void __kmpc_begin_sharing_variables(void ***args,
  /// size_t n_args);
  OMPRTL_NVPTX__kmpc_begin_sharing_variables,
  /// Call to void __kmpc_end_sharing_variables();
  OMPRTL_NVPTX__kmpc_end_sharing_variables,
  /// Call to void __kmpc_get_shared_variables(void ***GlobalArgs)
  OMPRTL_NVPTX__kmpc_get_shared_variables,
  /// Call to uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_parallel_level,
  /// Call to int8_t __kmpc_is_spmd_exec_mode();
  OMPRTL_NVPTX__kmpc_is_spmd_exec_mode,
};
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::Value *EnterCallee = nullptr;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::Value *ExitCallee = nullptr;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional = false;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
                llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
                bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};
/// A class to track the execution mode when codegening directives within
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code.
class ExecutionModeRAII {
private:
  CGOpenMPRuntimeNVPTX::ExecutionMode SavedMode;
  CGOpenMPRuntimeNVPTX::ExecutionMode &Mode;

public:
  ExecutionModeRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &Mode, bool IsSPMD)
      : Mode(Mode) {
    SavedMode = Mode;
    Mode = IsSPMD ? CGOpenMPRuntimeNVPTX::EM_SPMD
                  : CGOpenMPRuntimeNVPTX::EM_NonSPMD;
  }
  ~ExecutionModeRAII() { Mode = SavedMode; }
};
/// GPU Configuration: This information can be derived from cuda registers,
/// however, providing compile time constants helps generate more efficient
/// code. For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  WarpSize = 32,
  /// Number of bits required to represent a lane identifier, which is
  /// computed as log_2(WarpSize).
  LaneIDBits = 5,
  LaneIDMask = WarpSize - 1,

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 256,
};
enum NamedBarrier : unsigned {
  /// Synchronize on this barrier #ID using a named barrier primitive.
  /// Only the subset of active threads in a parallel region arrive at the
  /// barrier.
  NB_Parallel = 1,
};
/// Get the list of variables that can escape their declaration context.
class CheckVarsEscapingDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
  CodeGenFunction &CGF;
  llvm::SetVector<const ValueDecl *> EscapedDecls;
  llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
  llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
  RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  bool AllEscaped = false;
  bool IsForCombinedParallelRegion = false;

  static llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
  isDeclareTargetDeclaration(const ValueDecl *VD) {
    for (const Decl *D : VD->redecls()) {
      if (!D->hasAttrs())
        continue;
      if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
        return Attr->getMapType();
    }
    return llvm::None;
  }
  void markAsEscaped(const ValueDecl *VD) {
    // Do not globalize declare target variables.
    if (!isa<VarDecl>(VD) || isDeclareTargetDeclaration(VD))
      return;
    VD = cast<ValueDecl>(VD->getCanonicalDecl());
    // Variables captured by value must be globalized.
    if (auto *CSI = CGF.CapturedStmtInfo) {
      if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
        // Check if need to capture the variable that was already captured by
        // value in the outer region.
        if (!IsForCombinedParallelRegion) {
          if (!FD->hasAttrs())
            return;
          const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
          if (!Attr)
            return;
          if (!isOpenMPPrivate(
                  static_cast<OpenMPClauseKind>(Attr->getCaptureKind())) ||
              Attr->getCaptureKind() == OMPC_map)
            return;
        }
        if (!FD->getType()->isReferenceType()) {
          assert(!VD->getType()->isVariablyModifiedType() &&
                 "Parameter captured by value with variably modified type");
          EscapedParameters.insert(VD);
        } else if (!IsForCombinedParallelRegion) {
          return;
        }
      }
    }
    if ((!CGF.CapturedStmtInfo ||
         (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
        VD->getType()->isReferenceType())
      // Do not globalize variables with reference type.
      return;
    if (VD->getType()->isVariablyModifiedType())
      EscapedVariableLengthDecls.insert(VD);
    else
      EscapedDecls.insert(VD);
  }
  void VisitValueDecl(const ValueDecl *VD) {
    if (VD->getType()->isLValueReferenceType())
      markAsEscaped(VD);
    if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = VD->getType()->isLValueReferenceType();
        Visit(VarD->getInit());
        AllEscaped = SavedAllEscaped;
      }
    }
  }
  void VisitOpenMPCapturedStmt(const CapturedStmt *S,
                               ArrayRef<OMPClause *> Clauses,
                               bool IsCombinedParallelRegion) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
        if (IsCombinedParallelRegion) {
          // Check if the variable is privatized in the combined construct and
          // those private copies must be shared in the inner parallel
          // directive.
          IsForCombinedParallelRegion = false;
          for (const OMPClause *C : Clauses) {
            if (!isOpenMPPrivate(C->getClauseKind()) ||
                C->getClauseKind() == OMPC_reduction ||
                C->getClauseKind() == OMPC_linear ||
                C->getClauseKind() == OMPC_private)
              continue;
            ArrayRef<const Expr *> Vars;
            if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
              Vars = PC->getVarRefs();
            else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
              Vars = PC->getVarRefs();
            else
              llvm_unreachable("Unexpected clause.");
            for (const auto *E : Vars) {
              const Decl *D =
                  cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
              if (D == VD->getCanonicalDecl()) {
                IsForCombinedParallelRegion = true;
                break;
              }
            }
            if (IsForCombinedParallelRegion)
              break;
          }
        }
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
        IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
      }
    }
  }
  typedef std::pair<CharUnits /*Align*/, const ValueDecl *> VarsDataTy;
  static bool stable_sort_comparator(const VarsDataTy P1, const VarsDataTy P2) {
    return P1.first > P2.first;
  }

  void buildRecordForGlobalizedVars() {
    assert(!GlobalizedRD &&
           "Record for globalized variables is built already.");
    if (EscapedDecls.empty())
      return;
    ASTContext &C = CGF.getContext();
    SmallVector<VarsDataTy, 4> GlobalizedVars;
    for (const ValueDecl *D : EscapedDecls)
      GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
    std::stable_sort(GlobalizedVars.begin(), GlobalizedVars.end(),
                     stable_sort_comparator);
    // Build struct _globalized_locals_ty {
    //         /* globalized vars */
    //       };
    GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
    GlobalizedRD->startDefinition();
    for (const auto &Pair : GlobalizedVars) {
      const ValueDecl *VD = Pair.second;
      QualType Type = VD->getType();
      if (Type->isLValueReferenceType())
        Type = C.getPointerType(Type.getNonReferenceType());
      else
        Type = Type.getNonReferenceType();
      SourceLocation Loc = VD->getLocation();
      auto *Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      GlobalizedRD->addDecl(Field);
      if (VD->hasAttrs()) {
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
                                                 E(VD->getAttrs().end());
             I != E; ++I)
          Field->addAttr(*I);
      }
      MappedDeclsFields.try_emplace(VD, Field);
    }
    GlobalizedRD->completeDefinition();
  }
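  // Illustrative sketch of the layout produced above (example variables, not
  // code from this file): if 'double d' and 'int i' escape, the sort by
  // decreasing alignment yields roughly
  //   struct _globalized_locals_ty { double d; int i; };
  // so each field stays naturally aligned without interior padding.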
public:
  CheckVarsEscapingDeclContext(CodeGenFunction &CGF) : CGF(CGF) {}
  virtual ~CheckVarsEscapingDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    for (const Decl *D : S->decls())
      if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
        VisitValueDecl(VD);
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
    if (!D)
      return;
    if (!D->hasAssociatedStmt())
      return;
    if (const auto *S =
            dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
      // Do not analyze directives that do not actually require capturing,
      // like `omp for` or `omp simd` directives.
      llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
      getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
      if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
        VisitStmt(S->getCapturedStmt());
        return;
      }
      VisitOpenMPCapturedStmt(
          S, D->clauses(),
          CaptureRegions.back() == OMPD_parallel &&
              isOpenMPDistributeDirective(D->getDirectiveKind()));
    }
  }
  void VisitCapturedStmt(const CapturedStmt *S) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
      }
    }
  }
  void VisitLambdaExpr(const LambdaExpr *E) {
    if (!E)
      return;
    for (const LambdaCapture &C : E->captures()) {
      if (C.capturesVariable()) {
        if (C.getCaptureKind() == LCK_ByRef) {
          const ValueDecl *VD = C.getCapturedVar();
          markAsEscaped(VD);
          if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
            VisitValueDecl(VD);
        }
      }
    }
  }
  void VisitBlockExpr(const BlockExpr *E) {
    if (!E)
      return;
    for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
      if (C.isByRef()) {
        const VarDecl *VD = C.getVariable();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
          VisitValueDecl(VD);
      }
    }
  }
  void VisitCallExpr(const CallExpr *E) {
    if (!E)
      return;
    for (const Expr *Arg : E->arguments()) {
      if (!Arg)
        continue;
      if (Arg->isLValue()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = true;
        Visit(Arg);
        AllEscaped = SavedAllEscaped;
      } else {
        Visit(Arg);
      }
    }
    Visit(E->getCallee());
  }
  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (!E)
      return;
    const ValueDecl *VD = E->getDecl();
    if (AllEscaped)
      markAsEscaped(VD);
    if (isa<OMPCapturedExprDecl>(VD))
      VisitValueDecl(VD);
    else if (const auto *VarD = dyn_cast<VarDecl>(VD))
      if (VarD->isInitCapture())
        VisitValueDecl(VD);
  }
  void VisitUnaryOperator(const UnaryOperator *E) {
    if (!E)
      return;
    if (E->getOpcode() == UO_AddrOf) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
    if (!E)
      return;
    if (E->getCastKind() == CK_ArrayToPointerDecay) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitExpr(const Expr *E) {
    if (!E)
      return;
    bool SavedAllEscaped = AllEscaped;
    if (!E->isLValue())
      AllEscaped = false;
    for (const Stmt *Child : E->children())
      if (Child)
        Visit(Child);
    AllEscaped = SavedAllEscaped;
  }
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }
  /// Returns the record that handles all the escaped local variables and used
  /// instead of their original storage.
  const RecordDecl *getGlobalizedRecord() {
    if (!GlobalizedRD)
      buildRecordForGlobalizedVars();
    return GlobalizedRD;
  }

  /// Returns the field in the globalized record for the escaped variable.
  const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
    assert(GlobalizedRD &&
           "Record for globalized variables must be generated already.");
    auto I = MappedDeclsFields.find(VD);
    if (I == MappedDeclsFields.end())
      return nullptr;
    return I->getSecond();
  }

  /// Returns the list of the escaped local variables/parameters.
  ArrayRef<const ValueDecl *> getEscapedDecls() const {
    return EscapedDecls.getArrayRef();
  }

  /// Checks if the escaped local variable is actually a parameter passed by
  /// value.
  const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
    return EscapedParameters;
  }

  /// Returns the list of the escaped variables with the variably modified
  /// types.
  ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
    return EscapedVariableLengthDecls.getArrayRef();
  }
};
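
// A minimal sketch of what the visitor above flags (example code, not from
// this file):
//
//   #pragma omp target
//   {
//     int x = 0;
//     #pragma omp parallel
//     x += 1;   // 'x' is captured by reference by the parallel region,
//   }           // so it escapes and must be globalized.
//
// Escaped declarations like 'x' are later given storage in the
// _globalized_locals_ty record instead of a thread-private alloca.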
} // anonymous namespace
/// Get the GPU warp size.
static llvm::Value *getNVPTXWarpSize(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize),
      "nvptx_warp_size");
}
/// Get the id of the current thread on the GPU.
static llvm::Value *getNVPTXThreadID(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x),
      "nvptx_tid");
}
/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  return Bld.CreateAShr(getNVPTXThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}
/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  return Bld.CreateAnd(getNVPTXThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}
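
// Worked example of the bit arithmetic above, assuming WarpSize = 32
// (LaneIDBits = 5, LaneIDMask = 31): for thread id 37,
//   warp_id = 37 >> 5 = 1  and  lane_id = 37 & 31 = 5,
// i.e. thread 37 is lane 5 of warp 1.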
/// Get the maximum number of threads in a block of the GPU.
static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x),
      "nvptx_num_threads");
}
/// Get barrier to synchronize all threads in a block.
static void getNVPTXCTABarrier(CodeGenFunction &CGF) {
  CGF.EmitRuntimeCall(llvm::Intrinsic::getDeclaration(
      &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier0));
}
/// Get barrier #ID to synchronize selected (multiple of warp size) threads in
/// a CTA.
static void getNVPTXBarrier(CodeGenFunction &CGF, int ID,
                            llvm::Value *NumThreads) {
  CGBuilderTy &Bld = CGF.Builder;
  llvm::Value *Args[] = {Bld.getInt32(ID), NumThreads};
  CGF.EmitRuntimeCall(llvm::Intrinsic::getDeclaration(
                          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier),
                      Args);
}
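
// For reference (a PTX-level sketch, assuming the usual NVPTX lowering):
// nvvm_barrier0 becomes 'bar.sync 0', which synchronizes the whole CTA,
// while nvvm_barrier with (ID, NumThreads) becomes roughly
// 'bar.sync ID, NumThreads', a named barrier on which only NumThreads
// threads (a multiple of the warp size) participate.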
/// Synchronize all GPU threads in a block.
static void syncCTAThreads(CodeGenFunction &CGF) { getNVPTXCTABarrier(CGF); }

/// Synchronize worker threads in a parallel region.
static void syncParallelThreads(CodeGenFunction &CGF, llvm::Value *NumThreads) {
  return getNVPTXBarrier(CGF, NB_Parallel, NumThreads);
}
/// Get the value of the thread_limit clause in the teams directive.
/// For the 'generic' execution mode, the runtime encodes thread_limit in
/// the launch parameters, always starting thread_limit+warpSize threads per
/// CTA. The threads in the last warp are reserved for master execution.
/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
                                   bool IsInSPMDExecutionMode = false) {
  CGBuilderTy &Bld = CGF.Builder;
  return IsInSPMDExecutionMode
             ? getNVPTXNumThreads(CGF)
             : Bld.CreateNUWSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
                                "thread_limit");
}
/// Get the thread id of the OMP master thread.
/// The master thread id is the first thread (lane) of the last warp in the
/// GPU block. Warp size is assumed to be some power of 2.
/// Thread id is 0 indexed.
/// E.g: If NumThreads is 33, master id is 32.
///      If NumThreads is 64, master id is 32.
///      If NumThreads is 1024, master id is 992.
static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  llvm::Value *NumThreads = getNVPTXNumThreads(CGF);

  // We assume that the warp size is a power of 2.
  llvm::Value *Mask = Bld.CreateNUWSub(getNVPTXWarpSize(CGF), Bld.getInt32(1));

  return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
                       Bld.CreateNot(Mask), "master_tid");
}
CGOpenMPRuntimeNVPTX::WorkerFunctionState::WorkerFunctionState(
    CodeGenModule &CGM, SourceLocation Loc)
    : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
      Loc(Loc) {
  createWorkerFunction(CGM);
}

void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction(
    CodeGenModule &CGM) {
  // Create a worker function with no arguments.

  WorkerFn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      /*placeholder=*/"_worker", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
  WorkerFn->setDoesNotRecurse();
}
CGOpenMPRuntimeNVPTX::ExecutionMode
CGOpenMPRuntimeNVPTX::getExecutionMode() const {
  return CurrentExecutionMode;
}
static CGOpenMPRuntimeNVPTX::DataSharingMode
getDataSharingMode(CodeGenModule &CGM) {
  return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeNVPTX::CUDA
                                          : CGOpenMPRuntimeNVPTX::Generic;
}
/// Checks if the \p Body is the \a CompoundStmt and returns its child statement
/// iff there is only one.
static const Stmt *getSingleCompoundChild(const Stmt *Body) {
  if (const auto *C = dyn_cast<CompoundStmt>(Body))
    if (C->size() == 1)
      return C->body_front();
  return Body;
}
/// Check if the parallel directive has an 'if' clause with non-constant or
/// false condition. Also, check if the number of threads is strictly specified
/// and run those directives in non-SPMD mode.
static bool hasParallelIfNumThreadsClause(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  if (D.hasClausesOfKind<OMPNumThreadsClause>())
    return true;
  for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
    OpenMPDirectiveKind NameModifier = C->getNameModifier();
    if (NameModifier != OMPD_parallel && NameModifier != OMPD_unknown)
      continue;
    const Expr *Cond = C->getCondition();
    bool Result;
    if (!Cond->EvaluateAsBooleanCondition(Result, Ctx) || !Result)
      return true;
  }
  return false;
}
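
// Examples (sketches, not from this file) of what the check above rejects:
//
//   #pragma omp target parallel if(n > 10)      // non-constant 'if' condition
//   #pragma omp target parallel num_threads(4)  // explicit num_threads
//
// Either clause forces the directive into non-SPMD (generic) mode, while
//   #pragma omp target parallel if(1)
// still qualifies for SPMD execution because the condition folds to true.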
/// Check for inner (nested) SPMD construct, if any
static bool hasNestedSPMDDirective(ASTContext &Ctx,
                                   const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body = CS->getCapturedStmt()->IgnoreContainers();
  const Stmt *ChildStmt = getSingleCompoundChild(Body);

  if (const auto *NestedDir = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind) &&
          !hasParallelIfNumThreadsClause(Ctx, *NestedDir))
        return true;
      if (DKind == OMPD_teams || DKind == OMPD_teams_distribute) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers();
        if (!Body)
          return false;
        ChildStmt = getSingleCompoundChild(Body);
        if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              !hasParallelIfNumThreadsClause(Ctx, *NND))
            return true;
          if (DKind == OMPD_distribute) {
            Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers();
            if (!Body)
              return false;
            ChildStmt = getSingleCompoundChild(Body);
            if (!ChildStmt)
              return false;
            if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
              DKind = NND->getDirectiveKind();
              return isOpenMPParallelDirective(DKind) &&
                     !hasParallelIfNumThreadsClause(Ctx, *NND);
            }
          }
        }
      }
      return false;
    case OMPD_target_teams:
      if (isOpenMPParallelDirective(DKind) &&
          !hasParallelIfNumThreadsClause(Ctx, *NestedDir))
        return true;
      if (DKind == OMPD_distribute) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers();
        if (!Body)
          return false;
        ChildStmt = getSingleCompoundChild(Body);
        if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          return isOpenMPParallelDirective(DKind) &&
                 !hasParallelIfNumThreadsClause(Ctx, *NND);
        }
      }
      return false;
    case OMPD_target_teams_distribute:
      return isOpenMPParallelDirective(DKind) &&
             !hasParallelIfNumThreadsClause(Ctx, *NestedDir);
    case OMPD_target_simd:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_unknown:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}
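
// Illustrative examples of the classification above (sketches, not from this
// file):
//
//   #pragma omp target
//   #pragma omp teams distribute
//   #pragma omp parallel for     // nested parallel found -> SPMD mode
//   for (...) ...
//
//   #pragma omp target
//   { foo(); }                   // no parallel region -> generic mode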
static bool supportsSPMDExecutionMode(ASTContext &Ctx,
                                      const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
  case OMPD_target_teams_distribute:
    return hasNestedSPMDDirective(Ctx, D);
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    return !hasParallelIfNumThreadsClause(Ctx, D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_unknown:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}
void CGOpenMPRuntimeNVPTX::emitNonSPMDKernel(const OMPExecutableDirective &D,
                                             StringRef ParentName,
                                             llvm::Function *&OutlinedFn,
                                             llvm::Constant *&OutlinedFnID,
                                             bool IsOffloadEntry,
                                             const RegionCodeGenTy &CodeGen) {
  ExecutionModeRAII ModeRAII(CurrentExecutionMode, /*IsSPMD=*/false);
  EntryFunctionState EST;
  WorkerFunctionState WST(CGM, D.getLocStart());
  Work.clear();
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
    CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
                         CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
        : EST(EST), WST(WST) {}
    void Enter(CodeGenFunction &CGF) override {
      static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
          .emitNonSPMDEntryHeader(CGF, EST, WST);
    }
    void Exit(CodeGenFunction &CGF) override {
      static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
          .emitNonSPMDEntryFooter(CGF, EST);
    }
  } Action(EST, WST);
  CodeGen.setAction(Action);
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);

  // Now change the name of the worker function to correspond to this target
  // region's entry function.
  WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));

  // Create the worker function
  emitWorkerFunction(WST);
}
// Setup NVPTX threads for master-worker OpenMP scheme.
void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST,
                                                  WorkerFunctionState &WST) {
  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
  llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
  llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::Value *IsWorker =
      Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF));
  Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);

  CGF.EmitBlock(WorkerBB);
  emitCall(CGF, WST.Loc, WST.WorkerFn);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(MasterCheckBB);
  llvm::Value *IsMaster =
      Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
  Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);

  CGF.EmitBlock(MasterBB);
  IsInTargetMasterThreadRegion = true;
  // SEQUENTIAL (MASTER) REGION START
  // First action in sequential region:
  // Initialize the state of the OpenMP runtime library on the GPU.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {getThreadLimit(CGF),
                         Bld.getInt16(/*RequiresOMPRuntime=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);

  // For data sharing, we need to initialize the stack.
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(
          OMPRTL_NVPTX__kmpc_data_sharing_init_stack));

  emitGenericVarsProlog(CGF, WST.Loc);
}
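
// Control-flow sketch of the non-SPMD (generic) kernel entry emitted above,
// assuming the usual thread layout (last warp reserved for the master):
//
//   entry:        tid < thread_limit ? .worker : .mastercheck
//   .worker:      call <kernel>_worker(); br .exit
//   .mastercheck: tid == master_tid ? .master : .exit
//   .master:      __kmpc_kernel_init(...); ...target region body...
//   .exit:        ret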
void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST) {
  IsInTargetMasterThreadRegion = false;
  if (!CGF.HaveInsertPoint())
    return;

  emitGenericVarsEpilog(CGF);

  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
  CGF.EmitBranch(TerminateBB);

  CGF.EmitBlock(TerminateBB);
  // Signal termination condition.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), Args);
  // Barrier to terminate worker threads.
  syncCTAThreads(CGF);
  // Master thread jumps to exit point.
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}
void CGOpenMPRuntimeNVPTX::emitSPMDKernel(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry,
                                          const RegionCodeGenTy &CodeGen) {
  ExecutionModeRAII ModeRAII(CurrentExecutionMode, /*IsSPMD=*/true);
  EntryFunctionState EST;

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeNVPTX &RT;
    CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
    const OMPExecutableDirective &D;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
                         CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
                         const OMPExecutableDirective &D)
        : RT(RT), EST(EST), D(D) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitSPMDEntryHeader(CGF, EST, D);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.emitSPMDEntryFooter(CGF, EST);
    }
  } Action(*this, EST, D);
  CodeGen.setAction(Action);
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
}
void CGOpenMPRuntimeNVPTX::emitSPMDEntryHeader(
    CodeGenFunction &CGF, EntryFunctionState &EST,
    const OMPExecutableDirective &D) {
  CGBuilderTy &Bld = CGF.Builder;

  // Setup BBs in entry function.
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  // Initialize the OMP state in the runtime; called by all active threads.
  // TODO: Set RequiresOMPRuntime and RequiresDataSharing parameters
  // based on code analysis of the target region.
  llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
                         /*RequiresOMPRuntime=*/Bld.getInt16(1),
                         /*RequiresDataSharing=*/Bld.getInt16(1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);

  // For data sharing, we need to initialize the stack.
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(
          OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd));

  CGF.EmitBranch(ExecuteBB);

  CGF.EmitBlock(ExecuteBB);

  IsInTargetMasterThreadRegion = true;
}
void CGOpenMPRuntimeNVPTX::emitSPMDEntryFooter(CodeGenFunction &CGF,
                                               EntryFunctionState &EST) {
  IsInTargetMasterThreadRegion = false;
  if (!CGF.HaveInsertPoint())
    return;

  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
  CGF.EmitBranch(OMPDeInitBB);

  CGF.EmitBlock(OMPDeInitBB);
  // DeInitialize the OMP state in the runtime; called by all active threads.
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_deinit), None);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}
// Create a unique global variable to indicate the execution mode of this target
// region. The execution mode is either 'generic', or 'spmd' depending on the
// target directive. This variable is picked up by the offload library to setup
// the device appropriately before kernel launch. If the execution mode is
// 'generic', the runtime reserves one warp for the master, otherwise, all
// warps participate in parallel work.
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     bool Mode) {
  auto *GVMode =
      new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
                               llvm::GlobalValue::WeakAnyLinkage,
                               llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
                               Twine(Name, "_exec_mode"));
  CGM.addCompilerUsedGlobal(GVMode);
}
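
// For a kernel named, say, '__omp_offloading_<id>_foo_l10' this emits IR of
// roughly the following shape (a sketch; the mangled name is hypothetical):
//
//   @__omp_offloading_<id>_foo_l10_exec_mode = weak constant i8 0
//
// where, per the 'Mode ? 0 : 1' above, 0 denotes SPMD mode and 1 denotes
// generic mode.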
void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
  ASTContext &Ctx = CGM.getContext();

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
                    WST.Loc, WST.Loc);
  emitWorkerLoop(CGF, WST);
  CGF.FinishFunction();
}
void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
                                          WorkerFunctionState &WST) {
  //
  // The workers enter this loop and wait for parallel work from the master.
  // When the master encounters a parallel region it sets up the work + variable
  // arguments, and wakes up the workers. The workers first check to see if
  // they are required for the parallel region, i.e., within the # of requested
  // parallel threads. The activated workers load the variable arguments and
  // execute the parallel work.
  //

  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
  llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
  llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");

  CGF.EmitBranch(AwaitBB);

  // Workers wait for work from master.
  CGF.EmitBlock(AwaitBB);
  // Wait for parallel work
  syncCTAThreads(CGF);

  Address WorkFn =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
  Address ExecStatus =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
  CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
  CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));

  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {WorkFn.getPointer(),
                         /*RequiresOMPRuntime=*/Bld.getInt16(1)};
  llvm::Value *Ret = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
  Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);

  // On termination condition (workid == 0), exit loop.
  llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
  llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
  Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);

  // Activate requested workers.
  CGF.EmitBlock(SelectWorkersBB);
  llvm::Value *IsActive =
      Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
  Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);

  // Signal start of parallel region.
  CGF.EmitBlock(ExecuteBB);

  // Process work items: outlined parallel functions.
  for (llvm::Function *W : Work) {
    // Try to match this outlined function.
    llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);

    llvm::Value *WorkFnMatch =
        Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");

    llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
    llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
    Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);

    // Execute this outlined function.
    CGF.EmitBlock(ExecuteFNBB);

    // Insert call to work function via shared wrapper. The shared
    // wrapper takes two arguments:
    //   - the parallelism level;
    //   - the thread ID;
    emitCall(CGF, WST.Loc, W,
             {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});

    // Go to end of parallel region.
    CGF.EmitBranch(TerminateBB);

    CGF.EmitBlock(CheckNextBB);
  }
  // Default case: call to outlined function through pointer if the target
  // region makes a declare target call that may contain an orphaned parallel
  // region.
  auto *ParallelFnTy =
      llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
                              /*isVarArg=*/false)
          ->getPointerTo();
  llvm::Value *WorkFnCast = Bld.CreateBitCast(WorkID, ParallelFnTy);
  // Insert call to work function via shared wrapper. The shared
  // wrapper takes two arguments:
  //   - the parallelism level;
  //   - the thread ID;
  emitCall(CGF, WST.Loc, WorkFnCast,
           {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
  // Go to end of parallel region.
  CGF.EmitBranch(TerminateBB);

  // Signal end of parallel region.
  CGF.EmitBlock(TerminateBB);
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
      llvm::None);
  CGF.EmitBranch(BarrierBB);

  // All active and inactive workers wait at a barrier after parallel region.
  CGF.EmitBlock(BarrierBB);
  // Barrier after parallel region.
  syncCTAThreads(CGF);
  CGF.EmitBranch(AwaitBB);

  // Exit target region.
  CGF.EmitBlock(ExitBB);
}
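
// Timeline sketch of the master/worker handshake driven by the loop above
// (an illustration for a single parallel region; the master side is emitted
// elsewhere in this runtime):
//
//   master                           workers
//   ------                           -------
//   __kmpc_kernel_prepare_parallel   wait at bar.sync in .await.work
//   bar.sync (wake workers)          __kmpc_kernel_parallel -> work_fn/active
//                                    active: wrapper(level=0, tid)
//                                    __kmpc_kernel_end_parallel
//   bar.sync (wait for workers)      bar.sync, loop back to .await.work
//
// On kernel shutdown the master publishes a null work_fn, which makes
// 'should_terminate' true and sends every worker to .exit.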
/// Returns specified OpenMP runtime function for the current OpenMP
/// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function.
/// \return Specified function.
llvm::Constant *
CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
  llvm::Constant *RTLFn = nullptr;
  switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
  case OMPRTL_NVPTX__kmpc_kernel_init: {
    // Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
    // RequiresOMPRuntime);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_deinit: {
    // Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
    llvm::Type *TypeParams[] = {CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
    break;
  }
  case OMPRTL_NVPTX__kmpc_spmd_kernel_init: {
    // Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
    // int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
    break;
  }
  case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit: {
    // Build void __kmpc_spmd_kernel_deinit();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
    /// Build void __kmpc_kernel_prepare_parallel(
    /// void *outlined_function, int16_t IsOMPRuntimeInitialized);
    llvm::Type *TypeParams[] = {CGM.Int8PtrTy, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_parallel: {
    /// Build bool __kmpc_kernel_parallel(void **outlined_function,
    /// int16_t IsOMPRuntimeInitialized);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy, CGM.Int16Ty};
    llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
    auto *FnTy =
        llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_end_parallel: {
    /// Build void __kmpc_kernel_end_parallel();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_end_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_serialized_parallel: {
    // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_serialized_parallel: {
    // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_shuffle_int32: {
    // Build int32_t __kmpc_shuffle_int32(int32_t element,
    // int16_t lane_offset, int16_t warp_size);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int32");
    break;
  }
  case OMPRTL_NVPTX__kmpc_shuffle_int64: {
    // Build int64_t __kmpc_shuffle_int64(int64_t element,
    // int16_t lane_offset, int16_t warp_size);
    llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
    break;
  }
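  // How the shuffle helpers above are typically used (an illustrative sketch,
  // not code from this file): a warp-level tree reduction repeatedly halves
  // the lane offset, combining each lane's value with the value shuffled down
  // from 'lane_id + offset':
  //
  //   for (int16_t offset = warp_size / 2; offset > 0; offset /= 2)
  //     my_val += __kmpc_shuffle_int32(my_val, offset, warp_size);
  //
  // after which lane 0 of the warp holds the warp's partial reduction.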
  case OMPRTL_NVPTX__kmpc_parallel_reduce_nowait: {
    // Build int32_t kmpc_nvptx_parallel_reduce_nowait(kmp_int32 global_tid,
    // kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
    // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
    // lane_offset, int16_t shortCircuit),
    // void (*kmp_InterWarpCopyFctPtr)(void* src, int warp_num));
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {CGM.Int32Ty,
                                CGM.Int32Ty,
                                CGM.SizeTy,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait");
    break;
  }
  case OMPRTL_NVPTX__kmpc_simd_reduce_nowait: {
    // Build int32_t kmpc_nvptx_simd_reduce_nowait(kmp_int32 global_tid,
    // kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
    // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
    // lane_offset, int16_t shortCircuit),
    // void (*kmp_InterWarpCopyFctPtr)(void* src, int warp_num));
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {CGM.Int32Ty,
                                CGM.Int32Ty,
                                CGM.SizeTy,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_simd_reduce_nowait");
    break;
  }
  case OMPRTL_NVPTX__kmpc_teams_reduce_nowait: {
    // Build int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
    // int32_t num_vars, size_t reduce_size, void *reduce_data,
    // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
    // lane_offset, int16_t shortCircuit),
    // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num),
    // void (*kmp_CopyToScratchpadFctPtr)(void *reduce_data, void * scratchpad,
    // int32_t index, int32_t width),
    // void (*kmp_LoadReduceFctPtr)(void *reduce_data, void * scratchpad,
    // int32_t index, int32_t width, int32_t reduce))
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *CopyToScratchpadTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy,
                                                CGM.Int32Ty, CGM.Int32Ty};
    auto *CopyToScratchpadFnTy =
        llvm::FunctionType::get(CGM.VoidTy, CopyToScratchpadTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *LoadReduceTypeParams[] = {
        CGM.VoidPtrTy, CGM.VoidPtrTy, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty};
    auto *LoadReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, LoadReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {CGM.Int32Ty,
                                CGM.Int32Ty,
                                CGM.SizeTy,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo(),
                                CopyToScratchpadFnTy->getPointerTo(),
                                LoadReduceFnTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
    // Build __kmpc_end_reduce_nowait(kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
    /// Build void __kmpc_data_sharing_init_stack();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd: {
    /// Build void __kmpc_data_sharing_init_stack_spmd();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_push_stack: {
    // Build void *__kmpc_data_sharing_push_stack(size_t size,
    // int16_t UseSharedMemory);
    llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_data_sharing_push_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
    // Build void __kmpc_data_sharing_pop_stack(void *a);
    llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy,
                                      /*Name=*/"__kmpc_data_sharing_pop_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_begin_sharing_variables: {
    /// Build void __kmpc_begin_sharing_variables(void ***args,
    /// size_t n_args);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo(), CGM.SizeTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_begin_sharing_variables");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_sharing_variables: {
    /// Build void __kmpc_end_sharing_variables();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_sharing_variables");
    break;
  }
  case OMPRTL_NVPTX__kmpc_get_shared_variables: {
    /// Build void __kmpc_get_shared_variables(void ***GlobalArgs);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_shared_variables");
    break;
  }
  case OMPRTL_NVPTX__kmpc_parallel_level: {
    // Build uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int16Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_parallel_level");
    break;
  }
  case OMPRTL_NVPTX__kmpc_is_spmd_exec_mode: {
    // Build int8_t __kmpc_is_spmd_exec_mode();
    auto *FnTy = llvm::FunctionType::get(CGM.Int8Ty, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_is_spmd_exec_mode");
    break;
  }
  }
  return RTLFn;
}
void CGOpenMPRuntimeNVPTX::createOffloadEntry(llvm::Constant *ID,
                                              llvm::Constant *Addr,
                                              uint64_t Size, int32_t,
                                              llvm::GlobalValue::LinkageTypes) {
  // TODO: Add support for global variables on the device after declare target
  // support.
  if (!isa<llvm::Function>(Addr))
    return;
  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Get "nvvm.annotations" metadata node
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  // Append metadata to nvvm.annotations
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  if (!IsOffloadEntry) // Nothing to do.
    return;

  assert(!ParentName.empty() && "Invalid target region parent name!");

  bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
  if (Mode)
    emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                   CodeGen);
  else
    emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                      CodeGen);

  setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}
CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM, "_", "$") {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    llvm_unreachable("OpenMP NVPTX can only handle device code.");
}
void CGOpenMPRuntimeNVPTX::emitProcBindClause(CodeGenFunction &CGF,
                                              OpenMPProcBindClauseKind ProcBind,
                                              SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
    return;

  CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
}
void CGOpenMPRuntimeNVPTX::emitNumThreadsClause(CodeGenFunction &CGF,
                                                llvm::Value *NumThreads,
                                                SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
    return;

  CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
}
void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
                                              const Expr *NumTeams,
                                              const Expr *ThreadLimit,
                                              SourceLocation Loc) {}
llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    bool &IsInParallelRegion;
    bool PrevIsInParallelRegion;

  public:
    NVPTXPrePostActionTy(bool &IsInParallelRegion)
        : IsInParallelRegion(IsInParallelRegion) {}
    void Enter(CodeGenFunction &CGF) override {
      PrevIsInParallelRegion = IsInParallelRegion;
      IsInParallelRegion = true;
    }
    void Exit(CodeGenFunction &CGF) override {
      IsInParallelRegion = PrevIsInParallelRegion;
    }
  } Action(IsInParallelRegion);
  CodeGen.setAction(Action);
  bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
  IsInTargetMasterThreadRegion = false;
  auto *OutlinedFun =
      cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
          D, ThreadIDVar, InnermostKind, CodeGen));
  IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
  if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD &&
      !IsInParallelRegion) {
    llvm::Function *WrapperFun =
        createParallelDataSharingWrapper(OutlinedFun, D);
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
  }

  return OutlinedFun;
}
llvm::Value *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  SourceLocation Loc = D.getLocStart();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    SourceLocation &Loc;

  public:
    NVPTXPrePostActionTy(SourceLocation &Loc) : Loc(Loc) {}
    void Enter(CodeGenFunction &CGF) override {
      static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
          .emitGenericVarsProlog(CGF, Loc);
    }
    void Exit(CodeGenFunction &CGF) override {
      static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
          .emitGenericVarsEpilog(CGF);
    }
  } Action(Loc);
  CodeGen.setAction(Action);
  llvm::Value *OutlinedFunVal = CGOpenMPRuntime::emitTeamsOutlinedFunction(
      D, ThreadIDVar, InnermostKind, CodeGen);
  llvm::Function *OutlinedFun = cast<llvm::Function>(OutlinedFunVal);
  OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
  OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
  OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);

  return OutlinedFun;
}
1631 void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
1632 SourceLocation Loc) {
1633 if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
1636 CGBuilderTy &Bld = CGF.Builder;
1638 const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1639 if (I == FunctionGlobalizedDecls.end())
1641 if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
1642 QualType RecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
1644 // Recover pointer to this function's global record. The runtime will
1645 // handle the specifics of the allocation of the memory.
1646 // Use actual memory size of the record including the padding
1647 // for alignment purposes.
1648 unsigned Alignment =
1649 CGM.getContext().getTypeAlignInChars(RecTy).getQuantity();
1650 unsigned GlobalRecordSize =
1651 CGM.getContext().getTypeSizeInChars(RecTy).getQuantity();
1652 GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
1653 // TODO: allow the usage of shared memory to be controlled by
1654 // the user, for now, default to global.
1655 llvm::Value *GlobalRecordSizeArg[] = {
1656 llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
1657 CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
1658 llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
1659 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_push_stack),
1660 GlobalRecordSizeArg);
1661 llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1662 GlobalRecValue, CGF.ConvertTypeForMem(RecTy)->getPointerTo());
1663 LValue Base =
1664 CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, RecTy);
1665 I->getSecond().GlobalRecordAddr = GlobalRecValue;
1667 // Emit the "global alloca" which is a GEP from the global declaration
1668 // record using the pointer returned by the runtime.
1669 for (auto &Rec : I->getSecond().LocalVarData) {
1670 bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
1671 llvm::Value *ParValue;
1672 if (EscapedParam) {
1673 const auto *VD = cast<VarDecl>(Rec.first);
1674 LValue ParLVal =
1675 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
1676 ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
1677 }
1678 const FieldDecl *FD = Rec.second.first;
1679 LValue VarAddr = CGF.EmitLValueForField(Base, FD);
1680 Rec.second.second = VarAddr.getAddress();
1681 if (EscapedParam) {
1682 const auto *VD = cast<VarDecl>(Rec.first);
1683 CGF.EmitStoreOfScalar(ParValue, VarAddr);
1684 I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress());
1685 }
1686 }
1687 }
1688 for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
1689 // Recover pointer to this function's global record. The runtime will
1690 // handle the specifics of the allocation of the memory.
1691 // Use actual memory size of the record including the padding
1692 // for alignment purposes.
1693 CGBuilderTy &Bld = CGF.Builder;
1694 llvm::Value *Size = CGF.getTypeSize(VD->getType());
1695 CharUnits Align = CGM.getContext().getDeclAlign(VD);
1696 Size = Bld.CreateNUWAdd(
1697 Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
1698 llvm::Value *AlignVal =
1699 llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
1700 Size = Bld.CreateUDiv(Size, AlignVal);
1701 Size = Bld.CreateNUWMul(Size, AlignVal);
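// (In other words, the three statements above round Size up to the next
// multiple of the declared alignment:
// Size = ((Size + Align - 1) / Align) * Align.)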
1702 // TODO: allow the usage of shared memory to be controlled by
1703 // the user, for now, default to global.
1704 llvm::Value *GlobalRecordSizeArg[] = {
1705 Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
1706 llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
1707 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_push_stack),
1708 GlobalRecordSizeArg);
1709 llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1710 GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
1711 LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
1712 CGM.getContext().getDeclAlign(VD),
1713 AlignmentSource::Decl);
1714 I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
1715 Base.getAddress());
1716 I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
1717 }
1718 I->getSecond().MappedParams->apply(CGF);
1719 }
1721 void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF) {
1722 if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
1723 return;
1725 const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1726 if (I != FunctionGlobalizedDecls.end()) {
1727 I->getSecond().MappedParams->restore(CGF);
1728 if (!CGF.HaveInsertPoint())
1729 return;
1730 for (llvm::Value *Addr :
1731 llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
1732 CGF.EmitRuntimeCall(
1733 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
1734 Addr);
1735 }
1736 if (I->getSecond().GlobalRecordAddr) {
1737 CGF.EmitRuntimeCall(
1738 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
1739 I->getSecond().GlobalRecordAddr);
1740 }
1741 }
1742 }
1744 void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
1745 const OMPExecutableDirective &D,
1746 SourceLocation Loc,
1747 llvm::Value *OutlinedFn,
1748 ArrayRef<llvm::Value *> CapturedVars) {
1749 if (!CGF.HaveInsertPoint())
1750 return;
1752 Address ZeroAddr = CGF.CreateMemTemp(
1753 CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
1754 /*Name*/ ".zero.addr");
1755 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
1756 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
1757 OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
1758 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
1759 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
1760 emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
1761 }
1763 void CGOpenMPRuntimeNVPTX::emitParallelCall(
1764 CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
1765 ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
1766 if (!CGF.HaveInsertPoint())
1767 return;
1769 if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
1770 emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
1771 else
1772 emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
1773 }
1775 void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
1776 CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
1777 ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
1778 llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
1780 // Force inline this outlined function at its call site.
1781 Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
1783 Address ZeroAddr = CGF.CreateMemTemp(CGF.getContext().getIntTypeForBitwidth(
1784 /*DestWidth=*/32, /*Signed=*/1),
1785 ".zero.addr");
1786 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
1787 // ThreadId for serialized parallels is 0.
1788 Address ThreadIDAddr = ZeroAddr;
1789 auto &&CodeGen = [this, Fn, CapturedVars, Loc, ZeroAddr, &ThreadIDAddr](
1790 CodeGenFunction &CGF, PrePostActionTy &Action) {
1791 Action.Enter(CGF);
1793 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
1794 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
1795 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
1796 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
1797 emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
1798 };
1799 auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
1800 PrePostActionTy &) {
1802 RegionCodeGenTy RCG(CodeGen);
1803 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
1804 llvm::Value *ThreadID = getThreadID(CGF, Loc);
1805 llvm::Value *Args[] = {RTLoc, ThreadID};
1807 NVPTXActionTy Action(
1808 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
1809 Args,
1810 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
1811 Args);
1812 RCG.setAction(Action);
1813 RCG(CGF);
1814 };
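// An illustrative sketch (added for clarity; names are schematic and not
// part of the runtime interface) of the code SeqGen emits for a serialized
// parallel region:
//
//   __kmpc_serialized_parallel(&loc, gtid);
//   outlined_fn(&zero_tid, &zero_bound_tid, captured_vars...);
//   __kmpc_end_serialized_parallel(&loc, gtid);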
1816 auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
1817 PrePostActionTy &Action) {
1818 CGBuilderTy &Bld = CGF.Builder;
1819 llvm::Function *WFn = WrapperFunctionsMap[Fn];
1820 assert(WFn && "Wrapper function does not exist!");
1821 llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
1823 // Prepare for parallel region. Indicate the outlined function.
1824 llvm::Value *Args[] = {ID, /*RequiresOMPRuntime=*/Bld.getInt16(1)};
1825 CGF.EmitRuntimeCall(
1826 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
1827 Args);
1829 // Create a private scope that will globalize the arguments
1830 // passed from the outside of the target region.
1831 CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
1833 // There's something to share.
1834 if (!CapturedVars.empty()) {
1835 // Prepare for parallel region. Indicate the outlined function.
1836 Address SharedArgs =
1837 CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
1838 llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
1840 llvm::Value *DataSharingArgs[] = {
1841 SharedArgsPtr,
1842 llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
1843 CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
1844 OMPRTL_NVPTX__kmpc_begin_sharing_variables),
1845 DataSharingArgs);
1847 // Store variable address in a list of references to pass to workers.
1848 unsigned Idx = 0;
1849 ASTContext &Ctx = CGF.getContext();
1850 Address SharedArgListAddress = CGF.EmitLoadOfPointer(
1851 SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
1852 .castAs<PointerType>());
1853 for (llvm::Value *V : CapturedVars) {
1854 Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx,
1855 CGF.getPointerSize());
1856 llvm::Value *PtrV;
1857 if (V->getType()->isIntegerTy())
1858 PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
1859 else
1860 PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
1861 CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
1862 Ctx.getPointerType(Ctx.VoidPtrTy));
1863 ++Idx;
1864 }
1865 }
1867 // Activate workers. This barrier is used by the master to signal
1868 // work for the workers.
1869 syncCTAThreads(CGF);
1871 // OpenMP [2.5, Parallel Construct, p.49]
1872 // There is an implied barrier at the end of a parallel region. After the
1873 // end of a parallel region, only the master thread of the team resumes
1874 // execution of the enclosing task region.
1876 // The master waits at this barrier until all workers are done.
1877 syncCTAThreads(CGF);
1879 if (!CapturedVars.empty())
1880 CGF.EmitRuntimeCall(
1881 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_sharing_variables));
1883 // Remember for post-processing in worker loop.
1884 Work.emplace_back(WFn);
1885 };
1887 auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen, &CodeGen,
1888 &ThreadIDAddr](CodeGenFunction &CGF,
1889 PrePostActionTy &Action) {
1890 RegionCodeGenTy RCG(CodeGen);
1891 if (IsInParallelRegion) {
1892 SeqGen(CGF, Action);
1893 } else if (IsInTargetMasterThreadRegion) {
1894 L0ParallelGen(CGF, Action);
1895 } else if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_NonSPMD) {
1896 RCG(CGF);
1897 } else {
1898 // Check for master and then parallelism:
1899 // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
1900 // Serialized execution.
1901 // } else if (master) {
1902 // Worker call.
1903 // } else {
1904 // Outlined function call.
1905 // }
1906 CGBuilderTy &Bld = CGF.Builder;
1907 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
1908 llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
1909 llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
1910 llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
1911 llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
1912 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
1913 Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
1914 // There is no need to emit line number for unconditional branch.
1915 (void)ApplyDebugLocation::CreateEmpty(CGF);
1916 CGF.EmitBlock(ParallelCheckBB);
1917 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
1918 llvm::Value *ThreadID = getThreadID(CGF, Loc);
1919 llvm::Value *PL = CGF.EmitRuntimeCall(
1920 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
1921 {RTLoc, ThreadID});
1922 llvm::Value *Res = Bld.CreateIsNotNull(PL);
1923 Bld.CreateCondBr(Res, SeqBB, MasterCheckBB);
1924 CGF.EmitBlock(SeqBB);
1925 SeqGen(CGF, Action);
1926 CGF.EmitBranch(ExitBB);
1927 // There is no need to emit line number for unconditional branch.
1928 (void)ApplyDebugLocation::CreateEmpty(CGF);
1929 CGF.EmitBlock(MasterCheckBB);
1930 llvm::BasicBlock *MasterThenBB = CGF.createBasicBlock("master.then");
1931 llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
1932 llvm::Value *IsMaster =
1933 Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
1934 Bld.CreateCondBr(IsMaster, MasterThenBB, ElseBlock);
1935 CGF.EmitBlock(MasterThenBB);
1936 L0ParallelGen(CGF, Action);
1937 CGF.EmitBranch(ExitBB);
1938 // There is no need to emit line number for unconditional branch.
1939 (void)ApplyDebugLocation::CreateEmpty(CGF);
1940 CGF.EmitBlock(ElseBlock);
1941 // In the worker, we need to use the real thread id.
1942 ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
1943 RCG(CGF);
1944 // There is no need to emit line number for unconditional branch.
1945 (void)ApplyDebugLocation::CreateEmpty(CGF);
1946 // Emit the continuation block for code after the if.
1947 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1948 }
1949 };
1951 if (IfCond) {
1952 emitOMPIfClause(CGF, IfCond, LNParallelGen, SeqGen);
1953 } else {
1954 CodeGenFunction::RunCleanupsScope Scope(CGF);
1955 RegionCodeGenTy ThenRCG(LNParallelGen);
1956 ThenRCG(CGF);
1957 }
1958 }
1960 void CGOpenMPRuntimeNVPTX::emitSPMDParallelCall(
1961 CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
1962 ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
1963 // Just call the outlined function to execute the parallel region.
1964 // OutlinedFn(&GTid, &zero, CapturedStruct);
1966 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
1968 Address ZeroAddr = CGF.CreateMemTemp(CGF.getContext().getIntTypeForBitwidth(
1969 /*DestWidth=*/32, /*Signed=*/1),
1970 ".zero.addr");
1971 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
1972 // ThreadId for serialized parallels is 0.
1973 Address ThreadIDAddr = ZeroAddr;
1974 auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, ZeroAddr,
1975 &ThreadIDAddr](CodeGenFunction &CGF,
1976 PrePostActionTy &Action) {
1977 Action.Enter(CGF);
1979 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
1980 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
1981 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
1982 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
1983 emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
1984 };
1985 auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
1986 PrePostActionTy &) {
1988 RegionCodeGenTy RCG(CodeGen);
1989 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
1990 llvm::Value *ThreadID = getThreadID(CGF, Loc);
1991 llvm::Value *Args[] = {RTLoc, ThreadID};
1993 NVPTXActionTy Action(
1994 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
1995 Args,
1996 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
1997 Args);
1998 RCG.setAction(Action);
1999 RCG(CGF);
2000 };
2002 if (IsInTargetMasterThreadRegion) {
2003 // In the worker, we need to use the real thread id.
2004 ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
2005 RegionCodeGenTy RCG(CodeGen);
2006 RCG(CGF);
2007 } else {
2008 // If we are not in the target region, it is definitely L2 parallelism or
2009 // more, because in SPMD mode we always have the L1 parallel level, so we
2010 // don't need to check for orphaned directives.
2011 RegionCodeGenTy RCG(SeqGen);
2012 RCG(CGF);
2013 }
2014 }
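// An illustrative sketch (added for clarity, in pseudocode) of the
// turn-taking loop that emitCriticalRegion below generates; each thread of
// the team executes the critical body exactly once, one thread at a time:
//
//   for (counter = 0; counter < team_width; ++counter) {
//     if (thread_id == counter)
//       <critical body>;
//     __syncthreads(); // CTA-wide barrier
//   }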
2016 void CGOpenMPRuntimeNVPTX::emitCriticalRegion(
2017 CodeGenFunction &CGF, StringRef CriticalName,
2018 const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
2019 const Expr *Hint) {
2020 llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
2021 llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
2022 llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
2023 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
2024 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
2026 // Fetch team-local id of the thread.
2027 llvm::Value *ThreadID = getNVPTXThreadID(CGF);
2029 // Get the width of the team.
2030 llvm::Value *TeamWidth = getNVPTXNumThreads(CGF);
2032 // Initialize the counter variable for the loop.
2033 QualType Int32Ty =
2034 CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
2035 Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
2036 LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
2037 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
2038 /*isInit=*/true);
2040 // Block checks if loop counter exceeds upper bound.
2041 CGF.EmitBlock(LoopBB);
2042 llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2043 llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
2044 CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
2046 // Block tests which single thread should execute region, and which threads
2047 // should go straight to synchronisation point.
2048 CGF.EmitBlock(TestBB);
2049 CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2050 llvm::Value *CmpThreadToCounter =
2051 CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
2052 CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
2054 // Block emits the body of the critical region.
2055 CGF.EmitBlock(BodyBB);
2057 // Output the critical statement.
2058 CriticalOpGen(CGF);
2060 // After the body surrounded by the critical region, the single executing
2061 // thread will jump to the synchronisation point.
2062 // Block waits for all threads in current team to finish then increments the
2063 // counter variable and returns to the loop.
2064 CGF.EmitBlock(SyncBB);
2065 getNVPTXCTABarrier(CGF);
2067 llvm::Value *IncCounterVal =
2068 CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
2069 CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
2070 CGF.EmitBranch(LoopBB);
2072 // Block that is reached when all threads in the team complete the region.
2073 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2074 }
2076 /// Cast value to the specified type.
2077 static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
2078 QualType ValTy, QualType CastTy,
2079 SourceLocation Loc) {
2080 assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
2081 "Cast type must be sized.");
2082 assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
2083 "Val type must be sized.");
2084 llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
2085 if (ValTy == CastTy)
2086 return Val;
2087 if (CGF.getContext().getTypeSizeInChars(ValTy) ==
2088 CGF.getContext().getTypeSizeInChars(CastTy))
2089 return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
2090 if (CastTy->isIntegerType() && ValTy->isIntegerType())
2091 return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
2092 CastTy->hasSignedIntegerRepresentation());
2093 Address CastItem = CGF.CreateMemTemp(CastTy);
2094 Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2095 CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
2096 CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy);
2097 return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc);
2098 }
2100 /// This function creates calls to one of two shuffle functions to copy
2101 /// variables between lanes in a warp.
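/// For example (illustrative, assuming a 2-byte element): the value is
/// sign-extended to a 32-bit integer by castValueToType, moved across lanes
/// with __kmpc_shuffle_int32(elem, lane_offset, warp_size), and truncated
/// back to its original 16-bit type on return.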
2102 static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
2103 llvm::Value *Elem,
2104 QualType ElemType,
2105 llvm::Value *Offset,
2106 SourceLocation Loc) {
2107 CodeGenModule &CGM = CGF.CGM;
2108 CGBuilderTy &Bld = CGF.Builder;
2109 CGOpenMPRuntimeNVPTX &RT =
2110 *(static_cast<CGOpenMPRuntimeNVPTX *>(&CGM.getOpenMPRuntime()));
2112 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2113 assert(Size.getQuantity() <= 8 &&
2114 "Unsupported bitwidth in shuffle instruction.");
2116 OpenMPRTLFunctionNVPTX ShuffleFn = Size.getQuantity() <= 4
2117 ? OMPRTL_NVPTX__kmpc_shuffle_int32
2118 : OMPRTL_NVPTX__kmpc_shuffle_int64;
2120 // Cast all types to 32- or 64-bit values before calling shuffle routines.
2121 QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
2122 Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
2123 llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
2124 llvm::Value *WarpSize =
2125 Bld.CreateIntCast(getNVPTXWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
2127 llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
2128 RT.createNVPTXRuntimeFunction(ShuffleFn), {ElemCast, Offset, WarpSize});
2130 return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
2131 }
2133 static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
2134 Address DestAddr, QualType ElemType,
2135 llvm::Value *Offset, SourceLocation Loc) {
2136 CGBuilderTy &Bld = CGF.Builder;
2138 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2139 // Create the loop over the big sized data.
2140 // ptr = (void*)Elem;
2141 // ptrEnd = (void*) Elem + 1;
2142 // Step = 8;
2143 // while (ptr + Step < ptrEnd)
2144 // shuffle((int64_t)*ptr);
2145 // Step = 4;
2146 // while (ptr + Step < ptrEnd)
2147 // shuffle((int32_t)*ptr);
2148 // ...
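// (Worked example, added for illustration: a 7-byte element is transferred
// with one 4-byte shuffle, then one 2-byte shuffle, then one 1-byte
// shuffle, Size shrinking from 7 to 3 to 1 to 0 across the iterations of
// the loop below.)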
2149 Address ElemPtr = DestAddr;
2150 Address Ptr = SrcAddr;
2151 Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
2152 Bld.CreateConstGEP(SrcAddr, 1, Size), CGF.VoidPtrTy);
2153 for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
2154 if (Size < CharUnits::fromQuantity(IntSize))
2155 continue;
2156 QualType IntType = CGF.getContext().getIntTypeForBitwidth(
2157 CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
2158 /*Signed=*/1);
2159 llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
2160 Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
2161 ElemPtr =
2162 Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
2163 if (Size.getQuantity() / IntSize > 1) {
2164 llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
2165 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
2166 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
2167 llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
2168 CGF.EmitBlock(PreCondBB);
2169 llvm::PHINode *PhiSrc =
2170 Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
2171 PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
2172 llvm::PHINode *PhiDest =
2173 Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
2174 PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
2175 Ptr = Address(PhiSrc, Ptr.getAlignment());
2176 ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
2177 llvm::Value *PtrDiff = Bld.CreatePtrDiff(
2178 PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
2179 Ptr.getPointer(), CGF.VoidPtrTy));
2180 Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
2181 ThenBB, ExitBB);
2182 CGF.EmitBlock(ThenBB);
2183 llvm::Value *Res = createRuntimeShuffleFunction(
2184 CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
2185 IntType, Offset, Loc);
2186 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
2187 Ptr = Bld.CreateConstGEP(Ptr, 1, CharUnits::fromQuantity(IntSize));
2188 ElemPtr =
2189 Bld.CreateConstGEP(ElemPtr, 1, CharUnits::fromQuantity(IntSize));
2190 PhiSrc->addIncoming(Ptr.getPointer(), ThenBB);
2191 PhiDest->addIncoming(ElemPtr.getPointer(), ThenBB);
2192 CGF.EmitBranch(PreCondBB);
2193 CGF.EmitBlock(ExitBB);
2194 } else {
2195 llvm::Value *Res = createRuntimeShuffleFunction(
2196 CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
2197 IntType, Offset, Loc);
2198 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
2199 Ptr = Bld.CreateConstGEP(Ptr, 1, CharUnits::fromQuantity(IntSize));
2200 ElemPtr =
2201 Bld.CreateConstGEP(ElemPtr, 1, CharUnits::fromQuantity(IntSize));
2202 }
2203 Size = Size % IntSize;
2204 }
2205 }
2207 namespace {
2208 enum CopyAction : unsigned {
2209 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
2210 // the warp using shuffle instructions.
2211 RemoteLaneToThread,
2212 // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
2213 ThreadCopy,
2214 // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
2215 ThreadToScratchpad,
2216 // ScratchpadToThread: Copy from a scratchpad array in global memory
2217 // containing team-reduced data to a thread's stack.
2218 ScratchpadToThread,
2219 };
2220 } // namespace
2222 struct CopyOptionsTy {
2223 llvm::Value *RemoteLaneOffset;
2224 llvm::Value *ScratchpadIndex;
2225 llvm::Value *ScratchpadWidth;
2226 };
2228 /// Emit instructions to copy a Reduce list, which contains partially
2229 /// aggregated values, in the specified direction.
2230 static void emitReductionListCopy(
2231 CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
2232 ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
2233 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
2235 CodeGenModule &CGM = CGF.CGM;
2236 ASTContext &C = CGM.getContext();
2237 CGBuilderTy &Bld = CGF.Builder;
2239 llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
2240 llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
2241 llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
2243 // Iterates, element-by-element, through the source Reduce list and
2244 // makes a copy.
2245 unsigned Idx = 0;
2246 unsigned Size = Privates.size();
2247 for (const Expr *Private : Privates) {
2248 Address SrcElementAddr = Address::invalid();
2249 Address DestElementAddr = Address::invalid();
2250 Address DestElementPtrAddr = Address::invalid();
2251 // Should we shuffle in an element from a remote lane?
2252 bool ShuffleInElement = false;
2253 // Set to true to update the pointer in the dest Reduce list to a
2254 // newly created element.
2255 bool UpdateDestListPtr = false;
2256 // Increment the src or dest pointer to the scratchpad, for each
2257 // new element.
2258 bool IncrScratchpadSrc = false;
2259 bool IncrScratchpadDest = false;
2261 switch (Action) {
2262 case RemoteLaneToThread: {
2263 // Step 1.1: Get the address for the src element in the Reduce list.
2264 Address SrcElementPtrAddr =
2265 Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
2266 SrcElementAddr = CGF.EmitLoadOfPointer(
2267 SrcElementPtrAddr,
2268 C.getPointerType(Private->getType())->castAs<PointerType>());
2270 // Step 1.2: Create a temporary to store the element in the destination
2271 // Reduce list.
2272 DestElementPtrAddr =
2273 Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
2274 DestElementAddr =
2275 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
2276 ShuffleInElement = true;
2277 UpdateDestListPtr = true;
2278 break;
2279 }
2280 case ThreadCopy: {
2281 // Step 1.1: Get the address for the src element in the Reduce list.
2282 Address SrcElementPtrAddr =
2283 Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
2284 SrcElementAddr = CGF.EmitLoadOfPointer(
2285 SrcElementPtrAddr,
2286 C.getPointerType(Private->getType())->castAs<PointerType>());
2288 // Step 1.2: Get the address for dest element. The destination
2289 // element has already been created on the thread's stack.
2290 DestElementPtrAddr =
2291 Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
2292 DestElementAddr = CGF.EmitLoadOfPointer(
2293 DestElementPtrAddr,
2294 C.getPointerType(Private->getType())->castAs<PointerType>());
2295 break;
2296 }
2297 case ThreadToScratchpad: {
2298 // Step 1.1: Get the address for the src element in the Reduce list.
2299 Address SrcElementPtrAddr =
2300 Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
2301 SrcElementAddr = CGF.EmitLoadOfPointer(
2302 SrcElementPtrAddr,
2303 C.getPointerType(Private->getType())->castAs<PointerType>());
2305 // Step 1.2: Get the address for dest element:
2306 // address = base + index * ElementSizeInChars.
2307 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2308 llvm::Value *CurrentOffset =
2309 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
2310 llvm::Value *ScratchPadElemAbsolutePtrVal =
2311 Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
2312 ScratchPadElemAbsolutePtrVal =
2313 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
2314 DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
2315 C.getTypeAlignInChars(Private->getType()));
2316 IncrScratchpadDest = true;
2317 break;
2318 }
2319 case ScratchpadToThread: {
2320 // Step 1.1: Get the address for the src element in the scratchpad.
2321 // address = base + index * ElementSizeInChars.
2322 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2323 llvm::Value *CurrentOffset =
2324 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
2325 llvm::Value *ScratchPadElemAbsolutePtrVal =
2326 Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
2327 ScratchPadElemAbsolutePtrVal =
2328 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
2329 SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
2330 C.getTypeAlignInChars(Private->getType()));
2331 IncrScratchpadSrc = true;
2333 // Step 1.2: Create a temporary to store the element in the destination
2334 // Reduce list.
2335 DestElementPtrAddr =
2336 Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
2337 DestElementAddr =
2338 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
2339 UpdateDestListPtr = true;
2340 break;
2341 }
2342 }
2344 // Regardless of src and dest of copy, we emit the load of src
2345 // element, as this is required in all directions.
2346 SrcElementAddr = Bld.CreateElementBitCast(
2347 SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
2348 DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
2349 SrcElementAddr.getElementType());
2351 // Now that all active lanes have read the element in the
2352 // Reduce list, shuffle over the value from the remote lane.
2353 if (ShuffleInElement) {
2354 shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
2355 RemoteLaneOffset, Private->getExprLoc());
2356 } else {
2357 if (Private->getType()->isScalarType()) {
2358 llvm::Value *Elem =
2359 CGF.EmitLoadOfScalar(SrcElementAddr, /*Volatile=*/false,
2360 Private->getType(), Private->getExprLoc());
2361 // Store the source element value to the dest element address.
2362 CGF.EmitStoreOfScalar(Elem, DestElementAddr, /*Volatile=*/false,
2363 Private->getType());
2364 } else {
2365 CGF.EmitAggregateCopy(
2366 CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
2367 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
2368 Private->getType(), AggValueSlot::DoesNotOverlap);
2369 }
2370 }
2372 // Step 3.1: Modify reference in dest Reduce list as needed.
2373 // Modifying the reference in Reduce list to point to the newly
2374 // created element. The element is live in the current function
2375 // scope and that of functions it invokes (i.e., reduce_function).
2376 // RemoteReduceData[i] = (void*)&RemoteElem
2377 if (UpdateDestListPtr) {
2378 CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
2379 DestElementAddr.getPointer(), CGF.VoidPtrTy),
2380 DestElementPtrAddr, /*Volatile=*/false,
2381 C.VoidPtrTy);
2382 }
2384 // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
2385 // address of the next element in scratchpad memory, unless we're currently
2386 // processing the last one. Memory alignment is also taken care of here.
2387 if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
2388 llvm::Value *ScratchpadBasePtr =
2389 IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
2390 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2391 ScratchpadBasePtr = Bld.CreateNUWAdd(
2392 ScratchpadBasePtr,
2393 Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
2395 // Take care of global memory alignment for performance
2396 ScratchpadBasePtr = Bld.CreateNUWSub(
2397 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2398 ScratchpadBasePtr = Bld.CreateUDiv(
2399 ScratchpadBasePtr,
2400 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2401 ScratchpadBasePtr = Bld.CreateNUWAdd(
2402 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2403 ScratchpadBasePtr = Bld.CreateNUWMul(
2404 ScratchpadBasePtr,
2405 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
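// (That is, the four statements above round the scratchpad base pointer up
// to the next multiple of GlobalMemoryAlignment:
// base = ((base - 1) / Align + 1) * Align.)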
2407 if (IncrScratchpadDest)
2408 DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2409 else /* IncrScratchpadSrc = true */
2410 SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2411 }
2413 ++Idx;
2414 }
2415 }
2417 /// This function emits a helper that loads data from the scratchpad array
2418 /// and (optionally) reduces it with the input operand.
2420 /// load_and_reduce(local, scratchpad, index, width, should_reduce)
2421 /// reduce_data remote;
2422 /// for elem in remote:
2423 /// remote.elem = Scratchpad[elem_id][index]
2424 /// if (should_reduce)
2425 /// local = local @ remote
2426 /// else
2427 /// local = remote
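/// (Layout assumed by this helper, stated here for illustration: for each
/// reduce element the scratchpad holds one row of 'width' slots, so team
/// 'index' finds its copy of an element at row_base + index * elem_size.)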
2428 static llvm::Value *emitReduceScratchpadFunction(
2429 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2430 QualType ReductionArrayTy, llvm::Value *ReduceFn, SourceLocation Loc) {
2431 ASTContext &C = CGM.getContext();
2432 QualType Int32Ty = C.getIntTypeForBitwidth(32, /*Signed=*/1);
2434 // Destination of the copy.
2435 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2436 C.VoidPtrTy, ImplicitParamDecl::Other);
2437 // Base address of the scratchpad array, with each element storing a
2438 // Reduce list per team.
2439 ImplicitParamDecl ScratchPadArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2440 C.VoidPtrTy, ImplicitParamDecl::Other);
2441 // A source index into the scratchpad array.
2442 ImplicitParamDecl IndexArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
2443 ImplicitParamDecl::Other);
2444 // Row width of an element in the scratchpad array, typically
2445 // the number of teams.
2446 ImplicitParamDecl WidthArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
2447 ImplicitParamDecl::Other);
2448 // If should_reduce == 1, then it's load AND reduce,
2449 // If should_reduce == 0 (or otherwise), then it only loads (+ copy).
2450 // The latter case is used for initialization.
2451 ImplicitParamDecl ShouldReduceArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2452 Int32Ty, ImplicitParamDecl::Other);
2454 FunctionArgList Args;
2455 Args.push_back(&ReduceListArg);
2456 Args.push_back(&ScratchPadArg);
2457 Args.push_back(&IndexArg);
2458 Args.push_back(&WidthArg);
2459 Args.push_back(&ShouldReduceArg);
2461 const CGFunctionInfo &CGFI =
2462 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2463 auto *Fn = llvm::Function::Create(
2464 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2465 "_omp_reduction_load_and_reduce", &CGM.getModule());
2466 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2467 Fn->setDoesNotRecurse();
2468 CodeGenFunction CGF(CGM);
2469 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2471 CGBuilderTy &Bld = CGF.Builder;
2473 // Get local Reduce list pointer.
2474 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2475 Address ReduceListAddr(
2476 Bld.CreatePointerBitCastOrAddrSpaceCast(
2477 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2478 C.VoidPtrTy, Loc),
2479 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2480 CGF.getPointerAlign());
2482 Address AddrScratchPadArg = CGF.GetAddrOfLocalVar(&ScratchPadArg);
2483 llvm::Value *ScratchPadBase = CGF.EmitLoadOfScalar(
2484 AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2486 Address AddrIndexArg = CGF.GetAddrOfLocalVar(&IndexArg);
2487 llvm::Value *IndexVal = Bld.CreateIntCast(
2488 CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false, Int32Ty, Loc),
2489 CGM.SizeTy, /*isSigned=*/true);
2491 Address AddrWidthArg = CGF.GetAddrOfLocalVar(&WidthArg);
2492 llvm::Value *WidthVal = Bld.CreateIntCast(
2493 CGF.EmitLoadOfScalar(AddrWidthArg, /*Volatile=*/false, Int32Ty, Loc),
2494 CGM.SizeTy, /*isSigned=*/true);
2496 Address AddrShouldReduceArg = CGF.GetAddrOfLocalVar(&ShouldReduceArg);
2497 llvm::Value *ShouldReduceVal = CGF.EmitLoadOfScalar(
2498 AddrShouldReduceArg, /*Volatile=*/false, Int32Ty, Loc);
2500 // The absolute ptr address to the base addr of the next element to copy.
2501 llvm::Value *CumulativeElemBasePtr =
2502 Bld.CreatePtrToInt(ScratchPadBase, CGM.SizeTy);
2503 Address SrcDataAddr(CumulativeElemBasePtr, CGF.getPointerAlign());
2505 // Create a Remote Reduce list to store the elements read from the
2506 // scratchpad array.
2507 Address RemoteReduceList =
2508 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_red_list");
2510 // Assemble remote Reduce list from scratchpad array.
2511 emitReductionListCopy(ScratchpadToThread, CGF, ReductionArrayTy, Privates,
2512 SrcDataAddr, RemoteReduceList,
2513 {/*RemoteLaneOffset=*/nullptr,
2514 /*ScratchpadIndex=*/IndexVal,
2515 /*ScratchpadWidth=*/WidthVal});
2517 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2518 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2519 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2521 llvm::Value *CondReduce = Bld.CreateIsNotNull(ShouldReduceVal);
2522 Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
2524 CGF.EmitBlock(ThenBB);
2525 // We should reduce with the local Reduce list.
2526 // reduce_function(LocalReduceList, RemoteReduceList)
2527 llvm::Value *LocalDataPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2528 ReduceListAddr.getPointer(), CGF.VoidPtrTy);
2529 llvm::Value *RemoteDataPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2530 RemoteReduceList.getPointer(), CGF.VoidPtrTy);
2531 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2532 CGF, Loc, ReduceFn, {LocalDataPtr, RemoteDataPtr});
2533 Bld.CreateBr(MergeBB);
2535 CGF.EmitBlock(ElseBB);
2536 // No reduction; just copy:
2537 // Local Reduce list = Remote Reduce list.
2538 emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
2539 RemoteReduceList, ReduceListAddr);
2540 Bld.CreateBr(MergeBB);
2542 CGF.EmitBlock(MergeBB);
2544 CGF.FinishFunction();
2545 return Fn;
2546 }
2548 /// This function emits a helper that stores reduced data from the team
2549 /// master to a scratchpad array in global memory.
2551 /// for elem in Reduce List:
2552 /// scratchpad[elem_id][index] = elem
2554 static llvm::Value *emitCopyToScratchpad(CodeGenModule &CGM,
2555 ArrayRef<const Expr *> Privates,
2556 QualType ReductionArrayTy,
2557 SourceLocation Loc) {
2559 ASTContext &C = CGM.getContext();
2560 QualType Int32Ty = C.getIntTypeForBitwidth(32, /*Signed=*/1);
2562 // Source of the copy.
2563 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2564 C.VoidPtrTy, ImplicitParamDecl::Other);
2565 // Base address of the scratchpad array, with each element storing a
2566 // Reduce list per team.
2567 ImplicitParamDecl ScratchPadArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2568 C.VoidPtrTy, ImplicitParamDecl::Other);
2569 // A destination index into the scratchpad array, typically the team
2570 // identifier.
2571 ImplicitParamDecl IndexArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
2572 ImplicitParamDecl::Other);
2573 // Row width of an element in the scratchpad array, typically
2574 // the number of teams.
2575 ImplicitParamDecl WidthArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
2576 ImplicitParamDecl::Other);
2578 FunctionArgList Args;
2579 Args.push_back(&ReduceListArg);
2580 Args.push_back(&ScratchPadArg);
2581 Args.push_back(&IndexArg);
2582 Args.push_back(&WidthArg);
2584 const CGFunctionInfo &CGFI =
2585 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2586 auto *Fn = llvm::Function::Create(
2587 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2588 "_omp_reduction_copy_to_scratchpad", &CGM.getModule());
2589 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2590 Fn->setDoesNotRecurse();
2591 CodeGenFunction CGF(CGM);
2592 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2594 CGBuilderTy &Bld = CGF.Builder;
2596 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2597 Address SrcDataAddr(
2598 Bld.CreatePointerBitCastOrAddrSpaceCast(
2599 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2600 C.VoidPtrTy, Loc),
2601 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2602 CGF.getPointerAlign());
2604 Address AddrScratchPadArg = CGF.GetAddrOfLocalVar(&ScratchPadArg);
2605 llvm::Value *ScratchPadBase = CGF.EmitLoadOfScalar(
2606 AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2608 Address AddrIndexArg = CGF.GetAddrOfLocalVar(&IndexArg);
2609 llvm::Value *IndexVal = Bld.CreateIntCast(
2610 CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false, Int32Ty, Loc),
2611 CGF.SizeTy, /*isSigned=*/true);
2613 Address AddrWidthArg = CGF.GetAddrOfLocalVar(&WidthArg);
2614 llvm::Value *WidthVal =
2615 Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrWidthArg, /*Volatile=*/false,
2616 Int32Ty, SourceLocation()),
2617 CGF.SizeTy, /*isSigned=*/true);
2619 // The absolute ptr address to the base addr of the next element to copy.
2620 llvm::Value *CumulativeElemBasePtr =
2621 Bld.CreatePtrToInt(ScratchPadBase, CGM.SizeTy);
2622 Address DestDataAddr(CumulativeElemBasePtr, CGF.getPointerAlign());
2624 emitReductionListCopy(ThreadToScratchpad, CGF, ReductionArrayTy, Privates,
2625 SrcDataAddr, DestDataAddr,
2626 {/*RemoteLaneOffset=*/nullptr,
2627 /*ScratchpadIndex=*/IndexVal,
2628 /*ScratchpadWidth=*/WidthVal});
2630 CGF.FinishFunction();
2631 return Fn;
2632 }
2634 /// This function emits a helper that gathers Reduce lists from the first
2635 /// lane of every active warp to lanes in the first warp.
2637 /// void inter_warp_copy_func(void* reduce_data, num_warps)
2638 /// shared smem[warp_size];
2639 /// For all data entries D in reduce_data:
2640 /// If (I am the first lane in each warp)
2641 /// Copy my local D to smem[warp_id]
2642 /// Barrier
2643 /// if (I am the first warp)
2644 /// Copy smem[thread_id] to my local D
2645 /// Barrier
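/// For example (illustrative): with num_warps == 2, lane 0 of warp 0 and
/// lane 0 of warp 1 write their copies of D to smem[0] and smem[1]; after
/// the barrier, threads 0 and 1 of warp 0 read smem[0] and smem[1] back
/// into their local copies of D.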
2646 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
2647 ArrayRef<const Expr *> Privates,
2648 QualType ReductionArrayTy,
2649 SourceLocation Loc) {
2650 ASTContext &C = CGM.getContext();
2651 llvm::Module &M = CGM.getModule();
2653 // ReduceList: thread local Reduce list.
2654 // At the stage of the computation when this function is called, partially
2655 // aggregated values reside in the first lane of every active warp.
2656 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2657 C.VoidPtrTy, ImplicitParamDecl::Other);
2658 // NumWarps: number of warps active in the parallel region. This could
2659 // be smaller than 32 (max warps in a CTA) for partial block reduction.
2660 ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2661 C.getIntTypeForBitwidth(32, /* Signed */ true),
2662 ImplicitParamDecl::Other);
2663 FunctionArgList Args;
2664 Args.push_back(&ReduceListArg);
2665 Args.push_back(&NumWarpsArg);
2667 const CGFunctionInfo &CGFI =
2668 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2669 auto *Fn = llvm::Function::Create(
2670 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2671 "_omp_reduction_inter_warp_copy_func", &CGM.getModule());
2672 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2673 Fn->setDoesNotRecurse();
2674 CodeGenFunction CGF(CGM);
2675 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2677 CGBuilderTy &Bld = CGF.Builder;
2679 // This array is used as a medium to transfer, one reduce element at a time,
2680 // the data from the first lane of every warp to lanes in the first warp
2681 // in order to perform the final step of a reduction in a parallel region
2682 // (reduction across warps). The array is placed in NVPTX __shared__ memory
2683 // for reduced latency, as well as to have a distinct copy for concurrently
2684 // executing target regions. The array is declared with common linkage so
2685 // as to be shared across compilation units.
2686 StringRef TransferMediumName =
2687 "__openmp_nvptx_data_transfer_temporary_storage";
2688 llvm::GlobalVariable *TransferMedium =
2689 M.getGlobalVariable(TransferMediumName);
2690 if (!TransferMedium) {
2691 auto *Ty = llvm::ArrayType::get(CGM.Int64Ty, WarpSize);
2692 unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
2693 TransferMedium = new llvm::GlobalVariable(
2694 M, Ty,
2695 /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
2696 llvm::Constant::getNullValue(Ty), TransferMediumName,
2697 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
2698 SharedAddressSpace);
2699 CGM.addCompilerUsedGlobal(TransferMedium);
2700 }
2702 // Get the CUDA thread id of the current OpenMP thread on the GPU.
2703 llvm::Value *ThreadID = getNVPTXThreadID(CGF);
2704 // nvptx_lane_id = nvptx_id % warpsize
2705 llvm::Value *LaneID = getNVPTXLaneID(CGF);
2706 // nvptx_warp_id = nvptx_id / warpsize
2707 llvm::Value *WarpID = getNVPTXWarpID(CGF);
2709 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2710 Address LocalReduceList(
2711 Bld.CreatePointerBitCastOrAddrSpaceCast(
2712 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2713 C.VoidPtrTy, SourceLocation()),
2714 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2715 CGF.getPointerAlign());
2717 unsigned Idx = 0;
2718 for (const Expr *Private : Privates) {
2720 // Warp master copies reduce element to transfer medium in __shared__
2721 // memory.
2723 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2724 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2725 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2727 // if (lane_id == 0)
2728 llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
2729 Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
2730 CGF.EmitBlock(ThenBB);
2732 // Reduce element = LocalReduceList[i]
2733 Address ElemPtrPtrAddr =
2734 Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
2735 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2736 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2737 // elemptr = (type[i]*)(elemptrptr)
2738 Address ElemPtr =
2739 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
2740 ElemPtr = Bld.CreateElementBitCast(
2741 ElemPtr, CGF.ConvertTypeForMem(Private->getType()));
2743 // Get pointer to location in transfer medium.
2744 // MediumPtr = &medium[warp_id]
2745 llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
2746 TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
2747 Address MediumPtr(MediumPtrVal, C.getTypeAlignInChars(Private->getType()));
2748 // Casting to actual data type.
2749 // MediumPtr = (type[i]*)MediumPtrAddr;
2750 MediumPtr = Bld.CreateElementBitCast(
2751 MediumPtr, CGF.ConvertTypeForMem(Private->getType()));
2753 // elem = *elemptr
2754 // *MediumPtr = elem
2755 if (Private->getType()->isScalarType()) {
2756 llvm::Value *Elem = CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false,
2757 Private->getType(), Loc);
2758 // Store the source element value to the dest element address.
2759 CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/false,
2760 Private->getType());
2761 } else {
2762 CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2763 CGF.MakeAddrLValue(MediumPtr, Private->getType()),
2764 Private->getType(), AggValueSlot::DoesNotOverlap);
2765 }
2767 Bld.CreateBr(MergeBB);
2769 CGF.EmitBlock(ElseBB);
2770 Bld.CreateBr(MergeBB);
2772 CGF.EmitBlock(MergeBB);
2774 Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
2775 llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
2776 AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, SourceLocation());
2778 llvm::Value *NumActiveThreads = Bld.CreateNSWMul(
2779 NumWarpsVal, getNVPTXWarpSize(CGF), "num_active_threads");
2780 // named_barrier_sync(ParallelBarrierID, num_active_threads)
2781 syncParallelThreads(CGF, NumActiveThreads);
2784 // Warp 0 copies reduce element from transfer medium.
2786 llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
2787 llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
2788 llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
2790 // Up to 32 threads in warp 0 are active.
2791 llvm::Value *IsActiveThread =
2792 Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
2793 Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
2795 CGF.EmitBlock(W0ThenBB);
2797 // SrcMediumPtr = &medium[tid]
2798 llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
2799 TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
2800 Address SrcMediumPtr(SrcMediumPtrVal,
2801 C.getTypeAlignInChars(Private->getType()));
2802 // SrcMediumVal = *SrcMediumPtr;
2803 SrcMediumPtr = Bld.CreateElementBitCast(
2804 SrcMediumPtr, CGF.ConvertTypeForMem(Private->getType()));
2806 // TargetElemPtr = (type[i]*)(SrcDataAddr[i])
2807 Address TargetElemPtrPtr =
2808 Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
2809 llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
2810 TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2811 Address TargetElemPtr =
2812 Address(TargetElemPtrVal, C.getTypeAlignInChars(Private->getType()));
2813 TargetElemPtr = Bld.CreateElementBitCast(
2814 TargetElemPtr, CGF.ConvertTypeForMem(Private->getType()));
2816 // *TargetElemPtr = SrcMediumVal;
2817 if (Private->getType()->isScalarType()) {
2818 llvm::Value *SrcMediumValue = CGF.EmitLoadOfScalar(
2819 SrcMediumPtr, /*Volatile=*/false, Private->getType(), Loc);
2820 CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
2821 Private->getType());
2822 } else {
2823 CGF.EmitAggregateCopy(
2824 CGF.MakeAddrLValue(SrcMediumPtr, Private->getType()),
2825 CGF.MakeAddrLValue(TargetElemPtr, Private->getType()),
2826 Private->getType(), AggValueSlot::DoesNotOverlap);
2827 }
2828 Bld.CreateBr(W0MergeBB);
2830 CGF.EmitBlock(W0ElseBB);
2831 Bld.CreateBr(W0MergeBB);
2833 CGF.EmitBlock(W0MergeBB);
2835 // While warp 0 copies values from transfer medium, all other warps must
2836 // wait.
2837 syncParallelThreads(CGF, NumActiveThreads);
2838 ++Idx;
2839 }
2841 CGF.FinishFunction();
2842 return Fn;
2843 }
2845 /// Emit a helper that reduces data across two OpenMP threads (lanes)
2846 /// in the same warp. It uses shuffle instructions to copy over data from
2847 /// a remote lane's stack. The reduction algorithm performed is specified
2848 /// by the fourth parameter.
2850 /// Algorithm Versions.
2851 /// Full Warp Reduce (argument value 0):
2852 /// This algorithm assumes that all 32 lanes are active and gathers
2853 /// data from these 32 lanes, producing a single resultant value.
2854 /// Contiguous Partial Warp Reduce (argument value 1):
2855 /// This algorithm assumes that only a *contiguous* subset of lanes
2856 /// are active. This happens for the last warp in a parallel region
2857 /// when the user specified num_threads is not an integer multiple of
2858 /// 32. This contiguous subset always starts with the zeroth lane.
2859 /// Partial Warp Reduce (argument value 2):
2860 /// This algorithm gathers data from any number of lanes at any position.
2861 /// All reduced values are stored in the lowest possible lane. The set
2862 /// of problems every algorithm addresses is a super set of those
2863 /// addressable by algorithms with a lower version number. Overhead
2864 /// increases as algorithm version increases.
2865 ///
2866 /// Terminology
2867 /// Reduce element:
2868 /// Reduce element refers to the individual data field with primitive
2869 /// data types to be combined and reduced across threads.
2870 /// Reduce list:
2871 /// Reduce list refers to a collection of local, thread-private
2872 /// reduce elements.
2873 /// Remote Reduce list:
2874 /// Remote Reduce list refers to a collection of remote (relative to
2875 /// the current thread) reduce elements.
2877 /// We distinguish between three states of threads that are important to
2878 /// the implementation of this function.
2879 /// Alive threads:
2880 /// Threads in a warp executing the SIMT instruction, as distinguished from
2881 /// threads that are inactive due to divergent control flow.
2882 /// Active threads:
2883 /// The minimal set of threads that has to be alive upon entry to this
2884 /// function. The computation is correct iff active threads are alive.
2885 /// Some threads are alive but they are not active because they do not
2886 /// contribute to the computation in any useful manner. Turning them off
2887 /// may introduce control flow overheads without any tangible benefits.
2888 /// Effective threads:
2889 /// In order to comply with the argument requirements of the shuffle
2890 /// function, we must keep all lanes holding data alive. But at most
2891 /// half of them perform value aggregation; we refer to this half of
2892 /// threads as effective. The other half is simply handing off their
2893 /// data.
2894 ///
2895 /// Procedure
2896 /// Value shuffle:
2897 /// In this step active threads transfer data from higher lane positions
2898 /// in the warp to lower lane positions, creating Remote Reduce list.
2899 /// Value aggregation:
2900 /// In this step, effective threads combine their thread local Reduce list
2901 /// with Remote Reduce list and store the result in the thread local
2902 /// Reduce list.
2903 /// Value copy:
2904 /// In this step, we deal with the assumption made by algorithm 2
2905 /// (i.e. contiguity assumption). When we have an odd number of lanes
2906 /// active, say 2k+1, only k threads will be effective and therefore k
2907 /// new values will be produced. However, the Reduce list owned by the
2908 /// (2k+1)th thread is ignored in the value aggregation. Therefore
2909 /// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
2910 /// that the contiguity assumption still holds.
2911 static llvm::Value *emitShuffleAndReduceFunction(
2912 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2913 QualType ReductionArrayTy, llvm::Value *ReduceFn, SourceLocation Loc) {
2914 ASTContext &C = CGM.getContext();
2916 // Thread local Reduce list used to host the values of data to be reduced.
2917 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2918 C.VoidPtrTy, ImplicitParamDecl::Other);
2919 // Current lane id; could be logical.
2920 ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
2921 ImplicitParamDecl::Other);
2922 // Offset of the remote source lane relative to the current lane.
2923 ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2924 C.ShortTy, ImplicitParamDecl::Other);
2925 // Algorithm version. This is expected to be known at compile time.
2926 ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2927 C.ShortTy, ImplicitParamDecl::Other);
2928 FunctionArgList Args;
2929 Args.push_back(&ReduceListArg);
2930 Args.push_back(&LaneIDArg);
2931 Args.push_back(&RemoteLaneOffsetArg);
2932 Args.push_back(&AlgoVerArg);
2934 const CGFunctionInfo &CGFI =
2935 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2936 auto *Fn = llvm::Function::Create(
2937 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2938 "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
2939 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2940 Fn->setDoesNotRecurse();
2941 CodeGenFunction CGF(CGM);
2942 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2944 CGBuilderTy &Bld = CGF.Builder;
2946 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2947 Address LocalReduceList(
2948 Bld.CreatePointerBitCastOrAddrSpaceCast(
2949 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2950 C.VoidPtrTy, SourceLocation()),
2951 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2952 CGF.getPointerAlign());
2954 Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
2955 llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
2956 AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2958 Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
2959 llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
2960 AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2962 Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
2963 llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
2964 AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2966 // Create a local thread-private variable to host the Reduce list
2967 // from a remote lane.
2968 Address RemoteReduceList =
2969 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
2971 // This loop iterates through the list of reduce elements and copies,
2972 // element by element, from a remote lane in the warp to RemoteReduceList,
2973 // hosted on the thread's stack.
2974 emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
2975 LocalReduceList, RemoteReduceList,
2976 {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
2977 /*ScratchpadIndex=*/nullptr,
2978 /*ScratchpadWidth=*/nullptr});
2980 // The actions to be performed on the Remote Reduce list are dependent
2981 // on the algorithm version.
2983 // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
2984 // LaneId % 2 == 0 && Offset > 0):
2985 // do the reduction value aggregation
2987 // The thread local variable Reduce list is mutated in place to host the
2988 // reduced data, which is the aggregated value produced from local and
2989 // remote lanes.
2990 //
2991 // Note that AlgoVer is expected to be a constant integer known at compile
2992 // time.
2993 // When AlgoVer==0, the first conjunction evaluates to true, making
2994 // the entire predicate true during compile time.
2995 // When AlgoVer==1, the second conjunction has only the second part to be
2996 // evaluated during runtime. Other conjunctions evaluate to false
2997 // during compile time.
2998 // When AlgoVer==2, the third conjunction has only the second part to be
2999 // evaluated during runtime. Other conjunctions evaluate to false
3000 // during compile time.
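// (For instance: with AlgoVer==1 the predicate below folds to
// LaneId < Offset; with AlgoVer==2 it folds to
// LaneId % 2 == 0 && Offset > 0.)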
3001 llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
3003 llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3004 llvm::Value *CondAlgo1 = Bld.CreateAnd(
3005 Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
3007 llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
3008 llvm::Value *CondAlgo2 = Bld.CreateAnd(
3009 Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
3010 CondAlgo2 = Bld.CreateAnd(
3011 CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
3013 llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
3014 CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
3016 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3017 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3018 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3019 Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
3021 CGF.EmitBlock(ThenBB);
3022 // reduce_function(LocalReduceList, RemoteReduceList)
3023 llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3024 LocalReduceList.getPointer(), CGF.VoidPtrTy);
3025 llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3026 RemoteReduceList.getPointer(), CGF.VoidPtrTy);
3027 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3028 CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
3029 Bld.CreateBr(MergeBB);
3031 CGF.EmitBlock(ElseBB);
3032 Bld.CreateBr(MergeBB);
3034 CGF.EmitBlock(MergeBB);
3036 // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
3037 // Reduce list.
3038 Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3039 llvm::Value *CondCopy = Bld.CreateAnd(
3040 Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
3042 llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
3043 llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
3044 llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
3045 Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
3047 CGF.EmitBlock(CpyThenBB);
3048 emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
3049 RemoteReduceList, LocalReduceList);
3050 Bld.CreateBr(CpyMergeBB);
3052 CGF.EmitBlock(CpyElseBB);
3053 Bld.CreateBr(CpyMergeBB);
3055 CGF.EmitBlock(CpyMergeBB);
3057 CGF.FinishFunction();
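
// For illustration only: the function emitted above behaves, conceptually,
// like the following sketch. The names are illustrative, not emitted
// symbols, and 'shuffle_copy' stands for the element-wise shuffle performed
// by emitReductionListCopy:
//
//   void shuffle_and_reduce(void *reduce_data, int16_t lane_id,
//                           int16_t offset, int16_t algo_ver) {
//     ReduceData remote;
//     shuffle_copy(&remote, reduce_data, offset); // pull from remote lane
//     if (algo_ver == 0 ||                                 // full warp
//         (algo_ver == 1 && lane_id < offset) ||           // contiguous
//         (algo_ver == 2 && lane_id % 2 == 0 && offset > 0)) // dispersed
//       reduce_function(reduce_data, &remote);   // aggregate in place
//     if (algo_ver == 1 && lane_id >= offset)
//       copy(reduce_data, &remote);              // copy instead of reduce
//   }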
///
/// Design of OpenMP reductions on the GPU
///
/// Consider a typical OpenMP program with one or more reduction
/// clauses:
///
/// double foo;
/// double bar;
/// #pragma omp target teams distribute parallel for \
///             reduction(+:foo) reduction(*:bar)
/// for (int i = 0; i < N; i++) {
///   foo += A[i]; bar *= B[i];
/// }
///
/// where 'foo' and 'bar' are reduced across all OpenMP threads in
/// all teams. In our OpenMP implementation on the NVPTX device an
/// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
/// within a team are mapped to CUDA threads within a threadblock.
/// Our goal is to efficiently aggregate values across all OpenMP
/// threads such that:
///
///   - the compiler and runtime are logically concise, and
///   - the reduction is performed efficiently in a hierarchical
///     manner as follows: within OpenMP threads in the same warp,
///     across warps in a threadblock, and finally across teams on
///     the NVPTX device.
///
/// Introduction to Decoupling
///
/// We would like to decouple the compiler and the runtime so that the
/// latter is ignorant of the reduction variables (number, data types)
/// and the reduction operators. This allows a simpler interface
/// and implementation while still attaining good performance.
///
/// Pseudocode for the aforementioned OpenMP program generated by the
/// compiler is as follows:
///
/// 1. Create private copies of reduction variables on each OpenMP
///    thread: 'foo_private', 'bar_private'
/// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
///    to it and writes the result in 'foo_private' and 'bar_private'
///    respectively.
/// 3. Call the OpenMP runtime on the GPU to reduce within a team
///    and store the result on the team master:
///
///      __kmpc_nvptx_parallel_reduce_nowait(...,
///         reduceData, shuffleReduceFn, interWarpCpyFn)
///
///      where:
///        struct ReduceData {
///          double *foo;
///          double *bar;
///        } reduceData;
///        reduceData.foo = &foo_private
///        reduceData.bar = &bar_private
///
///      'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
///      auxiliary functions generated by the compiler that operate on
///      variables of type 'ReduceData'. They aid the runtime to perform
///      algorithmic steps in a data agnostic manner.
///
///      'shuffleReduceFn' is a pointer to a function that reduces data
///      of type 'ReduceData' across two OpenMP threads (lanes) in the
///      same warp. It takes the following arguments as input:
///
///      a. variable of type 'ReduceData' on the calling lane,
///      b. its lane_id,
///      c. an offset relative to the current lane_id to generate a
///         remote_lane_id. The remote lane contains the second
///         variable of type 'ReduceData' that is to be reduced.
///      d. an algorithm version parameter determining which reduction
///         algorithm to use.
///
///      'shuffleReduceFn' retrieves data from the remote lane using
///      efficient GPU shuffle intrinsics and reduces, using the
///      algorithm specified by the 4th parameter, the two operands
///      element-wise. The result is written to the first operand.
///
///      Different reduction algorithms are implemented in different
///      runtime functions, all calling 'shuffleReduceFn' to perform
///      the essential reduction step. Therefore, based on the 4th
///      parameter, this function behaves slightly differently to
///      cooperate with the runtime to ensure correctness under
///      different circumstances.
///
///      'interWarpCpyFn' is a pointer to a function that transfers
///      reduced variables across warps. It tunnels, through CUDA
///      shared memory, the thread-private data of type 'ReduceData'
///      from lane 0 of each warp to a lane in the first warp.
/// 4. Call the OpenMP runtime on the GPU to reduce across teams.
///    The last team writes the global reduced value to memory.
///
///      ret = __kmpc_nvptx_teams_reduce_nowait(...,
///              reduceData, shuffleReduceFn, interWarpCpyFn,
///              scratchpadCopyFn, loadAndReduceFn)
///
///      'scratchpadCopyFn' is a helper that stores reduced
///      data from the team master to a scratchpad array in
///      global memory.
///
///      'loadAndReduceFn' is a helper that loads data from
///      the scratchpad array and reduces it with the input
///      operand.
///
///      These compiler generated functions hide address
///      calculation and alignment information from the runtime.
/// 5. The team master of the last team stores the reduced
///    result to the globals in memory.
///
///      foo += reduceData.foo; bar *= reduceData.bar
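///
/// For the program above, the compiler-generated 'shuffleReduceFn' behaves,
/// conceptually, as in the sketch below. 'should_reduce' and 'should_copy'
/// are illustrative stand-ins for the algorithm-version predicates described
/// under "Warp Reduction Algorithms" below, not real helpers:
///
///   void shuffleReduceFn(ReduceData *data, int16_t lane_id,
///                        int16_t offset, int16_t algo_ver) {
///     double remote_foo = shuffle_down(*data->foo, offset, WARPSIZE);
///     double remote_bar = shuffle_down(*data->bar, offset, WARPSIZE);
///     if (should_reduce(algo_ver, lane_id, offset)) {
///       *data->foo += remote_foo;   // '+' from reduction(+:foo)
///       *data->bar *= remote_bar;   // '*' from reduction(*:bar)
///     } else if (should_copy(algo_ver, lane_id, offset)) {
///       *data->foo = remote_foo;
///       *data->bar = remote_bar;
///     }
///   }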
///
/// Warp Reduction Algorithms
///
/// On the warp level, we have three algorithms implemented in the
/// OpenMP runtime depending on the number of active lanes:
///
/// Full Warp Reduction
///
/// The reduce algorithm within a warp where all lanes are active
/// is implemented in the runtime as follows:
///
/// full_warp_reduce(void *reduce_data,
///                  kmp_ShuffleReductFctPtr ShuffleReduceFn) {
///   for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
///     ShuffleReduceFn(reduce_data, 0, offset, 0);
/// }
///
/// The algorithm completes in log2(WARPSIZE) steps.
///
/// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
/// not used; we therefore save instructions by not retrieving lane_id
/// from the corresponding special registers. The 4th parameter, which
/// represents the version of the algorithm being used, is set to 0 to
/// signify full warp reduction.
///
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
///
/// #reduce_elem refers to an element in the local lane's data structure
/// #remote_elem is retrieved from a remote lane
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
/// reduce_elem = reduce_elem REDUCE_OP remote_elem;
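///
/// For a single double element with REDUCE_OP '+', the full-warp pass is
/// equivalent to the classic CUDA warp reduction below (a sketch assuming
/// CUDA 9+, where __shfl_down_sync is the intrinsic underlying
/// 'shuffle_down', and all 32 lanes are active):
///
///   __device__ double warp_sum(double val) {
///     for (int offset = 16; offset > 0; offset /= 2)
///       val += __shfl_down_sync(0xffffffffu, val, offset);
///     return val; // lane 0 now holds the sum across the warp
///   }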
///
/// Contiguous Partial Warp Reduction
///
/// This reduce algorithm is used within a warp where only the first
/// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
/// number of OpenMP threads in a parallel region is not a multiple of
/// WARPSIZE. The algorithm is implemented in the runtime as follows:
///
/// void
/// contiguous_partial_reduce(void *reduce_data,
///                           kmp_ShuffleReductFctPtr ShuffleReduceFn,
///                           int size, int lane_id) {
///   int curr_size;
///   int offset;
///   curr_size = size;
///   offset = curr_size/2;
///   while (offset > 0) {
///     ShuffleReduceFn(reduce_data, lane_id, offset, 1);
///     curr_size = (curr_size+1)/2;
///     offset = curr_size/2;
///   }
/// }
///
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
///
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
/// if (lane_id < offset)
///   reduce_elem = reduce_elem REDUCE_OP remote_elem
/// else
///   reduce_elem = remote_elem
///
/// This algorithm assumes that the data to be reduced are located in a
/// contiguous subset of lanes starting from the first. When there is
/// an odd number of active lanes, the data in the last lane is not
/// aggregated with any other lane's data but is instead copied over.
///
/// Dispersed Partial Warp Reduction
///
/// This algorithm is used within a warp when any discontiguous subset of
/// lanes is active. It is used to implement the reduction operation
/// across lanes in an OpenMP simd region or in a nested parallel region.
///
/// void
/// dispersed_partial_reduce(void *reduce_data,
///                          kmp_ShuffleReductFctPtr ShuffleReduceFn) {
///   int size, remote_id;
///   int logical_lane_id = number_of_active_lanes_before_me() * 2;
///   do {
///     remote_id = next_active_lane_id_right_after_me();
///     # the above function returns 0 if no active lane
///     # is present right after the current lane.
///     size = number_of_active_lanes_in_this_warp();
///     logical_lane_id /= 2;
///     ShuffleReduceFn(reduce_data, logical_lane_id,
///                     remote_id-1-threadIdx.x, 2);
///   } while (logical_lane_id % 2 == 0 && size > 1);
/// }
///
/// There is no assumption made about the initial state of the reduction.
/// Any number of lanes (>=1) could be active at any position. The reduction
/// result is returned in the first active lane.
///
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
///
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
/// if (lane_id % 2 == 0 && offset > 0)
///   reduce_elem = reduce_elem REDUCE_OP remote_elem
/// else
///   reduce_elem = remote_elem
///
/// Intra-Team Reduction
///
/// This function, as implemented in the runtime call
/// '__kmpc_nvptx_parallel_reduce_nowait', aggregates data across OpenMP
/// threads in a team. It first reduces within a warp using the
/// aforementioned algorithms. We then proceed to gather all such
/// reduced values at the first warp.
///
/// The runtime makes use of the function 'interWarpCpyFn', which copies
/// data from each of the warp masters (the zeroth lane of each warp, where
/// warp-reduced data is held) to the zeroth warp. This step reduces (in
/// a mathematical sense) the problem of reduction across warp masters in
/// a block to the problem of warp reduction.
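///
/// Per element, the inter-warp copy conceptually performs the steps in the
/// sketch below. This is illustrative only; 'scratch', 'warp_id', and
/// 'num_warps' are stand-in names, and the emitted code tunnels the data
/// through a fixed transfer medium in shared memory rather than this array:
///
///   __shared__ double scratch[WARPS_PER_BLOCK];
///   if (lane_id == 0)                 // warp masters publish their value
///     scratch[warp_id] = reduce_elem;
///   __syncthreads();
///   if (warp_id == 0 && lane_id < num_warps)
///     reduce_elem = scratch[lane_id]; // warp 0 gathers all warp results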
///
/// Inter-Team Reduction
///
/// Once a team has reduced its data to a single value, it is stored in
/// a global scratchpad array. Since each team has a distinct slot, this
/// can be done without locking.
///
/// The last team to write to the scratchpad array proceeds to reduce the
/// scratchpad array. One or more workers in the last team use the helper
/// 'loadAndReduceFn' to load and reduce values from the array, i.e.,
/// the k'th worker reduces every k'th element.
///
/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait' to
/// reduce across workers and compute a globally reduced value.
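///
/// A sketch of this inter-team step (illustrative only; 'teams_done' is a
/// hypothetical global counter standing in for the runtime's bookkeeping,
/// and the argument lists are abbreviated):
///
///   scratchpad[team_id] = reduceData;        // distinct slots, no locking
///   if (atomicInc(&teams_done, num_teams) == num_teams - 1) {
///     // Last team: the k'th worker reduces every k'th slot.
///     for (int i = thread_id; i < num_teams; i += num_threads)
///       loadAndReduceFn(&reduceData, scratchpad, i, ...);
///     // Then reduce across the workers of the last team.
///     __kmpc_nvptx_parallel_reduce_nowait(...);
///   }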
void CGOpenMPRuntimeNVPTX::emitReduction(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
    ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
    ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
  if (!CGF.HaveInsertPoint())
    return;

  bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
  bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
  bool SimdReduction = isOpenMPSimdDirective(Options.ReductionKind);
  assert((TeamsReduction || ParallelReduction || SimdReduction) &&
         "Invalid reduction selection in emitReduction.");

  if (Options.SimpleReduction) {
    CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
                                   ReductionOps, Options);
    return;
  }

  ASTContext &C = CGM.getContext();

  // 1. Build a list of reduction variables.
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
  auto Size = RHSExprs.size();
  for (const Expr *E : Privates) {
    if (E->getType()->isVariablyModifiedType())
      // Reserve place for array size.
      ++Size;
  }
  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
  QualType ReductionArrayTy =
      C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
                             /*IndexTypeQuals=*/0);
  Address ReductionList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
                                                   CGF.getPointerSize());
    CGF.Builder.CreateStore(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
        Elem);
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
      // Store array size.
      ++Idx;
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
                                             CGF.getPointerSize());
      llvm::Value *Size = CGF.Builder.CreateIntCast(
          CGF.getVLASize(
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
              .NumElts,
          CGF.SizeTy, /*isSigned=*/false);
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
                              Elem);
    }
  }

  // 2. Emit reduce_func().
  llvm::Value *ReductionFn = emitReductionFunction(
      CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
      Privates, LHSExprs, RHSExprs, ReductionOps);

  // 3. Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
  // RedList, shuffle_reduce_func, interwarp_copy_func);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
  llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      ReductionList.getPointer(), CGF.VoidPtrTy);

  llvm::Value *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
      CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
  llvm::Value *InterWarpCopyFn =
      emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);

  llvm::Value *Args[] = {ThreadId,
                         CGF.Builder.getInt32(RHSExprs.size()),
                         ReductionArrayTySize,
                         RL,
                         ShuffleAndReduceFn,
                         InterWarpCopyFn};

  llvm::Value *Res = nullptr;
  if (ParallelReduction)
    Res = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_reduce_nowait),
        Args);
  else if (SimdReduction)
    Res = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_simd_reduce_nowait),
        Args);

  if (TeamsReduction) {
    llvm::Value *ScratchPadCopyFn =
        emitCopyToScratchpad(CGM, Privates, ReductionArrayTy, Loc);
    llvm::Value *LoadAndReduceFn = emitReduceScratchpadFunction(
        CGM, Privates, ReductionArrayTy, ReductionFn, Loc);

    llvm::Value *Args[] = {ThreadId,
                           CGF.Builder.getInt32(RHSExprs.size()),
                           ReductionArrayTySize,
                           RL,
                           ShuffleAndReduceFn,
                           InterWarpCopyFn,
                           ScratchPadCopyFn,
                           LoadAndReduceFn};
    Res = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_teams_reduce_nowait),
        Args);
  }

  // 4. Build switch(res)
  llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
  llvm::SwitchInst *SwInst =
      CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/1);

  // 5. Build case 1: where we have reduced values in the master
  //    thread in each team.
  //    __kmpc_end_reduce{_nowait}(<gtid>);
  //    break;
  llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
  SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
  CGF.EmitBlock(Case1BB);

  // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
  llvm::Value *EndArgs[] = {ThreadId};
  auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
                    this](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (const Expr *E : ReductionOps) {
      emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                  cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
  };
  RegionCodeGenTy RCG(CodeGen);
  NVPTXActionTy Action(
      nullptr, llvm::None,
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
      EndArgs);
  RCG.setAction(Action);
  RCG(CGF);

  CGF.EmitBranch(DefaultBB);
  CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
}
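
// For the running foo/bar example from the design comment above, the code
// emitted by emitReduction for a parallel reduction is conceptually:
//
//   void *RedList[2] = {&foo_private, &bar_private};
//   int32_t res = __kmpc_nvptx_parallel_reduce_nowait(
//       gtid, /*num_vars=*/2, sizeof(RedList), RedList,
//       shuffleReduceFn, interWarpCpyFn);
//   switch (res) {
//   case 1: // reduced value is available on the team master
//     foo += foo_private; bar *= bar_private;
//     __kmpc_nvptx_end_reduce_nowait(gtid);
//     break;
//   default:;
//   }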
const VarDecl *
CGOpenMPRuntimeNVPTX::translateParameter(const FieldDecl *FD,
                                         const VarDecl *NativeParam) const {
  if (!NativeParam->getType()->isReferenceType())
    return NativeParam;
  QualType ArgType = NativeParam->getType();
  QualifierCollector QC;
  const Type *NonQualTy = QC.strip(ArgType);
  QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
  if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
    if (Attr->getCaptureKind() == OMPC_map) {
      PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
                                                        LangAS::opencl_global);
    }
  }
  ArgType = CGM.getContext().getPointerType(PointeeTy);

  enum { NVPTX_local_addr = 5 };
  QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
  ArgType = QC.apply(CGM.getContext(), ArgType);
  if (isa<ImplicitParamDecl>(NativeParam))
    return ImplicitParamDecl::Create(
        CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
        NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
  return ParmVarDecl::Create(
      CGM.getContext(),
      const_cast<DeclContext *>(NativeParam->getDeclContext()),
      NativeParam->getLocStart(), NativeParam->getLocation(),
      NativeParam->getIdentifier(), ArgType,
      /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
}
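
// For illustration: given a captured reference parameter 'double &x' whose
// field carries an OMPCaptureKindAttr with kind 'map', the translated
// parameter is a pointer to global memory (NVPTX address space 1) that is
// itself qualified with the NVPTX local address space (5); a hypothetical
// source-level spelling would be:
//
//   double __attribute__((address_space(1))) *
//       __attribute__((address_space(5))) x;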
Address
CGOpenMPRuntimeNVPTX::getParameterAddress(CodeGenFunction &CGF,
                                          const VarDecl *NativeParam,
                                          const VarDecl *TargetParam) const {
  assert(NativeParam != TargetParam &&
         NativeParam->getType()->isReferenceType() &&
         "Native arg must not be the same as target arg.");
  Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
  QualType NativeParamType = NativeParam->getType();
  QualifierCollector QC;
  const Type *NonQualTy = QC.strip(NativeParamType);
  QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
  unsigned NativePointeeAddrSpace =
      CGF.getContext().getTargetAddressSpace(NativePointeeTy);
  QualType TargetTy = TargetParam->getType();
  llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
      LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
  // First cast to generic.
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
                      /*AddrSpace=*/0));
  // Cast from generic to native address space.
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
                      NativePointeeAddrSpace));
  Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
  CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
                        NativeParamType);
  return NativeParamAddr;
}

void CGOpenMPRuntimeNVPTX::emitOutlinedFunctionCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> Args) const {
  SmallVector<llvm::Value *, 4> TargetArgs;
  TargetArgs.reserve(Args.size());
  auto *FnType =
      cast<llvm::FunctionType>(OutlinedFn->getType()->getPointerElementType());
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
    if (FnType->isVarArg() && FnType->getNumParams() <= I) {
      TargetArgs.append(std::next(Args.begin(), I), Args.end());
      break;
    }
    llvm::Type *TargetType = FnType->getParamType(I);
    llvm::Value *NativeArg = Args[I];
    if (!TargetType->isPointerTy()) {
      TargetArgs.emplace_back(NativeArg);
      continue;
    }
    llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        NativeArg,
        NativeArg->getType()->getPointerElementType()->getPointerTo());
    TargetArgs.emplace_back(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
  }
  CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
}
/// Emit a function that wraps the outlined parallel region
/// and controls the arguments which are passed to this function.
/// The wrapper ensures that the outlined function is called
/// with the correct arguments when data is shared.
llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
    llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
  ASTContext &Ctx = CGM.getContext();
  const auto &CS = *D.getCapturedStmt(OMPD_parallel);

  // Create a function that takes as argument the source thread.
  FunctionArgList WrapperArgs;
  QualType Int16QTy =
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
  QualType Int32QTy =
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
  ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getLocStart(),
                                     /*Id=*/nullptr, Int16QTy,
                                     ImplicitParamDecl::Other);
  ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getLocStart(),
                               /*Id=*/nullptr, Int32QTy,
                               ImplicitParamDecl::Other);
  WrapperArgs.emplace_back(&ParallelLevelArg);
  WrapperArgs.emplace_back(&WrapperArg);

  const CGFunctionInfo &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);

  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
  Fn->setDoesNotRecurse();

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
                    D.getLocStart(), D.getLocStart());

  const auto *RD = CS.getCapturedRecordDecl();
  auto CurField = RD->field_begin();

  Address ZeroAddr = CGF.CreateMemTemp(
      CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
      /*Name*/ ".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // Get the array of arguments.
  SmallVector<llvm::Value *, 8> Args;

  Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
  Args.emplace_back(ZeroAddr.getPointer());

  CGBuilderTy &Bld = CGF.Builder;
  auto CI = CS.capture_begin();

  // Use global memory for data sharing.
  // Handle passing of global args to workers.
  Address GlobalArgs =
      CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
  llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
  llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_get_shared_variables),
      DataSharingArgs);

  // Retrieve the shared variables from the list of references returned
  // by the runtime. Pass the variables to the outlined function.
  Address SharedArgListAddress = Address::invalid();
  if (CS.capture_size() > 0 ||
      isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
    SharedArgListAddress = CGF.EmitLoadOfPointer(
        GlobalArgs, CGF.getContext()
                        .getPointerType(CGF.getContext().getPointerType(
                            CGF.getContext().VoidPtrTy))
                        .castAs<PointerType>());
  }
  unsigned Idx = 0;
  if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
    Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx,
                                             CGF.getPointerSize());
    Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
        Src, CGF.SizeTy->getPointerTo());
    llvm::Value *LB = CGF.EmitLoadOfScalar(
        TypedAddress,
        /*Volatile=*/false,
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
        cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
    Args.emplace_back(LB);
    ++Idx;

    Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx,
                                     CGF.getPointerSize());
    TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
        Src, CGF.SizeTy->getPointerTo());
    llvm::Value *UB = CGF.EmitLoadOfScalar(
        TypedAddress,
        /*Volatile=*/false,
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
        cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
    Args.emplace_back(UB);
    ++Idx;
  }
  if (CS.capture_size() > 0) {
    ASTContext &CGFContext = CGF.getContext();
    for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
      QualType ElemTy = CurField->getType();
      Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx,
                                               CGF.getPointerSize());
      Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
          Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
      llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
                                              /*Volatile=*/false,
                                              CGFContext.getPointerType(ElemTy),
                                              CI->getLocation());
      if (CI->capturesVariableByCopy() &&
          !CI->getCapturedVar()->getType()->isAnyPointerType()) {
        Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
                              CI->getLocation());
      }
      Args.emplace_back(Arg);
    }
  }

  emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedParallelFn, Args);
  CGF.FinishFunction();
  return Fn;
}
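
// Conceptually, the emitted wrapper looks like the following sketch
// (illustrative pseudo-C for an outlined region with n captured variables;
// the names are illustrative, not emitted symbols):
//
//   void outlined_parallel_fn_wrapper(int16_t parallel_level, int32_t tid) {
//     int32_t zero = 0;
//     void **shared_args;
//     __kmpc_get_shared_variables(&shared_args);
//     // Loop bounds (if any) come first, then the captured variables.
//     outlined_parallel_fn(&tid, &zero, shared_args[0], ...,
//                          shared_args[n-1]);
//   }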
void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
                                              const Decl *D) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
    return;

  assert(D && "Expected function or captured|block decl.");
  assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
         "Function is registered already.");
  const Stmt *Body = nullptr;
  bool NeedToDelayGlobalization = false;
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    Body = FD->getBody();
  } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
    Body = BD->getBody();
  } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
    Body = CD->getBody();
    NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
  }
  if (!Body)
    return;
  CheckVarsEscapingDeclContext VarChecker(CGF);
  VarChecker.Visit(Body);
  const RecordDecl *GlobalizedVarsRecord = VarChecker.getGlobalizedRecord();
  ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
      VarChecker.getEscapedVariableLengthDecls();
  if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
    return;
  auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
  I->getSecond().MappedParams =
      llvm::make_unique<CodeGenFunction::OMPMapVars>();
  I->getSecond().GlobalRecord = GlobalizedVarsRecord;
  I->getSecond().EscapedParameters.insert(
      VarChecker.getEscapedParameters().begin(),
      VarChecker.getEscapedParameters().end());
  I->getSecond().EscapedVariableLengthDecls.append(
      EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
  DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
  for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
    assert(VD->isCanonicalDecl() && "Expected canonical declaration");
    const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
    Data.insert(std::make_pair(VD, std::make_pair(FD, Address::invalid())));
  }
  if (!NeedToDelayGlobalization) {
    emitGenericVarsProlog(CGF, D->getLocStart());
    struct GlobalizationScope final : EHScopeStack::Cleanup {
      GlobalizationScope() = default;

      void Emit(CodeGenFunction &CGF, Flags flags) override {
        static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
            .emitGenericVarsEpilog(CGF);
      }
    };
    CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
  }
}
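
// For illustration: in a body such as
//
//   int a;                  // escapes into the parallel region below
//   #pragma omp parallel shared(a)
//   a = 1;
//
// the prolog above detects that 'a' escapes, so 'a' is given a slot in the
// globalized record (conceptually 'struct { int a; }') that lives in global
// memory rather than on the thread's private stack, making it visible to
// the worker threads that execute the parallel region.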
Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
                                                        const VarDecl *VD) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
    return Address::invalid();

  VD = VD->getCanonicalDecl();
  auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I == FunctionGlobalizedDecls.end())
    return Address::invalid();
  auto VDI = I->getSecond().LocalVarData.find(VD);
  if (VDI != I->getSecond().LocalVarData.end())
    return VDI->second.second;
  if (VD->hasAttrs()) {
    for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
         E(VD->attr_end());
         IT != E; ++IT) {
      auto VDI = I->getSecond().LocalVarData.find(
          cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
              ->getCanonicalDecl());
      if (VDI != I->getSecond().LocalVarData.end())
        return VDI->second.second;
    }
  }

  return Address::invalid();
}

void CGOpenMPRuntimeNVPTX::functionFinished(CodeGenFunction &CGF) {
  FunctionGlobalizedDecls.erase(CGF.CurFn);
  CGOpenMPRuntime::functionFinished(CGF);
}