//===---- CGOpenMPRuntimeNVPTX.cpp - Interface to OpenMP NVPTX Runtimes ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation specialized to NVPTX
// targets.
//
//===----------------------------------------------------------------------===//
#include "CGOpenMPRuntimeNVPTX.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/IntrinsicsNVPTX.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
enum OpenMPRTLFunctionNVPTX {
  /// Call to void __kmpc_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime);
  OMPRTL_NVPTX__kmpc_kernel_init,
  /// Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
  OMPRTL_NVPTX__kmpc_kernel_deinit,
  /// Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
  OMPRTL_NVPTX__kmpc_spmd_kernel_init,
  /// Call to void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
  OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2,
  /// Call to void __kmpc_kernel_prepare_parallel(void
  /// *outlined_function);
  OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
  /// Call to bool __kmpc_kernel_parallel(void **outlined_function);
  OMPRTL_NVPTX__kmpc_kernel_parallel,
  /// Call to void __kmpc_kernel_end_parallel();
  OMPRTL_NVPTX__kmpc_kernel_end_parallel,
  /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_serialized_parallel,
  /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_end_serialized_parallel,
  /// Call to int32_t __kmpc_shuffle_int32(int32_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int32,
  /// Call to int64_t __kmpc_shuffle_int64(int64_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int64,
  /// Call to __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
  OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2,
  /// Call to __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
  /// global_tid, void *global_buffer, int32_t num_of_records, void*
  /// reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
  /// (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
  /// void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
  /// void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
  /// int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
  /// *buffer, int idx, void *reduce_data));
  OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2,
  /// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
  OMPRTL_NVPTX__kmpc_end_reduce_nowait,
  /// Call to void __kmpc_data_sharing_init_stack();
  OMPRTL_NVPTX__kmpc_data_sharing_init_stack,
  /// Call to void __kmpc_data_sharing_init_stack_spmd();
  OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd,
  /// Call to void* __kmpc_data_sharing_coalesced_push_stack(size_t size,
  /// int16_t UseSharedMemory);
  OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack,
  /// Call to void* __kmpc_data_sharing_push_stack(size_t size, int16_t
  /// UseSharedMemory);
  OMPRTL_NVPTX__kmpc_data_sharing_push_stack,
  /// Call to void __kmpc_data_sharing_pop_stack(void *a);
  OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
  /// Call to void __kmpc_begin_sharing_variables(void ***args,
  /// size_t n_args);
  OMPRTL_NVPTX__kmpc_begin_sharing_variables,
  /// Call to void __kmpc_end_sharing_variables();
  OMPRTL_NVPTX__kmpc_end_sharing_variables,
  /// Call to void __kmpc_get_shared_variables(void ***GlobalArgs)
  OMPRTL_NVPTX__kmpc_get_shared_variables,
  /// Call to uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_parallel_level,
  /// Call to int8_t __kmpc_is_spmd_exec_mode();
  OMPRTL_NVPTX__kmpc_is_spmd_exec_mode,
  /// Call to void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
  /// const void *buf, size_t size, int16_t is_shared, const void **res);
  OMPRTL_NVPTX__kmpc_get_team_static_memory,
  /// Call to void __kmpc_restore_team_static_memory(int16_t
  /// isSPMDExecutionMode, int16_t is_shared);
  OMPRTL_NVPTX__kmpc_restore_team_static_memory,
  /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_barrier,
  /// Call to void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL__kmpc_barrier_simple_spmd,
  /// Call to int32_t __kmpc_warp_active_thread_mask(void);
  OMPRTL_NVPTX__kmpc_warp_active_thread_mask,
  /// Call to void __kmpc_syncwarp(int32_t Mask);
  OMPRTL_NVPTX__kmpc_syncwarp,
};
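// Illustrative use of these enumerators (mirrors calls emitted later in this
// file): a runtime entry point is materialized and invoked through
// createNVPTXRuntimeFunction, e.g.
//   CGF.EmitRuntimeCall(
//       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);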
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::FunctionCallee EnterCallee = nullptr;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::FunctionCallee ExitCallee = nullptr;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional = false;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::FunctionCallee EnterCallee,
                ArrayRef<llvm::Value *> EnterArgs,
                llvm::FunctionCallee ExitCallee,
                ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};
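// Typical usage sketch (matches how this class is used elsewhere in this
// file): pair an enter/exit runtime call around a region and attach it to the
// region's code generation, e.g.
//   NVPTXActionTy Action(EnterCallee, EnterArgs, ExitCallee, ExitArgs);
//   CodeGen.setAction(Action);
// With Conditional == true, the region body is only emitted under the
// omp_if.then branch created in Enter().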
/// A class to track the execution mode when codegening directives within
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code.
class ExecutionRuntimeModesRAII {
private:
  CGOpenMPRuntimeNVPTX::ExecutionMode SavedExecMode =
      CGOpenMPRuntimeNVPTX::EM_Unknown;
  CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode;
  bool SavedRuntimeMode = false;
  bool *RuntimeMode = nullptr;

public:
  /// Constructor for Non-SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode)
      : ExecMode(ExecMode) {
    SavedExecMode = ExecMode;
    ExecMode = CGOpenMPRuntimeNVPTX::EM_NonSPMD;
  }
  /// Constructor for SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode,
                            bool &RuntimeMode, bool FullRuntimeMode)
      : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
    SavedExecMode = ExecMode;
    SavedRuntimeMode = RuntimeMode;
    ExecMode = CGOpenMPRuntimeNVPTX::EM_SPMD;
    RuntimeMode = FullRuntimeMode;
  }
  ~ExecutionRuntimeModesRAII() {
    ExecMode = SavedExecMode;
    if (RuntimeMode)
      *RuntimeMode = SavedRuntimeMode;
  }
};
/// GPU Configuration: This information can be derived from cuda registers,
/// however, providing compile time constants helps generate more efficient
/// code. For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  WarpSize = 32,
  /// Number of bits required to represent a lane identifier, which is
  /// computed as log_2(WarpSize).
  LaneIDBits = 5,
  LaneIDMask = WarpSize - 1,

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 128,

  /// Maximal size of the shared memory buffer.
  SharedMemorySize = 128,
};
static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
  RefExpr = RefExpr->IgnoreParens();
  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
    const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
    const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    RefExpr = Base;
  }
  RefExpr = RefExpr->IgnoreParenImpCasts();
  if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
    return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
  const auto *ME = cast<MemberExpr>(RefExpr);
  return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}
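// For example (illustrative): given the reference expression 'a[2][i]' or the
// array section 'a[0:n]', the loops above peel the subscripts/sections and
// the canonical declaration of 'a' is returned.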
static RecordDecl *buildRecordForGlobalizedVars(
    ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
    ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields, int BufSize) {
  using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
  if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
    return nullptr;
  SmallVector<VarsDataTy, 4> GlobalizedVars;
  for (const ValueDecl *D : EscapedDecls)
    GlobalizedVars.emplace_back(
        CharUnits::fromQuantity(std::max(
            C.getDeclAlign(D).getQuantity(),
            static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
        D);
  for (const ValueDecl *D : EscapedDeclsForTeams)
    GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
  llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
    return L.first > R.first;
  });

  // Build struct _globalized_locals_ty {
  //         /* globalized vars */[WarpSize] align (max(decl_align,
  //         GlobalMemoryAlignment))
  //         /* globalized vars */ for EscapedDeclsForTeams
  //       };
  RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
  GlobalizedRD->startDefinition();
  llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
      EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
  for (const auto &Pair : GlobalizedVars) {
    const ValueDecl *VD = Pair.second;
    QualType Type = VD->getType();
    if (Type->isLValueReferenceType())
      Type = C.getPointerType(Type.getNonReferenceType());
    else
      Type = Type.getNonReferenceType();
    SourceLocation Loc = VD->getLocation();
    FieldDecl *Field;
    if (SingleEscaped.count(VD)) {
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (VD->hasAttrs()) {
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
             E(VD->getAttrs().end());
             I != E; ++I)
          Field->addAttr(*I);
      }
    } else {
      llvm::APInt ArraySize(32, BufSize);
      Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
                                    0);
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
                                     static_cast<CharUnits::QuantityType>(
                                         GlobalMemoryAlignment)));
      Field->addAttr(AlignedAttr::CreateImplicit(
          C, /*IsAlignmentExpr=*/true,
          IntegerLiteral::Create(C, Align,
                                 C.getIntTypeForBitwidth(32, /*Signed=*/0),
                                 SourceLocation()),
          {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
    }
    GlobalizedRD->addDecl(Field);
    MappedDeclsFields.try_emplace(VD, Field);
  }
  GlobalizedRD->completeDefinition();
  return GlobalizedRD;
}
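// A sketch of the resulting record, assuming a single escaped parallel-region
// variable 'int x' and BufSize == WarpSize (hypothetical example, not emitted
// verbatim):
//   struct _globalized_locals_ty {
//     int x[32] __attribute__((aligned(128))); // max(decl align, 128)
//   };
// Variables in EscapedDeclsForTeams keep their scalar type and natural
// alignment instead of the [BufSize] array form.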
/// Get the list of variables that can escape their declaration context.
class CheckVarsEscapingDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
  CodeGenFunction &CGF;
  llvm::SetVector<const ValueDecl *> EscapedDecls;
  llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
  llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
  RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  bool AllEscaped = false;
  bool IsForCombinedParallelRegion = false;

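  // Illustrative example of what this visitor is looking for: in
  //   int x;
  //   #pragma omp parallel
  //   { foo(&x); }
  // 'x' is captured by reference and its address escapes into the parallel
  // region, so it must be globalized, whereas a local whose address never
  // leaves the sequential part can stay in thread-private storage.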
  void markAsEscaped(const ValueDecl *VD) {
    // Do not globalize declare target variables.
    if (!isa<VarDecl>(VD) ||
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
      return;
    VD = cast<ValueDecl>(VD->getCanonicalDecl());
    // Use user-specified allocation.
    if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
      return;
    // Variables captured by value must be globalized.
    if (auto *CSI = CGF.CapturedStmtInfo) {
      if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
        // Check if need to capture the variable that was already captured by
        // value in the outer region.
        if (!IsForCombinedParallelRegion) {
          if (!FD->hasAttrs())
            return;
          const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
          if (!Attr)
            return;
          if (((Attr->getCaptureKind() != OMPC_map) &&
               !isOpenMPPrivate(Attr->getCaptureKind())) ||
              ((Attr->getCaptureKind() == OMPC_map) &&
               !FD->getType()->isAnyPointerType()))
            return;
        }
        if (!FD->getType()->isReferenceType()) {
          assert(!VD->getType()->isVariablyModifiedType() &&
                 "Parameter captured by value with variably modified type");
          EscapedParameters.insert(VD);
        } else if (!IsForCombinedParallelRegion) {
          return;
        }
      }
    }
    if ((!CGF.CapturedStmtInfo ||
         (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
        VD->getType()->isReferenceType())
      // Do not globalize variables with reference type.
      return;
    if (VD->getType()->isVariablyModifiedType())
      EscapedVariableLengthDecls.insert(VD);
    else
      EscapedDecls.insert(VD);
  }

  void VisitValueDecl(const ValueDecl *VD) {
    if (VD->getType()->isLValueReferenceType())
      markAsEscaped(VD);
    if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = VD->getType()->isLValueReferenceType();
        Visit(VarD->getInit());
        AllEscaped = SavedAllEscaped;
      }
    }
  }
  void VisitOpenMPCapturedStmt(const CapturedStmt *S,
                               ArrayRef<OMPClause *> Clauses,
                               bool IsCombinedParallelRegion) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
        if (IsCombinedParallelRegion) {
          // Check if the variable is privatized in the combined construct and
          // those private copies must be shared in the inner parallel
          // directive.
          IsForCombinedParallelRegion = false;
          for (const OMPClause *C : Clauses) {
            if (!isOpenMPPrivate(C->getClauseKind()) ||
                C->getClauseKind() == OMPC_reduction ||
                C->getClauseKind() == OMPC_linear ||
                C->getClauseKind() == OMPC_private)
              continue;
            ArrayRef<const Expr *> Vars;
            if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
              Vars = PC->getVarRefs();
            else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
              Vars = PC->getVarRefs();
            else
              llvm_unreachable("Unexpected clause.");
            for (const auto *E : Vars) {
              const Decl *D =
                  cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
              if (D == VD->getCanonicalDecl()) {
                IsForCombinedParallelRegion = true;
                break;
              }
            }
            if (IsForCombinedParallelRegion)
              break;
          }
        }
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
        IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
      }
    }
  }

  void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
    assert(!GlobalizedRD &&
           "Record for globalized variables is built already.");
    ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
    if (IsInTTDRegion)
      EscapedDeclsForTeams = EscapedDecls.getArrayRef();
    else
      EscapedDeclsForParallel = EscapedDecls.getArrayRef();
    GlobalizedRD = ::buildRecordForGlobalizedVars(
        CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
        MappedDeclsFields, WarpSize);
  }

public:
  CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
                               ArrayRef<const ValueDecl *> TeamsReductions)
      : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
  }
  virtual ~CheckVarsEscapingDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    for (const Decl *D : S->decls())
      if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
        VisitValueDecl(VD);
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
    if (!D)
      return;
    if (!D->hasAssociatedStmt())
      return;
    if (const auto *S =
            dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
      // Do not analyze directives that do not actually require capturing,
      // like `omp for` or `omp simd` directives.
      llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
      getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
      if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
        VisitStmt(S->getCapturedStmt());
        return;
      }
      VisitOpenMPCapturedStmt(
          S, D->clauses(),
          CaptureRegions.back() == OMPD_parallel &&
              isOpenMPDistributeDirective(D->getDirectiveKind()));
    }
  }
  void VisitCapturedStmt(const CapturedStmt *S) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
      }
    }
  }
  void VisitLambdaExpr(const LambdaExpr *E) {
    if (!E)
      return;
    for (const LambdaCapture &C : E->captures()) {
      if (C.capturesVariable()) {
        if (C.getCaptureKind() == LCK_ByRef) {
          const ValueDecl *VD = C.getCapturedVar();
          markAsEscaped(VD);
          if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
            VisitValueDecl(VD);
        }
      }
    }
  }
  void VisitBlockExpr(const BlockExpr *E) {
    if (!E)
      return;
    for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
      if (C.isByRef()) {
        const VarDecl *VD = C.getVariable();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
          VisitValueDecl(VD);
      }
    }
  }
  void VisitCallExpr(const CallExpr *E) {
    if (!E)
      return;
    for (const Expr *Arg : E->arguments()) {
      if (!Arg)
        continue;
      if (Arg->isLValue()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = true;
        Visit(Arg);
        AllEscaped = SavedAllEscaped;
      } else {
        Visit(Arg);
      }
    }
    Visit(E->getCallee());
  }
  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (!E)
      return;
    const ValueDecl *VD = E->getDecl();
    if (AllEscaped)
      markAsEscaped(VD);
    if (isa<OMPCapturedExprDecl>(VD))
      VisitValueDecl(VD);
    else if (const auto *VarD = dyn_cast<VarDecl>(VD))
      if (VarD->isInitCapture())
        VisitValueDecl(VD);
  }
  void VisitUnaryOperator(const UnaryOperator *E) {
    if (!E)
      return;
    if (E->getOpcode() == UO_AddrOf) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
    if (!E)
      return;
    if (E->getCastKind() == CK_ArrayToPointerDecay) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  void VisitExpr(const Expr *E) {
    if (!E)
      return;
    bool SavedAllEscaped = AllEscaped;
    if (!E->isLValue())
      AllEscaped = false;
    for (const Stmt *Child : E->children())
      if (Child)
        Visit(Child);
    AllEscaped = SavedAllEscaped;
  }
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }
  /// Returns the record that handles all the escaped local variables and is
  /// used instead of their original storage.
  const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
    if (!GlobalizedRD)
      buildRecordForGlobalizedVars(IsInTTDRegion);
    return GlobalizedRD;
  }

  /// Returns the field in the globalized record for the escaped variable.
  const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
    assert(GlobalizedRD &&
           "Record for globalized variables must be generated already.");
    auto I = MappedDeclsFields.find(VD);
    if (I == MappedDeclsFields.end())
      return nullptr;
    return I->getSecond();
  }

  /// Returns the list of the escaped local variables/parameters.
  ArrayRef<const ValueDecl *> getEscapedDecls() const {
    return EscapedDecls.getArrayRef();
  }

  /// Checks if the escaped local variable is actually a parameter passed by
  /// value.
  const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
    return EscapedParameters;
  }

  /// Returns the list of the escaped variables with the variably modified
  /// types.
  ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
    return EscapedVariableLengthDecls.getArrayRef();
  }
};
} // anonymous namespace
/// Get the GPU warp size.
static llvm::Value *getNVPTXWarpSize(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize),
      "nvptx_warp_size");
}

/// Get the id of the current thread on the GPU.
static llvm::Value *getNVPTXThreadID(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x),
      "nvptx_tid");
}

/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  return Bld.CreateAShr(getNVPTXThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}

/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  return Bld.CreateAnd(getNVPTXThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}
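// Worked example for the two helpers above (assuming the usual warp size of
// 32): thread id 37 yields warp id 37 >> 5 == 1 and lane id 37 & 31 == 5.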
/// Get the maximum number of threads in a block of the GPU.
static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x),
      "nvptx_num_threads");
}
/// Get the value of the thread_limit clause in the teams directive.
/// For the 'generic' execution mode, the runtime encodes thread_limit in
/// the launch parameters, always starting thread_limit+warpSize threads per
/// CTA. The threads in the last warp are reserved for master execution.
/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
                                   bool IsInSPMDExecutionMode = false) {
  CGBuilderTy &Bld = CGF.Builder;
  return IsInSPMDExecutionMode
             ? getNVPTXNumThreads(CGF)
             : Bld.CreateNUWSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
                                "thread_limit");
}

/// Get the thread id of the OMP master thread.
/// The master thread id is the first thread (lane) of the last warp in the
/// GPU block. Warp size is assumed to be some power of 2.
/// Thread id is 0 indexed.
/// E.g: If NumThreads is 33, master id is 32.
///      If NumThreads is 64, master id is 32.
///      If NumThreads is 1024, master id is 992.
static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  llvm::Value *NumThreads = getNVPTXNumThreads(CGF);

  // We assume that the warp size is a power of 2.
  llvm::Value *Mask = Bld.CreateNUWSub(getNVPTXWarpSize(CGF), Bld.getInt32(1));

  return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
                       Bld.CreateNot(Mask), "master_tid");
}
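// A minimal worked example of the arithmetic above, assuming warp size 32:
// master_tid = (NumThreads - 1) & ~(32 - 1), which reproduces the ids quoted
// in the comment: 33 -> 32, 64 -> 32, 1024 -> 992.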
CGOpenMPRuntimeNVPTX::WorkerFunctionState::WorkerFunctionState(
    CodeGenModule &CGM, SourceLocation Loc)
    : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
      Loc(Loc) {
  createWorkerFunction(CGM);
}

void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction(
    CodeGenModule &CGM) {
  // Create a worker function with no arguments.

  WorkerFn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      /*placeholder=*/"_worker", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
  WorkerFn->setDoesNotRecurse();
}
CGOpenMPRuntimeNVPTX::ExecutionMode
CGOpenMPRuntimeNVPTX::getExecutionMode() const {
  return CurrentExecutionMode;
}

static CGOpenMPRuntimeNVPTX::DataSharingMode
getDataSharingMode(CodeGenModule &CGM) {
  return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeNVPTX::CUDA
                                          : CGOpenMPRuntimeNVPTX::Generic;
}
/// Check for inner (nested) SPMD construct, if any.
static bool hasNestedSPMDDirective(ASTContext &Ctx,
                                   const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind))
        return true;
      if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind))
            return true;
        }
      }
      return false;
    case OMPD_target_teams:
      return isOpenMPParallelDirective(DKind);
    case OMPD_target_simd:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}
static bool supportsSPMDExecutionMode(ASTContext &Ctx,
                                      const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    return hasNestedSPMDDirective(Ctx, D);
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}
/// Check if the given loop-based directive has no 'ordered' clause and either
/// no schedule clause at all or static scheduling.
static bool hasStaticScheduling(const OMPExecutableDirective &D) {
  assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
         isOpenMPLoopDirective(D.getDirectiveKind()) &&
         "Expected loop-based directive.");
  return !D.hasClausesOfKind<OMPOrderedClause>() &&
         (!D.hasClausesOfKind<OMPScheduleClause>() ||
          llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
                       [](const OMPScheduleClause *C) {
                         return C->getScheduleKind() == OMPC_SCHEDULE_static;
                       }));
}
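// For instance, '#pragma omp parallel for schedule(static)' and a plain
// '#pragma omp parallel for' (no schedule clause) both satisfy this
// predicate, while 'schedule(dynamic)' or an 'ordered' clause does not.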
/// Check for inner (nested) lightweight runtime construct, if any.
static bool hasNestedLightweightDirective(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);

  if (const auto *NestedDir =
          dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      } else if (DKind == OMPD_teams) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
          if (DKind == OMPD_parallel) {
            Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true);
            if (!Body)
              return false;
            ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
            if (const auto *NND =
                    dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
              DKind = NND->getDirectiveKind();
              if (isOpenMPWorksharingDirective(DKind) &&
                  isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
                return true;
            }
          }
        }
      }
      return false;
    case OMPD_target_teams:
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
        return true;
      if (DKind == OMPD_parallel) {
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
        if (const auto *NND =
                dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      }
      return false;
    case OMPD_target_parallel:
      if (DKind == OMPD_simd)
        return true;
      return isOpenMPWorksharingDirective(DKind) &&
             isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
    case OMPD_target_teams_distribute:
    case OMPD_target_simd:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_master:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive.");
    }
  }

  return false;
}
/// Checks if the construct supports lightweight runtime. It must be an SPMD
/// construct with an inner loop-based construct that uses static scheduling.
static bool supportsLightweightRuntime(ASTContext &Ctx,
                                       const OMPExecutableDirective &D) {
  if (!supportsSPMDExecutionMode(Ctx, D))
    return false;
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
  case OMPD_target_parallel:
    return hasNestedLightweightDirective(Ctx, D);
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    // (Last|First)-privates must be shared in parallel region.
    return hasStaticScheduling(D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute_simd:
    return true;
  case OMPD_target_teams_distribute:
    return false;
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_master:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_allocate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_depobj:
  case OMPD_scan:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_variant:
  case OMPD_begin_declare_variant:
  case OMPD_end_declare_variant:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_master_taskloop:
  case OMPD_master_taskloop_simd:
  case OMPD_parallel_master_taskloop:
  case OMPD_parallel_master_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
  default:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}
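// Example (illustrative): '#pragma omp target teams distribute parallel for
// schedule(static)' can be lowered with the lightweight runtime, while the
// same construct with schedule(dynamic) falls back to the full runtime.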
void CGOpenMPRuntimeNVPTX::emitNonSPMDKernel(const OMPExecutableDirective &D,
                                             StringRef ParentName,
                                             llvm::Function *&OutlinedFn,
                                             llvm::Constant *&OutlinedFnID,
                                             bool IsOffloadEntry,
                                             const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
  EntryFunctionState EST;
  WorkerFunctionState WST(CGM, D.getBeginLoc());
  Work.clear();
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
    CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
                         CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
        : EST(EST), WST(WST) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
      RT.emitNonSPMDEntryHeader(CGF, EST, WST);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitNonSPMDEntryFooter(CGF, EST);
    }
  } Action(EST, WST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  // Reserve place for the globalized memory.
  GlobalizedRecords.emplace_back();
  if (!KernelStaticGlobalized) {
    KernelStaticGlobalized = new llvm::GlobalVariable(
        CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
        llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
  }
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;

  // Now change the name of the worker function to correspond to this target
  // region's entry function.
  WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));

  // Create the worker function.
  emitWorkerFunction(WST);
}
// Setup NVPTX threads for master-worker OpenMP scheme.
void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST,
                                                  WorkerFunctionState &WST) {
  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
  llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
  llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::Value *IsWorker =
      Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF));
  Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);

  CGF.EmitBlock(WorkerBB);
  emitCall(CGF, WST.Loc, WST.WorkerFn);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(MasterCheckBB);
  llvm::Value *IsMaster =
      Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
  Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);

  CGF.EmitBlock(MasterBB);
  IsInTargetMasterThreadRegion = true;
  // SEQUENTIAL (MASTER) REGION START
  // First action in sequential region:
  // Initialize the state of the OpenMP runtime library on the GPU.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {getThreadLimit(CGF),
                         Bld.getInt16(/*RequiresOMPRuntime=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);

  // For data sharing, we need to initialize the stack.
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(
          OMPRTL_NVPTX__kmpc_data_sharing_init_stack));

  emitGenericVarsProlog(CGF, WST.Loc);
}
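// Entry CFG sketch for the code emitted above: threads whose id is below
// getThreadLimit() branch to .worker and run the worker loop; the single
// thread whose id equals getMasterThreadID() executes the sequential .master
// region; the remaining threads of the last warp fall through to .exit.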
void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST) {
  IsInTargetMasterThreadRegion = false;
  if (!CGF.HaveInsertPoint())
    return;

  emitGenericVarsEpilog(CGF);

  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
  CGF.EmitBranch(TerminateBB);

  CGF.EmitBlock(TerminateBB);
  // Signal termination condition.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), Args);
  // Barrier to terminate worker threads.
  syncCTAThreads(CGF);
  // Master thread jumps to exit point.
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}
void CGOpenMPRuntimeNVPTX::emitSPMDKernel(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry,
                                          const RegionCodeGenTy &CodeGen) {
  ExecutionRuntimeModesRAII ModeRAII(
      CurrentExecutionMode, RequiresFullRuntime,
      CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
          !supportsLightweightRuntime(CGM.getContext(), D));
  EntryFunctionState EST;

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeNVPTX &RT;
    CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
    const OMPExecutableDirective &D;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
                         CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
                         const OMPExecutableDirective &D)
        : RT(RT), EST(EST), D(D) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitSPMDEntryHeader(CGF, EST, D);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitSPMDEntryFooter(CGF, EST);
    }
  } Action(*this, EST, D);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  // Reserve place for the globalized memory.
  GlobalizedRecords.emplace_back();
  if (!KernelStaticGlobalized) {
    KernelStaticGlobalized = new llvm::GlobalVariable(
        CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
        llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
  }
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}
void CGOpenMPRuntimeNVPTX::emitSPMDEntryHeader(
    CodeGenFunction &CGF, EntryFunctionState &EST,
    const OMPExecutableDirective &D) {
  CGBuilderTy &Bld = CGF.Builder;

  // Setup BBs in entry function.
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
                         /*RequiresOMPRuntime=*/
                         Bld.getInt16(RequiresFullRuntime ? 1 : 0),
                         /*RequiresDataSharing=*/Bld.getInt16(0)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);

  if (RequiresFullRuntime) {
    // For data sharing, we need to initialize the stack.
    CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
        OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd));
  }

  CGF.EmitBranch(ExecuteBB);

  CGF.EmitBlock(ExecuteBB);

  IsInTargetMasterThreadRegion = true;
}
void CGOpenMPRuntimeNVPTX::emitSPMDEntryFooter(CodeGenFunction &CGF,
                                               EntryFunctionState &EST) {
  IsInTargetMasterThreadRegion = false;
  if (!CGF.HaveInsertPoint())
    return;

  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
  CGF.EmitBranch(OMPDeInitBB);

  CGF.EmitBlock(OMPDeInitBB);
  // DeInitialize the OMP state in the runtime; called by all active threads.
  llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
                         CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(
          OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2), Args);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}
// Create a unique global variable to indicate the execution mode of this target
// region. The execution mode is either 'generic', or 'spmd' depending on the
// target directive. This variable is picked up by the offload library to setup
// the device appropriately before kernel launch. If the execution mode is
// 'generic', the runtime reserves one warp for the master, otherwise, all
// warps participate in parallel work.
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     bool Mode) {
  auto *GVMode =
      new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
                               llvm::GlobalValue::WeakAnyLinkage,
                               llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
                               Twine(Name, "_exec_mode"));
  CGM.addCompilerUsedGlobal(GVMode);
}
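// The emitted marker looks roughly like this (hypothetical kernel name):
//   @__omp_offloading_fancy_kernel_exec_mode = weak constant i8 0
// where 0 denotes SPMD mode and 1 denotes generic mode, matching the
// 'Mode ? 0 : 1' initializer above.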
void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
  ASTContext &Ctx = CGM.getContext();

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
                    WST.Loc, WST.Loc);
  emitWorkerLoop(CGF, WST);
  CGF.FinishFunction();
}
void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
                                          WorkerFunctionState &WST) {
  //
  // The workers enter this loop and wait for parallel work from the master.
  // When the master encounters a parallel region it sets up the work + variable
  // arguments, and wakes up the workers. The workers first check to see if
  // they are required for the parallel region, i.e., within the # of requested
  // parallel threads. The activated workers load the variable arguments and
  // execute the parallel work.
  //

  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
  llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
  llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");

  CGF.EmitBranch(AwaitBB);

  // Workers wait for work from master.
  CGF.EmitBlock(AwaitBB);
  // Wait for parallel work.
  syncCTAThreads(CGF);

  Address WorkFn =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
  Address ExecStatus =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
  CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
  CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));

  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {WorkFn.getPointer()};
  llvm::Value *Ret = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
  Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);

  // On termination condition (workid == 0), exit loop.
  llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
  llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
  Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);

  // Activate requested workers.
  CGF.EmitBlock(SelectWorkersBB);
  llvm::Value *IsActive =
      Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
  Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);

  // Signal start of parallel region.
  CGF.EmitBlock(ExecuteBB);
  // Skip initialization.
  setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);

  // Process work items: outlined parallel functions.
  for (llvm::Function *W : Work) {
    // Try to match this outlined function.
    llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);

    llvm::Value *WorkFnMatch =
        Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");

    llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
    llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
    Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);

    // Execute this outlined function.
    CGF.EmitBlock(ExecuteFNBB);

    // Insert call to work function via shared wrapper. The shared
    // wrapper takes two arguments:
    //   - the parallelism level;
    //   - the thread ID;
    emitCall(CGF, WST.Loc, W,
             {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});

    // Go to end of parallel region.
    CGF.EmitBranch(TerminateBB);

    CGF.EmitBlock(CheckNextBB);
  }
  // Default case: call to outlined function through pointer if the target
  // region makes a declare target call that may contain an orphaned parallel
  // directive.
  auto *ParallelFnTy =
      llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
                              /*isVarArg=*/false);
  llvm::Value *WorkFnCast =
      Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
  // Insert call to work function via shared wrapper. The shared
  // wrapper takes two arguments:
  //   - the parallelism level;
  //   - the thread ID;
  emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
           {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
  // Go to end of parallel region.
  CGF.EmitBranch(TerminateBB);

  // Signal end of parallel region.
  CGF.EmitBlock(TerminateBB);
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
      llvm::None);
  CGF.EmitBranch(BarrierBB);

  // All active and inactive workers wait at a barrier after parallel region.
  CGF.EmitBlock(BarrierBB);
  // Barrier after parallel region.
  syncCTAThreads(CGF);
  CGF.EmitBranch(AwaitBB);

  // Exit target region.
  CGF.EmitBlock(ExitBB);
  // Skip initialization.
  clearLocThreadIdInsertPt(CGF);
}
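// Per-worker pseudocode for the loop emitted above (a hand-written sketch,
// not what is literally generated):
//   for (;;) {
//     syncCTAThreads();                          // .await.work
//     bool IsActive = __kmpc_kernel_parallel(&WorkFn);
//     if (!WorkFn)
//       break;                                   // master signalled exit
//     if (IsActive) {
//       WorkFn(/*ParallelLevel=*/0, ThreadID);   // matched or indirect call
//       __kmpc_kernel_end_parallel();
//     }
//     syncCTAThreads();                          // .barrier.parallel
//   }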
/// Returns specified OpenMP runtime function for the current OpenMP
/// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function.
/// \return Specified function.
llvm::FunctionCallee
CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
  llvm::FunctionCallee RTLFn = nullptr;
  switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
  case OMPRTL_NVPTX__kmpc_kernel_init: {
    // Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
    // RequiresOMPRuntime);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_deinit: {
    // Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
    llvm::Type *TypeParams[] = {CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
    break;
  }
  case OMPRTL_NVPTX__kmpc_spmd_kernel_init: {
    // Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
    // int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
    break;
  }
  case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2: {
    // Build void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
    llvm::Type *TypeParams[] = {CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit_v2");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
    /// Build void __kmpc_kernel_prepare_parallel(
    /// void *outlined_function);
    llvm::Type *TypeParams[] = {CGM.Int8PtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_parallel: {
    /// Build bool __kmpc_kernel_parallel(void **outlined_function);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy};
    llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
    auto *FnTy =
        llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_end_parallel: {
    /// Build void __kmpc_kernel_end_parallel();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_end_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_serialized_parallel: {
    // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_serialized_parallel: {
    // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_shuffle_int32: {
    // Build int32_t __kmpc_shuffle_int32(int32_t element,
    // int16_t lane_offset, int16_t warp_size);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int32");
    break;
  }
  case OMPRTL_NVPTX__kmpc_shuffle_int64: {
    // Build int64_t __kmpc_shuffle_int64(int64_t element,
    // int16_t lane_offset, int16_t warp_size);
    llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
    break;
  }
  case OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2: {
    // Build int32_t kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc,
    // kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void*
    // reduce_data, void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t
    // lane_id, int16_t lane_offset, int16_t Algorithm Version), void
    // (*kmp_InterWarpCopyFctPtr)(void* src, int warp_num));
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
                                CGM.Int32Ty,
                                CGM.Int32Ty,
                                CGM.SizeTy,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait_v2");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
    // Build __kmpc_end_reduce_nowait(kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
    break;
  }
  case OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2: {
    // Build int32_t __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
    // global_tid, void *global_buffer, int32_t num_of_records, void*
    // reduce_data,
    // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
    // lane_offset, int16_t shortCircuit),
    // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
    // (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
    // void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
    // void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
    // int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
    // *buffer, int idx, void *reduce_data));
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *GlobalListTypeParams[] = {CGM.VoidPtrTy, CGM.IntTy,
                                          CGM.VoidPtrTy};
    auto *GlobalListFnTy =
        llvm::FunctionType::get(CGM.VoidTy, GlobalListTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
                                CGM.Int32Ty,
                                CGM.VoidPtrTy,
                                CGM.Int32Ty,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo(),
                                GlobalListFnTy->getPointerTo(),
                                GlobalListFnTy->getPointerTo(),
                                GlobalListFnTy->getPointerTo(),
                                GlobalListFnTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait_v2");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
    /// Build void __kmpc_data_sharing_init_stack();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd: {
    /// Build void __kmpc_data_sharing_init_stack_spmd();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack: {
    // Build void *__kmpc_data_sharing_coalesced_push_stack(size_t size,
    // int16_t UseSharedMemory);
    llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_data_sharing_coalesced_push_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_push_stack: {
    // Build void *__kmpc_data_sharing_push_stack(size_t size, int16_t
    // UseSharedMemory);
    llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_data_sharing_push_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
    // Build void __kmpc_data_sharing_pop_stack(void *a);
    llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy,
                                      /*Name=*/"__kmpc_data_sharing_pop_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_begin_sharing_variables: {
    /// Build void __kmpc_begin_sharing_variables(void ***args,
    /// size_t n_args);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo(), CGM.SizeTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_begin_sharing_variables");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_sharing_variables: {
    /// Build void __kmpc_end_sharing_variables();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_sharing_variables");
    break;
  }
  case OMPRTL_NVPTX__kmpc_get_shared_variables: {
    /// Build void __kmpc_get_shared_variables(void ***GlobalArgs);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_shared_variables");
    break;
  }
  case OMPRTL_NVPTX__kmpc_parallel_level: {
    // Build uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int16Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_parallel_level");
    break;
  }
  case OMPRTL_NVPTX__kmpc_is_spmd_exec_mode: {
    // Build int8_t __kmpc_is_spmd_exec_mode();
    auto *FnTy = llvm::FunctionType::get(CGM.Int8Ty, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_is_spmd_exec_mode");
    break;
  }
  case OMPRTL_NVPTX__kmpc_get_team_static_memory: {
    // Build void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
    // const void *buf, size_t size, int16_t is_shared, const void **res);
    llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.VoidPtrTy, CGM.SizeTy,
                                CGM.Int16Ty, CGM.VoidPtrPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_team_static_memory");
    break;
  }
  case OMPRTL_NVPTX__kmpc_restore_team_static_memory: {
    // Build void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
    // int16_t is_shared);
    llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, "__kmpc_restore_team_static_memory");
    break;
  }
  case OMPRTL__kmpc_barrier: {
    // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn =
        CGM.CreateConvergentRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
    break;
  }
  case OMPRTL__kmpc_barrier_simple_spmd: {
    // Build void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateConvergentRuntimeFunction(
        FnTy, /*Name*/ "__kmpc_barrier_simple_spmd");
    break;
  }
  case OMPRTL_NVPTX__kmpc_warp_active_thread_mask: {
    // Build int32_t __kmpc_warp_active_thread_mask(void);
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, llvm::None, /*isVarArg=*/false);
    RTLFn = CGM.CreateConvergentRuntimeFunction(
        FnTy, "__kmpc_warp_active_thread_mask");
    break;
  }
  case OMPRTL_NVPTX__kmpc_syncwarp: {
    // Build void __kmpc_syncwarp(kmp_int32 Mask);
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, CGM.Int32Ty, /*isVarArg=*/false);
    RTLFn = CGM.CreateConvergentRuntimeFunction(FnTy, "__kmpc_syncwarp");
    break;
  }
  }
  return RTLFn;
}
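
// For reference, a sketch of the declarations the cases above produce, e.g.
// for the shuffle helpers (CreateRuntimeFunction only emits declarations;
// the device runtime library supplies the definitions at link time):
//
//   declare i32 @__kmpc_shuffle_int32(i32, i16, i16)
//   declare i64 @__kmpc_shuffle_int64(i64, i16, i16)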

void CGOpenMPRuntimeNVPTX::createOffloadEntry(llvm::Constant *ID,
                                              llvm::Constant *Addr,
                                              uint64_t Size, int32_t,
                                              llvm::GlobalValue::LinkageTypes) {
  // TODO: Add support for global variables on the device after declare target
  // support.
  if (!isa<llvm::Function>(Addr))
    return;
  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Get the "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
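
// As a sketch, for a kernel function @foo the operand appended above reads:
//
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @foo, !"kernel", i32 1}
//
// which is how the NVPTX backend learns that @foo is a kernel entry point.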

void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  if (!IsOffloadEntry) // Nothing to do.
    return;

  assert(!ParentName.empty() && "Invalid target region parent name!");

  bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
  if (Mode)
    emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                   CodeGen);
  else
    emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                      CodeGen);

  setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}

namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
/// Enum for accessing the reserved_2 field of the ident_t struct.
enum ModeFlagsTy : unsigned {
  /// Bit set to 1 when in SPMD mode.
  KMP_IDENT_SPMD_MODE = 0x01,
  /// Bit set to 1 when a simplified runtime is used.
  KMP_IDENT_SIMPLE_RT_MODE = 0x02,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
};

/// Special Undefined mode: the combination of non-SPMD mode and the
/// simplified runtime.
static const ModeFlagsTy UndefinedMode =
    (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
} // anonymous namespace

unsigned CGOpenMPRuntimeNVPTX::getDefaultLocationReserved2Flags() const {
  switch (getExecutionMode()) {
  case EM_SPMD:
    if (requiresFullRuntime())
      return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
    return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
  case EM_NonSPMD:
    assert(requiresFullRuntime() && "Expected full runtime.");
    return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
  case EM_Unknown:
    return UndefinedMode;
  }
  llvm_unreachable("Unknown flags are requested.");
}
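
// A summary sketch of the reserved_2 encodings returned above:
//
//   EM_SPMD    + full runtime       -> 0x01 (SPMD_MODE)
//   EM_SPMD    + simplified runtime -> 0x03 (SPMD_MODE | SIMPLE_RT_MODE)
//   EM_NonSPMD + full runtime       -> 0x00
//   EM_Unknown                      -> 0x02 (UndefinedMode)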

CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM, "_", "$") {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    llvm_unreachable("OpenMP NVPTX can only handle device code.");
}

void CGOpenMPRuntimeNVPTX::emitProcBindClause(CodeGenFunction &CGF,
                                              ProcBindKind ProcBind,
                                              SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
    return;

  CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
}

void CGOpenMPRuntimeNVPTX::emitNumThreadsClause(CodeGenFunction &CGF,
                                                llvm::Value *NumThreads,
                                                SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
    return;

  CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
}

void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
                                              const Expr *NumTeams,
                                              const Expr *ThreadLimit,
                                              SourceLocation Loc) {}

llvm::Function *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    bool &IsInParallelRegion;
    bool PrevIsInParallelRegion;

  public:
    NVPTXPrePostActionTy(bool &IsInParallelRegion)
        : IsInParallelRegion(IsInParallelRegion) {}
    void Enter(CodeGenFunction &CGF) override {
      PrevIsInParallelRegion = IsInParallelRegion;
      IsInParallelRegion = true;
    }
    void Exit(CodeGenFunction &CGF) override {
      IsInParallelRegion = PrevIsInParallelRegion;
    }
  } Action(IsInParallelRegion);
  CodeGen.setAction(Action);
  bool PrevIsInTTDRegion = IsInTTDRegion;
  IsInTTDRegion = false;
  bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
  IsInTargetMasterThreadRegion = false;
  auto *OutlinedFun =
      cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
          D, ThreadIDVar, InnermostKind, CodeGen));
  if (CGM.getLangOpts().Optimize) {
    OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
    OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
    OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
  }
  IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
  IsInTTDRegion = PrevIsInTTDRegion;
  if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD &&
      !IsInParallelRegion) {
    llvm::Function *WrapperFun =
        createParallelDataSharingWrapper(OutlinedFun, D);
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
  }

  return OutlinedFun;
}

/// Get the list of lastprivate variables from the teams distribute ... or
/// teams {distribute ...} directives.
static void
getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                             llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  const OMPExecutableDirective *Dir = &D;
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
            Ctx,
            D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true))) {
      Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
      if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
        Dir = nullptr;
    }
  }
  if (!Dir)
    return;
  for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *E : C->getVarRefs())
      Vars.push_back(getPrivateItem(E));
  }
}

/// Get the list of reduction variables from the teams ... directives.
static void
getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                      llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *E : C->privates())
      Vars.push_back(getPrivateItem(E));
  }
}

llvm::Function *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  SourceLocation Loc = D.getBeginLoc();

  const RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  // Globalize team reductions variable unconditionally in all modes.
  if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
    getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
    getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
    if (!LastPrivatesReductions.empty()) {
      GlobalizedRD = ::buildRecordForGlobalizedVars(
          CGM.getContext(), llvm::None, LastPrivatesReductions,
          MappedDeclsFields, WarpSize);
    }
  } else if (!LastPrivatesReductions.empty()) {
    assert(!TeamAndReductions.first &&
           "Previous team declaration is not expected.");
    TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
    std::swap(TeamAndReductions.second, LastPrivatesReductions);
  }

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    SourceLocation &Loc;
    const RecordDecl *GlobalizedRD;
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields;

  public:
    NVPTXPrePostActionTy(
        SourceLocation &Loc, const RecordDecl *GlobalizedRD,
        llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
            &MappedDeclsFields)
        : Loc(Loc), GlobalizedRD(GlobalizedRD),
          MappedDeclsFields(MappedDeclsFields) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &Rt =
          static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
      if (GlobalizedRD) {
        auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
        I->getSecond().GlobalRecord = GlobalizedRD;
        I->getSecond().MappedParams =
            std::make_unique<CodeGenFunction::OMPMapVars>();
        DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
        for (const auto &Pair : MappedDeclsFields) {
          assert(Pair.getFirst()->isCanonicalDecl() &&
                 "Expected canonical declaration");
          Data.insert(std::make_pair(Pair.getFirst(),
                                     MappedVarData(Pair.getSecond(),
                                                   /*IsOnePerTeam=*/true)));
        }
      }
      Rt.emitGenericVarsProlog(CGF, Loc);
    }
    void Exit(CodeGenFunction &CGF) override {
      static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
          .emitGenericVarsEpilog(CGF);
    }
  } Action(Loc, GlobalizedRD, MappedDeclsFields);
  CodeGen.setAction(Action);
  llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
      D, ThreadIDVar, InnermostKind, CodeGen);
  if (CGM.getLangOpts().Optimize) {
    OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
    OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
    OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
  }

  return OutlinedFun;
}

void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
                                                 SourceLocation Loc,
                                                 bool WithSPMDCheck) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
      getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
    return;

  CGBuilderTy &Bld = CGF.Builder;

  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I == FunctionGlobalizedDecls.end())
    return;
  if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
    QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
    QualType SecGlobalRecTy;

    // Recover the pointer to this function's global record. The runtime will
    // handle the specifics of the allocation of the memory.
    // Use the actual memory size of the record, including the padding
    // required for alignment.
    unsigned Alignment =
        CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
    unsigned GlobalRecordSize =
        CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
    GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);

    llvm::PointerType *GlobalRecPtrTy =
        CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
    llvm::Value *GlobalRecCastAddr;
    llvm::Value *IsTTD = nullptr;
    if (!IsInTTDRegion &&
        (WithSPMDCheck ||
         getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
      llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
      llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
      if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
        llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
        llvm::Value *ThreadID = getThreadID(CGF, Loc);
        llvm::Value *PL = CGF.EmitRuntimeCall(
            createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
            {RTLoc, ThreadID});
        IsTTD = Bld.CreateIsNull(PL);
      }
      llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
      Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
      // There is no need to emit a line number for an unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(SPMDBB);
      Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
                               CharUnits::fromQuantity(Alignment));
      CGF.EmitBranch(ExitBB);
      // There is no need to emit a line number for an unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(NonSPMDBB);
      llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
      if (const RecordDecl *SecGlobalizedVarsRecord =
              I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
        SecGlobalRecTy =
            CGM.getContext().getRecordType(SecGlobalizedVarsRecord);

        // Recover the pointer to this function's global record. The runtime
        // will handle the specifics of the allocation of the memory.
        // Use the actual memory size of the record, including the padding
        // required for alignment.
        unsigned Alignment =
            CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
        unsigned GlobalRecordSize =
            CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
        GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
        Size = Bld.CreateSelect(
            IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
      }
      // TODO: allow the usage of shared memory to be controlled by
      // the user; for now, default to global.
      llvm::Value *GlobalRecordSizeArg[] = {
          Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
      llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(
              OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
          GlobalRecordSizeArg);
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
          GlobalRecValue, GlobalRecPtrTy);
      CGF.EmitBlock(ExitBB);
      auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
                                /*NumReservedValues=*/2, "_select_stack");
      Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
      Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
      GlobalRecCastAddr = Phi;
      I->getSecond().GlobalRecordAddr = Phi;
      I->getSecond().IsInSPMDModeFlag = IsSPMD;
    } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
      assert(GlobalizedRecords.back().Records.size() < 2 &&
             "Expected less than 2 globalized records: one for target and one "
             "for teams.");
      unsigned Offset = 0;
      for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
        QualType RDTy = CGM.getContext().getRecordType(RD);
        unsigned Alignment =
            CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
        unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
        Offset =
            llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
      }
      unsigned Alignment =
          CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
      Offset = llvm::alignTo(Offset, Alignment);
      GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
      ++GlobalizedRecords.back().RegionCounter;
      if (GlobalizedRecords.back().Records.size() == 1) {
        assert(KernelStaticGlobalized &&
               "Kernel static pointer must be initialized already.");
        auto *UseSharedMemory = new llvm::GlobalVariable(
            CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
            llvm::GlobalValue::InternalLinkage, nullptr,
            "_openmp_static_kernel$is_shared");
        UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
        QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
            /*DestWidth=*/16, /*Signed=*/0);
        llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
            Address(UseSharedMemory,
                    CGM.getContext().getTypeAlignInChars(Int16Ty)),
            /*Volatile=*/false, Int16Ty, Loc);
        auto *StaticGlobalized = new llvm::GlobalVariable(
            CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
            llvm::GlobalValue::CommonLinkage, nullptr);
        auto *RecSize = new llvm::GlobalVariable(
            CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
            llvm::GlobalValue::InternalLinkage, nullptr,
            "_openmp_static_kernel$size");
        RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
        llvm::Value *Ld = CGF.EmitLoadOfScalar(
            Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
            CGM.getContext().getSizeType(), Loc);
        llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
            KernelStaticGlobalized, CGM.VoidPtrPtrTy);
        llvm::Value *GlobalRecordSizeArg[] = {
            llvm::ConstantInt::get(
                CGM.Int16Ty,
                getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
            StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
        CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                                OMPRTL_NVPTX__kmpc_get_team_static_memory),
                            GlobalRecordSizeArg);
        GlobalizedRecords.back().Buffer = StaticGlobalized;
        GlobalizedRecords.back().RecSize = RecSize;
        GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
        GlobalizedRecords.back().Loc = Loc;
      }
      assert(KernelStaticGlobalized && "Global address must be set already.");
      Address FrameAddr = CGF.EmitLoadOfPointer(
          Address(KernelStaticGlobalized, CGM.getPointerAlign()),
          CGM.getContext()
              .getPointerType(CGM.getContext().VoidPtrTy)
              .castAs<PointerType>());
      llvm::Value *GlobalRecValue =
          Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
      I->getSecond().GlobalRecordAddr = GlobalRecValue;
      I->getSecond().IsInSPMDModeFlag = nullptr;
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
          GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
    } else {
      // TODO: allow the usage of shared memory to be controlled by
      // the user; for now, default to global.
      bool UseSharedMemory =
          IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
      llvm::Value *GlobalRecordSizeArg[] = {
          llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
          CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
      llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(
              IsInTTDRegion
                  ? OMPRTL_NVPTX__kmpc_data_sharing_push_stack
                  : OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
          GlobalRecordSizeArg);
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
          GlobalRecValue, GlobalRecPtrTy);
      I->getSecond().GlobalRecordAddr = GlobalRecValue;
      I->getSecond().IsInSPMDModeFlag = nullptr;
    }
    LValue Base =
        CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);

    // Emit the "global alloca" which is a GEP from the global declaration
    // record using the pointer returned by the runtime.
    LValue SecBase;
    decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
    if (IsTTD) {
      SecIt = I->getSecond().SecondaryLocalVarData->begin();
      llvm::PointerType *SecGlobalRecPtrTy =
          CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
      SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
          Bld.CreatePointerBitCastOrAddrSpaceCast(
              I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
          SecGlobalRecTy);
    }
    for (auto &Rec : I->getSecond().LocalVarData) {
      bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
      llvm::Value *ParValue;
      if (EscapedParam) {
        const auto *VD = cast<VarDecl>(Rec.first);
        LValue ParLVal =
            CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
        ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
      }
      LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
      // Emit VarAddr based on lane-id if required.
      QualType VarTy;
      if (Rec.second.IsOnePerTeam) {
        VarTy = Rec.second.FD->getType();
      } else {
        llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
            VarAddr.getAddress(CGF).getPointer(),
            {Bld.getInt32(0), getNVPTXLaneID(CGF)});
        VarTy =
            Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
        VarAddr = CGF.MakeAddrLValue(
            Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
            AlignmentSource::Decl);
      }
      Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
      if (!IsInTTDRegion &&
          (WithSPMDCheck ||
           getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
        assert(I->getSecond().IsInSPMDModeFlag &&
               "Expected unknown execution mode or required SPMD check.");
        if (IsTTD) {
          assert(SecIt->second.IsOnePerTeam &&
                 "Secondary glob data must be one per team.");
          LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
          VarAddr.setAddress(
              Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(CGF),
                                       VarAddr.getPointer(CGF)),
                      VarAddr.getAlignment()));
          Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
        }
        Address GlobalPtr = Rec.second.PrivateAddr;
        Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
        Rec.second.PrivateAddr = Address(
            Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
                             LocalAddr.getPointer(), GlobalPtr.getPointer()),
            LocalAddr.getAlignment());
      }
      if (EscapedParam) {
        const auto *VD = cast<VarDecl>(Rec.first);
        CGF.EmitStoreOfScalar(ParValue, VarAddr);
        I->getSecond().MappedParams->setVarAddr(CGF, VD,
                                                VarAddr.getAddress(CGF));
      }
      if (IsTTD)
        ++SecIt;
    }
  }
  for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
    // Recover the pointer to this function's global record. The runtime will
    // handle the specifics of the allocation of the memory.
    // Use the actual memory size of the record, including the padding
    // required for alignment.
    CGBuilderTy &Bld = CGF.Builder;
    llvm::Value *Size = CGF.getTypeSize(VD->getType());
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
    // Round the size up to a multiple of the declared alignment.
    Size = Bld.CreateNUWAdd(
        Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
    llvm::Value *AlignVal =
        llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
    Size = Bld.CreateUDiv(Size, AlignVal);
    Size = Bld.CreateNUWMul(Size, AlignVal);
    // TODO: allow the usage of shared memory to be controlled by
    // the user; for now, default to global.
    llvm::Value *GlobalRecordSizeArg[] = {
        Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
    llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(
            OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
        GlobalRecordSizeArg);
    llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
        GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
    LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
                                     CGM.getContext().getDeclAlign(VD),
                                     AlignmentSource::Decl);
    I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
                                            Base.getAddress(CGF));
    I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
  }
  I->getSecond().MappedParams->apply(CGF);
}
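
// In summary, the prolog above selects one of three storage strategies for
// the globalized record (a sketch of the decision tree, not a contract with
// the runtime):
//   1. A runtime SPMD check is required: branch on __kmpc_is_spmd_exec_mode()
//      and select between a null record (SPMD needs no globalization here)
//      and a coalesced data-sharing stack push.
//   2. Statically known target/teams region: carve the record out of the team
//      static memory returned by __kmpc_get_team_static_memory.
//   3. Otherwise: push the record on the data-sharing stack, preferring
//      shared memory when the record fits in SharedMemorySize.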

void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF,
                                                 bool WithSPMDCheck) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
      getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
    return;

  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I != FunctionGlobalizedDecls.end()) {
    I->getSecond().MappedParams->restore(CGF);
    if (!CGF.HaveInsertPoint())
      return;
    for (llvm::Value *Addr :
         llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
      CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
          Addr);
    }
    if (I->getSecond().GlobalRecordAddr) {
      if (!IsInTTDRegion &&
          (WithSPMDCheck ||
           getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
        CGBuilderTy &Bld = CGF.Builder;
        llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
        llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
        Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
        // There is no need to emit a line number for an unconditional branch.
        (void)ApplyDebugLocation::CreateEmpty(CGF);
        CGF.EmitBlock(NonSPMDBB);
        CGF.EmitRuntimeCall(
            createNVPTXRuntimeFunction(
                OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
            CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
        CGF.EmitBlock(ExitBB);
      } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
        assert(GlobalizedRecords.back().RegionCounter > 0 &&
               "region counter must be > 0.");
        --GlobalizedRecords.back().RegionCounter;
        // Emit the restore function only in the target region.
        if (GlobalizedRecords.back().RegionCounter == 0) {
          QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
              /*DestWidth=*/16, /*Signed=*/0);
          llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
              Address(GlobalizedRecords.back().UseSharedMemory,
                      CGM.getContext().getTypeAlignInChars(Int16Ty)),
              /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
          llvm::Value *Args[] = {
              llvm::ConstantInt::get(
                  CGM.Int16Ty,
                  getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
              IsInSharedMemory};
          CGF.EmitRuntimeCall(
              createNVPTXRuntimeFunction(
                  OMPRTL_NVPTX__kmpc_restore_team_static_memory),
              Args);
        }
      } else {
        CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                                OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
                            I->getSecond().GlobalRecordAddr);
      }
    }
  }
}

void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &D,
                                         SourceLocation Loc,
                                         llvm::Function *OutlinedFn,
                                         ArrayRef<llvm::Value *> CapturedVars) {
  if (!CGF.HaveInsertPoint())
    return;

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
  OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}
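
// A sketch of the call emitted above, assuming an outlined teams function
// named @__omp_outlined__ (the name is illustrative) with two captured
// values %a and %b:
//
//   %gtid = ... address holding the global thread id ...
//   %zero = alloca i32 ; initialized to 0
//   call void @__omp_outlined__(i32* %gtid, i32* %zero, <ty> %a, <ty> %b)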

void CGOpenMPRuntimeNVPTX::emitParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  if (!CGF.HaveInsertPoint())
    return;

  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
    emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
  else
    emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
}

void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);

  // Force inlining of this outlined function at its call site.
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // ThreadId for serialized parallels is 0.
  Address ThreadIDAddr = ZeroAddr;
  auto &&CodeGen = [this, Fn, CapturedVars, Loc, &ThreadIDAddr](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);

    Address ZeroAddr =
        CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                         /*Name=*/".bound.zero.addr");
    CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
    OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
    emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
  };
  auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
                                        PrePostActionTy &) {
    RegionCodeGenTy RCG(CodeGen);
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
    llvm::Value *ThreadID = getThreadID(CGF, Loc);
    llvm::Value *Args[] = {RTLoc, ThreadID};

    NVPTXActionTy Action(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
        Args,
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
        Args);
    RCG.setAction(Action);
    RCG(CGF);
  };

  auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
                                                  PrePostActionTy &Action) {
    CGBuilderTy &Bld = CGF.Builder;
    llvm::Function *WFn = WrapperFunctionsMap[Fn];
    assert(WFn && "Wrapper function does not exist!");
    llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);

    // Prepare for parallel region. Indicate the outlined function.
    llvm::Value *Args[] = {ID};
    CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
        Args);

    // Create a private scope that will globalize the arguments
    // passed from the outside of the target region.
    CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);

    // There's something to share.
    if (!CapturedVars.empty()) {
      // Prepare for parallel region. Indicate the outlined function.
      Address SharedArgs =
          CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
      llvm::Value *SharedArgsPtr = SharedArgs.getPointer();

      llvm::Value *DataSharingArgs[] = {
          SharedArgsPtr,
          llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
      CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                              OMPRTL_NVPTX__kmpc_begin_sharing_variables),
                          DataSharingArgs);

      // Store variable addresses in a list of references to pass to workers.
      unsigned Idx = 0;
      ASTContext &Ctx = CGF.getContext();
      Address SharedArgListAddress = CGF.EmitLoadOfPointer(
          SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
                          .castAs<PointerType>());
      for (llvm::Value *V : CapturedVars) {
        Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
        llvm::Value *PtrV;
        if (V->getType()->isIntegerTy())
          PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
        else
          PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
        CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
                              Ctx.getPointerType(Ctx.VoidPtrTy));
        ++Idx;
      }
    }

    // Activate workers. This barrier is used by the master to signal
    // work for the workers.
    syncCTAThreads(CGF);

    // OpenMP [2.5, Parallel Construct, p.49]
    // There is an implied barrier at the end of a parallel region. After the
    // end of a parallel region, only the master thread of the team resumes
    // execution of the enclosing task region.
    //
    // The master waits at this barrier until all workers are done.
    syncCTAThreads(CGF);

    if (!CapturedVars.empty())
      CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_sharing_variables));

    // Remember for post-processing in the worker loop.
    Work.emplace_back(WFn);
  };

  auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
                             CodeGenFunction &CGF, PrePostActionTy &Action) {
    if (IsInParallelRegion) {
      SeqGen(CGF, Action);
    } else if (IsInTargetMasterThreadRegion) {
      L0ParallelGen(CGF, Action);
    } else {
      // Check for master and then parallelism:
      // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
      //   Serialized execution.
      // } else {
      //   Worker call.
      // }
      CGBuilderTy &Bld = CGF.Builder;
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
      llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
      llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
      llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
      llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
      Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
      // There is no need to emit a line number for an unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(ParallelCheckBB);
      llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
      llvm::Value *ThreadID = getThreadID(CGF, Loc);
      llvm::Value *PL = CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
          {RTLoc, ThreadID});
      llvm::Value *Res = Bld.CreateIsNotNull(PL);
      Bld.CreateCondBr(Res, SeqBB, MasterBB);
      CGF.EmitBlock(SeqBB);
      SeqGen(CGF, Action);
      CGF.EmitBranch(ExitBB);
      // There is no need to emit a line number for an unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(MasterBB);
      L0ParallelGen(CGF, Action);
      CGF.EmitBranch(ExitBB);
      // There is no need to emit a line number for an unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      // Emit the continuation block for code after the if.
      CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
    }
  };

  if (IfCond) {
    emitIfClause(CGF, IfCond, LNParallelGen, SeqGen);
  } else {
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    RegionCodeGenTy ThenRCG(LNParallelGen);
    ThenRCG(CGF);
  }
}

void CGOpenMPRuntimeNVPTX::emitSPMDParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  // Just call the outlined function to execute the parallel region.
  // OutlinedFn(&GTid, &zero, CapturedStruct);
  //
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // ThreadId for serialized parallels is 0.
  Address ThreadIDAddr = ZeroAddr;
  auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, &ThreadIDAddr](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);

    Address ZeroAddr =
        CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                         /*Name=*/".bound.zero.addr");
    CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
    OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
    emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
  };
  auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
                                        PrePostActionTy &) {
    RegionCodeGenTy RCG(CodeGen);
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
    llvm::Value *ThreadID = getThreadID(CGF, Loc);
    llvm::Value *Args[] = {RTLoc, ThreadID};

    NVPTXActionTy Action(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
        Args,
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
        Args);
    RCG.setAction(Action);
    RCG(CGF);
  };

  if (IsInTargetMasterThreadRegion) {
    // In the worker we need to use the real thread id.
    ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
    RegionCodeGenTy RCG(CodeGen);
    RCG(CGF);
  } else {
    // If we are not in the target region, it is definitely L2 parallelism or
    // more, because in SPMD mode we always have an L1 parallel level, so we
    // don't need to check for orphaned directives.
    RegionCodeGenTy RCG(SeqGen);
    RCG(CGF);
  }
}

void CGOpenMPRuntimeNVPTX::syncCTAThreads(CodeGenFunction &CGF) {
  // Always emit simple barriers!
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
  // This function does not use parameters, so we can emit just default values.
  llvm::Value *Args[] = {
      llvm::ConstantPointerNull::get(
          cast<llvm::PointerType>(getIdentTyPointerTy())),
      llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
  llvm::CallInst *Call = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier_simple_spmd), Args);
  Call->setConvergent();
}
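
// A sketch of the barrier emitted above; both arguments are dummies because
// the simple SPMD barrier ignores its parameters:
//
//   call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
//
// The call is marked convergent so optimizations preserve its
// thread-execution preconditions.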

void CGOpenMPRuntimeNVPTX::emitBarrierCall(CodeGenFunction &CGF,
                                           SourceLocation Loc,
                                           OpenMPDirectiveKind Kind, bool,
                                           bool) {
  // Always emit simple barriers!
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_cancel_barrier(loc, thread_id);
  unsigned Flags = getDefaultFlagsForBarriers(Kind);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
                         getThreadID(CGF, Loc)};
  llvm::CallInst *Call = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier), Args);
  Call->setConvergent();
}

void CGOpenMPRuntimeNVPTX::emitCriticalRegion(
    CodeGenFunction &CGF, StringRef CriticalName,
    const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
    const Expr *Hint) {
  llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
  llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
  llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");

  // Get the mask of active threads in the warp.
  llvm::Value *Mask = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_warp_active_thread_mask));
  // Fetch the team-local id of the thread.
  llvm::Value *ThreadID = getNVPTXThreadID(CGF);

  // Get the width of the team.
  llvm::Value *TeamWidth = getNVPTXNumThreads(CGF);

  // Initialize the counter variable for the loop.
  QualType Int32Ty =
      CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
  Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
  LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
  CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
                        /*isInit=*/true);

  // Block checks whether the loop counter exceeds the upper bound.
  CGF.EmitBlock(LoopBB);
  llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
  llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
  CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);

  // Block tests which single thread should execute the region, and which
  // threads should go straight to the synchronisation point.
  CGF.EmitBlock(TestBB);
  CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
  llvm::Value *CmpThreadToCounter =
      CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
  CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);

  // Block emits the body of the critical region.
  CGF.EmitBlock(BodyBB);

  // Output the critical statement.
  CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
                                      Hint);

  // After the body surrounded by the critical region, the single executing
  // thread will jump to the synchronisation point.
  // Block waits for all threads in the current team to finish, then increments
  // the counter variable and returns to the loop.
  CGF.EmitBlock(SyncBB);
  // Reconverge active threads in the warp.
  (void)CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_syncwarp), Mask);

  llvm::Value *IncCounterVal =
      CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
  CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
  CGF.EmitBranch(LoopBB);

  // Block that is reached when all threads in the team complete the region.
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
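
// The blocks above serialize the critical body over the team; as a sketch:
//
//   mask = __kmpc_warp_active_thread_mask();
//   for (counter = 0; counter < team_width; ++counter) {
//     if (thread_id == counter)
//       <critical body>
//     __kmpc_syncwarp(mask);  // reconverge before the next iteration
//   }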

/// Cast value to the specified type.
static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
                                    QualType ValTy, QualType CastTy,
                                    SourceLocation Loc) {
  assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
         "Cast type must be sized.");
  assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
         "Val type must be sized.");
  llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
  if (ValTy == CastTy)
    return Val;
  if (CGF.getContext().getTypeSizeInChars(ValTy) ==
      CGF.getContext().getTypeSizeInChars(CastTy))
    return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
  if (CastTy->isIntegerType() && ValTy->isIntegerType())
    return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
                                     CastTy->hasSignedIntegerRepresentation());
  Address CastItem = CGF.CreateMemTemp(CastTy);
  Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
  CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy);
  return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc);
}
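
// For example (a sketch): a float passed as the 32-bit shuffle type takes the
// size-preserving bitcast path; an i16 widened to i32 takes the integer-cast
// path; anything else round-trips through the memory temporary, stored with
// its own type and reloaded as the destination type.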

/// This function creates calls to one of two shuffle functions to copy
/// variables between lanes in a warp.
static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
                                                 llvm::Value *Elem,
                                                 QualType ElemType,
                                                 llvm::Value *Offset,
                                                 SourceLocation Loc) {
  CodeGenModule &CGM = CGF.CGM;
  CGBuilderTy &Bld = CGF.Builder;
  CGOpenMPRuntimeNVPTX &RT =
      *(static_cast<CGOpenMPRuntimeNVPTX *>(&CGM.getOpenMPRuntime()));

  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
  assert(Size.getQuantity() <= 8 &&
         "Unsupported bitwidth in shuffle instruction.");

  OpenMPRTLFunctionNVPTX ShuffleFn = Size.getQuantity() <= 4
                                         ? OMPRTL_NVPTX__kmpc_shuffle_int32
                                         : OMPRTL_NVPTX__kmpc_shuffle_int64;

  // Cast all types to 32- or 64-bit values before calling shuffle routines.
  QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
      Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
  llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
  llvm::Value *WarpSize =
      Bld.CreateIntCast(getNVPTXWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);

  llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
      RT.createNVPTXRuntimeFunction(ShuffleFn), {ElemCast, Offset, WarpSize});

  return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
}
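
// Usage sketch: a double (8 bytes) selects __kmpc_shuffle_int64, so the value
// is cast to i64, shuffled in from the lane at the given offset, and cast
// back; any element of 4 bytes or fewer travels as an i32 through
// __kmpc_shuffle_int32.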

static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
                            Address DestAddr, QualType ElemType,
                            llvm::Value *Offset, SourceLocation Loc) {
  CGBuilderTy &Bld = CGF.Builder;

  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
  // Create the loop over the big sized data.
  // ptr = (void*)Elem;
  // ptrEnd = (void*) Elem + 1;
  // Step = 8;
  // while (ptr + Step < ptrEnd)
  //   shuffle((int64_t)*ptr);
  // Step = 4;
  // while (ptr + Step < ptrEnd)
  //   shuffle((int32_t)*ptr);
  // ...
  Address ElemPtr = DestAddr;
  Address Ptr = SrcAddr;
  Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
      Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
  for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
    if (Size < CharUnits::fromQuantity(IntSize))
      continue;
    QualType IntType = CGF.getContext().getIntTypeForBitwidth(
        CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
        /*Signed=*/1);
    llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
    Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
    ElemPtr =
        Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
    if (Size.getQuantity() / IntSize > 1) {
      llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
      llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
      llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
      CGF.EmitBlock(PreCondBB);
      llvm::PHINode *PhiSrc =
          Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
      PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
      llvm::PHINode *PhiDest =
          Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
      PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
      Ptr = Address(PhiSrc, Ptr.getAlignment());
      ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
      llvm::Value *PtrDiff = Bld.CreatePtrDiff(
          PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
                                   Ptr.getPointer(), CGF.VoidPtrTy));
      Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
                       ThenBB, ExitBB);
      CGF.EmitBlock(ThenBB);
      llvm::Value *Res = createRuntimeShuffleFunction(
          CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
          IntType, Offset, Loc);
      CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
      Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
      Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
      PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
      PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
      CGF.EmitBranch(PreCondBB);
      CGF.EmitBlock(ExitBB);
    } else {
      llvm::Value *Res = createRuntimeShuffleFunction(
          CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
          IntType, Offset, Loc);
      CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
      Ptr = Bld.CreateConstGEP(Ptr, 1);
      ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
    }
    Size = Size % IntSize;
  }
}
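
// Worked sketch: a 7-byte element is moved as one 4-byte, one 2-byte, and one
// 1-byte shuffle (7 % 4 = 3, 3 % 2 = 1, 1 % 1 = 0), while a 24-byte element
// takes the PHI-based loop above with three 8-byte shuffles.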

namespace {
enum CopyAction : unsigned {
  // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
  // the warp using shuffle instructions.
  RemoteLaneToThread,
  // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
  ThreadCopy,
  // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
  ThreadToScratchpad,
  // ScratchpadToThread: Copy from a scratchpad array in global memory
  // containing team-reduced data to a thread's stack.
  ScratchpadToThread,
};
} // namespace

struct CopyOptionsTy {
  llvm::Value *RemoteLaneOffset;
  llvm::Value *ScratchpadIndex;
  llvm::Value *ScratchpadWidth;
};

/// Emit instructions to copy a Reduce list, which contains partially
/// aggregated values, in the specified direction.
static void emitReductionListCopy(
    CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
    ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
    CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {

  CodeGenModule &CGM = CGF.CGM;
  ASTContext &C = CGM.getContext();
  CGBuilderTy &Bld = CGF.Builder;

  llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
  llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
  llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;

  // Iterate, element by element, through the source Reduce list, making
  // a copy and/or updating the corresponding destination element.
  unsigned Idx = 0;
  unsigned Size = Privates.size();
  for (const Expr *Private : Privates) {
    Address SrcElementAddr = Address::invalid();
    Address DestElementAddr = Address::invalid();
    Address DestElementPtrAddr = Address::invalid();
    // Should we shuffle in an element from a remote lane?
    bool ShuffleInElement = false;
    // Set to true to update the pointer in the dest Reduce list to a
    // newly created element.
    bool UpdateDestListPtr = false;
    // Increment the src or dest pointer to the scratchpad, for each
    // new element.
    bool IncrScratchpadSrc = false;
    bool IncrScratchpadDest = false;

    switch (Action) {
    case RemoteLaneToThread: {
      // Step 1.1: Get the address for the src element in the Reduce list.
      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
      SrcElementAddr = CGF.EmitLoadOfPointer(
          SrcElementPtrAddr,
          C.getPointerType(Private->getType())->castAs<PointerType>());

      // Step 1.2: Create a temporary to store the element in the destination
      // Reduce list.
      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
      DestElementAddr =
          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
      ShuffleInElement = true;
      UpdateDestListPtr = true;
      break;
    }
    case ThreadCopy: {
      // Step 1.1: Get the address for the src element in the Reduce list.
      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
      SrcElementAddr = CGF.EmitLoadOfPointer(
          SrcElementPtrAddr,
          C.getPointerType(Private->getType())->castAs<PointerType>());

      // Step 1.2: Get the address for the dest element. The destination
      // element has already been created on the thread's stack.
      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
      DestElementAddr = CGF.EmitLoadOfPointer(
          DestElementPtrAddr,
          C.getPointerType(Private->getType())->castAs<PointerType>());
      break;
    }
    case ThreadToScratchpad: {
      // Step 1.1: Get the address for the src element in the Reduce list.
      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
      SrcElementAddr = CGF.EmitLoadOfPointer(
          SrcElementPtrAddr,
          C.getPointerType(Private->getType())->castAs<PointerType>());

      // Step 1.2: Get the address for the dest element:
      // address = base + index * ElementSizeInChars.
      llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
      llvm::Value *CurrentOffset =
          Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
      llvm::Value *ScratchPadElemAbsolutePtrVal =
          Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
      ScratchPadElemAbsolutePtrVal =
          Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
      DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
                                C.getTypeAlignInChars(Private->getType()));
      IncrScratchpadDest = true;
      break;
    }
    case ScratchpadToThread: {
      // Step 1.1: Get the address for the src element in the scratchpad:
      // address = base + index * ElementSizeInChars.
      llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
      llvm::Value *CurrentOffset =
          Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
      llvm::Value *ScratchPadElemAbsolutePtrVal =
          Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
      ScratchPadElemAbsolutePtrVal =
          Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
      SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
                               C.getTypeAlignInChars(Private->getType()));
      IncrScratchpadSrc = true;

      // Step 1.2: Create a temporary to store the element in the destination
      // Reduce list.
      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
      DestElementAddr =
          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
      UpdateDestListPtr = true;
      break;
    }
    }

    // Regardless of the src and dest of the copy, we emit the load of the
    // src element, as this is required in all directions.
    SrcElementAddr = Bld.CreateElementBitCast(
        SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
    DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
                                               SrcElementAddr.getElementType());

    // Now that all active lanes have read the element in the
    // Reduce list, shuffle over the value from the remote lane.
    if (ShuffleInElement) {
      shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
                      RemoteLaneOffset, Private->getExprLoc());
    } else {
      switch (CGF.getEvaluationKind(Private->getType())) {
      case TEK_Scalar: {
        llvm::Value *Elem =
            CGF.EmitLoadOfScalar(SrcElementAddr, /*Volatile=*/false,
                                 Private->getType(), Private->getExprLoc());
        // Store the source element value to the dest element address.
        CGF.EmitStoreOfScalar(Elem, DestElementAddr, /*Volatile=*/false,
                              Private->getType());
        break;
      }
      case TEK_Complex: {
        CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
            CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
            Private->getExprLoc());
        CGF.EmitStoreOfComplex(
            Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
            /*isInit=*/false);
        break;
      }
      case TEK_Aggregate:
        CGF.EmitAggregateCopy(
            CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
            CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
            Private->getType(), AggValueSlot::DoesNotOverlap);
        break;
      }
    }
3141 // Step 3.1: Modify reference in dest Reduce list as needed.
3142 // Modifying the reference in Reduce list to point to the newly
3143 // created element. The element is live in the current function
3144 // scope and that of functions it invokes (i.e., reduce_function).
3145 // RemoteReduceData[i] = (void*)&RemoteElem
3146 if (UpdateDestListPtr) {
3147 CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
3148 DestElementAddr.getPointer(), CGF.VoidPtrTy),
3149 DestElementPtrAddr, /*Volatile=*/false,
3150 C.VoidPtrTy);
3151 }
3153 // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
3154 // address of the next element in scratchpad memory, unless we're currently
3155 // processing the last one. Memory alignment is also taken care of here.
3156 if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
3157 llvm::Value *ScratchpadBasePtr =
3158 IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
3159 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
3160 ScratchpadBasePtr = Bld.CreateNUWAdd(
3161 ScratchpadBasePtr,
3162 Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
3164 // Take care of global memory alignment for performance
3165 ScratchpadBasePtr = Bld.CreateNUWSub(
3166 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
3167 ScratchpadBasePtr = Bld.CreateUDiv(
3168 ScratchpadBasePtr,
3169 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
3170 ScratchpadBasePtr = Bld.CreateNUWAdd(
3171 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
3172 ScratchpadBasePtr = Bld.CreateNUWMul(
3173 ScratchpadBasePtr,
3174 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
3176 if (IncrScratchpadDest)
3177 DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
3178 else /* IncrScratchpadSrc = true */
3179 SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
3180 }
3182 ++Idx;
3183 }
3184 }
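// Note on the scratchpad alignment in emitReductionListCopy above (an
// illustrative aside, not emitted code): the NUWSub/UDiv/NUWAdd/NUWMul
// sequence rounds the base up to the next multiple of GlobalMemoryAlignment
// using integer arithmetic only:
//   base = ((base - 1) / GlobalMemoryAlignment + 1) * GlobalMemoryAlignment;
// e.g., with GlobalMemoryAlignment == 128: 1 -> 128, 128 -> 128, 129 -> 256.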
3186 /// This function emits a helper that gathers Reduce lists from the first
3187 /// lane of every active warp to lanes in the first warp.
3189 /// void inter_warp_copy_func(void* reduce_data, num_warps)
3190 /// shared smem[warp_size];
3191 /// For all data entries D in reduce_data:
3192 /// sync
3193 /// If (I am the first lane in each warp)
3194 /// Copy my local D to smem[warp_id]
3195 /// sync
3196 /// if (I am the first warp)
3197 /// Copy smem[thread_id] to my local D
3198 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
3199 ArrayRef<const Expr *> Privates,
3200 QualType ReductionArrayTy,
3201 SourceLocation Loc) {
3202 ASTContext &C = CGM.getContext();
3203 llvm::Module &M = CGM.getModule();
3205 // ReduceList: thread local Reduce list.
3206 // At the stage of the computation when this function is called, partially
3207 // aggregated values reside in the first lane of every active warp.
3208 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3209 C.VoidPtrTy, ImplicitParamDecl::Other);
3210 // NumWarps: number of warps active in the parallel region. This could
3211 // be smaller than 32 (max warps in a CTA) for partial block reduction.
3212 ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3213 C.getIntTypeForBitwidth(32, /* Signed */ true),
3214 ImplicitParamDecl::Other);
3215 FunctionArgList Args;
3216 Args.push_back(&ReduceListArg);
3217 Args.push_back(&NumWarpsArg);
3219 const CGFunctionInfo &CGFI =
3220 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3221 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
3222 llvm::GlobalValue::InternalLinkage,
3223 "_omp_reduction_inter_warp_copy_func", &M);
3224 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3225 Fn->setDoesNotRecurse();
3226 CodeGenFunction CGF(CGM);
3227 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3229 CGBuilderTy &Bld = CGF.Builder;
3231 // This array is used as a medium to transfer, one reduce element at a time,
3232 // the data from the first lane of every warp to lanes in the first warp
3233 // in order to perform the final step of a reduction in a parallel region
3234 // (reduction across warps). The array is placed in NVPTX __shared__ memory
3235 // for reduced latency, as well as to have a distinct copy for concurrently
3236 // executing target regions. The array is declared with common linkage so
3237 // as to be shared across compilation units.
3238 StringRef TransferMediumName =
3239 "__openmp_nvptx_data_transfer_temporary_storage";
3240 llvm::GlobalVariable *TransferMedium =
3241 M.getGlobalVariable(TransferMediumName);
3242 if (!TransferMedium) {
3243 auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
3244 unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
3245 TransferMedium = new llvm::GlobalVariable(
3246 M, Ty, /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
3247 llvm::Constant::getNullValue(Ty), TransferMediumName,
3248 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
3249 SharedAddressSpace);
3250 CGM.addCompilerUsedGlobal(TransferMedium);
3251 }
3253 // Get the CUDA thread id of the current OpenMP thread on the GPU.
3254 llvm::Value *ThreadID = getNVPTXThreadID(CGF);
3255 // nvptx_lane_id = nvptx_id % warpsize
3256 llvm::Value *LaneID = getNVPTXLaneID(CGF);
3257 // nvptx_warp_id = nvptx_id / warpsize
3258 llvm::Value *WarpID = getNVPTXWarpID(CGF);
3260 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3261 Address LocalReduceList(
3262 Bld.CreatePointerBitCastOrAddrSpaceCast(
3263 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3264 C.VoidPtrTy, Loc),
3265 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3266 CGF.getPointerAlign());
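// For intuition (hypothetical example, not from this file): for
// 'reduction(+:foo) reduction(*:bar)' the ReduceList argument points at an
// array of element pointers, roughly
//   void *ReduceList[2] = {&foo_private, &bar_private};
// and each entry below is copied through shared memory one CopyType-sized
// chunk at a time.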
3268 unsigned Idx = 0;
3269 for (const Expr *Private : Privates) {
3271 // Warp master copies reduce element to transfer medium in __shared__
3272 // memory.
3274 unsigned RealTySize =
3275 C.getTypeSizeInChars(Private->getType())
3276 .alignTo(C.getTypeAlignInChars(Private->getType()))
3277 .getQuantity();
3278 for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
3279 unsigned NumIters = RealTySize / TySize;
3280 if (NumIters == 0)
3281 continue;
3282 QualType CType = C.getIntTypeForBitwidth(
3283 C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
3284 llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
3285 CharUnits Align = CharUnits::fromQuantity(TySize);
3286 llvm::Value *Cnt = nullptr;
3287 Address CntAddr = Address::invalid();
3288 llvm::BasicBlock *PrecondBB = nullptr;
3289 llvm::BasicBlock *ExitBB = nullptr;
3290 if (NumIters > 1) {
3291 CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
3292 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
3293 /*Volatile=*/false, C.IntTy);
3294 PrecondBB = CGF.createBasicBlock("precond");
3295 ExitBB = CGF.createBasicBlock("exit");
3296 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
3297 // There is no need to emit line number for unconditional branch.
3298 (void)ApplyDebugLocation::CreateEmpty(CGF);
3299 CGF.EmitBlock(PrecondBB);
3300 Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
3301 llvm::Value *Cmp =
3302 Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
3303 Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
3304 CGF.EmitBlock(BodyBB);
3305 }
3306 // kmpc_barrier.
3307 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
3308 /*EmitChecks=*/false,
3309 /*ForceSimpleCall=*/true);
3310 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3311 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3312 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3314 // if (lane_id == 0)
3315 llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
3316 Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
3317 CGF.EmitBlock(ThenBB);
3319 // Reduce element = LocalReduceList[i]
3320 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3321 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3322 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3323 // elemptr = ((CopyType*)(elemptrptr)) + I
3324 Address ElemPtr = Address(ElemPtrPtr, Align);
3325 ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
3326 if (NumIters > 1) {
3327 ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
3328 ElemPtr.getAlignment());
3329 }
3331 // Get pointer to location in transfer medium.
3332 // MediumPtr = &medium[warp_id]
3333 llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
3334 TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
3335 Address MediumPtr(MediumPtrVal, Align);
3336 // Casting to actual data type.
3337 // MediumPtr = (CopyType*)MediumPtrAddr;
3338 MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
3340 // elem = *elemptr
3341 // *MediumPtr = elem
3342 llvm::Value *Elem =
3343 CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false, CType, Loc);
3344 // Store the source element value to the dest element address.
3345 CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType);
3347 Bld.CreateBr(MergeBB);
3349 CGF.EmitBlock(ElseBB);
3350 Bld.CreateBr(MergeBB);
3352 CGF.EmitBlock(MergeBB);
3355 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
3356 /*EmitChecks=*/false,
3357 /*ForceSimpleCall=*/true);
3360 // Warp 0 copies reduce element from transfer medium.
3362 llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
3363 llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
3364 llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
3366 Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
3367 llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
3368 AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
3370 // Up to 32 threads in warp 0 are active.
3371 llvm::Value *IsActiveThread =
3372 Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
3373 Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
3375 CGF.EmitBlock(W0ThenBB);
3377 // SrcMediumPtr = &medium[tid]
3378 llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
3379 TransferMedium,
3380 {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
3381 Address SrcMediumPtr(SrcMediumPtrVal, Align);
3382 // SrcMediumVal = *SrcMediumPtr;
3383 SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
3385 // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
3386 Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3387 llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
3388 TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
3389 Address TargetElemPtr = Address(TargetElemPtrVal, Align);
3390 TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
3391 if (NumIters > 1) {
3392 TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
3393 TargetElemPtr.getAlignment());
3394 }
3396 // *TargetElemPtr = SrcMediumVal;
3397 llvm::Value *SrcMediumValue =
3398 CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
3399 CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
3400 CType);
3401 Bld.CreateBr(W0MergeBB);
3403 CGF.EmitBlock(W0ElseBB);
3404 Bld.CreateBr(W0MergeBB);
3406 CGF.EmitBlock(W0MergeBB);
3408 if (NumIters > 1) {
3409 Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
3410 CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
3411 CGF.EmitBranch(PrecondBB);
3412 (void)ApplyDebugLocation::CreateEmpty(CGF);
3413 CGF.EmitBlock(ExitBB);
3414 }
3415 RealTySize %= TySize;
3416 }
3417 ++Idx;
3418 }
3420 CGF.FinishFunction();
3421 return Fn;
3422 }
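// A condensed CUDA-style sketch of the helper emitted above (illustrative
// names only; assumes WarpSize == 32 and a 4-byte chunk):
//
//   __shared__ int32_t medium[32];
//   void inter_warp_copy(void **reduce_list, int num_warps) {
//     for (each 32-bit chunk E of each entry in reduce_list) {
//       __syncthreads();
//       if (lane_id == 0)           // warp master publishes its chunk
//         medium[warp_id] = E;
//       __syncthreads();
//       if (thread_id < num_warps)  // first warp gathers one value per warp
//         E = medium[thread_id];
//     }
//   }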
3424 /// Emit a helper that reduces data across two OpenMP threads (lanes)
3425 /// in the same warp. It uses shuffle instructions to copy over data from
3426 /// a remote lane's stack. The reduction algorithm performed is specified
3427 /// by the fourth parameter.
3429 /// Algorithm Versions.
3430 /// Full Warp Reduce (argument value 0):
3431 /// This algorithm assumes that all 32 lanes are active and gathers
3432 /// data from these 32 lanes, producing a single resultant value.
3433 /// Contiguous Partial Warp Reduce (argument value 1):
3434 /// This algorithm assumes that only a *contiguous* subset of lanes
3435 /// are active. This happens for the last warp in a parallel region
3436 /// when the user-specified num_threads is not an integer multiple of
3437 /// 32. This contiguous subset always starts with the zeroth lane.
3438 /// Partial Warp Reduce (argument value 2):
3439 /// This algorithm gathers data from any number of lanes at any position.
3440 /// All reduced values are stored in the lowest possible lane. The set
3441 /// of problems every algorithm addresses is a super set of those
3442 /// addressable by algorithms with a lower version number. Overhead
3443 /// increases as algorithm version increases.
3445 /// Terminology
3446 /// Reduce element:
3447 /// Reduce element refers to the individual data field with primitive
3448 /// data types to be combined and reduced across threads.
3449 /// Reduce list:
3450 /// Reduce list refers to a collection of local, thread-private
3451 /// reduce elements.
3452 /// Remote Reduce list:
3453 /// Remote Reduce list refers to a collection of remote (relative to
3454 /// the current thread) reduce elements.
3456 /// We distinguish between three states of threads that are important to
3457 /// the implementation of this function.
3458 /// Alive threads:
3459 /// Threads in a warp executing the SIMT instruction, as distinguished from
3460 /// threads that are inactive due to divergent control flow.
3461 /// Active threads:
3462 /// The minimal set of threads that has to be alive upon entry to this
3463 /// function. The computation is correct iff active threads are alive.
3464 /// Some threads are alive but they are not active because they do not
3465 /// contribute to the computation in any useful manner. Turning them off
3466 /// may introduce control flow overheads without any tangible benefits.
3467 /// Effective threads:
3468 /// In order to comply with the argument requirements of the shuffle
3469 /// function, we must keep all lanes holding data alive. But at most
3470 /// half of them perform value aggregation; we refer to this half of
3471 /// threads as effective. The other half is simply handing off their
3472 /// data.
3474 /// Procedure
3475 /// Value shuffle:
3476 /// In this step active threads transfer data from higher lane positions
3477 /// in the warp to lower lane positions, creating Remote Reduce list.
3478 /// Value aggregation:
3479 /// In this step, effective threads combine their thread local Reduce list
3480 /// with Remote Reduce list and store the result in the thread local
3481 /// Reduce list.
3482 /// Value copy:
3483 /// In this step, we deal with the assumption made by algorithm 2
3484 /// (i.e. contiguity assumption). When we have an odd number of lanes
3485 /// active, say 2k+1, only k threads will be effective and therefore k
3486 /// new values will be produced. However, the Reduce list owned by the
3487 /// (2k+1)th thread is ignored in the value aggregation. Therefore
3488 /// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
3489 /// that the contiguity assumption still holds.
3490 static llvm::Function *emitShuffleAndReduceFunction(
3491 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3492 QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
3493 ASTContext &C = CGM.getContext();
3495 // Thread local Reduce list used to host the values of data to be reduced.
3496 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3497 C.VoidPtrTy, ImplicitParamDecl::Other);
3498 // Current lane id; could be logical.
3499 ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
3500 ImplicitParamDecl::Other);
3501 // Offset of the remote source lane relative to the current lane.
3502 ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3503 C.ShortTy, ImplicitParamDecl::Other);
3504 // Algorithm version. This is expected to be known at compile time.
3505 ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3506 C.ShortTy, ImplicitParamDecl::Other);
3507 FunctionArgList Args;
3508 Args.push_back(&ReduceListArg);
3509 Args.push_back(&LaneIDArg);
3510 Args.push_back(&RemoteLaneOffsetArg);
3511 Args.push_back(&AlgoVerArg);
3513 const CGFunctionInfo &CGFI =
3514 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3515 auto *Fn = llvm::Function::Create(
3516 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3517 "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
3518 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3519 Fn->setDoesNotRecurse();
3520 if (CGM.getLangOpts().Optimize) {
3521 Fn->removeFnAttr(llvm::Attribute::NoInline);
3522 Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
3523 Fn->addFnAttr(llvm::Attribute::AlwaysInline);
3524 }
3526 CodeGenFunction CGF(CGM);
3527 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3529 CGBuilderTy &Bld = CGF.Builder;
3531 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3532 Address LocalReduceList(
3533 Bld.CreatePointerBitCastOrAddrSpaceCast(
3534 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3535 C.VoidPtrTy, SourceLocation()),
3536 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3537 CGF.getPointerAlign());
3539 Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
3540 llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
3541 AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3543 Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
3544 llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
3545 AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3547 Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
3548 llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
3549 AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3551 // Create a local thread-private variable to host the Reduce list
3552 // from a remote lane.
3553 Address RemoteReduceList =
3554 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
3556 // This loop iterates through the list of reduce elements and copies,
3557 // element by element, from a remote lane in the warp to RemoteReduceList,
3558 // hosted on the thread's stack.
3559 emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
3560 LocalReduceList, RemoteReduceList,
3561 {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
3562 /*ScratchpadIndex=*/nullptr,
3563 /*ScratchpadWidth=*/nullptr});
3565 // The actions to be performed on the Remote Reduce list are dependent
3566 // on the algorithm version.
3568 // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
3569 // LaneId % 2 == 0 && Offset > 0):
3570 // do the reduction value aggregation
3572 // The thread local variable Reduce list is mutated in place to host the
3573 // reduced data, which is the aggregated value produced from local and
3574 // remote lanes.
3576 // Note that AlgoVer is expected to be a constant integer known at compile
3577 // time.
3578 // When AlgoVer==0, the first conjunction evaluates to true, making
3579 // the entire predicate true during compile time.
3580 // When AlgoVer==1, the second conjunction has only the second part to be
3581 // evaluated during runtime. Other conjunctions evaluate to false
3582 // during compile time.
3583 // When AlgoVer==2, the third conjunction has only the second part to be
3584 // evaluated during runtime. Other conjunctions evaluate to false
3585 // during compile time.
3586 llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
3588 llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3589 llvm::Value *CondAlgo1 = Bld.CreateAnd(
3590 Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
3592 llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
3593 llvm::Value *CondAlgo2 = Bld.CreateAnd(
3594 Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
3595 CondAlgo2 = Bld.CreateAnd(
3596 CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
3598 llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
3599 CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
3601 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3602 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3603 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3604 Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
3606 CGF.EmitBlock(ThenBB);
3607 // reduce_function(LocalReduceList, RemoteReduceList)
3608 llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3609 LocalReduceList.getPointer(), CGF.VoidPtrTy);
3610 llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3611 RemoteReduceList.getPointer(), CGF.VoidPtrTy);
3612 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3613 CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
3614 Bld.CreateBr(MergeBB);
3616 CGF.EmitBlock(ElseBB);
3617 Bld.CreateBr(MergeBB);
3619 CGF.EmitBlock(MergeBB);
3621 // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
3622 // Reduce list.
3623 Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3624 llvm::Value *CondCopy = Bld.CreateAnd(
3625 Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
3627 llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
3628 llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
3629 llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
3630 Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
3632 CGF.EmitBlock(CpyThenBB);
3633 emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
3634 RemoteReduceList, LocalReduceList);
3635 Bld.CreateBr(CpyMergeBB);
3637 CGF.EmitBlock(CpyElseBB);
3638 Bld.CreateBr(CpyMergeBB);
3640 CGF.EmitBlock(CpyMergeBB);
3642 CGF.FinishFunction();
3644 return Fn;
3645 }
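// The emitted control flow corresponds to this sketch (illustrative C-like
// code, not the actual generated IR):
//
//   void shuffle_and_reduce(void *reduce_list, short lane_id,
//                           short offset, short algo_ver) {
//     // Step 1: gather the remote lane's Reduce list via shuffles.
//     void *remote_list = copy_from_lane(lane_id + offset, reduce_list);
//     // Step 2: aggregate if this lane is effective for the algorithm.
//     if (algo_ver == 0 ||                                   // full warp
//         (algo_ver == 1 && lane_id < offset) ||             // contiguous
//         (algo_ver == 2 && lane_id % 2 == 0 && offset > 0)) // dispersed
//       reduce_function(reduce_list, remote_list);
//     // Step 3: for algorithm 1, copy the remote list over the local one
//     // so the contiguity assumption keeps holding.
//     if (algo_ver == 1 && lane_id >= offset)
//       copy(remote_list, /*into=*/reduce_list);
//   }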
3646 /// This function emits a helper that copies all the reduction variables from
3647 /// the team into the provided global buffer for the reduction variables.
3649 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
3650 /// For all data entries D in reduce_data:
3651 /// Copy local D to buffer.D[Idx]
3652 static llvm::Value *emitListToGlobalCopyFunction(
3653 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3654 QualType ReductionArrayTy, SourceLocation Loc,
3655 const RecordDecl *TeamReductionRec,
3656 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3657 &VarFieldMap) {
3658 ASTContext &C = CGM.getContext();
3660 // Buffer: global reduction buffer.
3661 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3662 C.VoidPtrTy, ImplicitParamDecl::Other);
3663 // Idx: index of the buffer.
3664 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3665 ImplicitParamDecl::Other);
3666 // ReduceList: thread local Reduce list.
3667 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3668 C.VoidPtrTy, ImplicitParamDecl::Other);
3669 FunctionArgList Args;
3670 Args.push_back(&BufferArg);
3671 Args.push_back(&IdxArg);
3672 Args.push_back(&ReduceListArg);
3674 const CGFunctionInfo &CGFI =
3675 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3676 auto *Fn = llvm::Function::Create(
3677 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3678 "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
3679 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3680 Fn->setDoesNotRecurse();
3681 CodeGenFunction CGF(CGM);
3682 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3684 CGBuilderTy &Bld = CGF.Builder;
3686 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3687 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3688 Address LocalReduceList(
3689 Bld.CreatePointerBitCastOrAddrSpaceCast(
3690 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3691 C.VoidPtrTy, Loc),
3692 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3693 CGF.getPointerAlign());
3694 QualType StaticTy = C.getRecordType(TeamReductionRec);
3695 llvm::Type *LLVMReductionsBufferTy =
3696 CGM.getTypes().ConvertTypeForMem(StaticTy);
3697 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3698 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3699 LLVMReductionsBufferTy->getPointerTo());
3700 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3701 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3702 /*Volatile=*/false, C.IntTy,
3703 Loc)};
3704 unsigned Idx = 0;
3705 for (const Expr *Private : Privates) {
3706 // Reduce element = LocalReduceList[i]
3707 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3708 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3709 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3710 // elemptr = ((CopyType*)(elemptrptr)) + I
3711 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3712 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3713 Address ElemPtr =
3714 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3715 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3716 // Global = Buffer.VD[Idx];
3717 const FieldDecl *FD = VarFieldMap.lookup(VD);
3718 LValue GlobLVal = CGF.EmitLValueForField(
3719 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3720 llvm::Value *BufferPtr =
3721 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3722 GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3723 switch (CGF.getEvaluationKind(Private->getType())) {
3724 case TEK_Scalar: {
3725 llvm::Value *V = CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false,
3726 Private->getType(), Loc);
3727 CGF.EmitStoreOfScalar(V, GlobLVal);
3728 break;
3729 }
3730 case TEK_Complex: {
3731 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
3732 CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
3733 CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
3734 break;
3735 }
3736 case TEK_Aggregate:
3737 CGF.EmitAggregateCopy(GlobLVal,
3738 CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3739 Private->getType(), AggValueSlot::DoesNotOverlap);
3740 break;
3741 }
3742 ++Idx;
3743 }
3745 CGF.FinishFunction();
3746 return Fn;
3747 }
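// Buffer layout sketch for the running 'foo'/'bar' example (an assumption
// about the record built by buildRecordForGlobalizedVars, which gives each
// variable one slot per buffer record, with OpenMPCUDAReductionBufNum
// records):
//   struct reduction_buffer { double foo[BufNum]; double bar[BufNum]; };
// The helper above then stores each private value into buffer.<var>[Idx].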
3749 /// This function emits a helper that reduces all the reduction variables from
3750 /// the team into the provided global buffer for the reduction variables.
3752 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
3753 /// void *GlobPtrs[];
3754 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
3755 /// ...
3756 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
3757 /// reduce_function(GlobPtrs, reduce_data);
3758 static llvm::Value *emitListToGlobalReduceFunction(
3759 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3760 QualType ReductionArrayTy, SourceLocation Loc,
3761 const RecordDecl *TeamReductionRec,
3762 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3763 &VarFieldMap,
3764 llvm::Function *ReduceFn) {
3765 ASTContext &C = CGM.getContext();
3767 // Buffer: global reduction buffer.
3768 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3769 C.VoidPtrTy, ImplicitParamDecl::Other);
3770 // Idx: index of the buffer.
3771 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3772 ImplicitParamDecl::Other);
3773 // ReduceList: thread local Reduce list.
3774 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3775 C.VoidPtrTy, ImplicitParamDecl::Other);
3776 FunctionArgList Args;
3777 Args.push_back(&BufferArg);
3778 Args.push_back(&IdxArg);
3779 Args.push_back(&ReduceListArg);
3781 const CGFunctionInfo &CGFI =
3782 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3783 auto *Fn = llvm::Function::Create(
3784 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3785 "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
3786 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3787 Fn->setDoesNotRecurse();
3788 CodeGenFunction CGF(CGM);
3789 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3791 CGBuilderTy &Bld = CGF.Builder;
3793 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3794 QualType StaticTy = C.getRecordType(TeamReductionRec);
3795 llvm::Type *LLVMReductionsBufferTy =
3796 CGM.getTypes().ConvertTypeForMem(StaticTy);
3797 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3798 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3799 LLVMReductionsBufferTy->getPointerTo());
3801 // 1. Build a list of reduction variables.
3802 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3803 Address ReductionList =
3804 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3805 auto IPriv = Privates.begin();
3806 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3807 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3808 /*Volatile=*/false, C.IntTy,
3809 Loc)};
3810 unsigned Idx = 0;
3811 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3812 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3813 // Global = Buffer.VD[Idx];
3814 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3815 const FieldDecl *FD = VarFieldMap.lookup(VD);
3816 LValue GlobLVal = CGF.EmitLValueForField(
3817 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3818 llvm::Value *BufferPtr =
3819 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3820 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3821 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3822 if ((*IPriv)->getType()->isVariablyModifiedType()) {
3823 // Store array size.
3824 ++Idx;
3825 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3826 llvm::Value *Size = CGF.Builder.CreateIntCast(
3827 CGF.getVLASize(
3828 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3829 .NumElts,
3830 CGF.SizeTy, /*isSigned=*/false);
3831 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3832 Elem);
3833 }
3834 }
3836 // Call reduce_function(GlobalReduceList, ReduceList)
3837 llvm::Value *GlobalReduceList =
3838 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
3839 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3840 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
3841 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
3842 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3843 CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
3844 CGF.FinishFunction();
3845 return Fn;
3846 }
3848 /// This function emits a helper that copies all the reduction variables from
3849 /// the provided global buffer back into the team's reduction variables.
3851 /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
3852 /// For all data entries D in reduce_data:
3853 /// Copy buffer.D[Idx] to local D;
3854 static llvm::Value *emitGlobalToListCopyFunction(
3855 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3856 QualType ReductionArrayTy, SourceLocation Loc,
3857 const RecordDecl *TeamReductionRec,
3858 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3859 &VarFieldMap) {
3862 // Buffer: global reduction buffer.
3863 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3864 C.VoidPtrTy, ImplicitParamDecl::Other);
3865 // Idx: index of the buffer.
3866 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3867 ImplicitParamDecl::Other);
3868 // ReduceList: thread local Reduce list.
3869 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3870 C.VoidPtrTy, ImplicitParamDecl::Other);
3871 FunctionArgList Args;
3872 Args.push_back(&BufferArg);
3873 Args.push_back(&IdxArg);
3874 Args.push_back(&ReduceListArg);
3876 const CGFunctionInfo &CGFI =
3877 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3878 auto *Fn = llvm::Function::Create(
3879 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3880 "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
3881 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3882 Fn->setDoesNotRecurse();
3883 CodeGenFunction CGF(CGM);
3884 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3886 CGBuilderTy &Bld = CGF.Builder;
3888 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3889 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3890 Address LocalReduceList(
3891 Bld.CreatePointerBitCastOrAddrSpaceCast(
3892 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3893 C.VoidPtrTy, Loc),
3894 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3895 CGF.getPointerAlign());
3896 QualType StaticTy = C.getRecordType(TeamReductionRec);
3897 llvm::Type *LLVMReductionsBufferTy =
3898 CGM.getTypes().ConvertTypeForMem(StaticTy);
3899 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3900 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3901 LLVMReductionsBufferTy->getPointerTo());
3903 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3904 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3905 /*Volatile=*/false, C.IntTy,
3906 Loc)};
3907 unsigned Idx = 0;
3908 for (const Expr *Private : Privates) {
3909 // Reduce element = LocalReduceList[i]
3910 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3911 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3912 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3913 // elemptr = ((CopyType*)(elemptrptr)) + I
3914 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3915 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3916 Address ElemPtr =
3917 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3918 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3919 // Global = Buffer.VD[Idx];
3920 const FieldDecl *FD = VarFieldMap.lookup(VD);
3921 LValue GlobLVal = CGF.EmitLValueForField(
3922 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3923 llvm::Value *BufferPtr =
3924 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3925 GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3926 switch (CGF.getEvaluationKind(Private->getType())) {
3927 case TEK_Scalar: {
3928 llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
3929 CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType());
3930 break;
3931 }
3932 case TEK_Complex: {
3933 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
3934 CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3935 /*isInit=*/false);
3936 break;
3937 }
3938 case TEK_Aggregate:
3939 CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3940 GlobLVal, Private->getType(),
3941 AggValueSlot::DoesNotOverlap);
3942 break;
3943 }
3944 ++Idx;
3945 }
3947 CGF.FinishFunction();
3948 return Fn;
3949 }
3951 /// This function emits a helper that reduces all the reduction variables from
3952 /// the team into the provided global buffer for the reduction variables.
3954 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
3955 /// void *GlobPtrs[];
3956 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
3958 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
3959 /// reduce_function(reduce_data, GlobPtrs);
3960 static llvm::Value *emitGlobalToListReduceFunction(
3961 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3962 QualType ReductionArrayTy, SourceLocation Loc,
3963 const RecordDecl *TeamReductionRec,
3964 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3965 &VarFieldMap,
3966 llvm::Function *ReduceFn) {
3967 ASTContext &C = CGM.getContext();
3969 // Buffer: global reduction buffer.
3970 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3971 C.VoidPtrTy, ImplicitParamDecl::Other);
3972 // Idx: index of the buffer.
3973 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3974 ImplicitParamDecl::Other);
3975 // ReduceList: thread local Reduce list.
3976 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3977 C.VoidPtrTy, ImplicitParamDecl::Other);
3978 FunctionArgList Args;
3979 Args.push_back(&BufferArg);
3980 Args.push_back(&IdxArg);
3981 Args.push_back(&ReduceListArg);
3983 const CGFunctionInfo &CGFI =
3984 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3985 auto *Fn = llvm::Function::Create(
3986 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3987 "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
3988 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3989 Fn->setDoesNotRecurse();
3990 CodeGenFunction CGF(CGM);
3991 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3993 CGBuilderTy &Bld = CGF.Builder;
3995 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3996 QualType StaticTy = C.getRecordType(TeamReductionRec);
3997 llvm::Type *LLVMReductionsBufferTy =
3998 CGM.getTypes().ConvertTypeForMem(StaticTy);
3999 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
4000 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
4001 LLVMReductionsBufferTy->getPointerTo());
4003 // 1. Build a list of reduction variables.
4004 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
4005 Address ReductionList =
4006 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
4007 auto IPriv = Privates.begin();
4008 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
4009 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
4010 /*Volatile=*/false, C.IntTy,
4011 Loc)};
4012 unsigned Idx = 0;
4013 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
4014 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4015 // Global = Buffer.VD[Idx];
4016 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
4017 const FieldDecl *FD = VarFieldMap.lookup(VD);
4018 LValue GlobLVal = CGF.EmitLValueForField(
4019 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
4020 llvm::Value *BufferPtr =
4021 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
4022 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
4023 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
4024 if ((*IPriv)->getType()->isVariablyModifiedType()) {
4025 // Store array size.
4026 ++Idx;
4027 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4028 llvm::Value *Size = CGF.Builder.CreateIntCast(
4029 CGF.getVLASize(
4030 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
4031 .NumElts,
4032 CGF.SizeTy, /*isSigned=*/false);
4033 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
4034 Elem);
4035 }
4036 }
4038 // Call reduce_function(ReduceList, GlobalReduceList)
4039 llvm::Value *GlobalReduceList =
4040 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
4041 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
4042 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
4043 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
4044 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
4045 CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
4046 CGF.FinishFunction();
4047 return Fn;
4048 }
4051 /// Design of OpenMP reductions on the GPU
4053 /// Consider a typical OpenMP program with one or more reduction
4054 /// clauses:
4056 /// float foo;
4057 /// double bar;
4058 /// #pragma omp target teams distribute parallel for \
4059 /// reduction(+:foo) reduction(*:bar)
4060 /// for (int i = 0; i < N; i++) {
4061 /// foo += A[i]; bar *= B[i];
4062 /// }
4064 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
4065 /// all teams. In our OpenMP implementation on the NVPTX device an
4066 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
4067 /// within a team are mapped to CUDA threads within a threadblock.
4068 /// Our goal is to efficiently aggregate values across all OpenMP
4069 /// threads such that:
4071 /// - the compiler and runtime are logically concise, and
4072 /// - the reduction is performed efficiently in a hierarchical
4073 /// manner as follows: within OpenMP threads in the same warp,
4074 /// across warps in a threadblock, and finally across teams on
4075 /// the NVPTX device.
4077 /// Introduction to Decoupling
4079 /// We would like to decouple the compiler and the runtime so that the
4080 /// latter is ignorant of the reduction variables (number, data types)
4081 /// and the reduction operators. This allows a simpler interface
4082 /// and implementation while still attaining good performance.
4084 /// Pseudocode for the aforementioned OpenMP program generated by the
4085 /// compiler is as follows:
4087 /// 1. Create private copies of reduction variables on each OpenMP
4088 /// thread: 'foo_private', 'bar_private'
4089 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
4090 /// to it and writes the result in 'foo_private' and 'bar_private'
4091 /// respectively.
4092 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
4093 /// and store the result on the team master:
4095 /// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
4096 /// reduceData, shuffleReduceFn, interWarpCpyFn)
4099 /// struct ReduceData {
4100 /// double *foo;
4101 /// double *bar;
4102 /// } reduceData;
4103 /// reduceData.foo = &foo_private
4104 /// reduceData.bar = &bar_private
4106 /// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
4107 /// auxiliary functions generated by the compiler that operate on
4108 /// variables of type 'ReduceData'. They help the runtime perform
4109 /// algorithmic steps in a data-agnostic manner.
4111 /// 'shuffleReduceFn' is a pointer to a function that reduces data
4112 /// of type 'ReduceData' across two OpenMP threads (lanes) in the
4113 /// same warp. It takes the following arguments as input:
4115 /// a. variable of type 'ReduceData' on the calling lane,
4116 /// b. its lane_id,
4117 /// c. an offset relative to the current lane_id to generate a
4118 /// remote_lane_id. The remote lane contains the second
4119 /// variable of type 'ReduceData' that is to be reduced.
4120 /// d. an algorithm version parameter determining which reduction
4121 /// algorithm to use.
4123 /// 'shuffleReduceFn' retrieves data from the remote lane using
4124 /// efficient GPU shuffle intrinsics and reduces, using the
4125 /// algorithm specified by the 4th parameter, the two operands
4126 /// element-wise. The result is written to the first operand.
4128 /// Different reduction algorithms are implemented in different
4129 /// runtime functions, all calling 'shuffleReduceFn' to perform
4130 /// the essential reduction step. Therefore, based on the 4th
4131 /// parameter, this function behaves slightly differently to
4132 /// cooperate with the runtime to ensure correctness under
4133 /// different circumstances.
4135 /// 'InterWarpCpyFn' is a pointer to a function that transfers
4136 /// reduced variables across warps. It tunnels, through CUDA
4137 /// shared memory, the thread-private data of type 'ReduceData'
4138 /// from lane 0 of each warp to a lane in the first warp.
4139 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
4140 /// The last team writes the global reduced value to memory.
4142 /// ret = __kmpc_nvptx_teams_reduce_nowait(...,
4143 /// reduceData, shuffleReduceFn, interWarpCpyFn,
4144 /// scratchpadCopyFn, loadAndReduceFn)
4146 /// 'scratchpadCopyFn' is a helper that stores reduced
4147 /// data from the team master to a scratchpad array in
4148 /// global memory.
4150 /// 'loadAndReduceFn' is a helper that loads data from
4151 /// the scratchpad array and reduces it with the input
4152 /// operand.
4154 /// These compiler generated functions hide address
4155 /// calculation and alignment information from the runtime.
4157 /// The team master of the last team stores the reduced
4158 /// result to the globals in memory.
4159 /// foo += reduceData.foo; bar *= reduceData.bar
4162 /// Warp Reduction Algorithms
4164 /// On the warp level, we have three algorithms implemented in the
4165 /// OpenMP runtime depending on the number of active lanes:
4167 /// Full Warp Reduction
4169 /// The reduce algorithm within a warp where all lanes are active
4170 /// is implemented in the runtime as follows:
4172 /// full_warp_reduce(void *reduce_data,
4173 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
4174 /// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
4175 /// ShuffleReduceFn(reduce_data, 0, offset, 0);
4176 /// }
4178 /// The algorithm completes in log(2, WARPSIZE) steps.
4180 /// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
4181 /// not used; we therefore save instructions by not retrieving lane_id
4182 /// from the corresponding special registers. The 4th parameter, which
4183 /// represents the version of the algorithm being used, is set to 0 to
4184 /// signify full warp reduction.
4186 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4188 /// #reduce_elem refers to an element in the local lane's data structure
4189 /// #remote_elem is retrieved from a remote lane
4190 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4191 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
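/// For a single 32-bit element and WARPSIZE == 32 this corresponds to the
/// familiar CUDA down-shuffle loop (an illustrative sketch, not the code
/// the runtime actually contains):
///   for (int offset = 16; offset > 0; offset /= 2)
///     val += __shfl_down_sync(0xffffffff, val, offset);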
4193 /// Contiguous Partial Warp Reduction
4195 /// This reduce algorithm is used within a warp where only the first
4196 /// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
4197 /// number of OpenMP threads in a parallel region is not a multiple of
4198 /// WARPSIZE. The algorithm is implemented in the runtime as follows:
4201 /// contiguous_partial_reduce(void *reduce_data,
4202 /// kmp_ShuffleReductFctPtr ShuffleReduceFn,
4203 /// int size, int lane_id) {
4204 /// int curr_size;
4205 /// int offset;
4206 /// curr_size = size;
4207 /// offset = curr_size/2;
4208 /// while (offset>0) {
4209 /// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
4210 /// curr_size = (curr_size+1)/2;
4211 /// offset = curr_size/2;
4212 /// }
4213 /// }
4215 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4217 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4218 /// if (lane_id < offset)
4219 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
4220 /// else
4221 /// reduce_elem = remote_elem
4223 /// This algorithm assumes that the data to be reduced are located in a
4224 /// contiguous subset of lanes starting from the first. When there is
4225 /// an odd number of active lanes, the data in the last lane is not
4226 /// aggregated with any other lane's data but is instead copied over.
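/// As a worked example, with size = 7 the loop issues offsets 3, 2, 1:
/// lanes 0..2 aggregate values shuffled down from lanes 3..5 while lane 6's
/// data is copied down unchanged, and after two more rounds lane 0 holds
/// the fully reduced value.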
4228 /// Dispersed Partial Warp Reduction
4230 /// This algorithm is used within a warp when any discontiguous subset of
4231 /// lanes are active. It is used to implement the reduction operation
4232 /// across lanes in an OpenMP simd region or in a nested parallel region.
4235 /// dispersed_partial_reduce(void *reduce_data,
4236 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
4237 /// int size, remote_id;
4238 /// int logical_lane_id = number_of_active_lanes_before_me() * 2;
4239 /// do {
4240 /// remote_id = next_active_lane_id_right_after_me();
4241 /// # the above function returns 0 if no active lane
4242 /// # is present right after the current lane.
4243 /// size = number_of_active_lanes_in_this_warp();
4244 /// logical_lane_id /= 2;
4245 /// ShuffleReduceFn(reduce_data, logical_lane_id,
4246 /// remote_id-1-threadIdx.x, 2);
4247 /// } while (logical_lane_id % 2 == 0 && size > 1);
4248 /// }
4250 /// There is no assumption made about the initial state of the reduction.
4251 /// Any number of lanes (>=1) could be active at any position. The reduction
4252 /// result is returned in the first active lane.
4254 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4256 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4257 /// if (lane_id % 2 == 0 && offset > 0)
4258 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
4259 /// else
4260 /// reduce_elem = remote_elem
4263 /// Intra-Team Reduction
4265 /// This function, as implemented in the runtime call
4266 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
4267 /// threads in a team. It first reduces within a warp using the
4268 /// aforementioned algorithms. We then proceed to gather all such
4269 /// reduced values at the first warp.
4271 /// The runtime makes use of the function 'InterWarpCpyFn', which copies
4272 /// data from each "warp master" (the zeroth lane of each warp, where
4273 /// warp-reduced data is held) to the zeroth warp. This step reduces (in
4274 /// a mathematical sense) the problem of reduction across warp masters in
4275 /// a block to the problem of warp reduction.
4278 /// Inter-Team Reduction
4280 /// Once a team has reduced its data to a single value, it is stored in
4281 /// a global scratchpad array. Since each team has a distinct slot, this
4282 /// can be done without locking.
4284 /// The last team to write to the scratchpad array proceeds to reduce the
4285 /// scratchpad array. One or more workers in the last team use the helper
4286 /// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
4287 /// the k'th worker reduces every k'th element.
4289 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
4290 /// reduce across workers and compute a globally reduced value.
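///
/// As a concrete sizing example: a team of 128 OpenMP threads consists of 4
/// warps; each warp reduces its 32 values to one in log2(32) = 5 shuffle
/// steps, the 4 warp-master values are combined through shared memory by
/// the first warp, and a single value per team then enters the inter-team
/// step.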
4292 void CGOpenMPRuntimeNVPTX::emitReduction(
4293 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
4294 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
4295 ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
4296 if (!CGF.HaveInsertPoint())
4297 return;
4299 bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
4301 bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
4304 if (Options.SimpleReduction) {
4305 assert(!TeamsReduction && !ParallelReduction &&
4306 "Invalid reduction selection in emitReduction.");
4307 CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
4308 ReductionOps, Options);
4309 return;
4310 }
4312 assert((TeamsReduction || ParallelReduction) &&
4313 "Invalid reduction selection in emitReduction.");
4315 // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
4316 // RedList, shuffle_reduce_func, interwarp_copy_func);
4318 // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
4319 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
4320 llvm::Value *ThreadId = getThreadID(CGF, Loc);
4323 ASTContext &C = CGM.getContext();
4324 // 1. Build a list of reduction variables.
4325 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
4326 auto Size = RHSExprs.size();
4327 for (const Expr *E : Privates) {
4328 if (E->getType()->isVariablyModifiedType())
4329 // Reserve place for array size.
4330 ++Size;
4331 }
4332 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
4333 QualType ReductionArrayTy =
4334 C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
4335 /*IndexTypeQuals=*/0);
4336 Address ReductionList =
4337 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
4338 auto IPriv = Privates.begin();
4339 unsigned Idx = 0;
4340 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
4341 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4342 CGF.Builder.CreateStore(
4343 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4344 CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
4345 Elem);
4346 if ((*IPriv)->getType()->isVariablyModifiedType()) {
4347 // Store array size.
4348 ++Idx;
4349 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4350 llvm::Value *Size = CGF.Builder.CreateIntCast(
4351 CGF.getVLASize(
4352 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
4353 .NumElts,
4354 CGF.SizeTy, /*isSigned=*/false);
4355 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
4356 Elem);
4357 }
4358 }
4360 llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4361 ReductionList.getPointer(), CGF.VoidPtrTy);
4362 llvm::Function *ReductionFn = emitReductionFunction(
4363 Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
4364 LHSExprs, RHSExprs, ReductionOps);
4365 llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
4366 llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
4367 CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
4368 llvm::Value *InterWarpCopyFn =
4369 emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
4370 llvm::Value *Res;
4371 if (ParallelReduction) {
4372 llvm::Value *Args[] = {RTLoc,
4373 ThreadId,
4374 CGF.Builder.getInt32(RHSExprs.size()),
4375 ReductionArrayTySize,
4376 RL,
4377 ShuffleAndReduceFn,
4378 InterWarpCopyFn};
4380 Res = CGF.EmitRuntimeCall(
4381 createNVPTXRuntimeFunction(
4382 OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2),
4383 Args);
4384 } else {
4385 assert(TeamsReduction && "expected teams reduction.");
4386 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
4387 llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
4388 int Cnt = 0;
4389 for (const Expr *DRE : Privates) {
4390 PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
4391 ++Cnt;
4392 }
4393 const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
4394 CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
4395 C.getLangOpts().OpenMPCUDAReductionBufNum);
4396 TeamsReductions.push_back(TeamReductionRec);
4397 if (!KernelTeamsReductionPtr) {
4398 KernelTeamsReductionPtr = new llvm::GlobalVariable(
4399 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
4400 llvm::GlobalValue::InternalLinkage, nullptr,
4401 "_openmp_teams_reductions_buffer_$_$ptr");
4402 }
4403 llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
4404 Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
4405 /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
4406 llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
4407 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4408 llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
4409 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4410 ReductionFn);
4411 llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
4412 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4413 llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
4414 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4415 ReductionFn);
4417 llvm::Value *Args[] = {
4418 RTLoc,
4419 ThreadId,
4420 GlobalBufferPtr,
4421 CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
4422 RL,
4423 ShuffleAndReduceFn,
4424 InterWarpCopyFn,
4425 GlobalToBufferCpyFn,
4426 GlobalToBufferRedFn,
4427 BufferToGlobalCpyFn,
4428 BufferToGlobalRedFn};
4430 Res = CGF.EmitRuntimeCall(
4431 createNVPTXRuntimeFunction(
4432 OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2),
4433 Args);
4434 }
4436 // 5. Build if (res == 1)
4437 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
4438 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
4439 llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
4440 Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
4441 CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
4443 // 6. Build then branch: where we have reduced values in the master
4444 // thread in each team.
4445 // __kmpc_end_reduce{_nowait}(<gtid>);
4447 CGF.EmitBlock(ThenBB);
4449 // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
4450 auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
4451 this](CodeGenFunction &CGF, PrePostActionTy &Action) {
4452 auto IPriv = Privates.begin();
4453 auto ILHS = LHSExprs.begin();
4454 auto IRHS = RHSExprs.begin();
4455 for (const Expr *E : ReductionOps) {
4456 emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
4457 cast<DeclRefExpr>(*IRHS));
4458 ++IPriv;
4459 ++ILHS;
4460 ++IRHS;
4461 }
4462 };
4463 llvm::Value *EndArgs[] = {ThreadId};
4464 RegionCodeGenTy RCG(CodeGen);
4465 NVPTXActionTy Action(
4466 nullptr, llvm::None,
4467 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
4468 EndArgs);
4469 RCG.setAction(Action);
4470 RCG(CGF);
4471 // There is no need to emit line number for unconditional branch.
4472 (void)ApplyDebugLocation::CreateEmpty(CGF);
4473 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
4474 }
4476 const VarDecl *
4477 CGOpenMPRuntimeNVPTX::translateParameter(const FieldDecl *FD,
4478 const VarDecl *NativeParam) const {
4479 if (!NativeParam->getType()->isReferenceType())
4480 return NativeParam;
4481 QualType ArgType = NativeParam->getType();
4482 QualifierCollector QC;
4483 const Type *NonQualTy = QC.strip(ArgType);
4484 QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4485 if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
4486 if (Attr->getCaptureKind() == OMPC_map) {
4487 PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4488 LangAS::opencl_global);
4489 } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
4490 PointeeTy.isConstant(CGM.getContext())) {
4491 PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4492 LangAS::opencl_generic);
4493 }
4494 }
4495 ArgType = CGM.getContext().getPointerType(PointeeTy);
4497 enum { NVPTX_local_addr = 5 };
4498 QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
4499 ArgType = QC.apply(CGM.getContext(), ArgType);
4500 if (isa<ImplicitParamDecl>(NativeParam))
4501 return ImplicitParamDecl::Create(
4502 CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
4503 NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
4504 return ParmVarDecl::Create(
4505 CGM.getContext(),
4506 const_cast<DeclContext *>(NativeParam->getDeclContext()),
4507 NativeParam->getBeginLoc(), NativeParam->getLocation(),
4508 NativeParam->getIdentifier(), ArgType,
4509 /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
4510 }
4512 Address
4513 CGOpenMPRuntimeNVPTX::getParameterAddress(CodeGenFunction &CGF,
4514 const VarDecl *NativeParam,
4515 const VarDecl *TargetParam) const {
4516 assert(NativeParam != TargetParam &&
4517 NativeParam->getType()->isReferenceType() &&
4518 "Native arg must not be the same as target arg.");
4519 Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
4520 QualType NativeParamType = NativeParam->getType();
4521 QualifierCollector QC;
4522 const Type *NonQualTy = QC.strip(NativeParamType);
4523 QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4524 unsigned NativePointeeAddrSpace =
4525 CGF.getContext().getTargetAddressSpace(NativePointeeTy);
4526 QualType TargetTy = TargetParam->getType();
4527 llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
4528 LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
4529 // First cast to generic.
4530 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4531 TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4532 /*AddrSpace=*/0));
4533 // Cast from generic to native address space.
4534 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4535 TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4536 NativePointeeAddrSpace));
4537 Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
4538 CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
4539 NativeParamType);
4540 return NativeParamAddr;
4541 }
void CGOpenMPRuntimeNVPTX::emitOutlinedFunctionCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
    ArrayRef<llvm::Value *> Args) const {
  SmallVector<llvm::Value *, 4> TargetArgs;
  TargetArgs.reserve(Args.size());
  auto *FnType = OutlinedFn.getFunctionType();
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
    if (FnType->isVarArg() && FnType->getNumParams() <= I) {
      TargetArgs.append(std::next(Args.begin(), I), Args.end());
      break;
    }
    llvm::Type *TargetType = FnType->getParamType(I);
    llvm::Value *NativeArg = Args[I];
    if (!TargetType->isPointerTy()) {
      TargetArgs.emplace_back(NativeArg);
      continue;
    }
    llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        NativeArg,
        NativeArg->getType()->getPointerElementType()->getPointerTo());
    TargetArgs.emplace_back(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
  }
  CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
}
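
// Note: pointer arguments are likewise funneled through the generic address
// space here, so that, e.g., an i32 addrspace(3)* value can be handed to an
// outlined function whose prototype expects a plain (generic) i32*.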
/// Emit function which wraps the outlined parallel region
/// and controls the arguments which are passed to this function.
/// The wrapper ensures that the outlined function is called
/// with the correct arguments when data is shared.
llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
    llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
  ASTContext &Ctx = CGM.getContext();
  const auto &CS = *D.getCapturedStmt(OMPD_parallel);

  // Create a function that takes as argument the source thread.
  FunctionArgList WrapperArgs;
  QualType Int16QTy =
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
  QualType Int32QTy =
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
  ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
                                     /*Id=*/nullptr, Int16QTy,
                                     ImplicitParamDecl::Other);
  ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
                               /*Id=*/nullptr, Int32QTy,
                               ImplicitParamDecl::Other);
  WrapperArgs.emplace_back(&ParallelLevelArg);
  WrapperArgs.emplace_back(&WrapperArg);

  const CGFunctionInfo &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);

  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
  Fn->setDoesNotRecurse();

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
                    D.getBeginLoc(), D.getBeginLoc());

  const auto *RD = CS.getCapturedRecordDecl();
  auto CurField = RD->field_begin();

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // Get the array of arguments.
  SmallVector<llvm::Value *, 8> Args;

  Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
  Args.emplace_back(ZeroAddr.getPointer());

  CGBuilderTy &Bld = CGF.Builder;
  auto CI = CS.capture_begin();

  // Use global memory for data sharing.
  // Handle passing of global args to workers.
  Address GlobalArgs =
      CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
  llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
  llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_get_shared_variables),
      DataSharingArgs);

  // Retrieve the shared variables from the list of references returned
  // by the runtime. Pass the variables to the outlined function.
  Address SharedArgListAddress = Address::invalid();
  if (CS.capture_size() > 0 ||
      isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
    SharedArgListAddress = CGF.EmitLoadOfPointer(
        GlobalArgs, CGF.getContext()
                        .getPointerType(CGF.getContext().getPointerType(
                            CGF.getContext().VoidPtrTy))
                        .castAs<PointerType>());
  }
  unsigned Idx = 0;
  if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
    Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
    Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
        Src, CGF.SizeTy->getPointerTo());
    llvm::Value *LB = CGF.EmitLoadOfScalar(
        TypedAddress,
        /*Volatile=*/false,
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
        cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
    Args.emplace_back(LB);
    ++Idx;
    Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
    TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
        Src, CGF.SizeTy->getPointerTo());
    llvm::Value *UB = CGF.EmitLoadOfScalar(
        TypedAddress,
        /*Volatile=*/false,
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
        cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
    Args.emplace_back(UB);
    ++Idx;
  }
  if (CS.capture_size() > 0) {
    ASTContext &CGFContext = CGF.getContext();
    for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
      QualType ElemTy = CurField->getType();
      Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
      Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
          Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
      llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
                                              /*Volatile=*/false,
                                              CGFContext.getPointerType(ElemTy),
                                              CI->getLocation());
      if (CI->capturesVariableByCopy() &&
          !CI->getCapturedVar()->getType()->isAnyPointerType()) {
        Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
                              CI->getLocation());
      }
      Args.emplace_back(Arg);
    }
  }

  emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
  CGF.FinishFunction();
  return Fn;
}
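
// Roughly, the wrapper built above behaves like the following pseudo-code
// (illustrative only; the real body is emitted directly as LLVM IR):
//
//   void <outlined>_wrapper(uint16_t ParallelLevel, uint32_t ThreadId) {
//     void **SharedArgs;
//     __kmpc_get_shared_variables(&SharedArgs);
//     <outlined>(&ThreadId, &Zero, SharedArgs[0], ..., SharedArgs[N-1]);
//   }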
void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
                                              const Decl *D) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
    return;

  assert(D && "Expected function or captured|block decl.");
  assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
         "Function is registered already.");
  assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
         "Team is set but not processed.");
  const Stmt *Body = nullptr;
  bool NeedToDelayGlobalization = false;
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    Body = FD->getBody();
  } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
    Body = BD->getBody();
  } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
    Body = CD->getBody();
    NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
    if (NeedToDelayGlobalization &&
        getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
      return;
  }
  if (!Body)
    return;
  CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
  VarChecker.Visit(Body);
  const RecordDecl *GlobalizedVarsRecord =
      VarChecker.getGlobalizedRecord(IsInTTDRegion);
  TeamAndReductions.first = nullptr;
  TeamAndReductions.second.clear();
  ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
      VarChecker.getEscapedVariableLengthDecls();
  if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
    return;
  auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
  I->getSecond().MappedParams =
      std::make_unique<CodeGenFunction::OMPMapVars>();
  I->getSecond().GlobalRecord = GlobalizedVarsRecord;
  I->getSecond().EscapedParameters.insert(
      VarChecker.getEscapedParameters().begin(),
      VarChecker.getEscapedParameters().end());
  I->getSecond().EscapedVariableLengthDecls.append(
      EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
  DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
  for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
    assert(VD->isCanonicalDecl() && "Expected canonical declaration");
    const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
    Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
  }
  if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
    CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
    VarChecker.Visit(Body);
    I->getSecond().SecondaryGlobalRecord =
        VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
    I->getSecond().SecondaryLocalVarData.emplace();
    DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
    for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
      assert(VD->isCanonicalDecl() && "Expected canonical declaration");
      const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
      Data.insert(
          std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
    }
  }
  if (!NeedToDelayGlobalization) {
    emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
    struct GlobalizationScope final : EHScopeStack::Cleanup {
      GlobalizationScope() = default;

      void Emit(CodeGenFunction &CGF, Flags flags) override {
        static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
            .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
      }
    };
    CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
  }
}
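
// Illustrative example of when this prolog fires: in generic data-sharing
// mode a local whose address escapes into a nested parallel region must be
// globalized, e.g.
//
//   #pragma omp target teams
//   {
//     int A = 0;            // escapes into the parallel region below, so A
//     #pragma omp parallel  // is moved into the globalized record
//     A = omp_get_thread_num();
//   }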
Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
                                                        const VarDecl *VD) {
  if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
    const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
    auto AS = LangAS::Default;
    switch (A->getAllocatorType()) {
      // Use the default allocator here as by default local vars are
      // threadlocal.
    case OMPAllocateDeclAttr::OMPNullMemAlloc:
    case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
    case OMPAllocateDeclAttr::OMPThreadMemAlloc:
    case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
    case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
      // Follow the user decision - use default allocation.
      return Address::invalid();
    case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
      // TODO: implement support for user-defined allocators.
      return Address::invalid();
    case OMPAllocateDeclAttr::OMPConstMemAlloc:
      AS = LangAS::cuda_constant;
      break;
    case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
      AS = LangAS::cuda_shared;
      break;
    case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
    case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
      break;
    }
    llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
    auto *GV = new llvm::GlobalVariable(
        CGM.getModule(), VarTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
        VD->getName(),
        /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(AS));
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
    GV->setAlignment(Align.getAsAlign());
    return Address(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
                    VD->getType().getAddressSpace()))),
        Align);
  }

  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
    return Address::invalid();

  VD = VD->getCanonicalDecl();
  auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I == FunctionGlobalizedDecls.end())
    return Address::invalid();
  auto VDI = I->getSecond().LocalVarData.find(VD);
  if (VDI != I->getSecond().LocalVarData.end())
    return VDI->second.PrivateAddr;
  if (VD->hasAttrs()) {
    for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
         E(VD->attr_end());
         IT != E; ++IT) {
      auto VDI = I->getSecond().LocalVarData.find(
          cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
              ->getCanonicalDecl());
      if (VDI != I->getSecond().LocalVarData.end())
        return VDI->second.PrivateAddr;
    }
  }

  return Address::invalid();
}
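
// Illustrative: a local declaration such as
//
//   int Scratch;
//   #pragma omp allocate(Scratch) allocator(omp_pteam_mem_alloc)
//
// takes the first branch above and is backed by a zero-initialized module
// global in CUDA shared memory instead of a stack slot; omp_const_mem_alloc
// would select constant memory in the same way.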
void CGOpenMPRuntimeNVPTX::functionFinished(CodeGenFunction &CGF) {
  FunctionGlobalizedDecls.erase(CGF.CurFn);
  CGOpenMPRuntime::functionFinished(CGF);
}
void CGOpenMPRuntimeNVPTX::getDefaultDistScheduleAndChunk(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    OpenMPDistScheduleClauseKind &ScheduleKind,
    llvm::Value *&Chunk) const {
  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
    ScheduleKind = OMPC_DIST_SCHEDULE_static;
    Chunk = CGF.EmitScalarConversion(getNVPTXNumThreads(CGF),
        CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
        S.getIterationVariable()->getType(), S.getBeginLoc());
    return;
  }
  CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
      CGF, S, ScheduleKind, Chunk);
}
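
// Illustrative: in SPMD mode the defaulting above is equivalent to writing
//
//   #pragma omp distribute dist_schedule(static, <threads-per-block>)
//
// so the teams are handed chunks sized to one thread block, which pairs
// naturally with the schedule(static, 1) default for the inner loop.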
void CGOpenMPRuntimeNVPTX::getDefaultScheduleAndChunk(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    OpenMPScheduleClauseKind &ScheduleKind,
    const Expr *&ChunkExpr) const {
  ScheduleKind = OMPC_SCHEDULE_static;
  // Chunk size is 1 in this case.
  llvm::APInt ChunkSize(32, 1);
  ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
      CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
      SourceLocation());
}
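
// Illustrative: this default corresponds to schedule(static, 1), which maps
// consecutive loop iterations to consecutive threads and therefore tends to
// produce coalesced global-memory accesses on the GPU.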
void CGOpenMPRuntimeNVPTX::adjustTargetSpecificDataForLambdas(
    CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
  assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
         "Expected target-based directive.");
  const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
  for (const CapturedStmt::Capture &C : CS->captures()) {
    // Capture variables captured by reference in lambdas for target-based
    // directives.
    if (!C.capturesVariable())
      continue;
    const VarDecl *VD = C.getCapturedVar();
    const auto *RD = VD->getType()
                         .getCanonicalType()
                         .getNonReferenceType()
                         ->getAsCXXRecordDecl();
    if (!RD || !RD->isLambda())
      continue;
    Address VDAddr = CGF.GetAddrOfLocalVar(VD);
    LValue VDLVal;
    if (VD->getType().getCanonicalType()->isReferenceType())
      VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
    else
      VDLVal = CGF.MakeAddrLValue(
          VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
    llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
    FieldDecl *ThisCapture = nullptr;
    RD->getCaptureFields(Captures, ThisCapture);
    if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
      LValue ThisLVal =
          CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
      llvm::Value *CXXThis = CGF.LoadCXXThis();
      CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
    }
    for (const LambdaCapture &LC : RD->captures()) {
      if (LC.getCaptureKind() != LCK_ByRef)
        continue;
      const VarDecl *VD = LC.getCapturedVar();
      if (!CS->capturesVariable(VD))
        continue;
      auto It = Captures.find(VD);
      assert(It != Captures.end() && "Found lambda capture without field.");
      LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
      Address VDAddr = CGF.GetAddrOfLocalVar(VD);
      if (VD->getType().getCanonicalType()->isReferenceType())
        VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
                                               VD->getType().getCanonicalType())
                     .getAddress(CGF);
      CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
    }
  }
}
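
// Illustrative sketch of the case handled above:
//
//   int X = 0;
//   auto L = [&X]() { return X; };
//   #pragma omp target firstprivate(L)
//   L();
//
// The device copy of 'L' would otherwise still hold a host address for 'X';
// the loop above rewrites each by-reference capture field (and any captured
// 'this') to point at the corresponding device-side address.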
unsigned CGOpenMPRuntimeNVPTX::getDefaultFirstprivateAddressSpace() const {
  return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
}
bool CGOpenMPRuntimeNVPTX::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
                                                            LangAS &AS) {
  if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
    return false;
  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
  switch (A->getAllocatorType()) {
  case OMPAllocateDeclAttr::OMPNullMemAlloc:
  case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
    // Not supported, fallback to the default mem space.
  case OMPAllocateDeclAttr::OMPThreadMemAlloc:
  case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
  case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
  case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
  case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
    AS = LangAS::Default;
    return true;
  case OMPAllocateDeclAttr::OMPConstMemAlloc:
    AS = LangAS::cuda_constant;
    return true;
  case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
    AS = LangAS::cuda_shared;
    return true;
  case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
    llvm_unreachable("Expected predefined allocator for the variables with the "
                     "static storage.");
  }
  return false;
}
// Get current CudaArch and ignore any unknown values.
static CudaArch getCudaArch(CodeGenModule &CGM) {
  if (!CGM.getTarget().hasFeature("ptx"))
    return CudaArch::UNKNOWN;
  for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
    if (Feature.getValue()) {
      CudaArch Arch = StringToCudaArch(Feature.getKey());
      if (Arch != CudaArch::UNKNOWN)
        return Arch;
    }
  }
  return CudaArch::UNKNOWN;
}
/// Check to see if the target architecture supports unified addressing, which
/// is a restriction for the OpenMP requires clause "unified_shared_memory".
void CGOpenMPRuntimeNVPTX::processRequiresDirective(
    const OMPRequiresDecl *D) {
  for (const OMPClause *Clause : D->clauselists()) {
    if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
      CudaArch Arch = getCudaArch(CGM);
      switch (Arch) {
      case CudaArch::SM_20:
      case CudaArch::SM_21:
      case CudaArch::SM_30:
      case CudaArch::SM_32:
      case CudaArch::SM_35:
      case CudaArch::SM_37:
      case CudaArch::SM_50:
      case CudaArch::SM_52:
      case CudaArch::SM_53:
      case CudaArch::SM_60:
      case CudaArch::SM_61:
      case CudaArch::SM_62: {
        SmallString<256> Buffer;
        llvm::raw_svector_ostream Out(Buffer);
        Out << "Target architecture " << CudaArchToString(Arch)
            << " does not support unified addressing";
        CGM.Error(Clause->getBeginLoc(), Out.str());
        return;
      }
      case CudaArch::SM_70:
      case CudaArch::SM_72:
      case CudaArch::SM_75:
      case CudaArch::SM_80:
      case CudaArch::GFX600:
      case CudaArch::GFX601:
      case CudaArch::GFX700:
      case CudaArch::GFX701:
      case CudaArch::GFX702:
      case CudaArch::GFX703:
      case CudaArch::GFX704:
      case CudaArch::GFX801:
      case CudaArch::GFX802:
      case CudaArch::GFX803:
      case CudaArch::GFX810:
      case CudaArch::GFX900:
      case CudaArch::GFX902:
      case CudaArch::GFX904:
      case CudaArch::GFX906:
      case CudaArch::GFX908:
      case CudaArch::GFX909:
      case CudaArch::GFX1010:
      case CudaArch::GFX1011:
      case CudaArch::GFX1012:
      case CudaArch::GFX1030:
      case CudaArch::UNKNOWN:
        break;
      case CudaArch::LAST:
        llvm_unreachable("Unexpected Cuda arch.");
      }
    }
  }
  CGOpenMPRuntime::processRequiresDirective(D);
}
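
// Illustrative: compiling
//
//   #pragma omp requires unified_shared_memory
//
// while targeting sm_35 hits the diagnostic above, whereas sm_70 and newer
// (and the listed GFX architectures) pass the check silently.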
/// Get number of SMs and number of blocks per SM.
static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
  std::pair<unsigned, unsigned> Data;
  if (CGM.getLangOpts().OpenMPCUDANumSMs)
    Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
  if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
    Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
  if (Data.first && Data.second)
    return Data;
  switch (getCudaArch(CGM)) {
  case CudaArch::SM_20:
  case CudaArch::SM_21:
  case CudaArch::SM_30:
  case CudaArch::SM_32:
  case CudaArch::SM_35:
  case CudaArch::SM_37:
  case CudaArch::SM_50:
  case CudaArch::SM_52:
  case CudaArch::SM_53:
    return {16, 16};
  case CudaArch::SM_60:
  case CudaArch::SM_61:
  case CudaArch::SM_62:
    return {56, 8};
  case CudaArch::SM_70:
  case CudaArch::SM_72:
  case CudaArch::SM_75:
  case CudaArch::SM_80:
    return {84, 32};
  case CudaArch::GFX600:
  case CudaArch::GFX601:
  case CudaArch::GFX700:
  case CudaArch::GFX701:
  case CudaArch::GFX702:
  case CudaArch::GFX703:
  case CudaArch::GFX704:
  case CudaArch::GFX801:
  case CudaArch::GFX802:
  case CudaArch::GFX803:
  case CudaArch::GFX810:
  case CudaArch::GFX900:
  case CudaArch::GFX902:
  case CudaArch::GFX904:
  case CudaArch::GFX906:
  case CudaArch::GFX908:
  case CudaArch::GFX909:
  case CudaArch::GFX1010:
  case CudaArch::GFX1011:
  case CudaArch::GFX1012:
  case CudaArch::GFX1030:
  case CudaArch::UNKNOWN:
    break;
  case CudaArch::LAST:
    llvm_unreachable("Unexpected Cuda arch.");
  }
  llvm_unreachable("Unexpected NVPTX target without ptx feature.");
}
void CGOpenMPRuntimeNVPTX::clear() {
  if (!GlobalizedRecords.empty() &&
      !CGM.getLangOpts().OpenMPCUDATargetParallel) {
    ASTContext &C = CGM.getContext();
    llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
    llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
    RecordDecl *StaticRD = C.buildImplicitRecord(
        "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
    StaticRD->startDefinition();
    RecordDecl *SharedStaticRD = C.buildImplicitRecord(
        "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
    SharedStaticRD->startDefinition();
    for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
      if (Records.Records.empty())
        continue;
      unsigned Size = 0;
      unsigned RecAlignment = 0;
      for (const RecordDecl *RD : Records.Records) {
        QualType RDTy = C.getRecordType(RD);
        unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
        RecAlignment = std::max(RecAlignment, Alignment);
        unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
        Size =
            llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
      }
      Size = llvm::alignTo(Size, RecAlignment);
      llvm::APInt ArySize(/*numBits=*/64, Size);
      QualType SubTy = C.getConstantArrayType(
          C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
      const bool UseSharedMemory = Size <= SharedMemorySize;
      auto *Field =
          FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
                            SourceLocation(), SourceLocation(), nullptr, SubTy,
                            C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
                            /*BW=*/nullptr, /*Mutable=*/false,
                            /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (UseSharedMemory) {
        SharedStaticRD->addDecl(Field);
        SharedRecs.push_back(&Records);
      } else {
        StaticRD->addDecl(Field);
        GlobalRecs.push_back(&Records);
      }
      Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
      Records.UseSharedMemory->setInitializer(
          llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
    }
    // Allocate SharedMemorySize buffer for the shared memory.
    // FIXME: nvlink does not handle weak linkage correctly (objects with
    // different sizes are reported as erroneous).
    // Restore this code as soon as nvlink is fixed.
    if (!SharedStaticRD->field_empty()) {
      llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
      QualType SubTy = C.getConstantArrayType(
          C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
      auto *Field = FieldDecl::Create(
          C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
          C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      SharedStaticRD->addDecl(Field);
    }
    SharedStaticRD->completeDefinition();
    if (!SharedStaticRD->field_empty()) {
      QualType StaticTy = C.getRecordType(SharedStaticRD);
      llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
      auto *GV = new llvm::GlobalVariable(
          CGM.getModule(), LLVMStaticTy,
          /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
          llvm::Constant::getNullValue(LLVMStaticTy),
          "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
          llvm::GlobalValue::NotThreadLocal,
          C.getTargetAddressSpace(LangAS::cuda_shared));
      auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
          GV, CGM.VoidPtrTy);
      for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
        Rec->Buffer->replaceAllUsesWith(Replacement);
        Rec->Buffer->eraseFromParent();
      }
    }
    StaticRD->completeDefinition();
    if (!StaticRD->field_empty()) {
      QualType StaticTy = C.getRecordType(StaticRD);
      std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
      llvm::APInt Size1(32, SMsBlockPerSM.second);
      QualType Arr1Ty =
          C.getConstantArrayType(StaticTy, Size1, nullptr, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0);
      llvm::APInt Size2(32, SMsBlockPerSM.first);
      QualType Arr2Ty =
          C.getConstantArrayType(Arr1Ty, Size2, nullptr, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0);
      llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
      // FIXME: nvlink does not handle weak linkage correctly (objects with
      // different sizes are reported as erroneous).
      // Restore CommonLinkage as soon as nvlink is fixed.
      auto *GV = new llvm::GlobalVariable(
          CGM.getModule(), LLVMArr2Ty,
          /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
          llvm::Constant::getNullValue(LLVMArr2Ty),
          "_openmp_static_glob_rd_$_");
      auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
          GV, CGM.VoidPtrTy);
      for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
        Rec->Buffer->replaceAllUsesWith(Replacement);
        Rec->Buffer->eraseFromParent();
      }
    }
  }
  if (!TeamsReductions.empty()) {
    ASTContext &C = CGM.getContext();
    RecordDecl *StaticRD = C.buildImplicitRecord(
        "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
    StaticRD->startDefinition();
    for (const RecordDecl *TeamReductionRec : TeamsReductions) {
      QualType RecTy = C.getRecordType(TeamReductionRec);
      auto *Field = FieldDecl::Create(
          C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
          C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      StaticRD->addDecl(Field);
    }
    StaticRD->completeDefinition();
    QualType StaticTy = C.getRecordType(StaticRD);
    llvm::Type *LLVMReductionsBufferTy =
        CGM.getTypes().ConvertTypeForMem(StaticTy);
    // FIXME: nvlink does not handle weak linkage correctly (objects with
    // different sizes are reported as erroneous).
    // Restore CommonLinkage as soon as nvlink is fixed.
    auto *GV = new llvm::GlobalVariable(
        CGM.getModule(), LLVMReductionsBufferTy,
        /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
        llvm::Constant::getNullValue(LLVMReductionsBufferTy),
        "_openmp_teams_reductions_buffer_$_");
    KernelTeamsReductionPtr->setInitializer(
        llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
                                                             CGM.VoidPtrTy));
  }
  CGOpenMPRuntime::clear();
}