1 //===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This provides a class for OpenMP runtime code generation.
12 //===----------------------------------------------------------------------===//
15 #include "CGCleanup.h"
16 #include "CGOpenMPRuntime.h"
17 #include "CGRecordLayout.h"
18 #include "CodeGenFunction.h"
19 #include "clang/CodeGen/ConstantInitBuilder.h"
20 #include "clang/AST/Decl.h"
21 #include "clang/AST/StmtOpenMP.h"
22 #include "clang/Basic/BitmaskEnum.h"
23 #include "llvm/ADT/ArrayRef.h"
24 #include "llvm/Bitcode/BitcodeReader.h"
25 #include "llvm/IR/CallSite.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/GlobalValue.h"
28 #include "llvm/IR/Value.h"
29 #include "llvm/Support/Format.h"
30 #include "llvm/Support/raw_ostream.h"
33 using namespace clang;
34 using namespace CodeGen;
37 /// Base class for handling code generation inside OpenMP regions.
38 class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
40 /// Kinds of OpenMP regions used in codegen.
41 enum CGOpenMPRegionKind {
42 /// Region with outlined function for standalone 'parallel'
44 ParallelOutlinedRegion,
45 /// Region with outlined function for standalone 'task' directive.
47 /// Region for constructs that do not require function outlining,
48 /// like 'for', 'sections', 'atomic' etc. directives.
50 /// Region with outlined function for standalone 'target' directive.
/// Construct region info for a captured statement (outlined regions).
54 CGOpenMPRegionInfo(const CapturedStmt &CS,
55 const CGOpenMPRegionKind RegionKind,
56 const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
58 : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
59 CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
/// Construct region info without a captured statement (inlined regions).
61 CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
62 const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
64 : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
65 Kind(Kind), HasCancel(HasCancel) {}
67 /// Get a variable or parameter for storing global thread id
68 /// inside OpenMP construct.
69 virtual const VarDecl *getThreadIDVariable() const = 0;
71 /// Emit the captured statement body.
72 void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
74 /// Get an LValue for the current ThreadID variable.
75 /// \return LValue for thread id variable. This LValue always has type int32*.
76 virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
/// Hook for untied-task switching; the default implementation does nothing.
78 virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}
80 CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
82 OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
84 bool hasCancel() const { return HasCancel; }
/// LLVM-style RTTI: every OpenMP region is tagged with the CR_OpenMP kind.
86 static bool classof(const CGCapturedStmtInfo *Info) {
87 return Info->getKind() == CR_OpenMP;
90 ~CGOpenMPRegionInfo() override = default;
93 CGOpenMPRegionKind RegionKind;
94 RegionCodeGenTy CodeGen;
95 OpenMPDirectiveKind Kind;
99 /// API for captured statement code generation in OpenMP constructs.
100 class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
102 CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
103 const RegionCodeGenTy &CodeGen,
104 OpenMPDirectiveKind Kind, bool HasCancel,
105 StringRef HelperName)
106 : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
108 ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
109 assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
112 /// Get a variable or parameter for storing global thread id
113 /// inside OpenMP construct.
114 const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
116 /// Get the name of the capture helper.
117 StringRef getHelperName() const override { return HelperName; }
/// LLVM-style RTTI: matches only parallel-outlined regions.
119 static bool classof(const CGCapturedStmtInfo *Info) {
120 return CGOpenMPRegionInfo::classof(Info) &&
121 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
122 ParallelOutlinedRegion;
126 /// A variable or parameter storing global thread id for OpenMP
128 const VarDecl *ThreadIDVar;
/// Name reported by getHelperName() for the outlined helper function.
129 StringRef HelperName;
132 /// API for captured statement code generation in OpenMP constructs.
133 class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
/// Pre/post action that emits the switching machinery for untied tasks.
135 class UntiedTaskActionTy final : public PrePostActionTy {
137 const VarDecl *PartIDVar;
138 const RegionCodeGenTy UntiedCodeGen;
139 llvm::SwitchInst *UntiedSwitch = nullptr;
142 UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
143 const RegionCodeGenTy &UntiedCodeGen)
144 : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
/// Builds a switch over the loaded part id so an untied task can resume
/// at its last switching point.
145 void Enter(CodeGenFunction &CGF) override {
147 // Emit task switching point.
148 LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
149 CGF.GetAddrOfLocalVar(PartIDVar),
150 PartIDVar->getType()->castAs<PointerType>());
152 CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
153 llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
154 UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
155 CGF.EmitBlock(DoneBB);
156 CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
157 CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
158 UntiedSwitch->addCase(CGF.Builder.getInt32(0),
159 CGF.Builder.GetInsertBlock());
160 emitUntiedSwitch(CGF);
/// Record the next switching point: store the new part id, add a new case
/// to the untied switch, and emit the block where execution resumes.
163 void emitUntiedSwitch(CodeGenFunction &CGF) const {
165 LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
166 CGF.GetAddrOfLocalVar(PartIDVar),
167 PartIDVar->getType()->castAs<PointerType>());
168 CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
171 CodeGenFunction::JumpDest CurPoint =
172 CGF.getJumpDestInCurrentScope(".untied.next.");
173 CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
174 CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
175 UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
176 CGF.Builder.GetInsertBlock());
177 CGF.EmitBranchThroughCleanup(CurPoint);
178 CGF.EmitBlock(CurPoint.getBlock());
/// Number of task parts == number of cases added to the untied switch.
181 unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
183 CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
184 const VarDecl *ThreadIDVar,
185 const RegionCodeGenTy &CodeGen,
186 OpenMPDirectiveKind Kind, bool HasCancel,
187 const UntiedTaskActionTy &Action)
188 : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
189 ThreadIDVar(ThreadIDVar), Action(Action) {
190 assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
193 /// Get a variable or parameter for storing global thread id
194 /// inside OpenMP construct.
195 const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
197 /// Get an LValue for the current ThreadID variable.
198 LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
200 /// Get the name of the capture helper.
201 StringRef getHelperName() const override { return ".omp_outlined."; }
/// Delegate untied-task switching to the stored action.
203 void emitUntiedSwitch(CodeGenFunction &CGF) override {
204 Action.emitUntiedSwitch(CGF);
/// LLVM-style RTTI: matches only task-outlined regions.
207 static bool classof(const CGCapturedStmtInfo *Info) {
208 return CGOpenMPRegionInfo::classof(Info) &&
209 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
214 /// A variable or parameter storing global thread id for OpenMP
216 const VarDecl *ThreadIDVar;
217 /// Action for emitting code for untied tasks.
218 const UntiedTaskActionTy &Action;
221 /// API for inlined captured statement code generation in OpenMP
223 class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
225 CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
226 const RegionCodeGenTy &CodeGen,
227 OpenMPDirectiveKind Kind, bool HasCancel)
228 : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
230 OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
232 // Retrieve the value of the context parameter.
233 llvm::Value *getContextValue() const override {
235 return OuterRegionInfo->getContextValue();
236 llvm_unreachable("No context value for inlined OpenMP region");
// Forward the context parameter to the enclosing region, if any.
239 void setContextValue(llvm::Value *V) override {
240 if (OuterRegionInfo) {
241 OuterRegionInfo->setContextValue(V);
244 llvm_unreachable("No context value for inlined OpenMP region");
247 /// Lookup the captured field decl for a variable.
248 const FieldDecl *lookup(const VarDecl *VD) const override {
250 return OuterRegionInfo->lookup(VD);
251 // If there is no outer outlined region, no need to lookup in a list of
252 // captured variables, we can use the original one.
// Delegate 'this' field lookup to the outer region.
256 FieldDecl *getThisFieldDecl() const override {
258 return OuterRegionInfo->getThisFieldDecl();
262 /// Get a variable or parameter for storing global thread id
263 /// inside OpenMP construct.
264 const VarDecl *getThreadIDVariable() const override {
266 return OuterRegionInfo->getThreadIDVariable();
270 /// Get an LValue for the current ThreadID variable.
271 LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
273 return OuterRegionInfo->getThreadIDVariableLValue(CGF);
274 llvm_unreachable("No LValue for inlined OpenMP construct");
277 /// Get the name of the capture helper.
278 StringRef getHelperName() const override {
279 if (auto *OuterRegionInfo = getOldCSI())
280 return OuterRegionInfo->getHelperName();
281 llvm_unreachable("No helper name for inlined OpenMP construct");
// Forward untied-task switching to the enclosing region.
284 void emitUntiedSwitch(CodeGenFunction &CGF) override {
286 OuterRegionInfo->emitUntiedSwitch(CGF);
289 CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
/// LLVM-style RTTI: matches only inlined regions.
291 static bool classof(const CGCapturedStmtInfo *Info) {
292 return CGOpenMPRegionInfo::classof(Info) &&
293 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
296 ~CGOpenMPInlinedRegionInfo() override = default;
299 /// CodeGen info about outer OpenMP region.
300 CodeGenFunction::CGCapturedStmtInfo *OldCSI;
/// OldCSI downcast to an OpenMP region, or null if it is not one.
301 CGOpenMPRegionInfo *OuterRegionInfo;
304 /// API for captured statement code generation in OpenMP target
305 /// constructs. For these captures, implicit parameters are used instead of the
306 /// captured fields. The name of the target region has to be unique in a given
307 /// application so it is provided by the client, because only the client has
308 /// the information to generate that.
309 class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
311 CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
312 const RegionCodeGenTy &CodeGen, StringRef HelperName)
313 : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
314 /*HasCancel=*/false),
315 HelperName(HelperName) {}
317 /// This is unused for target regions because each starts executing
318 /// with a single thread.
319 const VarDecl *getThreadIDVariable() const override { return nullptr; }
321 /// Get the name of the capture helper.
322 StringRef getHelperName() const override { return HelperName; }
/// LLVM-style RTTI: matches only target regions.
324 static bool classof(const CGCapturedStmtInfo *Info) {
325 return CGOpenMPRegionInfo::classof(Info) &&
326 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
/// Client-provided, application-unique name of the target region helper.
330 StringRef HelperName;
/// Placeholder codegen callback that must never actually be invoked.
333 static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
334 llvm_unreachable("No codegen for expressions");
336 /// API for generation of expressions captured in an innermost OpenMP
338 class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
340 CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
341 : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
343 /*HasCancel=*/false),
345 // Make sure the globals captured in the provided statement are local by
346 // using the privatization logic. We assume the same variable is not
347 // captured more than once.
348 for (const auto &C : CS.captures()) {
349 if (!C.capturesVariable() && !C.capturesVariableByCopy())
352 const VarDecl *VD = C.getCapturedVar();
353 if (VD->isLocalVarDeclOrParm())
356 DeclRefExpr DRE(const_cast<VarDecl *>(VD),
357 /*RefersToEnclosingVariableOrCapture=*/false,
358 VD->getType().getNonReferenceType(), VK_LValue,
360 PrivScope.addPrivate(
361 VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(); });
363 (void)PrivScope.Privatize();
366 /// Lookup the captured field decl for a variable.
367 const FieldDecl *lookup(const VarDecl *VD) const override {
368 if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
373 /// Emit the captured statement body.
374 void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
375 llvm_unreachable("No body for expressions");
378 /// Get a variable or parameter for storing global thread id
379 /// inside OpenMP construct.
380 const VarDecl *getThreadIDVariable() const override {
381 llvm_unreachable("No thread id for expressions");
384 /// Get the name of the capture helper.
385 StringRef getHelperName() const override {
386 llvm_unreachable("No helper name for expressions");
// Never matched by RTTI: this info is transient and local to expression
// emission.
389 static bool classof(const CGCapturedStmtInfo *Info) { return false; }
392 /// Private scope to capture global variables.
393 CodeGenFunction::OMPPrivateScope PrivScope;
396 /// RAII for emitting code of OpenMP constructs.
397 class InlinedOpenMPRegionRAII {
398 CodeGenFunction &CGF;
// Saved lambda/block capture state; restored in the destructor.
399 llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
400 FieldDecl *LambdaThisCaptureField = nullptr;
401 const CodeGen::CGBlockInfo *BlockInfo = nullptr;
404 /// Constructs region for combined constructs.
405 /// \param CodeGen Code generation sequence for combined directives. Includes
406 /// a list of functions used for code generation of implicitly inlined
408 InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
409 OpenMPDirectiveKind Kind, bool HasCancel)
411 // Start emission for the construct.
412 CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
413 CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
// Stash and clear CGF's lambda/block capture state while the inlined
// region is emitted.
414 std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
415 LambdaThisCaptureField = CGF.LambdaThisCaptureField;
416 CGF.LambdaThisCaptureField = nullptr;
417 BlockInfo = CGF.BlockInfo;
418 CGF.BlockInfo = nullptr;
421 ~InlinedOpenMPRegionRAII() {
422 // Restore original CapturedStmtInfo only if we're done with code emission.
424 cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
425 delete CGF.CapturedStmtInfo;
426 CGF.CapturedStmtInfo = OldCSI;
427 std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
428 CGF.LambdaThisCaptureField = LambdaThisCaptureField;
429 CGF.BlockInfo = BlockInfo;
433 /// Values for bit flags used in the ident_t to describe the fields.
434 /// All enumerated elements are named and described in accordance with the code
435 /// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
436 enum OpenMPLocationFlags : unsigned {
437 /// Use trampoline for internal microtask.
438 OMP_IDENT_IMD = 0x01,
439 /// Use c-style ident structure.
440 OMP_IDENT_KMPC = 0x02,
441 /// Atomic reduction option for kmpc_reduce.
442 OMP_ATOMIC_REDUCE = 0x10,
443 /// Explicit 'barrier' directive.
444 OMP_IDENT_BARRIER_EXPL = 0x20,
445 /// Implicit barrier in code.
446 OMP_IDENT_BARRIER_IMPL = 0x40,
447 /// Implicit barrier in 'for' directive.
448 OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
449 /// Implicit barrier in 'sections' directive.
450 OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
451 /// Implicit barrier in 'single' directive.
452 OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
453 /// Call of __kmp_for_static_init for static loop.
454 OMP_IDENT_WORK_LOOP = 0x200,
455 /// Call of __kmp_for_static_init for sections.
456 OMP_IDENT_WORK_SECTIONS = 0x400,
457 /// Call of __kmp_for_static_init for distribute.
458 OMP_IDENT_WORK_DISTRIBUTE = 0x800,
459 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
462 /// Describes ident structure that describes a source location.
463 /// All descriptions are taken from
464 /// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
465 /// Original structure:
466 /// typedef struct ident {
467 /// kmp_int32 reserved_1; /**< might be used in Fortran;
469 /// kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags;
470 /// KMP_IDENT_KMPC identifies this union
472 /// kmp_int32 reserved_2; /**< not really used in Fortran any more;
475 /// /* but currently used for storing
476 /// region-specific ITT */
477 /// /* contextual information. */
478 ///#endif /* USE_ITT_BUILD */
479 /// kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for
481 /// char const *psource; /**< String describing the source location.
482 /// The string is composed of semi-colon separated
483 /// fields which describe the source file,
484 /// the function and a pair of line numbers that
485 /// delimit the construct.
/// Field indices into the ident_t struct described above.
488 enum IdentFieldIndex {
489 /// might be used in Fortran
490 IdentField_Reserved_1,
491 /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
493 /// Not really used in Fortran any more
494 IdentField_Reserved_2,
495 /// Source[4] in Fortran, do not use for C++
496 IdentField_Reserved_3,
497 /// String describing the source location. The string is composed of
498 /// semi-colon separated fields which describe the source file, the function
499 /// and a pair of line numbers that delimit the construct.
503 /// Schedule types for 'omp for' loops (these enumerators are taken from
504 /// the enum sched_type in kmp.h).
/// NOTE: values must stay numerically in sync with the OpenMP runtime's
/// kmp.h sched_type enum.
505 enum OpenMPSchedType {
506 /// Lower bound for default (unordered) versions.
508 OMP_sch_static_chunked = 33,
510 OMP_sch_dynamic_chunked = 35,
511 OMP_sch_guided_chunked = 36,
512 OMP_sch_runtime = 37,
514 /// static with chunk adjustment (e.g., simd)
515 OMP_sch_static_balanced_chunked = 45,
516 /// Lower bound for 'ordered' versions.
518 OMP_ord_static_chunked = 65,
520 OMP_ord_dynamic_chunked = 67,
521 OMP_ord_guided_chunked = 68,
522 OMP_ord_runtime = 69,
524 OMP_sch_default = OMP_sch_static,
525 /// dist_schedule types
526 OMP_dist_sch_static_chunked = 91,
527 OMP_dist_sch_static = 92,
528 /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
529 /// Set if the monotonic schedule modifier was present.
530 OMP_sch_modifier_monotonic = (1 << 29),
531 /// Set if the nonmonotonic schedule modifier was present.
532 OMP_sch_modifier_nonmonotonic = (1 << 30),
/// Identifiers for the libomp/libomptarget entry points this runtime class
/// can emit calls to; each enumerator documents the C signature of the
/// corresponding runtime function.
535 enum OpenMPRTLFunction {
536 /// Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
537 /// kmpc_micro microtask, ...);
538 OMPRTL__kmpc_fork_call,
539 /// Call to void *__kmpc_threadprivate_cached(ident_t *loc,
540 /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
541 OMPRTL__kmpc_threadprivate_cached,
542 /// Call to void __kmpc_threadprivate_register( ident_t *,
543 /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
544 OMPRTL__kmpc_threadprivate_register,
545 // Call to __kmpc_int32 kmpc_global_thread_num(ident_t *loc);
546 OMPRTL__kmpc_global_thread_num,
547 // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
548 // kmp_critical_name *crit);
549 OMPRTL__kmpc_critical,
550 // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
551 // global_tid, kmp_critical_name *crit, uintptr_t hint);
552 OMPRTL__kmpc_critical_with_hint,
553 // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
554 // kmp_critical_name *crit);
555 OMPRTL__kmpc_end_critical,
556 // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
558 OMPRTL__kmpc_cancel_barrier,
559 // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
560 OMPRTL__kmpc_barrier,
561 // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
562 OMPRTL__kmpc_for_static_fini,
563 // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
565 OMPRTL__kmpc_serialized_parallel,
566 // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
568 OMPRTL__kmpc_end_serialized_parallel,
569 // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
570 // kmp_int32 num_threads);
571 OMPRTL__kmpc_push_num_threads,
572 // Call to void __kmpc_flush(ident_t *loc);
574 // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
576 // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
577 OMPRTL__kmpc_end_master,
578 // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
580 OMPRTL__kmpc_omp_taskyield,
581 // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
583 // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
584 OMPRTL__kmpc_end_single,
585 // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
586 // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
587 // kmp_routine_entry_t *task_entry);
588 OMPRTL__kmpc_omp_task_alloc,
589 // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
591 OMPRTL__kmpc_omp_task,
592 // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
593 // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
595 OMPRTL__kmpc_copyprivate,
596 // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
597 // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
598 // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
600 // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
601 // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
602 // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
604 OMPRTL__kmpc_reduce_nowait,
605 // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
606 // kmp_critical_name *lck);
607 OMPRTL__kmpc_end_reduce,
608 // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
609 // kmp_critical_name *lck);
610 OMPRTL__kmpc_end_reduce_nowait,
611 // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
612 // kmp_task_t * new_task);
613 OMPRTL__kmpc_omp_task_begin_if0,
614 // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
615 // kmp_task_t * new_task);
616 OMPRTL__kmpc_omp_task_complete_if0,
617 // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
618 OMPRTL__kmpc_ordered,
619 // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
620 OMPRTL__kmpc_end_ordered,
621 // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
623 OMPRTL__kmpc_omp_taskwait,
624 // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
625 OMPRTL__kmpc_taskgroup,
626 // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
627 OMPRTL__kmpc_end_taskgroup,
628 // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
630 OMPRTL__kmpc_push_proc_bind,
631 // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
632 // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
633 // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
634 OMPRTL__kmpc_omp_task_with_deps,
635 // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
636 // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
637 // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
638 OMPRTL__kmpc_omp_wait_deps,
639 // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
640 // global_tid, kmp_int32 cncl_kind);
641 OMPRTL__kmpc_cancellationpoint,
642 // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
643 // kmp_int32 cncl_kind);
645 // Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
646 // kmp_int32 num_teams, kmp_int32 thread_limit);
647 OMPRTL__kmpc_push_num_teams,
648 // Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
650 OMPRTL__kmpc_fork_teams,
651 // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
652 // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
653 // sched, kmp_uint64 grainsize, void *task_dup);
654 OMPRTL__kmpc_taskloop,
655 // Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
656 // num_dims, struct kmp_dim *dims);
657 OMPRTL__kmpc_doacross_init,
658 // Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
659 OMPRTL__kmpc_doacross_fini,
660 // Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
662 OMPRTL__kmpc_doacross_post,
663 // Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
665 OMPRTL__kmpc_doacross_wait,
666 // Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
668 OMPRTL__kmpc_task_reduction_init,
669 // Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
671 OMPRTL__kmpc_task_reduction_get_th_data,
674 // Offloading related calls
676 // Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
677 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
680 // Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
681 // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
683 OMPRTL__tgt_target_nowait,
684 // Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
685 // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
686 // *arg_types, int32_t num_teams, int32_t thread_limit);
687 OMPRTL__tgt_target_teams,
688 // Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
689 // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
690 // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
691 OMPRTL__tgt_target_teams_nowait,
692 // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
693 OMPRTL__tgt_register_lib,
694 // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
695 OMPRTL__tgt_unregister_lib,
696 // Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
697 // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
698 OMPRTL__tgt_target_data_begin,
699 // Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
700 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
702 OMPRTL__tgt_target_data_begin_nowait,
703 // Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
704 // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
705 OMPRTL__tgt_target_data_end,
706 // Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
707 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
709 OMPRTL__tgt_target_data_end_nowait,
710 // Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
711 // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
712 OMPRTL__tgt_target_data_update,
713 // Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
714 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
716 OMPRTL__tgt_target_data_update_nowait,
719 /// A basic class for pre|post-action for advanced codegen sequence for OpenMP
721 class CleanupTy final : public EHScopeStack::Cleanup {
722 PrePostActionTy *Action;
725 explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
726 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
// Nothing to emit if the builder has no insertion point.
727 if (!CGF.HaveInsertPoint())
733 } // anonymous namespace
/// Invoke the stored codegen callback inside a fresh cleanups scope. When a
/// pre/post action is attached, its Exit step is registered as an EH cleanup
/// before the callback runs; otherwise a default-constructed action is used.
735 void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
736 CodeGenFunction::RunCleanupsScope Scope(CGF);
738 CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
739 Callback(CodeGen, CGF, *PrePostAction);
741 PrePostActionTy Action;
742 Callback(CodeGen, CGF, Action);
746 /// Check if the combiner is a call to UDR combiner and if it is so return the
747 /// UDR decl used for reduction.
748 static const OMPDeclareReductionDecl *
749 getReductionInit(const Expr *ReductionOp) {
// Pattern-match: call whose callee is an opaque value wrapping a reference
// to an OMPDeclareReductionDecl (implicit casts skipped).
750 if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
751 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
752 if (const auto *DRE =
753 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
754 if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
/// Emit initialization of \p Private from the initializer clause of the
/// user-defined reduction \p DRD. If DRD has an explicit initializer, the
/// UDR init function is invoked with Private/Original substituted for its
/// LHS/RHS parameters; otherwise Private is filled from a zero-initialized
/// private global of the reduction type.
759 static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
760 const OMPDeclareReductionDecl *DRD,
762 Address Private, Address Original,
764 if (DRD->getInitializer()) {
765 std::pair<llvm::Function *, llvm::Function *> Reduction =
766 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
767 const auto *CE = cast<CallExpr>(InitOp);
768 const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
769 const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
770 const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
772 cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
774 cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
// Map the UDR's LHS/RHS variables onto the private and original storage.
775 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
776 PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
777 [=]() { return Private; });
778 PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
779 [=]() { return Original; });
780 (void)PrivateScope.Privatize();
781 RValue Func = RValue::get(Reduction.second);
782 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
783 CGF.EmitIgnoredExpr(InitOp);
// No explicit initializer: build a private constant zero of the type and
// copy it into Private via an opaque value.
785 llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
786 std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
787 auto *GV = new llvm::GlobalVariable(
788 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
789 llvm::GlobalValue::PrivateLinkage, Init, Name);
790 LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
792 switch (CGF.getEvaluationKind(Ty)) {
794 InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
798 RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
801 InitRVal = RValue::getAggregate(LV.getAddress());
804 OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
805 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
806 CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
807 /*IsInitializer=*/false);
811 /// Emit initialization of arrays of complex types.
812 /// \param DestAddr Address of the array.
813 /// \param Type Type of array.
814 /// \param Init Initial expression of array.
815 /// \param SrcAddr Address of the original array.
816 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
817 QualType Type, bool EmitDeclareReductionInit,
819 const OMPDeclareReductionDecl *DRD,
820 Address SrcAddr = Address::invalid()) {
821 // Perform element-by-element initialization.
824 // Drill down to the base element type on both arrays.
825 const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
826 llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
828 CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
831 CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
833 llvm::Value *SrcBegin = nullptr;
835 SrcBegin = SrcAddr.getPointer();
836 llvm::Value *DestBegin = DestAddr.getPointer();
837 // Cast from pointer to array type to pointer to single element.
838 llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
839 // The basic structure here is a while-do loop.
840 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
841 llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
// Skip the loop entirely for zero-length arrays.
842 llvm::Value *IsEmpty =
843 CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
844 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
846 // Enter the loop body, making that address the current address.
847 llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
848 CGF.EmitBlock(BodyBB);
850 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
// PHIs carry the current source/destination element across iterations.
852 llvm::PHINode *SrcElementPHI = nullptr;
853 Address SrcElementCurrent = Address::invalid();
855 SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
856 "omp.arraycpy.srcElementPast");
857 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
859 Address(SrcElementPHI,
860 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
862 llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
863 DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
864 DestElementPHI->addIncoming(DestBegin, EntryBB);
865 Address DestElementCurrent =
866 Address(DestElementPHI,
867 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Initialize the current element, either via the UDR initializer or the
// plain init expression.
871 CodeGenFunction::RunCleanupsScope InitScope(CGF);
872 if (EmitDeclareReductionInit) {
873 emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
874 SrcElementCurrent, ElementTy);
876 CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
877 /*IsInitializer=*/false);
881 // Shift the address forward by one element.
882 llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
883 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
884 SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
887 // Shift the address forward by one element.
888 llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
889 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
890 // Check whether we've reached the end.
892 CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done")
893 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
894 DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
897 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
900 LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
901 return CGF.EmitOMPSharedLValue(E);
904 LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
906 if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
907 return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
911 void ReductionCodeGen::emitAggregateInitialization(
912 CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
913 const OMPDeclareReductionDecl *DRD) {
914 // Emit VarDecl with copy init for arrays.
915 // Get the address of the original variable captured in current
917 const auto *PrivateVD =
918 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
919 bool EmitDeclareReductionInit =
920 DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
921 EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
922 EmitDeclareReductionInit,
923 EmitDeclareReductionInit ? ClausesData[N].ReductionOp
924 : PrivateVD->getInit(),
925 DRD, SharedLVal.getAddress());
928 ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
929 ArrayRef<const Expr *> Privates,
930 ArrayRef<const Expr *> ReductionOps) {
931 ClausesData.reserve(Shareds.size());
932 SharedAddresses.reserve(Shareds.size());
933 Sizes.reserve(Shareds.size());
934 BaseDecls.reserve(Shareds.size());
935 auto IPriv = Privates.begin();
936 auto IRed = ReductionOps.begin();
937 for (const Expr *Ref : Shareds) {
938 ClausesData.emplace_back(Ref, *IPriv, *IRed);
939 std::advance(IPriv, 1);
940 std::advance(IRed, 1);
944 void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
945 assert(SharedAddresses.size() == N &&
946 "Number of generated lvalues must be exactly N.");
947 LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
948 LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
949 SharedAddresses.emplace_back(First, Second);
952 void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
953 const auto *PrivateVD =
954 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
955 QualType PrivateType = PrivateVD->getType();
956 bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
957 if (!PrivateType->isVariablyModifiedType()) {
960 SharedAddresses[N].first.getType().getNonReferenceType()),
965 llvm::Value *SizeInChars;
967 cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
969 auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
970 if (AsArraySection) {
971 Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(),
972 SharedAddresses[N].first.getPointer());
973 Size = CGF.Builder.CreateNUWAdd(
974 Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
975 SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
977 SizeInChars = CGF.getTypeSize(
978 SharedAddresses[N].first.getType().getNonReferenceType());
979 Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
981 Sizes.emplace_back(SizeInChars, Size);
982 CodeGenFunction::OpaqueValueMapping OpaqueMap(
984 cast<OpaqueValueExpr>(
985 CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
987 CGF.EmitVariablyModifiedType(PrivateType);
990 void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
992 const auto *PrivateVD =
993 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
994 QualType PrivateType = PrivateVD->getType();
995 if (!PrivateType->isVariablyModifiedType()) {
996 assert(!Size && !Sizes[N].second &&
997 "Size should be nullptr for non-variably modified reduction "
1001 CodeGenFunction::OpaqueValueMapping OpaqueMap(
1003 cast<OpaqueValueExpr>(
1004 CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
1006 CGF.EmitVariablyModifiedType(PrivateType);
1009 void ReductionCodeGen::emitInitialization(
1010 CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
1011 llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
1012 assert(SharedAddresses.size() > N && "No variable was generated");
1013 const auto *PrivateVD =
1014 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1015 const OMPDeclareReductionDecl *DRD =
1016 getReductionInit(ClausesData[N].ReductionOp);
1017 QualType PrivateType = PrivateVD->getType();
1018 PrivateAddr = CGF.Builder.CreateElementBitCast(
1019 PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1020 QualType SharedType = SharedAddresses[N].first.getType();
1021 SharedLVal = CGF.MakeAddrLValue(
1022 CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(),
1023 CGF.ConvertTypeForMem(SharedType)),
1024 SharedType, SharedAddresses[N].first.getBaseInfo(),
1025 CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
1026 if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
1027 emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
1028 } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1029 emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
1030 PrivateAddr, SharedLVal.getAddress(),
1031 SharedLVal.getType());
1032 } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
1033 !CGF.isTrivialInitializer(PrivateVD->getInit())) {
1034 CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
1035 PrivateVD->getType().getQualifiers(),
1036 /*IsInitializer=*/false);
1040 bool ReductionCodeGen::needCleanups(unsigned N) {
1041 const auto *PrivateVD =
1042 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1043 QualType PrivateType = PrivateVD->getType();
1044 QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1045 return DTorKind != QualType::DK_none;
1048 void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
1049 Address PrivateAddr) {
1050 const auto *PrivateVD =
1051 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1052 QualType PrivateType = PrivateVD->getType();
1053 QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1054 if (needCleanups(N)) {
1055 PrivateAddr = CGF.Builder.CreateElementBitCast(
1056 PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1057 CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
1061 static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1063 BaseTy = BaseTy.getNonReferenceType();
1064 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1065 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1066 if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
1067 BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
1069 LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
1070 BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
1072 BaseTy = BaseTy->getPointeeType();
1074 return CGF.MakeAddrLValue(
1075 CGF.Builder.CreateElementBitCast(BaseLV.getAddress(),
1076 CGF.ConvertTypeForMem(ElTy)),
1077 BaseLV.getType(), BaseLV.getBaseInfo(),
1078 CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
1081 static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1082 llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
1083 llvm::Value *Addr) {
1084 Address Tmp = Address::invalid();
1085 Address TopTmp = Address::invalid();
1086 Address MostTopTmp = Address::invalid();
1087 BaseTy = BaseTy.getNonReferenceType();
1088 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1089 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1090 Tmp = CGF.CreateMemTemp(BaseTy);
1091 if (TopTmp.isValid())
1092 CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
1096 BaseTy = BaseTy->getPointeeType();
1098 llvm::Type *Ty = BaseLVType;
1100 Ty = Tmp.getElementType();
1101 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
1102 if (Tmp.isValid()) {
1103 CGF.Builder.CreateStore(Addr, Tmp);
1106 return Address(Addr, BaseLVAlignment);
1109 static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
1110 const VarDecl *OrigVD = nullptr;
1111 if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
1112 const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
1113 while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
1114 Base = TempOASE->getBase()->IgnoreParenImpCasts();
1115 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1116 Base = TempASE->getBase()->IgnoreParenImpCasts();
1117 DE = cast<DeclRefExpr>(Base);
1118 OrigVD = cast<VarDecl>(DE->getDecl());
1119 } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
1120 const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
1121 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1122 Base = TempASE->getBase()->IgnoreParenImpCasts();
1123 DE = cast<DeclRefExpr>(Base);
1124 OrigVD = cast<VarDecl>(DE->getDecl());
1129 Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
1130 Address PrivateAddr) {
1131 const DeclRefExpr *DE;
1132 if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
1133 BaseDecls.emplace_back(OrigVD);
1134 LValue OriginalBaseLValue = CGF.EmitLValue(DE);
1136 loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
1137 OriginalBaseLValue);
1138 llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
1139 BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
1140 llvm::Value *PrivatePointer =
1141 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1142 PrivateAddr.getPointer(),
1143 SharedAddresses[N].first.getAddress().getType());
1144 llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
1145 return castToBase(CGF, OrigVD->getType(),
1146 SharedAddresses[N].first.getType(),
1147 OriginalBaseLValue.getAddress().getType(),
1148 OriginalBaseLValue.getAlignment(), Ptr);
1150 BaseDecls.emplace_back(
1151 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
1155 bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
1156 const OMPDeclareReductionDecl *DRD =
1157 getReductionInit(ClausesData[N].ReductionOp);
1158 return DRD && DRD->getInitializer();
1161 LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
1162 return CGF.EmitLoadOfPointerLValue(
1163 CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1164 getThreadIDVariable()->getType()->castAs<PointerType>());
1167 void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
1168 if (!CGF.HaveInsertPoint())
1170 // 1.2.2 OpenMP Language Terminology
1171 // Structured block - An executable statement with a single entry at the
1172 // top and a single exit at the bottom.
1173 // The point of exit cannot be a branch out of the structured block.
1174 // longjmp() and throw() must not violate the entry/exit criteria.
1175 CGF.EHStack.pushTerminate();
1177 CGF.EHStack.popTerminate();
1180 LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1181 CodeGenFunction &CGF) {
1182 return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1183 getThreadIDVariable()->getType(),
1184 AlignmentSource::Decl);
1187 static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
1189 auto *Field = FieldDecl::Create(
1190 C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
1191 C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
1192 /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
1193 Field->setAccess(AS_public);
1198 CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
1199 StringRef Separator)
1200 : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
1201 OffloadEntriesInfoManager(CGM) {
1202 ASTContext &C = CGM.getContext();
1203 RecordDecl *RD = C.buildImplicitRecord("ident_t");
1204 QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
1205 RD->startDefinition();
1207 addFieldToRecordDecl(C, RD, KmpInt32Ty);
1209 addFieldToRecordDecl(C, RD, KmpInt32Ty);
1211 addFieldToRecordDecl(C, RD, KmpInt32Ty);
1213 addFieldToRecordDecl(C, RD, KmpInt32Ty);
1215 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
1216 RD->completeDefinition();
1217 IdentQTy = C.getRecordType(RD);
1218 IdentTy = CGM.getTypes().ConvertRecordDeclType(RD);
1219 KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
1221 loadOffloadInfoMetadata();
1224 void CGOpenMPRuntime::clear() {
1225 InternalVars.clear();
1228 std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
1229 SmallString<128> Buffer;
1230 llvm::raw_svector_ostream OS(Buffer);
1231 StringRef Sep = FirstSeparator;
1232 for (StringRef Part : Parts) {
1239 static llvm::Function *
1240 emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
1241 const Expr *CombinerInitializer, const VarDecl *In,
1242 const VarDecl *Out, bool IsCombiner) {
1243 // void .omp_combiner.(Ty *in, Ty *out);
1244 ASTContext &C = CGM.getContext();
1245 QualType PtrTy = C.getPointerType(Ty).withRestrict();
1246 FunctionArgList Args;
1247 ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
1248 /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1249 ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
1250 /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1251 Args.push_back(&OmpOutParm);
1252 Args.push_back(&OmpInParm);
1253 const CGFunctionInfo &FnInfo =
1254 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1255 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
1256 std::string Name = CGM.getOpenMPRuntime().getName(
1257 {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
1258 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
1259 Name, &CGM.getModule());
1260 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
1261 Fn->removeFnAttr(llvm::Attribute::NoInline);
1262 Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
1263 Fn->addFnAttr(llvm::Attribute::AlwaysInline);
1264 CodeGenFunction CGF(CGM);
1265 // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
1266 // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
1267 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
1268 Out->getLocation());
1269 CodeGenFunction::OMPPrivateScope Scope(CGF);
1270 Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
1271 Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
1272 return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
1275 Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
1276 Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
1277 return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
1280 (void)Scope.Privatize();
1281 if (!IsCombiner && Out->hasInit() &&
1282 !CGF.isTrivialInitializer(Out->getInit())) {
1283 CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
1284 Out->getType().getQualifiers(),
1285 /*IsInitializer=*/true);
1287 if (CombinerInitializer)
1288 CGF.EmitIgnoredExpr(CombinerInitializer);
1289 Scope.ForceCleanup();
1290 CGF.FinishFunction();
1294 void CGOpenMPRuntime::emitUserDefinedReduction(
1295 CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
1296 if (UDRMap.count(D) > 0)
1298 ASTContext &C = CGM.getContext();
1300 In = &C.Idents.get("omp_in");
1301 Out = &C.Idents.get("omp_out");
1303 llvm::Function *Combiner = emitCombinerOrInitializer(
1304 CGM, D->getType(), D->getCombiner(), cast<VarDecl>(D->lookup(In).front()),
1305 cast<VarDecl>(D->lookup(Out).front()),
1306 /*IsCombiner=*/true);
1307 llvm::Function *Initializer = nullptr;
1308 if (const Expr *Init = D->getInitializer()) {
1309 if (!Priv || !Orig) {
1310 Priv = &C.Idents.get("omp_priv");
1311 Orig = &C.Idents.get("omp_orig");
1313 Initializer = emitCombinerOrInitializer(
1315 D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
1317 cast<VarDecl>(D->lookup(Orig).front()),
1318 cast<VarDecl>(D->lookup(Priv).front()),
1319 /*IsCombiner=*/false);
1321 UDRMap.try_emplace(D, Combiner, Initializer);
1323 auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
1324 Decls.second.push_back(D);
1328 std::pair<llvm::Function *, llvm::Function *>
1329 CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
1330 auto I = UDRMap.find(D);
1331 if (I != UDRMap.end())
1333 emitUserDefinedReduction(/*CGF=*/nullptr, D);
1334 return UDRMap.lookup(D);
1337 static llvm::Value *emitParallelOrTeamsOutlinedFunction(
1338 CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
1339 const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
1340 const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
1341 assert(ThreadIDVar->getType()->isPointerType() &&
1342 "thread id variable must be of type kmp_int32 *");
1343 CodeGenFunction CGF(CGM, true);
1344 bool HasCancel = false;
1345 if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
1346 HasCancel = OPD->hasCancel();
1347 else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
1348 HasCancel = OPSD->hasCancel();
1349 else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
1350 HasCancel = OPFD->hasCancel();
1351 else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
1352 HasCancel = OPFD->hasCancel();
1353 else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
1354 HasCancel = OPFD->hasCancel();
1355 else if (const auto *OPFD =
1356 dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
1357 HasCancel = OPFD->hasCancel();
1358 else if (const auto *OPFD =
1359 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
1360 HasCancel = OPFD->hasCancel();
1361 CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
1362 HasCancel, OutlinedHelperName);
1363 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1364 return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
1367 llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
1368 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1369 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1370 const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1371 return emitParallelOrTeamsOutlinedFunction(
1372 CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1375 llvm::Value *CGOpenMPRuntime::emitTeamsOutlinedFunction(
1376 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1377 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1378 const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1379 return emitParallelOrTeamsOutlinedFunction(
1380 CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1383 llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
1384 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1385 const VarDecl *PartIDVar, const VarDecl *TaskTVar,
1386 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1387 bool Tied, unsigned &NumberOfParts) {
1388 auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
1389 PrePostActionTy &) {
1390 llvm::Value *ThreadID = getThreadID(CGF, D.getLocStart());
1391 llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getLocStart());
1392 llvm::Value *TaskArgs[] = {
1394 CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
1395 TaskTVar->getType()->castAs<PointerType>())
1397 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
1399 CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
1401 CodeGen.setAction(Action);
1402 assert(!ThreadIDVar->getType()->isPointerType() &&
1403 "thread id variable must be of type kmp_int32 for tasks");
1404 const OpenMPDirectiveKind Region =
1405 isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
1407 const CapturedStmt *CS = D.getCapturedStmt(Region);
1408 const auto *TD = dyn_cast<OMPTaskDirective>(&D);
1409 CodeGenFunction CGF(CGM, true);
1410 CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
1412 TD ? TD->hasCancel() : false, Action);
1413 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1414 llvm::Value *Res = CGF.GenerateCapturedStmtFunction(*CS);
1416 NumberOfParts = Action.getNumberOfParts();
1420 static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
1421 const RecordDecl *RD, const CGRecordLayout &RL,
1422 ArrayRef<llvm::Constant *> Data) {
1423 llvm::StructType *StructTy = RL.getLLVMType();
1424 unsigned PrevIdx = 0;
1425 ConstantInitBuilder CIBuilder(CGM);
1426 auto DI = Data.begin();
1427 for (const FieldDecl *FD : RD->fields()) {
1428 unsigned Idx = RL.getLLVMFieldNo(FD);
1429 // Fill the alignment.
1430 for (unsigned I = PrevIdx; I < Idx; ++I)
1431 Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
1438 template <class... As>
1439 static llvm::GlobalVariable *
1440 createConstantGlobalStruct(CodeGenModule &CGM, QualType Ty,
1441 ArrayRef<llvm::Constant *> Data, const Twine &Name,
1443 const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1444 const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1445 ConstantInitBuilder CIBuilder(CGM);
1446 ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
1447 buildStructValue(Fields, CGM, RD, RL, Data);
1448 return Fields.finishAndCreateGlobal(
1449 Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty),
1450 /*isConstant=*/true, std::forward<As>(Args)...);
1453 template <typename T>
1455 createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
1456 ArrayRef<llvm::Constant *> Data,
1458 const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1459 const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1460 ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
1461 buildStructValue(Fields, CGM, RD, RL, Data);
1462 Fields.finishAndAddTo(Parent);
1465 Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
1466 CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
1467 llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
1469 if (!DefaultOpenMPPSource) {
1470 // Initialize default location for psource field of ident_t structure of
1471 // all ident_t objects. Format is ";file;function;line;column;;".
1473 // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
1474 DefaultOpenMPPSource =
1475 CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
1476 DefaultOpenMPPSource =
1477 llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
1480 llvm::Constant *Data[] = {llvm::ConstantInt::getNullValue(CGM.Int32Ty),
1481 llvm::ConstantInt::get(CGM.Int32Ty, Flags),
1482 llvm::ConstantInt::getNullValue(CGM.Int32Ty),
1483 llvm::ConstantInt::getNullValue(CGM.Int32Ty),
1484 DefaultOpenMPPSource};
1485 llvm::GlobalValue *DefaultOpenMPLocation = createConstantGlobalStruct(
1486 CGM, IdentQTy, Data, "", llvm::GlobalValue::PrivateLinkage);
1487 DefaultOpenMPLocation->setUnnamedAddr(
1488 llvm::GlobalValue::UnnamedAddr::Global);
1490 OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
1492 return Address(Entry, Align);
1495 llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
1498 Flags |= OMP_IDENT_KMPC;
1499 // If no debug info is generated - return global default location.
1500 if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
1502 return getOrCreateDefaultLocation(Flags).getPointer();
1504 assert(CGF.CurFn && "No function in current CodeGenFunction.");
1506 CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
1507 Address LocValue = Address::invalid();
1508 auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1509 if (I != OpenMPLocThreadIDMap.end())
1510 LocValue = Address(I->second.DebugLoc, Align);
1512 // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
1513 // GetOpenMPThreadID was called before this routine.
1514 if (!LocValue.isValid()) {
1515 // Generate "ident_t .kmpc_loc.addr;"
1516 Address AI = CGF.CreateMemTemp(IdentQTy, ".kmpc_loc.addr");
1517 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1518 Elem.second.DebugLoc = AI.getPointer();
1521 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1522 CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
1523 CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
1524 CGF.getTypeSize(IdentQTy));
1527 // char **psource = &.kmpc_loc_<flags>.addr.psource;
1528 LValue Base = CGF.MakeAddrLValue(LocValue, IdentQTy);
1529 auto Fields = cast<RecordDecl>(IdentQTy->getAsTagDecl())->field_begin();
1531 CGF.EmitLValueForField(Base, *std::next(Fields, IdentField_PSource));
1533 llvm::Value *OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
1534 if (OMPDebugLoc == nullptr) {
1535 SmallString<128> Buffer2;
1536 llvm::raw_svector_ostream OS2(Buffer2);
1537 // Build debug location
1538 PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1539 OS2 << ";" << PLoc.getFilename() << ";";
1540 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
1541 OS2 << FD->getQualifiedNameAsString();
1542 OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
1543 OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
1544 OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
1546 // *psource = ";<File>;<Function>;<Line>;<Column>;;";
1547 CGF.EmitStoreOfScalar(OMPDebugLoc, PSource);
1549 // Our callers always pass this to a runtime function, so for
1550 // convenience, go ahead and return a naked pointer.
1551 return LocValue.getPointer();
1554 llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
1555 SourceLocation Loc) {
1556 assert(CGF.CurFn && "No function in current CodeGenFunction.");
1558 llvm::Value *ThreadID = nullptr;
1559 // Check whether we've already cached a load of the thread id in this
1561 auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1562 if (I != OpenMPLocThreadIDMap.end()) {
1563 ThreadID = I->second.ThreadID;
1564 if (ThreadID != nullptr)
1567 // If exceptions are enabled, do not use parameter to avoid possible crash.
1568 if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
1569 !CGF.getLangOpts().CXXExceptions ||
1570 CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1571 if (auto *OMPRegionInfo =
1572 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
1573 if (OMPRegionInfo->getThreadIDVariable()) {
1574 // Check if this an outlined function with thread id passed as argument.
1575 LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
1576 ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
1577 // If value loaded in entry block, cache it and use it everywhere in
1579 if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1580 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1581 Elem.second.ThreadID = ThreadID;
1588 // This is not an outlined function region - need to call __kmpc_int32
1589 // kmpc_global_thread_num(ident_t *loc).
1590 // Generate thread id value and cache this value for use across the
1592 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1593 CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
1594 llvm::CallInst *Call = CGF.Builder.CreateCall(
1595 createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
1596 emitUpdateLocation(CGF, Loc));
1597 Call->setCallingConv(CGF.getRuntimeCC());
1598 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1599 Elem.second.ThreadID = Call;
1603 void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
1604 assert(CGF.CurFn && "No function in current CodeGenFunction.");
1605 if (OpenMPLocThreadIDMap.count(CGF.CurFn))
1606 OpenMPLocThreadIDMap.erase(CGF.CurFn);
1607 if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1608 for(auto *D : FunctionUDRMap[CGF.CurFn])
1610 FunctionUDRMap.erase(CGF.CurFn);
1614 llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
1615 return IdentTy->getPointerTo();
1618 llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
1619 if (!Kmpc_MicroTy) {
1620 // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1621 llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1622 llvm::PointerType::getUnqual(CGM.Int32Ty)};
1623 Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1625 return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1629 CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
1630 llvm::Constant *RTLFn = nullptr;
1631 switch (static_cast<OpenMPRTLFunction>(Function)) {
1632 case OMPRTL__kmpc_fork_call: {
1633 // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
1635 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1636 getKmpc_MicroPointerTy()};
1638 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1639 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
1642 case OMPRTL__kmpc_global_thread_num: {
1643 // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
1644 llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1646 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1647 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
1650 case OMPRTL__kmpc_threadprivate_cached: {
1651 // Build void *__kmpc_threadprivate_cached(ident_t *loc,
1652 // kmp_int32 global_tid, void *data, size_t size, void ***cache);
1653 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1654 CGM.VoidPtrTy, CGM.SizeTy,
1655 CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
1657 llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
1658 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
1661 case OMPRTL__kmpc_critical: {
1662 // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
1663 // kmp_critical_name *crit);
1664 llvm::Type *TypeParams[] = {
1665 getIdentTyPointerTy(), CGM.Int32Ty,
1666 llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1668 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1669 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
1672 case OMPRTL__kmpc_critical_with_hint: {
1673 // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
1674 // kmp_critical_name *crit, uintptr_t hint);
1675 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1676 llvm::PointerType::getUnqual(KmpCriticalNameTy),
1679 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1680 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
1683 case OMPRTL__kmpc_threadprivate_register: {
1684 // Build void __kmpc_threadprivate_register(ident_t *, void *data,
1685 // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
1686 // typedef void *(*kmpc_ctor)(void *);
1688 llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
1689 /*isVarArg*/ false)->getPointerTo();
1690 // typedef void *(*kmpc_cctor)(void *, void *);
1691 llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1692 auto *KmpcCopyCtorTy =
1693 llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
1696 // typedef void (*kmpc_dtor)(void *);
1698 llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
1700 llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
1701 KmpcCopyCtorTy, KmpcDtorTy};
1702 auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
1703 /*isVarArg*/ false);
1704 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
1707 case OMPRTL__kmpc_end_critical: {
1708 // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
1709 // kmp_critical_name *crit);
1710 llvm::Type *TypeParams[] = {
1711 getIdentTyPointerTy(), CGM.Int32Ty,
1712 llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1714 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1715 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
1718 case OMPRTL__kmpc_cancel_barrier: {
1719 // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
1721 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1723 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1724 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
1727 case OMPRTL__kmpc_barrier: {
1728 // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1729 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1731 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1732 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1735 case OMPRTL__kmpc_for_static_fini: {
1736 // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
1737 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1739 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1740 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
1743 case OMPRTL__kmpc_push_num_threads: {
1744 // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
1745 // kmp_int32 num_threads)
1746 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1749 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1750 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
1753 case OMPRTL__kmpc_serialized_parallel: {
1754 // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1756 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1758 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1759 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1762 case OMPRTL__kmpc_end_serialized_parallel: {
1763 // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1765 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1767 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1768 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1771 case OMPRTL__kmpc_flush: {
1772 // Build void __kmpc_flush(ident_t *loc);
1773 llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1775 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1776 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
1779 case OMPRTL__kmpc_master: {
1780 // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
1781 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1783 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1784 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
1787 case OMPRTL__kmpc_end_master: {
1788 // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
1789 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1791 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1792 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
1795 case OMPRTL__kmpc_omp_taskyield: {
1796 // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
1798 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1800 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1801 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
1804 case OMPRTL__kmpc_single: {
1805 // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
1806 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1808 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1809 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
1812 case OMPRTL__kmpc_end_single: {
1813 // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
1814 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1816 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1817 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
1820 case OMPRTL__kmpc_omp_task_alloc: {
1821 // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
1822 // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1823 // kmp_routine_entry_t *task_entry);
1824 assert(KmpRoutineEntryPtrTy != nullptr &&
1825 "Type kmp_routine_entry_t must be created.");
1826 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1827 CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
1828 // Return void * and then cast to particular kmp_task_t type.
1830 llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1831 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
1834 case OMPRTL__kmpc_omp_task: {
1835 // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1837 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1840 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1841 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
1844 case OMPRTL__kmpc_copyprivate: {
1845 // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
1846 // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
1847 // kmp_int32 didit);
1848 llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1850 llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
1851 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
1852 CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
1855 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1856 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
1859 case OMPRTL__kmpc_reduce: {
1860 // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
1861 // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
1862 // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
1863 llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1864 auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1865 /*isVarArg=*/false);
1866 llvm::Type *TypeParams[] = {
1867 getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1868 CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1869 llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1871 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1872 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
1875 case OMPRTL__kmpc_reduce_nowait: {
1876 // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
1877 // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
1878 // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
1880 llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1881 auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1882 /*isVarArg=*/false);
1883 llvm::Type *TypeParams[] = {
1884 getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1885 CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1886 llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1888 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1889 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
1892 case OMPRTL__kmpc_end_reduce: {
1893 // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
1894 // kmp_critical_name *lck);
1895 llvm::Type *TypeParams[] = {
1896 getIdentTyPointerTy(), CGM.Int32Ty,
1897 llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1899 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1900 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
1903 case OMPRTL__kmpc_end_reduce_nowait: {
1904 // Build __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
1905 // kmp_critical_name *lck);
1906 llvm::Type *TypeParams[] = {
1907 getIdentTyPointerTy(), CGM.Int32Ty,
1908 llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1910 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1912 CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
1915 case OMPRTL__kmpc_omp_task_begin_if0: {
1916 // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1918 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1921 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1923 CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
1926 case OMPRTL__kmpc_omp_task_complete_if0: {
1927 // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1929 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1932 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1933 RTLFn = CGM.CreateRuntimeFunction(FnTy,
1934 /*Name=*/"__kmpc_omp_task_complete_if0");
1937 case OMPRTL__kmpc_ordered: {
1938 // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
1939 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1941 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1942 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
1945 case OMPRTL__kmpc_end_ordered: {
1946 // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
1947 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1949 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1950 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
1953 case OMPRTL__kmpc_omp_taskwait: {
1954 // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
1955 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1957 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1958 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
1961 case OMPRTL__kmpc_taskgroup: {
1962 // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
1963 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1965 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1966 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
1969 case OMPRTL__kmpc_end_taskgroup: {
1970 // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
1971 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1973 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1974 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
1977 case OMPRTL__kmpc_push_proc_bind: {
1978 // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
1980 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1982 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1983 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
1986 case OMPRTL__kmpc_omp_task_with_deps: {
1987 // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
1988 // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
1989 // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
1990 llvm::Type *TypeParams[] = {
1991 getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
1992 CGM.VoidPtrTy, CGM.Int32Ty, CGM.VoidPtrTy};
1994 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1996 CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
1999 case OMPRTL__kmpc_omp_wait_deps: {
2000 // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
2001 // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
2002 // kmp_depend_info_t *noalias_dep_list);
2003 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2004 CGM.Int32Ty, CGM.VoidPtrTy,
2005 CGM.Int32Ty, CGM.VoidPtrTy};
2007 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2008 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
2011 case OMPRTL__kmpc_cancellationpoint: {
2012 // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
2013 // global_tid, kmp_int32 cncl_kind)
2014 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2016 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2017 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
2020 case OMPRTL__kmpc_cancel: {
2021 // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
2022 // kmp_int32 cncl_kind)
2023 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2025 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2026 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
2029 case OMPRTL__kmpc_push_num_teams: {
2030 // Build void kmpc_push_num_teams (ident_t loc, kmp_int32 global_tid,
2031 // kmp_int32 num_teams, kmp_int32 num_threads)
2032 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
2035 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2036 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
2039 case OMPRTL__kmpc_fork_teams: {
2040 // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
2042 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2043 getKmpc_MicroPointerTy()};
2045 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
2046 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
2049 case OMPRTL__kmpc_taskloop: {
2050 // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
2051 // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
2052 // sched, kmp_uint64 grainsize, void *task_dup);
2053 llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2057 CGM.Int64Ty->getPointerTo(),
2058 CGM.Int64Ty->getPointerTo(),
2065 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2066 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
2069 case OMPRTL__kmpc_doacross_init: {
2070 // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
2071 // num_dims, struct kmp_dim *dims);
2072 llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2077 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2078 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
2081 case OMPRTL__kmpc_doacross_fini: {
2082 // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
2083 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2085 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2086 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
2089 case OMPRTL__kmpc_doacross_post: {
2090 // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
2092 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2093 CGM.Int64Ty->getPointerTo()};
2095 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2096 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
2099 case OMPRTL__kmpc_doacross_wait: {
2100 // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
2102 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2103 CGM.Int64Ty->getPointerTo()};
2105 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2106 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
2109 case OMPRTL__kmpc_task_reduction_init: {
2110 // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
2112 llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
2114 llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2116 CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
2119 case OMPRTL__kmpc_task_reduction_get_th_data: {
2120 // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
2122 llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2124 llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2125 RTLFn = CGM.CreateRuntimeFunction(
2126 FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
2129 case OMPRTL__tgt_target: {
2130 // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
2131 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2133 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2138 CGM.SizeTy->getPointerTo(),
2139 CGM.Int64Ty->getPointerTo()};
2141 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2142 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
2145 case OMPRTL__tgt_target_nowait: {
2146 // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
2147 // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2148 // int64_t *arg_types);
2149 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2154 CGM.SizeTy->getPointerTo(),
2155 CGM.Int64Ty->getPointerTo()};
2157 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2158 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
2161 case OMPRTL__tgt_target_teams: {
2162 // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
2163 // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2164 // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2165 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2170 CGM.SizeTy->getPointerTo(),
2171 CGM.Int64Ty->getPointerTo(),
2175 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2176 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
2179 case OMPRTL__tgt_target_teams_nowait: {
2180 // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
2181 // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
2182 // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2183 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2188 CGM.SizeTy->getPointerTo(),
2189 CGM.Int64Ty->getPointerTo(),
2193 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2194 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
2197 case OMPRTL__tgt_register_lib: {
2198 // Build void __tgt_register_lib(__tgt_bin_desc *desc);
2200 CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2201 llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2203 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2204 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
2207 case OMPRTL__tgt_unregister_lib: {
2208 // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
2210 CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2211 llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2213 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2214 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
2217 case OMPRTL__tgt_target_data_begin: {
2218 // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
2219 // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2220 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2224 CGM.SizeTy->getPointerTo(),
2225 CGM.Int64Ty->getPointerTo()};
2227 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2228 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
2231 case OMPRTL__tgt_target_data_begin_nowait: {
2232 // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
2233 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2235 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2239 CGM.SizeTy->getPointerTo(),
2240 CGM.Int64Ty->getPointerTo()};
2242 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2243 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
2246 case OMPRTL__tgt_target_data_end: {
2247 // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
2248 // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2249 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2253 CGM.SizeTy->getPointerTo(),
2254 CGM.Int64Ty->getPointerTo()};
2256 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2257 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
2260 case OMPRTL__tgt_target_data_end_nowait: {
2261 // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
2262 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2264 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2268 CGM.SizeTy->getPointerTo(),
2269 CGM.Int64Ty->getPointerTo()};
2271 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2272 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
2275 case OMPRTL__tgt_target_data_update: {
2276 // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
2277 // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2278 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2282 CGM.SizeTy->getPointerTo(),
2283 CGM.Int64Ty->getPointerTo()};
2285 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2286 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
2289 case OMPRTL__tgt_target_data_update_nowait: {
2290 // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
2291 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2293 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2297 CGM.SizeTy->getPointerTo(),
2298 CGM.Int64Ty->getPointerTo()};
2300 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2301 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
2305 assert(RTLFn && "Unable to find OpenMP runtime function");
// Return (creating it on first use) the declaration of the runtime entry
// __kmpc_for_static_init_{4,4u,8,8u}; the variant is selected by the loop
// induction-variable width (IVSize: 32 or 64 bits) and signedness (IVSigned).
// NOTE(review): several physical lines of this definition (the second
// signature parameter and part of the TypeParams initializer) are missing
// from this excerpt; comments describe only what is visible.
2309 llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
// Only 32- and 64-bit induction variables have runtime counterparts.
2311 assert((IVSize == 32 || IVSize == 64) &&
2312 "IV size is not compatible with the omp runtime");
// Entry-point name: suffix 4/8 encodes the IV width, a trailing 'u' marks
// the unsigned variants.
2313 StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
2314 : "__kmpc_for_static_init_4u")
2315 : (IVSigned ? "__kmpc_for_static_init_8"
2316 : "__kmpc_for_static_init_8u");
// ITy is the IR type of the induction variable; PtrTy is presumably used for
// the bound/stride out-parameters in the elided part of TypeParams — confirm.
2317 llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2318 auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2319 llvm::Type *TypeParams[] = {
2320 getIdentTyPointerTy(), // loc
2322 CGM.Int32Ty, // schedtype
2323 llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
// The runtime entry returns void; the Name computed above picks the variant.
2331 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2332 return CGM.CreateRuntimeFunction(FnTy, Name);
// Return (creating it on first use) the declaration of the dynamic-schedule
// initialization entry __kmpc_dispatch_init_{4,4u,8,8u}, selected by the
// induction-variable width (IVSize) and signedness (IVSigned).
// NOTE(review): some lines of this definition (second signature parameter,
// the `Name =` head of the conditional, and part of TypeParams) are missing
// from this excerpt.
2335 llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
2337 assert((IVSize == 32 || IVSize == 64) &&
2338 "IV size is not compatible with the omp runtime");
// Suffix encodes IV width (4/8 bytes) and unsignedness ('u').
2341 ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
2342 : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
// ITy: IR type of the induction variable (used by the elided parameters).
2343 llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2344 llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
2346 CGM.Int32Ty, // schedtype
2353 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2354 return CGM.CreateRuntimeFunction(FnTy, Name);
// Return (creating it on first use) the declaration of the dynamic-schedule
// finalization entry __kmpc_dispatch_fini_{4,4u,8,8u}, selected by the
// induction-variable width (IVSize) and signedness (IVSigned).
// NOTE(review): the second signature parameter, the `Name =` head, and part
// of TypeParams are elided in this excerpt.
2357 llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
2359 assert((IVSize == 32 || IVSize == 64) &&
2360 "IV size is not compatible with the omp runtime");
// Suffix encodes IV width (4/8 bytes) and unsignedness ('u').
2363 ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
2364 : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
2365 llvm::Type *TypeParams[] = {
2366 getIdentTyPointerTy(), // loc
// The runtime entry returns void; Name picks the width/sign variant.
2370 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2371 return CGM.CreateRuntimeFunction(FnTy, Name);
// Return (creating it on first use) the declaration of the dynamic-schedule
// chunk-fetch entry __kmpc_dispatch_next_{4,4u,8,8u}, selected by the
// induction-variable width (IVSize) and signedness (IVSigned). The entry
// returns kmp_int32 (visible below: FunctionType over CGM.Int32Ty).
// NOTE(review): the second signature parameter, the `Name =` head, and part
// of TypeParams are elided in this excerpt.
2374 llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
2376 assert((IVSize == 32 || IVSize == 64) &&
2377 "IV size is not compatible with the omp runtime");
// Suffix encodes IV width (4/8 bytes) and unsignedness ('u').
2380 ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
2381 : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
// ITy/PtrTy: IR type of the induction variable and pointer to it —
// presumably used for the bound/stride out-parameters elided here; confirm.
2382 llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2383 auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2384 llvm::Type *TypeParams[] = {
2385 getIdentTyPointerTy(), // loc
2387 llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2393 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2394 return CGM.CreateRuntimeFunction(FnTy, Name);
// Return the address of the "link pointer" global created for a variable
// declared with `declare target link`; returns Address::invalid() when the
// variable is not declare-target-link or when compiling in OpenMP-simd-only
// mode (where no offloading metadata is produced).
// NOTE(review): a few physical lines (the second getOrCreateInternalVariable
// argument, some closing braces) are elided in this excerpt.
2397 Address CGOpenMPRuntime::getAddrOfDeclareTargetLink(const VarDecl *VD) {
// -fopenmp-simd: no device code, nothing to link against.
2398 if (CGM.getLangOpts().OpenMPSimd)
2399 return Address::invalid();
2400 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2401 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2402 if (Res && *Res == OMPDeclareTargetDeclAttr::MT_Link) {
// The pointer's name is the mangled variable name plus a fixed suffix so
// host and device agree on it.
2403 SmallString<64> PtrName;
2405 llvm::raw_svector_ostream OS(PtrName);
2406 OS << CGM.getMangledName(GlobalDecl(VD)) << "_decl_tgt_link_ptr";
// Reuse an existing global of that name if present; otherwise create one
// of type pointer-to-VD's-type.
2408 llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
2410 QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
2411 Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
// On the host, the link pointer is externally visible and initialized with
// the address of the original variable; on the device the runtime patches it.
2413 if (!CGM.getLangOpts().OpenMPIsDevice) {
2414 auto *GV = cast<llvm::GlobalVariable>(Ptr);
2415 GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
2416 GV->setInitializer(CGM.GetAddrOfGlobal(VD));
// Keep the pointer alive through optimization and record it in the
// offloading entry table.
2418 CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ptr));
2419 registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
2421 return Address(Ptr, CGM.getContext().getDeclAlign(VD));
2423 return Address::invalid();
// Return (creating it on first use) the per-variable cache global passed to
// __kmpc_threadprivate_cached; named "<mangled VD name>.cache." via getName.
// Only used when TLS-based threadprivate lowering is unavailable (asserted).
// NOTE(review): the return-type line of this definition is elided in this
// excerpt.
2427 CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
// This path must not be reached when the target supports TLS and
// -fopenmp-use-tls is in effect — TLS lowering handles the variable instead.
2428 assert(!CGM.getLangOpts().OpenMPUseTLS ||
2429 !CGM.getContext().getTargetInfo().isTLSSupported());
2430 // Lookup the entry, lazily creating it if necessary.
2431 std::string Suffix = getName({"cache", ""});
2432 return getOrCreateInternalVariable(
2433 CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
// Return the address of the calling thread's copy of a threadprivate
// variable by emitting a call to __kmpc_threadprivate_cached(loc, tid,
// &var, size, &cache). When TLS is usable the variable itself is the
// thread-local copy (the early-return body is elided in this excerpt).
// NOTE(review): two signature parameter lines (presumably VD and VDAddr —
// both are referenced below) and the TLS early-return line are elided.
2436 Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
2439 SourceLocation Loc) {
2440 if (CGM.getLangOpts().OpenMPUseTLS &&
2441 CGM.getContext().getTargetInfo().isTLSSupported())
2444 llvm::Type *VarTy = VDAddr.getElementType();
// Arguments: source location, global thread id, the master copy cast to
// void*, its store size, and the per-variable cache global.
2445 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2446 CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2448 CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
2449 getOrCreateThreadPrivateCache(VD)};
// The runtime returns the thread-local copy; keep the original alignment.
2450 return Address(CGF.EmitRuntimeCall(
2451 createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2452 VDAddr.getAlignment());
// Emit the runtime calls that register ctor/copy-ctor/dtor callbacks for a
// threadprivate variable: first __kmpc_global_thread_num (forces OpenMP
// runtime initialization), then __kmpc_threadprivate_register.
// NOTE(review): the second argument of the first EmitRuntimeCall is on an
// elided line (presumably OMPLoc — confirm).
2455 void CGOpenMPRuntime::emitThreadPrivateVarInit(
2456 CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
2457 llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
2458 // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
2460 llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
2461 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
2463 // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
2464 // to register constructor/destructor for variable.
2465 llvm::Value *Args[] = {
2466 OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
2467 Ctor, CopyCtor, Dtor};
2468 CGF.EmitRuntimeCall(
2469 createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
// Emit the definition-side support for a threadprivate variable: synthesized
// constructor/destructor callback functions plus a registration call. When
// called at global scope (CGF == nullptr, inferred from the final branch),
// the registration is wrapped in a fresh "__omp_threadprivate_init_" function
// that is returned so it can be scheduled as a global initializer; otherwise
// the registration is emitted inline into *CGF.
// NOTE(review): a number of physical lines are elided in this excerpt (the
// TLS early return, several closing braces, the `if (!CGF)`/`PerformInit`
// guards around the tail, and some argument lines); comments describe only
// what is visible.
2472 llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
2473 const VarDecl *VD, Address VDAddr, SourceLocation Loc,
2474 bool PerformInit, CodeGenFunction *CGF) {
// TLS-based lowering needs none of this machinery (early-return elided).
2475 if (CGM.getLangOpts().OpenMPUseTLS &&
2476 CGM.getContext().getTargetInfo().isTLSSupported())
// Work from the defining declaration, and only emit the support code once
// per variable (ThreadPrivateWithDefinition acts as the "already done" set).
2479 VD = VD->getDefinition(CGM.getContext());
2480 if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
2481 ThreadPrivateWithDefinition.insert(VD);
2482 QualType ASTTy = VD->getType();
2484 llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
2485 const Expr *Init = VD->getAnyInitializer();
2486 if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2487 // Generate function that re-emits the declaration's initializer into the
2488 // threadprivate copy of the variable VD
2489 CodeGenFunction CtorCGF(CGM);
// The ctor callback takes a single void* (the destination copy) and
// returns void* — matching the kmpc_ctor typedef expected by the runtime.
2490 FunctionArgList Args;
2491 ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2492 /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2493 ImplicitParamDecl::Other);
2494 Args.push_back(&Dst);
2496 const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2497 CGM.getContext().VoidPtrTy, Args);
2498 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2499 std::string Name = getName({"__kmpc_global_ctor_", ""});
2500 llvm::Function *Fn =
2501 CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
2502 CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
// Load the destination pointer argument, retype it to the variable's IR
// type, and emit the variable's initializer into it.
2504 llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
2505 CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2506 CGM.getContext().VoidPtrTy, Dst.getLocation());
2507 Address Arg = Address(ArgVal, VDAddr.getAlignment());
2508 Arg = CtorCGF.Builder.CreateElementBitCast(
2509 Arg, CtorCGF.ConvertTypeForMem(ASTTy));
2510 CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
2511 /*IsInitializer=*/true);
// The callback returns the (unchanged) destination pointer.
2512 ArgVal = CtorCGF.EmitLoadOfScalar(
2513 CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2514 CGM.getContext().VoidPtrTy, Dst.getLocation());
2515 CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
2516 CtorCGF.FinishFunction();
// Only synthesize a dtor callback when the type actually needs destruction.
2519 if (VD->getType().isDestructedType() != QualType::DK_none) {
2520 // Generate function that emits destructor call for the threadprivate copy
2521 // of the variable VD
2522 CodeGenFunction DtorCGF(CGM);
// The dtor callback takes a single void* (the copy to destroy) and
// returns void — matching the kmpc_dtor typedef expected by the runtime.
2523 FunctionArgList Args;
2524 ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2525 /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2526 ImplicitParamDecl::Other);
2527 Args.push_back(&Dst);
2529 const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2530 CGM.getContext().VoidTy, Args);
2531 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2532 std::string Name = getName({"__kmpc_global_dtor_", ""});
2533 llvm::Function *Fn =
2534 CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
// Suppress a real source location on the prologue, then give the body an
// artificial one, so the synthesized code doesn't confuse the debugger.
2535 auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2536 DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
2538 // Create a scope with an artificial location for the body of this function.
2539 auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2540 llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
2541 DtorCGF.GetAddrOfLocalVar(&Dst),
2542 /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
2543 DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
2544 DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2545 DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2546 DtorCGF.FinishFunction();
2549 // Do not emit init function if it is not required.
// Build null values of the exact callback function-pointer types for any
// callback that was not synthesized above — the runtime expects non-garbage
// (possibly null) function pointers for all three slots.
2553 llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
2554 auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
2557 // Copying constructor for the threadprivate variable.
2558 // Must be NULL - reserved by runtime, but currently it requires that this
2559 // parameter is always NULL. Otherwise it fires assertion.
2560 CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
2561 if (Ctor == nullptr) {
2562 auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
2565 Ctor = llvm::Constant::getNullValue(CtorTy);
2567 if (Dtor == nullptr) {
2568 auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
2571 Dtor = llvm::Constant::getNullValue(DtorTy);
// Global-scope path: wrap the registration in a dedicated nullary init
// function and return it so the caller can add it to the global ctors.
2574 auto *InitFunctionTy =
2575 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
2576 std::string Name = getName({"__omp_threadprivate_init_", ""});
2577 llvm::Function *InitFunction = CGM.CreateGlobalInitOrDestructFunction(
2578 InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
2579 CodeGenFunction InitCGF(CGM);
2580 FunctionArgList ArgList;
2581 InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
2582 CGM.getTypes().arrangeNullaryFunction(), ArgList,
2584 emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2585 InitCGF.FinishFunction();
2586 return InitFunction;
// Function-scope path: emit the registration directly into the caller's CGF.
2588 emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2593 /// Obtain information that uniquely identifies a target entry. This
2594 /// consists of the file and device IDs as well as line number associated with
2595 /// the relevant entry source location.
// \param C        AST context, used to reach the SourceManager and diagnostics.
// \param Loc      entry location; asserted valid (OpenMP pragmas cannot appear
//                 inside macros, so a presumed file location always exists).
// \param DeviceID [out] device half of the file's filesystem UniqueID.
// \param FileID   [out] file half of the file's filesystem UniqueID.
// \param LineNum  [out] presumed line number of Loc.
2596 static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
2597 unsigned &DeviceID, unsigned &FileID,
2598 unsigned &LineNum) {
2599 SourceManager &SM = C.getSourceManager();
2601 // The loc should be always valid and have a file ID (the user cannot use
2602 // #pragma directives in macros)
2604 assert(Loc.isValid() && "Source location is expected to be always valid.");
2606 PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2607 assert(PLoc.isValid() && "Source location is expected to be always valid.");
2609 llvm::sys::fs::UniqueID ID;
// Diagnose (but keep going) if the presumed file cannot be stat'ed; the
// out-params are still assigned from whatever ID holds afterwards.
2610 if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
2611 SM.getDiagnostics().Report(diag::err_cannot_open_file)
2612 << PLoc.getFilename() << EC.message();
2614 DeviceID = ID.getDevice();
2615 FileID = ID.getFile();
2616 LineNum = PLoc.getLine();
// Emit helper "ctor"/"dtor" functions for a 'declare target' variable
// definition and register them as offload entries keyed by the variable's
// declaration location (device ID, file ID, line). On the device side real
// init/destroy functions are generated; otherwise placeholder globals named
// "<prefix>_ctor"/"<prefix>_dtor" are registered instead.
// Returns CGM.getLangOpts().OpenMPIsDevice, i.e. true only for device
// compilations. NOTE(review): this listing has elided lines (see gaps in the
// prefixed numbering), so some declarations (e.g. DeviceID/FileID/Line, ID,
// PerformInit) are not visible here.
2619 bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
2620 llvm::GlobalVariable *Addr,
2622 Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2623 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
// Nothing to do for non-declare-target variables or 'link' mapped ones.
2624 if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link)
2626 VD = VD->getDefinition(CGM.getContext());
// Emit the ctor/dtor entries at most once per variable definition.
2627 if (VD && !DeclareTargetWithDefinition.insert(VD).second)
2628 return CGM.getLangOpts().OpenMPIsDevice;
2630 QualType ASTTy = VD->getType();
2632 SourceLocation Loc = VD->getCanonicalDecl()->getLocStart();
2633 // Produce the unique prefix to identify the new target regions. We use
2634 // the source location of the variable declaration which we know to not
2635 // conflict with any target region.
2639 getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
2640 SmallString<128> Buffer, Out;
2642 llvm::raw_svector_ostream OS(Buffer);
2643 OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
2644 << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
2647 const Expr *Init = VD->getAnyInitializer();
2648 if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2649 llvm::Constant *Ctor;
2651 if (CGM.getLangOpts().OpenMPIsDevice) {
2652 // Generate function that re-emits the declaration's initializer into
2653 // the threadprivate copy of the variable VD
2654 CodeGenFunction CtorCGF(CGM);
2656 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2657 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2658 llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2659 FTy, Twine(Buffer, "_ctor"), FI, Loc);
2660 auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
2661 CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2662 FunctionArgList(), Loc, Loc);
2663 auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
2664 CtorCGF.EmitAnyExprToMem(Init,
2665 Address(Addr, CGM.getContext().getDeclAlign(VD)),
2666 Init->getType().getQualifiers(),
2667 /*IsInitializer=*/true);
2668 CtorCGF.FinishFunction();
2670 ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
// Keep the ctor alive even though nothing in the module references it.
2671 CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
// Host path (lines elided above): register a placeholder byte instead of
// a real function; the device binary provides the actual ctor.
2673 Ctor = new llvm::GlobalVariable(
2674 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2675 llvm::GlobalValue::PrivateLinkage,
2676 llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
2680 // Register the information for the entry associated with the constructor.
2682 OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
2683 DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
2684 ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
2686 if (VD->getType().isDestructedType() != QualType::DK_none) {
2687 llvm::Constant *Dtor;
2689 if (CGM.getLangOpts().OpenMPIsDevice) {
2690 // Generate function that emits destructor call for the threadprivate
2691 // copy of the variable VD
2692 CodeGenFunction DtorCGF(CGM);
2694 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2695 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2696 llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2697 FTy, Twine(Buffer, "_dtor"), FI, Loc);
2698 auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2699 DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2700 FunctionArgList(), Loc, Loc);
2701 // Create a scope with an artificial location for the body of this
2703 auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2704 DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
2705 ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2706 DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2707 DtorCGF.FinishFunction();
2709 ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2710 CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
// Host path: placeholder global mirroring the ctor case above.
2712 Dtor = new llvm::GlobalVariable(
2713 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2714 llvm::GlobalValue::PrivateLinkage,
2715 llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
2718 // Register the information for the entry associated with the destructor.
2720 OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
2721 DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
2722 ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
2724 return CGM.getLangOpts().OpenMPIsDevice;
// Returns the address of a compiler-generated ("artificial") threadprivate
// variable named Name of type VarType. Creates two internal globals —
// "<Name>artificial_" (the master copy) and "<Name>artificial_cache_" — and
// calls __kmpc_threadprivate_cached to obtain the per-thread copy, casting
// the returned void* back to the variable's IR pointer type.
2727 Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
2730 std::string Suffix = getName({"artificial", ""});
2731 std::string CacheSuffix = getName({"cache", ""});
2732 llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
2733 llvm::Value *GAddr =
2734 getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
// Args for __kmpc_threadprivate_cached(loc, gtid, data, size, cache).
2735 llvm::Value *Args[] = {
2736 emitUpdateLocation(CGF, SourceLocation()),
2737 getThreadID(CGF, SourceLocation()),
2738 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
2739 CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
2740 /*IsSigned=*/false),
2741 getOrCreateInternalVariable(
2742 CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
2744 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2745 CGF.EmitRuntimeCall(
2746 createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2747 VarLVType->getPointerTo(/*AddrSpace=*/0)),
2748 CGM.getPointerAlign());
// Emit an if/else over the OpenMP 'if' clause condition Cond: ThenGen runs
// when the condition is true, ElseGen otherwise. When the condition constant
// folds, only the live arm is emitted (elided lines handle that case);
// otherwise a regular conditional branch over omp_if.then/omp_if.else is
// generated with an omp_if.end continuation block.
2751 void CGOpenMPRuntime::emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
2752 const RegionCodeGenTy &ThenGen,
2753 const RegionCodeGenTy &ElseGen) {
2754 CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
2756 // If the condition constant folds and can be elided, try to avoid emitting
2757 // the condition and the dead arm of the if/else.
2759 if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
2767 // Otherwise, the condition did not fold, or we couldn't elide it. Just
2768 // emit the conditional branch.
2769 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
2770 llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
2771 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
2772 CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
2774 // Emit the 'then' code.
2775 CGF.EmitBlock(ThenBlock);
2777 CGF.EmitBranch(ContBlock);
2778 // Emit the 'else' code if present.
2779 // There is no need to emit line number for unconditional branch.
2780 (void)ApplyDebugLocation::CreateEmpty(CGF);
2781 CGF.EmitBlock(ElseBlock);
2783 // There is no need to emit line number for unconditional branch.
2784 (void)ApplyDebugLocation::CreateEmpty(CGF);
2785 CGF.EmitBranch(ContBlock);
2786 // Emit the continuation block for code after the if.
2787 CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
// Emit code for a 'parallel' construct: in the normal (then) path the
// outlined function is launched via __kmpc_fork_call with the captured
// variables appended as trailing arguments; in the else path (the 'if'
// clause evaluated false) the region is serialized between
// __kmpc_serialized_parallel / __kmpc_end_serialized_parallel and the
// outlined function is called directly with gtid/bound-tid both pointing at
// a zero temporary. IfCond selects between the two via emitOMPIfClause.
2790 void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
2791 llvm::Value *OutlinedFn,
2792 ArrayRef<llvm::Value *> CapturedVars,
2793 const Expr *IfCond) {
2794 if (!CGF.HaveInsertPoint())
2796 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2797 auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
2798 PrePostActionTy &) {
2799 // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
2800 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2801 llvm::Value *Args[] = {
2803 CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
2804 CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
2805 llvm::SmallVector<llvm::Value *, 16> RealArgs;
2806 RealArgs.append(std::begin(Args), std::end(Args));
2807 RealArgs.append(CapturedVars.begin(), CapturedVars.end());
2809 llvm::Value *RTLFn = RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
2810 CGF.EmitRuntimeCall(RTLFn, RealArgs);
2812 auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
2813 PrePostActionTy &) {
2814 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2815 llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
2817 // __kmpc_serialized_parallel(&Loc, GTid);
2818 llvm::Value *Args[] = {RTLoc, ThreadID};
2819 CGF.EmitRuntimeCall(
2820 RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);
2822 // OutlinedFn(&gtid, &zero, CapturedStruct);
2823 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2824 /*Name*/ ".zero.addr");
2825 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2826 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2827 // ThreadId for serialized parallels is 0.
2828 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2829 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2830 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2831 RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2833 // __kmpc_end_serialized_parallel(&Loc, GTid);
2834 llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
2835 CGF.EmitRuntimeCall(
2836 RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
// With an 'if' clause, branch between the two code paths at runtime;
// otherwise (elided lines) only ThenGen is emitted unconditionally.
2840 emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
2842 RegionCodeGenTy ThenRCG(ThenGen);
2847 // If we're inside an (outlined) parallel region, use the region info's
2848 // thread-ID variable (it is passed in a first argument of the outlined function
2849 // as "kmp_int32 *gtid"). Otherwise, if we're not inside parallel region, but in
2850 // regular serial code region, get thread ID by calling kmp_int32
2851 // kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and
2852 // return the address of that temp.
2853 Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
2854 SourceLocation Loc) {
// Fast path: reuse the gtid parameter captured by the enclosing OpenMP region.
2855 if (auto *OMPRegionInfo =
2856 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2857 if (OMPRegionInfo->getThreadIDVariable())
2858 return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
// Slow path: materialize the thread ID into a fresh i32 temporary.
2860 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2862 CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
2863 Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
2864 CGF.EmitStoreOfScalar(ThreadID,
2865 CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
2867 return ThreadIDTemp;
// Get (or lazily create) a module-internal global variable of type Ty with
// the given mangled Name. Variables are memoized in InternalVars; a repeated
// request must ask for the same type (asserted). New globals get common
// linkage and a zero initializer.
2871 CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
2872 const llvm::Twine &Name) {
2873 SmallString<256> Buffer;
2874 llvm::raw_svector_ostream Out(Buffer);
2876 StringRef RuntimeName = Out.str();
2877 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
// Cache hit: the previously created global must match the requested type.
2879 assert(Elem.second->getType()->getPointerElementType() == Ty &&
2880 "OMP internal variable has different type than requested");
2881 return &*Elem.second;
// Cache miss: create the global and store it in the map entry.
2884 return Elem.second = new llvm::GlobalVariable(
2885 CGM.getModule(), Ty, /*IsConstant*/ false,
2886 llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
// Return the internal lock variable (of KmpCriticalNameTy) guarding the named
// 'critical' region; the name is ".gomp_critical_user_<CriticalName>.var".
2890 llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
2891 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
2892 std::string Name = getName({Prefix, "var"});
2893 return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
2897 /// Common pre(post)-action for different OpenMP constructs.
// Wraps a region with an "enter" runtime call before it and an "exit" call
// after it. In Conditional mode the enter call's result gates the region:
// the body runs only when the result is non-zero (e.g. __kmpc_master /
// __kmpc_single return 1 in the thread that should execute).
2898 class CommonActionTy final : public PrePostActionTy {
2899 llvm::Value *EnterCallee;
2900 ArrayRef<llvm::Value *> EnterArgs;
2901 llvm::Value *ExitCallee;
2902 ArrayRef<llvm::Value *> ExitArgs;
2904 llvm::BasicBlock *ContBlock = nullptr;
2907 CommonActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
2908 llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
2909 bool Conditional = false)
2910 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
2911 ExitArgs(ExitArgs), Conditional(Conditional) {}
// Emit the enter call; in Conditional mode also open the guarded then-block.
2912 void Enter(CodeGenFunction &CGF) override {
2913 llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
2915 llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
2916 auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
2917 ContBlock = CGF.createBasicBlock("omp_if.end");
2918 // Generate the branch (If-stmt)
2919 CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
2920 CGF.EmitBlock(ThenBlock);
// Close the conditional region opened by Enter().
2923 void Done(CodeGenFunction &CGF) {
2924 // Emit the rest of blocks/branches
2925 CGF.EmitBranch(ContBlock);
2926 CGF.EmitBlock(ContBlock, true);
// Emit the exit runtime call after the region body.
2928 void Exit(CodeGenFunction &CGF) override {
2929 CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
2932 } // anonymous namespace
// Emit a 'critical' region guarded by __kmpc_critical/__kmpc_end_critical on
// the named lock; when a 'hint' expression is present the entry call becomes
// __kmpc_critical_with_hint with the hint appended as an extra argument.
2934 void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
2935 StringRef CriticalName,
2936 const RegionCodeGenTy &CriticalOpGen,
2937 SourceLocation Loc, const Expr *Hint) {
2938 // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
2940 // __kmpc_end_critical(ident_t *, gtid, Lock);
2941 // Prepare arguments and build a call to __kmpc_critical
2942 if (!CGF.HaveInsertPoint())
2944 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2945 getCriticalRegionLock(CriticalName)};
2946 llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
// The hint (if any) is widened/narrowed to uintptr_t per the runtime ABI.
2949 EnterArgs.push_back(CGF.Builder.CreateIntCast(
2950 CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
2952 CommonActionTy Action(
2953 createRuntimeFunction(Hint ? OMPRTL__kmpc_critical_with_hint
2954 : OMPRTL__kmpc_critical),
2955 EnterArgs, createRuntimeFunction(OMPRTL__kmpc_end_critical), Args);
2956 CriticalOpGen.setAction(Action);
2957 emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
// Emit a 'master' region: the body runs only in the thread for which
// __kmpc_master returns non-zero, followed by __kmpc_end_master
// (Conditional CommonActionTy handles the guard).
2960 void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
2961 const RegionCodeGenTy &MasterOpGen,
2962 SourceLocation Loc) {
2963 if (!CGF.HaveInsertPoint())
2965 // if(__kmpc_master(ident_t *, gtid)) {
2967 // __kmpc_end_master(ident_t *, gtid);
2969 // Prepare arguments and build a call to __kmpc_master
2970 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2971 CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
2972 createRuntimeFunction(OMPRTL__kmpc_end_master), Args,
2973 /*Conditional=*/true);
2974 MasterOpGen.setAction(Action);
2975 emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
// Emit a 'taskyield' directive as a call to __kmpc_omp_taskyield(loc, gtid, 0)
// and, inside an OpenMP region, emit the untied-task switch afterwards.
2979 void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
2980 SourceLocation Loc) {
2981 if (!CGF.HaveInsertPoint())
2983 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
2984 llvm::Value *Args[] = {
2985 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2986 llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
2987 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args);
2988 if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2989 Region->emitUntiedSwitch(CGF);
// Emit a 'taskgroup' region bracketed by __kmpc_taskgroup and
// __kmpc_end_taskgroup (unconditional CommonActionTy).
2992 void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
2993 const RegionCodeGenTy &TaskgroupOpGen,
2994 SourceLocation Loc) {
2995 if (!CGF.HaveInsertPoint())
2997 // __kmpc_taskgroup(ident_t *, gtid);
2998 // TaskgroupOpGen();
2999 // __kmpc_end_taskgroup(ident_t *, gtid);
3000 // Prepare arguments and build a call to __kmpc_taskgroup
3001 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3002 CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
3003 createRuntimeFunction(OMPRTL__kmpc_end_taskgroup),
3005 TaskgroupOpGen.setAction(Action);
3006 emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
3009 /// Given an array of pointers to variables, project the address of a
// given variable: load the Index-th pointer from Array, give it Var's natural
// alignment, and bitcast the element type to Var's memory type.
3011 static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
3012 unsigned Index, const VarDecl *Var) {
3013 // Pull out the pointer to the variable.
3015 CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize());
3016 llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
3018 Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
3019 Addr = CGF.Builder.CreateElementBitCast(
3020 Addr, CGF.ConvertTypeForMem(Var->getType()));
// Build the "void copy_func(void *LHSArg, void *RHSArg)" helper passed to
// __kmpc_copyprivate: it casts both void* arguments to arrays of variable
// pointers (ArgsType) and performs the element-wise copy-assignments
// described by AssignmentOps for each copyprivate variable.
3024 static llvm::Value *emitCopyprivateCopyFunction(
3025 CodeGenModule &CGM, llvm::Type *ArgsType,
3026 ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
3027 ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
3028 SourceLocation Loc) {
3029 ASTContext &C = CGM.getContext();
3030 // void copy_func(void *LHSArg, void *RHSArg);
3031 FunctionArgList Args;
3032 ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
3033 ImplicitParamDecl::Other);
3034 ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
3035 ImplicitParamDecl::Other);
3036 Args.push_back(&LHSArg);
3037 Args.push_back(&RHSArg);
3039 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3041 CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
3042 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
3043 llvm::GlobalValue::InternalLinkage, Name,
3045 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3046 Fn->setDoesNotRecurse();
3047 CodeGenFunction CGF(CGM);
3048 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3049 // Dest = (void*[n])(LHSArg);
3050 // Src = (void*[n])(RHSArg);
3051 Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3052 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
3053 ArgsType), CGF.getPointerAlign());
3054 Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3055 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
3056 ArgsType), CGF.getPointerAlign());
3057 // *(Type0*)Dst[0] = *(Type0*)Src[0];
3058 // *(Type1*)Dst[1] = *(Type1*)Src[1];
3060 // *(Typen*)Dst[n] = *(Typen*)Src[n];
3061 for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
3062 const auto *DestVar =
3063 cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
3064 Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
3066 const auto *SrcVar =
3067 cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
3068 Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
3070 const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
3071 QualType Type = VD->getType();
3072 CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
3074 CGF.FinishFunction();
// Emit a 'single' region: the body is guarded by __kmpc_single /
// __kmpc_end_single, and when the construct carries 'copyprivate' clauses a
// did_it flag plus an array of variable addresses is built and broadcast to
// the other threads via __kmpc_copyprivate with a generated copy function.
3078 void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
3079 const RegionCodeGenTy &SingleOpGen,
3081 ArrayRef<const Expr *> CopyprivateVars,
3082 ArrayRef<const Expr *> SrcExprs,
3083 ArrayRef<const Expr *> DstExprs,
3084 ArrayRef<const Expr *> AssignmentOps) {
3085 if (!CGF.HaveInsertPoint())
3087 assert(CopyprivateVars.size() == SrcExprs.size() &&
3088 CopyprivateVars.size() == DstExprs.size() &&
3089 CopyprivateVars.size() == AssignmentOps.size());
3090 ASTContext &C = CGM.getContext();
3091 // int32 did_it = 0;
3092 // if(__kmpc_single(ident_t *, gtid)) {
3094 // __kmpc_end_single(ident_t *, gtid);
3097 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
3098 // <copy_func>, did_it);
3100 Address DidIt = Address::invalid();
3101 if (!CopyprivateVars.empty()) {
3102 // int32 did_it = 0;
3103 QualType KmpInt32Ty =
3104 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
3105 DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
3106 CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
3108 // Prepare arguments and build a call to __kmpc_single
3109 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3110 CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
3111 createRuntimeFunction(OMPRTL__kmpc_end_single), Args,
3112 /*Conditional=*/true);
3113 SingleOpGen.setAction(Action);
3114 emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
// The executing thread records that it ran the region (did_it = 1).
3115 if (DidIt.isValid()) {
3117 CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
3120 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
3121 // <copy_func>, did_it);
3122 if (DidIt.isValid()) {
3123 llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
3124 QualType CopyprivateArrayTy =
3125 C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
3126 /*IndexTypeQuals=*/0);
3127 // Create a list of all private variables for copyprivate.
3128 Address CopyprivateList =
3129 CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
3130 for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
3131 Address Elem = CGF.Builder.CreateConstArrayGEP(
3132 CopyprivateList, I, CGF.getPointerSize());
3133 CGF.Builder.CreateStore(
3134 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3135 CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
3138 // Build function that copies private values from single region to all other
3139 // threads in the corresponding parallel region.
3140 llvm::Value *CpyFn = emitCopyprivateCopyFunction(
3141 CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
3142 CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc)
3143 llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
3145 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
3147 llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
3148 llvm::Value *Args[] = {
3149 emitUpdateLocation(CGF, Loc), // ident_t *<loc>
3150 getThreadID(CGF, Loc), // i32 <gtid>
3151 BufSize, // size_t <buf_size>
3152 CL.getPointer(), // void *<copyprivate list>
3153 CpyFn, // void (*) (void *, void *) <copy_func>
3154 DidItVal // i32 did_it
3156 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args);
// Emit an 'ordered' region. For the threads case the body is bracketed by
// __kmpc_ordered/__kmpc_end_ordered; otherwise (second, elided branch) the
// directive body is emitted inline without the runtime calls.
3160 void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
3161 const RegionCodeGenTy &OrderedOpGen,
3162 SourceLocation Loc, bool IsThreads) {
3163 if (!CGF.HaveInsertPoint())
3165 // __kmpc_ordered(ident_t *, gtid);
3167 // __kmpc_end_ordered(ident_t *, gtid);
3168 // Prepare arguments and build a call to __kmpc_ordered
3170 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3171 CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
3172 createRuntimeFunction(OMPRTL__kmpc_end_ordered),
3174 OrderedOpGen.setAction(Action);
3175 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
3178 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
// Emit an OpenMP barrier. The ident_t flags encode which construct implies
// the barrier (for/sections/single/explicit barrier/other implicit). Inside
// a cancellable region (unless ForceSimpleCall) __kmpc_cancel_barrier is
// used and its non-zero result branches to the construct's cancel exit;
// otherwise a plain __kmpc_barrier is emitted.
3181 void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
3182 OpenMPDirectiveKind Kind, bool EmitChecks,
3183 bool ForceSimpleCall) {
3184 if (!CGF.HaveInsertPoint())
3186 // Build call __kmpc_cancel_barrier(loc, thread_id);
3187 // Build call __kmpc_barrier(loc, thread_id);
3189 if (Kind == OMPD_for)
3190 Flags = OMP_IDENT_BARRIER_IMPL_FOR;
3191 else if (Kind == OMPD_sections)
3192 Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
3193 else if (Kind == OMPD_single)
3194 Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
3195 else if (Kind == OMPD_barrier)
3196 Flags = OMP_IDENT_BARRIER_EXPL;
3198 Flags = OMP_IDENT_BARRIER_IMPL;
3199 // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
3201 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
3202 getThreadID(CGF, Loc)};
3203 if (auto *OMPRegionInfo =
3204 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
3205 if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
3206 llvm::Value *Result = CGF.EmitRuntimeCall(
3207 createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
3209 // if (__kmpc_cancel_barrier()) {
3210 // exit from construct;
3212 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
3213 llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
3214 llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
3215 CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
3216 CGF.EmitBlock(ExitBB);
3217 // exit from construct;
3218 CodeGenFunction::JumpDest CancelDestination =
3219 CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
3220 CGF.EmitBranchThroughCleanup(CancelDestination);
3221 CGF.EmitBlock(ContBB, /*IsFinished=*/true);
3226 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
3229 /// Map the OpenMP loop schedule to the runtime enumeration.
// Chunked selects the *_chunked variant; Ordered selects OMP_ord_* over
// OMP_sch_*. An unknown schedule kind defaults to static (non-chunked).
3230 static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
3231 bool Chunked, bool Ordered) {
3232 switch (ScheduleKind) {
3233 case OMPC_SCHEDULE_static:
3234 return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
3235 : (Ordered ? OMP_ord_static : OMP_sch_static);
3236 case OMPC_SCHEDULE_dynamic:
3237 return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
3238 case OMPC_SCHEDULE_guided:
3239 return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
3240 case OMPC_SCHEDULE_runtime:
3241 return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
3242 case OMPC_SCHEDULE_auto:
3243 return Ordered ? OMP_ord_auto : OMP_sch_auto;
3244 case OMPC_SCHEDULE_unknown:
3245 assert(!Chunked && "chunk was specified but schedule kind not known");
3246 return Ordered ? OMP_ord_static : OMP_sch_static;
3248 llvm_unreachable("Unexpected runtime schedule");
3251 /// Map the OpenMP distribute schedule to the runtime enumeration.
3252 static OpenMPSchedType
3253 getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
3254 // only static is allowed for dist_schedule
3255 return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
// True iff the loop schedule maps to plain (non-chunked, non-ordered)
// OMP_sch_static.
3258 bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
3259 bool Chunked) const {
3260 OpenMPSchedType Schedule =
3261 getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
3262 return Schedule == OMP_sch_static;
// Distribute-schedule overload: true iff the mapping is non-chunked
// OMP_dist_sch_static.
3265 bool CGOpenMPRuntime::isStaticNonchunked(
3266 OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
3267 OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
3268 return Schedule == OMP_dist_sch_static;
// True iff the schedule kind maps to anything other than static, i.e. it
// needs the dynamic dispatch runtime entry points.
3272 bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
3273 OpenMPSchedType Schedule =
3274 getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
3275 assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
3276 return Schedule != OMP_sch_static;
// Fold the schedule modifiers M1/M2 into the runtime schedule value:
// monotonic/nonmonotonic OR in the corresponding OMP_sch_modifier_* bit, and
// the 'simd' modifier upgrades static_chunked to static_balanced_chunked.
// Returns Schedule | Modifier. NOTE(review): this listing elides the switch
// headers and break statements; the two parallel case runs below handle M1
// and M2 respectively.
3279 static int addMonoNonMonoModifier(OpenMPSchedType Schedule,
3280 OpenMPScheduleClauseModifier M1,
3281 OpenMPScheduleClauseModifier M2) {
3284 case OMPC_SCHEDULE_MODIFIER_monotonic:
3285 Modifier = OMP_sch_modifier_monotonic;
3287 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3288 Modifier = OMP_sch_modifier_nonmonotonic;
3290 case OMPC_SCHEDULE_MODIFIER_simd:
3291 if (Schedule == OMP_sch_static_chunked)
3292 Schedule = OMP_sch_static_balanced_chunked;
3294 case OMPC_SCHEDULE_MODIFIER_last:
3295 case OMPC_SCHEDULE_MODIFIER_unknown:
3299 case OMPC_SCHEDULE_MODIFIER_monotonic:
3300 Modifier = OMP_sch_modifier_monotonic;
3302 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3303 Modifier = OMP_sch_modifier_nonmonotonic;
3305 case OMPC_SCHEDULE_MODIFIER_simd:
3306 if (Schedule == OMP_sch_static_chunked)
3307 Schedule = OMP_sch_static_balanced_chunked;
3309 case OMPC_SCHEDULE_MODIFIER_last:
3310 case OMPC_SCHEDULE_MODIFIER_unknown:
3313 return Schedule | Modifier;
// Emit the __kmpc_dispatch_init_{4,4u,8,8u} call that starts a dynamically
// scheduled loop: bounds/stride come from DispatchValues, the schedule value
// carries the monotonic/nonmonotonic modifier bits, and a missing chunk
// defaults to 1. The assertion rejects static schedules, which must go
// through emitForStaticInit instead.
3316 void CGOpenMPRuntime::emitForDispatchInit(
3317 CodeGenFunction &CGF, SourceLocation Loc,
3318 const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
3319 bool Ordered, const DispatchRTInput &DispatchValues) {
3320 if (!CGF.HaveInsertPoint())
3322 OpenMPSchedType Schedule = getRuntimeSchedule(
3323 ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
3325 (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
3326 Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
3327 Schedule != OMP_sch_static_balanced_chunked));
3328 // Call __kmpc_dispatch_init(
3329 // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
3330 // kmp_int[32|64] lower, kmp_int[32|64] upper,
3331 // kmp_int[32|64] stride, kmp_int[32|64] chunk);
3333 // If the Chunk was not specified in the clause - use default value 1.
3334 llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
3335 : CGF.Builder.getIntN(IVSize, 1);
3336 llvm::Value *Args[] = {
3337 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3338 CGF.Builder.getInt32(addMonoNonMonoModifier(
3339 Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
3340 DispatchValues.LB, // Lower
3341 DispatchValues.UB, // Upper
3342 CGF.Builder.getIntN(IVSize, 1), // Stride
3345 CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
// Shared helper for emitForStaticInit/emitDistributeStaticInit: assemble the
// argument list and emit the __kmpc_for_static_init_{4,4u,8,8u} call. Only
// static (sch/ord/dist) schedules are accepted; a null chunk is legal only
// for the non-chunked schedules and defaults to 1.
3348 static void emitForStaticInitCall(
3349 CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
3350 llvm::Constant *ForStaticInitFunction, OpenMPSchedType Schedule,
3351 OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
3352 const CGOpenMPRuntime::StaticRTInput &Values) {
3353 if (!CGF.HaveInsertPoint())
3356 assert(!Values.Ordered);
3357 assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
3358 Schedule == OMP_sch_static_balanced_chunked ||
3359 Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
3360 Schedule == OMP_dist_sch_static ||
3361 Schedule == OMP_dist_sch_static_chunked);
3363 // Call __kmpc_for_static_init(
3364 // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
3365 // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
3366 // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
3367 // kmp_int[32|64] incr, kmp_int[32|64] chunk);
3368 llvm::Value *Chunk = Values.Chunk;
3369 if (Chunk == nullptr) {
3370 assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
3371 Schedule == OMP_dist_sch_static) &&
3372 "expected static non-chunked schedule");
3373 // If the Chunk was not specified in the clause - use default value 1.
3374 Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
3376 assert((Schedule == OMP_sch_static_chunked ||
3377 Schedule == OMP_sch_static_balanced_chunked ||
3378 Schedule == OMP_ord_static_chunked ||
3379 Schedule == OMP_dist_sch_static_chunked) &&
3380 "expected static chunked schedule");
3382 llvm::Value *Args[] = {
3385 CGF.Builder.getInt32(addMonoNonMonoModifier(Schedule, M1,
3386 M2)), // Schedule type
3387 Values.IL.getPointer(), // &isLastIter
3388 Values.LB.getPointer(), // &LB
3389 Values.UB.getPointer(), // &UB
3390 Values.ST.getPointer(), // &Stride
3391 CGF.Builder.getIntN(Values.IVSize, 1), // Incr
3394 CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
// Emits the static-schedule initialization for a worksharing directive
// ('for' / 'sections'): computes the runtime schedule kind and delegates to
// emitForStaticInitCall with a location tagged as loop or sections work.
3397 void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
3399 OpenMPDirectiveKind DKind,
3400 const OpenMPScheduleTy &ScheduleKind,
3401 const StaticRTInput &Values) {
3402 OpenMPSchedType ScheduleNum = getRuntimeSchedule(
3403 ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
3404 assert(isOpenMPWorksharingDirective(DKind) &&
3405 "Expected loop-based or sections-based directive.");
// The ident_t flag distinguishes loop vs. sections work for the runtime.
3406 llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
3407 isOpenMPLoopDirective(DKind)
3408 ? OMP_IDENT_WORK_LOOP
3409 : OMP_IDENT_WORK_SECTIONS);
3410 llvm::Value *ThreadId = getThreadID(CGF, Loc);
// Pick the __kmpc_for_static_init_4/4u/8/8u variant by IV size/signedness.
3411 llvm::Constant *StaticInitFunction =
3412 createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3413 emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3414 ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
// Emits the static-schedule initialization for a 'distribute' directive.
// Same runtime entry point as emitForStaticInit, but the location is tagged
// OMP_IDENT_WORK_DISTRIBUTE and no schedule modifiers apply.
3417 void CGOpenMPRuntime::emitDistributeStaticInit(
3418 CodeGenFunction &CGF, SourceLocation Loc,
3419 OpenMPDistScheduleClauseKind SchedKind,
3420 const CGOpenMPRuntime::StaticRTInput &Values) {
3421 OpenMPSchedType ScheduleNum =
3422 getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
3423 llvm::Value *UpdatedLocation =
3424 emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
3425 llvm::Value *ThreadId = getThreadID(CGF, Loc);
3426 llvm::Constant *StaticInitFunction =
3427 createForStaticInitFunction(Values.IVSize, Values.IVSigned);
// 'dist_schedule' has no monotonic/nonmonotonic modifiers, so pass unknown.
3428 emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3429 ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
3430 OMPC_SCHEDULE_MODIFIER_unknown, Values);
// Emits the __kmpc_for_static_fini call that closes a statically scheduled
// region. The ident_t work flag is chosen from the directive kind
// (distribute > loop > sections, checked in that order).
3433 void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
3435 OpenMPDirectiveKind DKind) {
3436 if (!CGF.HaveInsertPoint())
3438 // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
3439 llvm::Value *Args[] = {
3440 emitUpdateLocation(CGF, Loc,
3441 isOpenMPDistributeDirective(DKind)
3442 ? OMP_IDENT_WORK_DISTRIBUTE
3443 : isOpenMPLoopDirective(DKind)
3444 ? OMP_IDENT_WORK_LOOP
3445 : OMP_IDENT_WORK_SECTIONS),
3446 getThreadID(CGF, Loc)};
3447 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
// Signals the end of an iteration in a dynamically scheduled ordered loop by
// calling the IV-size-specific __kmpc_dispatch_fini_* runtime function.
3451 void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
3455 if (!CGF.HaveInsertPoint())
3457 // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
3458 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3459 CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
// Emits the __kmpc_dispatch_next_* call that fetches the next chunk of a
// dynamically scheduled loop. The runtime writes the new bounds/stride/last
// flag through the passed addresses; the kmp_int32 result is converted to a
// bool indicating whether another chunk is available.
3462 llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
3463 SourceLocation Loc, unsigned IVSize,
3464 bool IVSigned, Address IL,
3465 Address LB, Address UB,
3467 // Call __kmpc_dispatch_next(
3468 // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
3469 // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
3470 // kmp_int[32|64] *p_stride);
3471 llvm::Value *Args[] = {
3472 emitUpdateLocation(CGF, Loc),
3473 getThreadID(CGF, Loc),
3474 IL.getPointer(), // &isLastIter
3475 LB.getPointer(), // &Lower
3476 UB.getPointer(), // &Upper
3477 ST.getPointer() // &Stride
3480 CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
// The runtime returns a kmp_int32; convert it to a C++ bool for the caller.
3481 return CGF.EmitScalarConversion(
3482 Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
3483 CGF.getContext().BoolTy, Loc);
// Emits __kmpc_push_num_threads to communicate the 'num_threads' clause
// value to the runtime before a parallel region is entered.
3486 void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
3487 llvm::Value *NumThreads,
3488 SourceLocation Loc) {
3489 if (!CGF.HaveInsertPoint())
3491 // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
3492 llvm::Value *Args[] = {
3493 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
// The clause expression may have any integer type; truncate/extend to i32.
3494 CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
3495 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
// Emits __kmpc_push_proc_bind for the 'proc_bind' clause: maps the AST
// clause kind (master/close/spread) to the runtime's numeric constant and
// pushes it before the parallel region.
3499 void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
3500 OpenMPProcBindClauseKind ProcBind,
3501 SourceLocation Loc) {
3502 if (!CGF.HaveInsertPoint())
3504 // Constants for proc bind value accepted by the runtime.
3515 case OMPC_PROC_BIND_master:
3516 RuntimeProcBind = ProcBindMaster;
3518 case OMPC_PROC_BIND_close:
3519 RuntimeProcBind = ProcBindClose;
3521 case OMPC_PROC_BIND_spread:
3522 RuntimeProcBind = ProcBindSpread;
// 'unknown' should have been rejected in Sema; reaching it here is a bug.
3524 case OMPC_PROC_BIND_unknown:
3525 llvm_unreachable("Unsupported proc_bind value.");
3527 // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
3528 llvm::Value *Args[] = {
3529 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3530 llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
3531 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
// Emits a __kmpc_flush call for the 'flush' directive. The flushed-variable
// list parameter is accepted but unused: the runtime call flushes everything.
3534 void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
3535 SourceLocation Loc) {
3536 if (!CGF.HaveInsertPoint())
3538 // Build call void __kmpc_flush(ident_t *loc)
3539 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
3540 emitUpdateLocation(CGF, Loc));
3544 /// Indexes of fields for type kmp_task_t.
// NOTE(review): the enumerator order appears to mirror the field order built
// by createKmpTaskTRecordDecl below — confirm before reordering either.
3545 enum KmpTaskTFields {
3546 /// List of shared variables.
3550 /// Partition id for the untied tasks.
3552 /// Function with call of destructors for private variables.
3556 /// (Taskloops only) Lower bound.
3558 /// (Taskloops only) Upper bound.
3560 /// (Taskloops only) Stride.
3562 /// (Taskloops only) Is last iteration flag.
3564 /// (Taskloops only) Reduction data.
3567 } // anonymous namespace
// Returns true when no offload entries (neither target regions nor device
// global variables) have been registered.
3569 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
3570 return OffloadEntriesTargetRegion.empty() &&
3571 OffloadEntriesDeviceGlobalVar.empty();
3574 /// Initialize target region entry.
// Device-side only: pre-creates a placeholder entry (null address/ID) keyed
// by (device, file, parent function, line) so that the later registration
// step can find and fill it in.
3575 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3576 initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3577 StringRef ParentName, unsigned LineNum,
3579 assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3580 "only required for the device "
3581 "code generation.");
3582 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
3583 OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
3584 OMPTargetRegionEntryTargetRegion);
3585 ++OffloadingEntriesNum;
// Registers the address/ID of an outlined target region. On the device the
// entry must already exist (created from host metadata); a missing entry is
// a hard error because host and device code would be out of sync. On the
// host a brand-new entry is created.
3588 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3589 registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3590 StringRef ParentName, unsigned LineNum,
3591 llvm::Constant *Addr, llvm::Constant *ID,
3592 OMPTargetRegionEntryKind Flags) {
3593 // If we are emitting code for a target, the entry is already initialized,
3594 // only has to be registered.
3595 if (CGM.getLangOpts().OpenMPIsDevice) {
3596 if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum)) {
3597 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3598 DiagnosticsEngine::Error,
3599 "Unable to find target region on line '%0' in the device code.");
3600 CGM.getDiags().Report(DiagID) << LineNum;
3604 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
3605 assert(Entry.isValid() && "Entry not initialized!");
3606 Entry.setAddress(Addr);
3608 Entry.setFlags(Flags);
// Host path: create and insert a fresh entry with the next sequence number.
3610 OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
3611 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
3612 ++OffloadingEntriesNum;
// Walks the four-level (device -> file -> parent -> line) map and reports
// whether an entry exists at that key that has NOT yet been registered
// (an already-registered entry — with an address or ID — counts as absent).
3616 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
3617 unsigned DeviceID, unsigned FileID, StringRef ParentName,
3618 unsigned LineNum) const {
3619 auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
3620 if (PerDevice == OffloadEntriesTargetRegion.end())
3622 auto PerFile = PerDevice->second.find(FileID);
3623 if (PerFile == PerDevice->second.end())
3625 auto PerParentName = PerFile->second.find(ParentName);
3626 if (PerParentName == PerFile->second.end())
3628 auto PerLine = PerParentName->second.find(LineNum);
3629 if (PerLine == PerParentName->second.end())
3631 // Fail if this entry is already registered.
3632 if (PerLine->second.getAddress() || PerLine->second.getID())
// Applies \p Action to every registered target-region entry, passing the
// full (device, file, parent name, line) key alongside the entry itself.
3637 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
3638 const OffloadTargetRegionEntryInfoActTy &Action) {
3639 // Scan all target region entries and perform the provided action.
3640 for (const auto &D : OffloadEntriesTargetRegion)
3641 for (const auto &F : D.second)
3642 for (const auto &P : F.second)
3643 for (const auto &L : P.second)
3644 Action(D.first, F.first, P.first(), L.first, L.second);
// Device-side only: pre-creates a device-global-variable entry keyed by the
// variable's mangled name, to be completed later by the register step.
3647 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3648 initializeDeviceGlobalVarEntryInfo(StringRef Name,
3649 OMPTargetGlobalVarEntryKind Flags,
3651 assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3652 "only required for the device "
3653 "code generation.");
3654 OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
3655 ++OffloadingEntriesNum;
// Registers the address, size, and linkage of a 'declare target' global
// variable. Device side fills the pre-initialized entry (asserting the flags
// match and the address isn't being reset); host side creates a new entry
// unless one already exists.
3658 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3659 registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
3661 OMPTargetGlobalVarEntryKind Flags,
3662 llvm::GlobalValue::LinkageTypes Linkage) {
3663 if (CGM.getLangOpts().OpenMPIsDevice) {
3664 auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
3665 assert(Entry.isValid() && Entry.getFlags() == Flags &&
3666 "Entry not initialized!");
3667 assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
3668 "Resetting with the new address.");
// Already fully registered — nothing more to do.
3669 if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName))
3671 Entry.setAddress(Addr);
3672 Entry.setVarSize(VarSize);
3673 Entry.setLinkage(Linkage);
3675 if (hasDeviceGlobalVarEntryInfo(VarName))
3677 OffloadEntriesDeviceGlobalVar.try_emplace(
3678 VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
3679 ++OffloadingEntriesNum;
// Applies \p Action to every registered device-global-variable entry,
// passing the mangled name and the entry.
3683 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3684 actOnDeviceGlobalVarEntriesInfo(
3685 const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
3686 // Scan all target region entries and perform the provided action.
3687 for (const auto &E : OffloadEntriesDeviceGlobalVar)
3688 Action(E.getKey(), E.getValue());
// Host-side only: builds the __tgt_bin_desc offloading descriptor (device
// image table plus host entry table bounds) and the global ctor/dtor-style
// functions that call __tgt_register_lib / __tgt_unregister_lib at program
// startup and shutdown. Returns the registration function.
3692 CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
3693 // If we don't have entries or if we are emitting code for the device, we
3694 // don't need to do anything.
3695 if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
3698 llvm::Module &M = CGM.getModule();
3699 ASTContext &C = CGM.getContext();
3701 // Get list of devices we care about
3702 const std::vector<llvm::Triple> &Devices = CGM.getLangOpts().OMPTargetTriples;
3704 // We should be creating an offloading descriptor only if there are devices
3706 assert(!Devices.empty() && "No OpenMP offloading devices??");
3708 // Create the external variables that will point to the begin and end of the
3709 // host entries section. These will be defined by the linker.
3710 llvm::Type *OffloadEntryTy =
3711 CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
3712 std::string EntriesBeginName = getName({"omp_offloading", "entries_begin"});
3713 auto *HostEntriesBegin = new llvm::GlobalVariable(
3714 M, OffloadEntryTy, /*isConstant=*/true,
3715 llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
3717 std::string EntriesEndName = getName({"omp_offloading", "entries_end"});
3718 auto *HostEntriesEnd =
3719 new llvm::GlobalVariable(M, OffloadEntryTy, /*isConstant=*/true,
3720 llvm::GlobalValue::ExternalLinkage,
3721 /*Initializer=*/nullptr, EntriesEndName);
3723 // Create all device images
3724 auto *DeviceImageTy = cast<llvm::StructType>(
3725 CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
3726 ConstantInitBuilder DeviceImagesBuilder(CGM);
3727 ConstantArrayBuilder DeviceImagesEntries =
3728 DeviceImagesBuilder.beginArray(DeviceImageTy);
// One __tgt_device_image per target triple: weak-external begin/end symbols
// delimiting the embedded image, resolved by the offload linker/wrapper.
3730 for (const llvm::Triple &Device : Devices) {
3731 StringRef T = Device.getTriple();
3732 std::string BeginName = getName({"omp_offloading", "img_start", ""});
3733 auto *ImgBegin = new llvm::GlobalVariable(
3734 M, CGM.Int8Ty, /*isConstant=*/true,
3735 llvm::GlobalValue::ExternalWeakLinkage,
3736 /*Initializer=*/nullptr, Twine(BeginName).concat(T));
3737 std::string EndName = getName({"omp_offloading", "img_end", ""});
3738 auto *ImgEnd = new llvm::GlobalVariable(
3739 M, CGM.Int8Ty, /*isConstant=*/true,
3740 llvm::GlobalValue::ExternalWeakLinkage,
3741 /*Initializer=*/nullptr, Twine(EndName).concat(T));
3743 llvm::Constant *Data[] = {ImgBegin, ImgEnd, HostEntriesBegin,
3745 createConstantGlobalStructAndAddToParent(CGM, getTgtDeviceImageQTy(), Data,
3746 DeviceImagesEntries);
3749 // Create device images global array.
3750 std::string ImagesName = getName({"omp_offloading", "device_images"});
3751 llvm::GlobalVariable *DeviceImages =
3752 DeviceImagesEntries.finishAndCreateGlobal(ImagesName,
3753 CGM.getPointerAlign(),
3754 /*isConstant=*/true);
3755 DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3757 // This is a Zero array to be used in the creation of the constant expressions
3758 llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
3759 llvm::Constant::getNullValue(CGM.Int32Ty)};
3761 // Create the target region descriptor.
3762 llvm::Constant *Data[] = {
3763 llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
3764 llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
3765 DeviceImages, Index),
3766 HostEntriesBegin, HostEntriesEnd};
3767 std::string Descriptor = getName({"omp_offloading", "descriptor"});
3768 llvm::GlobalVariable *Desc = createConstantGlobalStruct(
3769 CGM, getTgtBinaryDescriptorQTy(), Data, Descriptor);
3771 // Emit code to register or unregister the descriptor at execution
3772 // startup or closing, respectively.
// Unregister function: void .omp_offloading.descriptor_unreg(void*),
// emitted first so the register function can install it as a "dtor".
3774 llvm::Function *UnRegFn;
3776 FunctionArgList Args;
3777 ImplicitParamDecl DummyPtr(C, C.VoidPtrTy, ImplicitParamDecl::Other);
3778 Args.push_back(&DummyPtr);
3780 CodeGenFunction CGF(CGM);
3781 // Disable debug info for global (de-)initializer because they are not part
3782 // of some particular construct.
3783 CGF.disableDebugInfo();
3785 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3786 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
3787 std::string UnregName = getName({"omp_offloading", "descriptor_unreg"});
3788 UnRegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, UnregName, FI);
3789 CGF.StartFunction(GlobalDecl(), C.VoidTy, UnRegFn, FI, Args);
3790 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
3792 CGF.FinishFunction();
// Register function: calls __tgt_register_lib(Desc) and schedules UnRegFn
// as a destructor via the C++ ABI so unregistration runs at exit.
3794 llvm::Function *RegFn;
3796 CodeGenFunction CGF(CGM);
3797 // Disable debug info for global (de-)initializer because they are not part
3798 // of some particular construct.
3799 CGF.disableDebugInfo();
3800 const auto &FI = CGM.getTypes().arrangeNullaryFunction();
3801 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
3802 std::string Descriptor = getName({"omp_offloading", "descriptor_reg"});
3803 RegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, Descriptor, FI);
3804 CGF.StartFunction(GlobalDecl(), C.VoidTy, RegFn, FI, FunctionArgList());
3805 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_lib), Desc);
3806 // Create a variable to drive the registration and unregistration of the
3807 // descriptor, so we can reuse the logic that emits Ctors and Dtors.
3808 ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(),
3809 SourceLocation(), nullptr, C.CharTy,
3810 ImplicitParamDecl::Other);
3811 CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
3812 CGF.FinishFunction();
3814 if (CGM.supportsCOMDAT()) {
3815 // It is sufficient to call registration function only once, so create a
3816 // COMDAT group for registration/unregistration functions and associated
3817 // data. That would reduce startup time and code size. Registration
3818 // function serves as a COMDAT group key.
3819 llvm::Comdat *ComdatKey = M.getOrInsertComdat(RegFn->getName());
3820 RegFn->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
3821 RegFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
3822 RegFn->setComdat(ComdatKey);
3823 UnRegFn->setComdat(ComdatKey);
3824 DeviceImages->setComdat(ComdatKey);
3825 Desc->setComdat(ComdatKey);
// Emits one __tgt_offload_entry global {ID, name, size, flags, reserved}
// into the "omp_offloading.entries" section so the linker-collected entry
// table (entries_begin/entries_end) picks it up.
3830 void CGOpenMPRuntime::createOffloadEntry(
3831 llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
3832 llvm::GlobalValue::LinkageTypes Linkage) {
3833 StringRef Name = Addr->getName();
3834 llvm::Module &M = CGM.getModule();
3835 llvm::LLVMContext &C = M.getContext();
3837 // Create constant string with the name.
3838 llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
3840 std::string StringName = getName({"omp_offloading", "entry_name"});
3841 auto *Str = new llvm::GlobalVariable(
3842 M, StrPtrInit->getType(), /*isConstant=*/true,
3843 llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
3844 Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// Fields mirror the layout built in getTgtOffloadEntryQTy(); 'reserved' is 0.
3846 llvm::Constant *Data[] = {llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy),
3847 llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy),
3848 llvm::ConstantInt::get(CGM.SizeTy, Size),
3849 llvm::ConstantInt::get(CGM.Int32Ty, Flags),
3850 llvm::ConstantInt::get(CGM.Int32Ty, 0)};
3851 std::string EntryName = getName({"omp_offloading", "entry", ""});
3852 llvm::GlobalVariable *Entry = createConstantGlobalStruct(
3853 CGM, getTgtOffloadEntryQTy(), Data, Twine(EntryName).concat(Name),
3854 llvm::GlobalValue::WeakAnyLinkage);
3856 // The entry has to be created in the section the linker expects it to be.
3857 std::string Section = getName({"omp_offloading", "entries"});
3858 Entry->setSection(Section);
// Emits every collected offload entry (target regions and declare-target
// globals) plus the 'omp_offload.info' named metadata that the device-side
// compile consumes (see loadOffloadInfoMetadata). Entries are visited in
// creation order so host and device numbering agree.
3861 void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
3862 // Emit the offloading entries and metadata so that the device codegen side
3863 // can easily figure out what to emit. The produced metadata looks like
3866 // !omp_offload.info = !{!1, ...}
3868 // Right now we only generate metadata for function that contain target
3871 // If we do not have entries, we don't need to do anything.
3872 if (OffloadEntriesInfoManager.empty())
3875 llvm::Module &M = CGM.getModule();
3876 llvm::LLVMContext &C = M.getContext();
// Pre-sized so each entry can be dropped into slot [order] directly.
3877 SmallVector<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
3878 OrderedEntries(OffloadEntriesInfoManager.size());
3880 // Auxiliary methods to create metadata values and strings.
3881 auto &&GetMDInt = [this](unsigned V) {
3882 return llvm::ConstantAsMetadata::get(
3883 llvm::ConstantInt::get(CGM.Int32Ty, V));
3886 auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
3888 // Create the offloading info metadata node.
3889 llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
3891 // Create function that emits metadata for each target region entry;
3892 auto &&TargetRegionMetadataEmitter =
3893 [&C, MD, &OrderedEntries, &GetMDInt, &GetMDString](
3894 unsigned DeviceID, unsigned FileID, StringRef ParentName,
3896 const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
3897 // Generate metadata for target regions. Each entry of this metadata
3899 // - Entry 0 -> Kind of this type of metadata (0).
3900 // - Entry 1 -> Device ID of the file where the entry was identified.
3901 // - Entry 2 -> File ID of the file where the entry was identified.
3902 // - Entry 3 -> Mangled name of the function where the entry was
3904 // - Entry 4 -> Line in the file where the entry was identified.
3905 // - Entry 5 -> Order the entry was created.
3906 // The first element of the metadata node is the kind.
3907 llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
3908 GetMDInt(FileID), GetMDString(ParentName),
3909 GetMDInt(Line), GetMDInt(E.getOrder())};
3911 // Save this entry in the right position of the ordered entries array.
3912 OrderedEntries[E.getOrder()] = &E;
3914 // Add metadata to the named metadata node.
3915 MD->addOperand(llvm::MDNode::get(C, Ops));
3918 OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
3919 TargetRegionMetadataEmitter);
3921 // Create function that emits metadata for each device global variable entry;
3922 auto &&DeviceGlobalVarMetadataEmitter =
3923 [&C, &OrderedEntries, &GetMDInt, &GetMDString,
3924 MD](StringRef MangledName,
3925 const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar
3927 // Generate metadata for global variables. Each entry of this metadata
3929 // - Entry 0 -> Kind of this type of metadata (1).
3930 // - Entry 1 -> Mangled name of the variable.
3931 // - Entry 2 -> Declare target kind.
3932 // - Entry 3 -> Order the entry was created.
3933 // The first element of the metadata node is the kind.
3934 llvm::Metadata *Ops[] = {
3935 GetMDInt(E.getKind()), GetMDString(MangledName),
3936 GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
3938 // Save this entry in the right position of the ordered entries array.
3939 OrderedEntries[E.getOrder()] = &E;
3941 // Add metadata to the named metadata node.
3942 MD->addOperand(llvm::MDNode::get(C, Ops));
3945 OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
3946 DeviceGlobalVarMetadataEmitter);
// Second pass: emit the actual __tgt_offload_entry globals in order,
// diagnosing entries that were never completed with an address/ID.
3948 for (const auto *E : OrderedEntries) {
3949 assert(E && "All ordered entries must exist!");
3950 if (const auto *CE =
3951 dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
3953 if (!CE->getID() || !CE->getAddress()) {
3954 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3955 DiagnosticsEngine::Error,
3956 "Offloading entry for target region is incorrect: either the "
3957 "address or the ID is invalid.");
3958 CGM.getDiags().Report(DiagID);
3961 createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
3962 CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
3963 } else if (const auto *CE =
3964 dyn_cast<OffloadEntriesInfoManagerTy::
3965 OffloadEntryInfoDeviceGlobalVar>(E)) {
3966 OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags =
3967 static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
3970 case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: {
3971 if (!CE->getAddress()) {
3972 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3973 DiagnosticsEngine::Error,
3974 "Offloading entry for declare target variable is incorrect: the "
3975 "address is invalid.");
3976 CGM.getDiags().Report(DiagID);
3981 case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink:
// 'link' entries have an address only on the host; the device keeps
// just the metadata. (NOTE(review): "Declaret" in the message below is
// a typo for "Declare" — string literal, left untouched here.)
3982 assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
3983 (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
3984 "Declaret target link address is set.");
3985 if (CGM.getLangOpts().OpenMPIsDevice)
3987 if (!CE->getAddress()) {
3988 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3989 DiagnosticsEngine::Error,
3990 "Offloading entry for declare target variable is incorrect: the "
3991 "address is invalid.");
3992 CGM.getDiags().Report(DiagID);
3997 createOffloadEntry(CE->getAddress(), CE->getAddress(),
3998 CE->getVarSize().getQuantity(), Flags,
4001 llvm_unreachable("Unsupported entry kind.");
4006 /// Loads all the offload entries information from the host IR
// Device-side only: parses the host bitcode named by -fopenmp-host-ir-file,
// reads the 'omp_offload.info' metadata emitted by
// createOffloadEntriesAndInfoMetadata, and pre-initializes the entry tables
// so device codegen emits matching entries.
4008 void CGOpenMPRuntime::loadOffloadInfoMetadata() {
4009 // If we are in target mode, load the metadata from the host IR. This code has
4010 // to match the metadata creation in createOffloadEntriesAndInfoMetadata().
4012 if (!CGM.getLangOpts().OpenMPIsDevice)
4015 if (CGM.getLangOpts().OMPHostIRFile.empty())
4018 auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
4019 if (auto EC = Buf.getError()) {
4020 CGM.getDiags().Report(diag::err_cannot_open_file)
4021 << CGM.getLangOpts().OMPHostIRFile << EC.message();
// Parse the host module in a throwaway LLVMContext; only the named
// metadata is read from it.
4025 llvm::LLVMContext C;
4026 auto ME = expectedToErrorOrAndEmitErrors(
4027 C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
4029 if (auto EC = ME.getError()) {
4030 unsigned DiagID = CGM.getDiags().getCustomDiagID(
4031 DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
4032 CGM.getDiags().Report(DiagID)
4033 << CGM.getLangOpts().OMPHostIRFile << EC.message();
4037 llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
4041 for (llvm::MDNode *MN : MD->operands()) {
// Helpers to decode i32 / string operands of the metadata node.
4042 auto &&GetMDInt = [MN](unsigned Idx) {
4043 auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
4044 return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
4047 auto &&GetMDString = [MN](unsigned Idx) {
4048 auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
4049 return V->getString();
// Operand 0 is the entry kind; the remaining operand layout must stay in
// sync with the emitters in createOffloadEntriesAndInfoMetadata.
4052 switch (GetMDInt(0)) {
4054 llvm_unreachable("Unexpected metadata!");
4056 case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
4057 OffloadingEntryInfoTargetRegion:
4058 OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
4059 /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
4060 /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
4061 /*Order=*/GetMDInt(5));
4063 case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
4064 OffloadingEntryInfoDeviceGlobalVar:
4065 OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo(
4066 /*MangledName=*/GetMDString(1),
4067 static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
4068 /*Flags=*/GetMDInt(2)),
4069 /*Order=*/GetMDInt(3));
// Lazily builds the kmp_routine_entry_t function-pointer type used for task
// entry routines; cached in KmpRoutineEntryPtrTy/KmpRoutineEntryPtrQTy.
4075 void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
4076 if (!KmpRoutineEntryPtrTy) {
4077 // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
4078 ASTContext &C = CGM.getContext();
4079 QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
4080 FunctionProtoType::ExtProtoInfo EPI;
4081 KmpRoutineEntryPtrQTy = C.getPointerType(
4082 C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
4083 KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
// Lazily builds (and caches) the implicit record type for
// __tgt_offload_entry, matching the field order used by createOffloadEntry.
4087 QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
4088 // Make sure the type of the entry is already created. This is the type we
4090 // struct __tgt_offload_entry{
4091 // void *addr; // Pointer to the offload entry info.
4092 // // (function or global)
4093 // char *name; // Name of the function or global.
4094 // size_t size; // Size of the entry info (0 if it a function).
4095 // int32_t flags; // Flags associated with the entry, e.g. 'link'.
4096 // int32_t reserved; // Reserved, to use by the runtime library.
4098 if (TgtOffloadEntryQTy.isNull()) {
4099 ASTContext &C = CGM.getContext();
4100 RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
4101 RD->startDefinition();
4102 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4103 addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
4104 addFieldToRecordDecl(C, RD, C.getSizeType());
4105 addFieldToRecordDecl(
4106 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4107 addFieldToRecordDecl(
4108 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4109 RD->completeDefinition();
// Packed so the layout matches the runtime's expectation exactly.
4110 RD->addAttr(PackedAttr::CreateImplicit(C));
4111 TgtOffloadEntryQTy = C.getRecordType(RD);
4113 return TgtOffloadEntryQTy;
// Lazily builds (and caches) the implicit record type for __tgt_device_image
// {ImageStart, ImageEnd, EntriesBegin, EntriesEnd}.
4116 QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
4117 // These are the types we need to build:
4118 // struct __tgt_device_image{
4119 // void *ImageStart; // Pointer to the target code start.
4120 // void *ImageEnd; // Pointer to the target code end.
4121 // // We also add the host entries to the device image, as it may be useful
4122 // // for the target runtime to have access to that information.
4123 // __tgt_offload_entry *EntriesBegin; // Begin of the table with all
4125 // __tgt_offload_entry *EntriesEnd; // End of the table with all the
4126 // // entries (non inclusive).
4128 if (TgtDeviceImageQTy.isNull()) {
4129 ASTContext &C = CGM.getContext();
4130 RecordDecl *RD = C.buildImplicitRecord("__tgt_device_image");
4131 RD->startDefinition();
4132 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4133 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4134 addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4135 addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4136 RD->completeDefinition();
4137 TgtDeviceImageQTy = C.getRecordType(RD);
4139 return TgtDeviceImageQTy;
// Lazily builds (and caches) the implicit record type for __tgt_bin_desc
// {NumDevices, DeviceImages, EntriesBegin, EntriesEnd}.
4142 QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
4143 // struct __tgt_bin_desc{
4144 // int32_t NumDevices; // Number of devices supported.
4145 // __tgt_device_image *DeviceImages; // Arrays of device images
4146 // // (one per device).
4147 // __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
4149 // __tgt_offload_entry *EntriesEnd; // End of the table with all the
4150 // // entries (non inclusive).
4152 if (TgtBinaryDescriptorQTy.isNull()) {
4153 ASTContext &C = CGM.getContext();
4154 RecordDecl *RD = C.buildImplicitRecord("__tgt_bin_desc");
4155 RD->startDefinition();
4156 addFieldToRecordDecl(
4157 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4158 addFieldToRecordDecl(C, RD, C.getPointerType(getTgtDeviceImageQTy()));
4159 addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4160 addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4161 RD->completeDefinition();
4162 TgtBinaryDescriptorQTy = C.getRecordType(RD);
4164 return TgtBinaryDescriptorQTy;
// Groups the three declarations involved in privatizing one variable for a
// task: the original variable, its private copy, and the init expression's
// element variable (may be null when no elementwise init is needed —
// NOTE(review): nullability inferred from usage elsewhere; confirm).
4168 struct PrivateHelpersTy {
4169 PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
4170 const VarDecl *PrivateElemInit)
4171 : Original(Original), PrivateCopy(PrivateCopy),
4172 PrivateElemInit(PrivateElemInit) {}
// Variable as written in the source construct.
4173 const VarDecl *Original;
// The task-local private copy of Original.
4174 const VarDecl *PrivateCopy;
// Helper variable used when initializing the private copy element-wise.
4175 const VarDecl *PrivateElemInit;
// A private entry paired with its required alignment.
4177 typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
4178 } // anonymous namespace
// Builds the implicit '.kmp_privates.t' record holding one field per
// privatized variable (non-reference type), propagating any 'aligned'
// attributes from the original declarations onto the fields.
4181 createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
4182 if (!Privates.empty()) {
4183 ASTContext &C = CGM.getContext();
4184 // Build struct .kmp_privates_t. {
4185 // /* private vars */
4187 RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
4188 RD->startDefinition();
4189 for (const auto &Pair : Privates) {
4190 const VarDecl *VD = Pair.second.Original;
// References are privatized by value, hence the non-reference type.
4191 QualType Type = VD->getType().getNonReferenceType();
4192 FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
4193 if (VD->hasAttrs()) {
4194 for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
4195 E(VD->getAttrs().end());
4200 RD->completeDefinition();
// Builds the implicit 'kmp_task_t' record (and the nested 'kmp_cmplrdata_t'
// union of kmp_int32 / routine pointer). Taskloop directives get five extra
// fields: lb, ub (uint64), st (int64), liter (int32) and a reductions
// pointer. Field order must be kept in sync with the KmpTaskTFields enum.
4207 createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
4208 QualType KmpInt32Ty,
4209 QualType KmpRoutineEntryPointerQTy) {
4210 ASTContext &C = CGM.getContext();
4211 // Build struct kmp_task_t {
4213 // kmp_routine_entry_t routine;
4214 // kmp_int32 part_id;
4215 // kmp_cmplrdata_t data1;
4216 // kmp_cmplrdata_t data2;
4217 // For taskloops additional fields:
4222 // void * reductions;
4224 RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
4225 UD->startDefinition();
4226 addFieldToRecordDecl(C, UD, KmpInt32Ty);
4227 addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
4228 UD->completeDefinition();
4229 QualType KmpCmplrdataTy = C.getRecordType(UD);
4230 RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
4231 RD->startDefinition();
4232 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4233 addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
4234 addFieldToRecordDecl(C, RD, KmpInt32Ty);
4235 addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
4236 addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
4237 if (isOpenMPTaskLoopDirective(Kind)) {
4238 QualType KmpUInt64Ty =
4239 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
4240 QualType KmpInt64Ty =
4241 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
4242 addFieldToRecordDecl(C, RD, KmpUInt64Ty);
4243 addFieldToRecordDecl(C, RD, KmpUInt64Ty);
4244 addFieldToRecordDecl(C, RD, KmpInt64Ty);
4245 addFieldToRecordDecl(C, RD, KmpInt32Ty);
4246 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4248 RD->completeDefinition();
// Builds the implicit 'kmp_task_t_with_privates' record wrapping the
// kmp_task_t header plus (only when there are privatized variables) the
// '.kmp_privates.t' record from createPrivatesRecordDecl.
4253 createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
4254 ArrayRef<PrivateDataTy> Privates) {
4255 ASTContext &C = CGM.getContext();
4256 // Build struct kmp_task_t_with_privates {
4257 // kmp_task_t task_data;
4258 // .kmp_privates_t. privates;
4260 RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
4261 RD->startDefinition();
4262 addFieldToRecordDecl(C, RD, KmpTaskTQTy);
4263 if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
4264 addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
4265 RD->completeDefinition();
4269 /// Emit a proxy function which accepts kmp_task_t as the second
4272 /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
4273 /// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
4275 /// tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
4276 /// tt->reductions, tt->shareds);
// The proxy adapts the runtime's task-entry signature (gtid, kmp_task_t*) to
// the outlined TaskFunction's signature, loading each argument out of the
// kmp_task_t record. It always returns 0.
4280 static llvm::Value *
4281 emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
4282 OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
4283 QualType KmpTaskTWithPrivatesPtrQTy,
4284 QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
4285 QualType SharedsPtrTy, llvm::Value *TaskFunction,
4286 llvm::Value *TaskPrivatesMap) {
4287 ASTContext &C = CGM.getContext();
4288 FunctionArgList Args;
4289 ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
4290 ImplicitParamDecl::Other);
4291 ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4292 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
4293 ImplicitParamDecl::Other);
4294 Args.push_back(&GtidArg);
4295 Args.push_back(&TaskTypeArg);
4296 const auto &TaskEntryFnInfo =
4297 CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
4298 llvm::FunctionType *TaskEntryTy =
4299 CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
4300 std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
4301 auto *TaskEntry = llvm::Function::Create(
4302 TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
4303 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
4304 TaskEntry->setDoesNotRecurse();
4305 CodeGenFunction CGF(CGM);
4306 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
4309 // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
4312 // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
4313 // tt->task_data.shareds);
4314 llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
4315 CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
// Base = first field of kmp_task_t_with_privates, i.e. the kmp_task_t header.
4316 LValue TDBase = CGF.EmitLoadOfPointerLValue(
4317 CGF.GetAddrOfLocalVar(&TaskTypeArg),
4318 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4319 const auto *KmpTaskTWithPrivatesQTyRD =
4320 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
4322 CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4323 const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
// part_id is passed by address (pointer to the field), not by value.
4324 auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
4325 LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
4326 llvm::Value *PartidParam = PartIdLVal.getPointer();
4328 auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
4329 LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
4330 llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4331 CGF.EmitLoadOfScalar(SharedsLVal, Loc),
4332 CGF.ConvertTypeForMem(SharedsPtrTy));
// The privates field exists only when the task has privates; otherwise pass
// a null void*.
4334 auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
4335 llvm::Value *PrivatesParam;
4336 if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
4337 LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
4338 PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4339 PrivatesLVal.getPointer(), CGF.VoidPtrTy);
4341 PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4344 llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
4347 .CreatePointerBitCastOrAddrSpaceCast(
4348 TDBase.getAddress(), CGF.VoidPtrTy)
4350 SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
4351 std::end(CommonArgs));
// Taskloop variants receive five extra loaded values: lb, ub, st, liter and
// the reductions pointer, all read from the kmp_task_t header.
4352 if (isOpenMPTaskLoopDirective(Kind)) {
4353 auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
4354 LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
4355 llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
4356 auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
4357 LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
4358 llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
4359 auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
4360 LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
4361 llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
4362 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4363 LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
4364 llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
4365 auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
4366 LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
4367 llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
4368 CallArgs.push_back(LBParam);
4369 CallArgs.push_back(UBParam);
4370 CallArgs.push_back(StParam);
4371 CallArgs.push_back(LIParam);
4372 CallArgs.push_back(RParam);
4374 CallArgs.push_back(SharedsParam);
4376 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
// The runtime expects a kmp_int32 result; this proxy always returns 0.
4378 CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
4379 CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
4380 CGF.FinishFunction();
// Emits the internal '.omp_task_destructor.' function: given (gtid,
// kmp_task_t_with_privates*), it walks the privates record (second field of
// the wrapper) and pushes a destroy cleanup for every field whose type needs
// destruction.
4384 static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
4386 QualType KmpInt32Ty,
4387 QualType KmpTaskTWithPrivatesPtrQTy,
4388 QualType KmpTaskTWithPrivatesQTy) {
4389 ASTContext &C = CGM.getContext();
4390 FunctionArgList Args;
4391 ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
4392 ImplicitParamDecl::Other);
4393 ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4394 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
4395 ImplicitParamDecl::Other);
4396 Args.push_back(&GtidArg);
4397 Args.push_back(&TaskTypeArg);
4398 const auto &DestructorFnInfo =
4399 CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
4400 llvm::FunctionType *DestructorFnTy =
4401 CGM.getTypes().GetFunctionType(DestructorFnInfo);
4403 CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
4404 auto *DestructorFn =
4405 llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
4406 Name, &CGM.getModule());
4407 CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
4409 DestructorFn->setDoesNotRecurse();
4410 CodeGenFunction CGF(CGM);
4411 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
// Base -> *TaskTypeArg, then rebased onto the privates record (field #1).
4414 LValue Base = CGF.EmitLoadOfPointerLValue(
4415 CGF.GetAddrOfLocalVar(&TaskTypeArg),
4416 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4417 const auto *KmpTaskTWithPrivatesQTyRD =
4418 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
4419 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4420 Base = CGF.EmitLValueForField(Base, *FI);
4421 for (const auto *Field :
4422 cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
// isDestructedType() returns a non-None DestructionKind for types that need
// cleanup; the converted-to-bool check skips trivially-destructible fields.
4423 if (QualType::DestructionKind DtorKind =
4424 Field->getType().isDestructedType()) {
4425 LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
4426 CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
4429 CGF.FinishFunction();
4430 return DestructorFn;
4433 /// Emit a privates mapping function for correct handling of private and
4434 /// firstprivate variables.
4436 /// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
4437 /// **noalias priv1,..., <tyn> **noalias privn) {
4438 /// *priv1 = &.privates.priv1;
4440 /// *privn = &.privates.privn;
// The generated function takes the privates record pointer plus one T**
// out-parameter per private/firstprivate/lastprivate variable and stores the
// address of each record field through the matching out-parameter.
4443 static llvm::Value *
4444 emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
4445 ArrayRef<const Expr *> PrivateVars,
4446 ArrayRef<const Expr *> FirstprivateVars,
4447 ArrayRef<const Expr *> LastprivateVars,
4448 QualType PrivatesQTy,
4449 ArrayRef<PrivateDataTy> Privates) {
4450 ASTContext &C = CGM.getContext();
4451 FunctionArgList Args;
4452 ImplicitParamDecl TaskPrivatesArg(
4453 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4454 C.getPointerType(PrivatesQTy).withConst().withRestrict(),
4455 ImplicitParamDecl::Other);
4456 Args.push_back(&TaskPrivatesArg);
// PrivateVarsPos maps each original VarDecl to its 1-based argument index
// (index 0 is the privates-record pointer itself).
4457 llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos;
4458 unsigned Counter = 1;
4459 for (const Expr *E : PrivateVars) {
4460 Args.push_back(ImplicitParamDecl::Create(
4461 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4462 C.getPointerType(C.getPointerType(E->getType()))
4465 ImplicitParamDecl::Other));
4466 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4467 PrivateVarsPos[VD] = Counter;
4470 for (const Expr *E : FirstprivateVars) {
4471 Args.push_back(ImplicitParamDecl::Create(
4472 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4473 C.getPointerType(C.getPointerType(E->getType()))
4476 ImplicitParamDecl::Other));
4477 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4478 PrivateVarsPos[VD] = Counter;
4481 for (const Expr *E : LastprivateVars) {
4482 Args.push_back(ImplicitParamDecl::Create(
4483 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4484 C.getPointerType(C.getPointerType(E->getType()))
4487 ImplicitParamDecl::Other));
4488 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4489 PrivateVarsPos[VD] = Counter;
4492 const auto &TaskPrivatesMapFnInfo =
4493 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4494 llvm::FunctionType *TaskPrivatesMapTy =
4495 CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
4497 CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
4498 auto *TaskPrivatesMap = llvm::Function::Create(
4499 TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
4501 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
4502 TaskPrivatesMapFnInfo);
// Force inlining of this trivial mapper even at -O0 (strip the NoInline and
// OptimizeNone attributes that SetInternalFunctionAttributes may have added).
4503 TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
4504 TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
4505 TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
4506 CodeGenFunction CGF(CGM);
4507 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
4508 TaskPrivatesMapFnInfo, Args, Loc, Loc);
4510 // *privi = &.privates.privi;
4511 LValue Base = CGF.EmitLoadOfPointerLValue(
4512 CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
4513 TaskPrivatesArg.getType()->castAs<PointerType>());
4514 const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
// For each privates-record field, find the out-parameter for the original
// variable (via PrivateVarsPos) and store the field address through it.
4516 for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
4517 LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
4518 const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
4520 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
4521 LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
4522 RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
4523 CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal);
4526 CGF.FinishFunction();
4527 return TaskPrivatesMap;
// Orders PrivateDataTy entries by descending 'first' member; the caller
// (emitTaskInit) uses it with std::stable_sort to sort privates by alignment
// while keeping declaration order among equal alignments.
// NOTE(review): parameters are taken by value despite the 'const' — a const
// reference would avoid copying; confirm PrivateDataTy's size before changing.
4530 static bool stable_sort_comparator(const PrivateDataTy P1,
4531 const PrivateDataTy P2) {
4532 return P1.first > P2.first;
4535 /// Emit initialization for private variables in task-based directives.
// Copies/constructs each private in the task's privates record. ForDup=true
// restricts emission to non-trivial constructor initializers (used from the
// task_dup function); firstprivates (Pair.second.PrivateElemInit != null) are
// initialized from the shareds area, others from their own initializer.
4536 static void emitPrivatesInit(CodeGenFunction &CGF,
4537 const OMPExecutableDirective &D,
4538 Address KmpTaskSharedsPtr, LValue TDBase,
4539 const RecordDecl *KmpTaskTWithPrivatesQTyRD,
4540 QualType SharedsTy, QualType SharedsPtrTy,
4541 const OMPTaskDataTy &Data,
4542 ArrayRef<PrivateDataTy> Privates, bool ForDup) {
4543 ASTContext &C = CGF.getContext();
4544 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4545 LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
4546 OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
4549 const CapturedStmt &CS = *D.getCapturedStmt(Kind);
4550 CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
4553 isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
4554 isOpenMPTargetExecutionDirective(D.getDirectiveKind());
4555 // For target-based directives skip 3 firstprivate arrays BasePointersArray,
4556 // PointersArray and SizesArray. The original variables for these arrays are
4557 // not captured and we get their addresses explicitly.
4558 if ((!IsTargetTask && !Data.FirstprivateVars.empty()) ||
4559 (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
4560 SrcBase = CGF.MakeAddrLValue(
4561 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4562 KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
// Walk the privates-record fields in lockstep with the Privates array (both
// follow the same sorted order).
4565 FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
4566 for (const PrivateDataTy &Pair : Privates) {
4567 const VarDecl *VD = Pair.second.PrivateCopy;
4568 const Expr *Init = VD->getAnyInitializer();
// In ForDup mode only re-run non-trivial constructor initializers; simple
// copies were already done when the original task was created.
4569 if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
4570 !CGF.isTrivialInitializer(Init)))) {
4571 LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
4572 if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
4573 const VarDecl *OriginalVD = Pair.second.Original;
4574 // Check if the variable is the target-based BasePointersArray,
4575 // PointersArray or SizesArray.
4576 LValue SharedRefLValue;
4577 QualType Type = OriginalVD->getType();
4578 const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
4579 if (IsTargetTask && !SharedField) {
4580 assert(isa<ImplicitParamDecl>(OriginalVD) &&
4581 isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
4582 cast<CapturedDecl>(OriginalVD->getDeclContext())
4583 ->getNumParams() == 0 &&
4584 isa<TranslationUnitDecl>(
4585 cast<CapturedDecl>(OriginalVD->getDeclContext())
4586 ->getDeclContext()) &&
4587 "Expected artificial target data variable.");
4589 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
4591 SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
// Re-wrap with the original decl's alignment; the field lvalue's alignment
// comes from the record layout, not from the captured variable.
4592 SharedRefLValue = CGF.MakeAddrLValue(
4593 Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
4594 SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
4595 SharedRefLValue.getTBAAInfo());
4597 if (Type->isArrayType()) {
4598 // Initialize firstprivate array.
4599 if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
4600 // Perform simple memcpy.
4601 CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
4603 // Initialize firstprivate array using element-by-element
4605 CGF.EmitOMPAggregateAssign(
4606 PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
4607 [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
4608 Address SrcElement) {
4609 // Clean up any temporaries needed by the initialization.
4610 CodeGenFunction::OMPPrivateScope InitScope(CGF);
4611 InitScope.addPrivate(
4612 Elem, [SrcElement]() -> Address { return SrcElement; });
4613 (void)InitScope.Privatize();
4614 // Emit initialization for single element.
4615 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
4616 CGF, &CapturesInfo);
4617 CGF.EmitAnyExprToMem(Init, DestElement,
4618 Init->getType().getQualifiers(),
4619 /*IsInitializer=*/false);
// Non-array firstprivate: privatize Elem to the shared address and run the
// initializer expression into the private field.
4623 CodeGenFunction::OMPPrivateScope InitScope(CGF);
4624 InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
4625 return SharedRefLValue.getAddress();
4627 (void)InitScope.Privatize();
4628 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
4629 CGF.EmitExprAsInit(Init, VD, PrivateLValue,
4630 /*capturedByInit=*/false);
// Plain private (no element init): run the copy's own initializer, if any.
4633 CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
4640 /// Check if duplication function is required for taskloops.
// Returns true if any private copy has a non-trivial CXXConstructExpr
// initializer, i.e. the task_dup function must re-run construction.
4641 static bool checkInitIsRequired(CodeGenFunction &CGF,
4642 ArrayRef<PrivateDataTy> Privates) {
4643 bool InitRequired = false;
4644 for (const PrivateDataTy &Pair : Privates) {
4645 const VarDecl *VD = Pair.second.PrivateCopy;
4646 const Expr *Init = VD->getAnyInitializer();
4647 InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
4648 !CGF.isTrivialInitializer(Init));
4652 return InitRequired;
4656 /// Emit task_dup function (for initialization of
4657 /// private/firstprivate/lastprivate vars and last_iter flag)
4659 /// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
4661 /// // setup lastprivate flag
4662 /// task_dst->last = lastpriv;
4663 /// // could be constructor calls here...
4666 static llvm::Value *
4667 emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
4668 const OMPExecutableDirective &D,
4669 QualType KmpTaskTWithPrivatesPtrQTy,
4670 const RecordDecl *KmpTaskTWithPrivatesQTyRD,
4671 const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
4672 QualType SharedsPtrTy, const OMPTaskDataTy &Data,
4673 ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
4674 ASTContext &C = CGM.getContext();
4675 FunctionArgList Args;
// Three implicit params: destination task, source task, lastprivate flag.
4676 ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4677 KmpTaskTWithPrivatesPtrQTy,
4678 ImplicitParamDecl::Other);
4679 ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4680 KmpTaskTWithPrivatesPtrQTy,
4681 ImplicitParamDecl::Other);
4682 ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
4683 ImplicitParamDecl::Other);
4684 Args.push_back(&DstArg);
4685 Args.push_back(&SrcArg);
4686 Args.push_back(&LastprivArg);
4687 const auto &TaskDupFnInfo =
4688 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4689 llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
4690 std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
4691 auto *TaskDup = llvm::Function::Create(
4692 TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
4693 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
4694 TaskDup->setDoesNotRecurse();
4695 CodeGenFunction CGF(CGM);
4696 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
4699 LValue TDBase = CGF.EmitLoadOfPointerLValue(
4700 CGF.GetAddrOfLocalVar(&DstArg),
4701 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4702 // task_dst->liter = lastpriv;
4704 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4705 LValue Base = CGF.EmitLValueForField(
4706 TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4707 LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
4708 llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
4709 CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
4710 CGF.EmitStoreOfScalar(Lastpriv, LILVal);
4713 // Emit initial values for private copies (if any).
4714 assert(!Privates.empty());
4715 Address KmpTaskSharedsPtr = Address::invalid();
// Firstprivates copy from the SOURCE task's shareds area; locate it via the
// source task header's shareds field.
4716 if (!Data.FirstprivateVars.empty()) {
4717 LValue TDBase = CGF.EmitLoadOfPointerLValue(
4718 CGF.GetAddrOfLocalVar(&SrcArg),
4719 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4720 LValue Base = CGF.EmitLValueForField(
4721 TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4722 KmpTaskSharedsPtr = Address(
4723 CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
4724 Base, *std::next(KmpTaskTQTyRD->field_begin(),
4727 CGF.getNaturalTypeAlignment(SharedsTy));
// ForDup=true: only non-trivial constructor initializers are re-emitted.
4729 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
4730 SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
4731 CGF.FinishFunction();
4735 /// Checks if destructor function is required to be generated.
4736 /// \return true if cleanups are required, false otherwise.
// Inspects the privates record (field #1 of kmp_task_t_with_privates) and
// reports whether any field's type needs destruction.
4738 checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
4739 bool NeedsCleanup = false;
4740 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
4741 const auto *PrivateRD = cast<RecordDecl>(FI->getType()->getAsTagDecl());
4742 for (const FieldDecl *FD : PrivateRD->fields()) {
4743 NeedsCleanup = NeedsCleanup || FD->getType().isDestructedType();
4747 return NeedsCleanup;
// Builds everything needed to create a task for directive D: the sorted
// privates list, the kmp_task_t(_with_privates) record types, the proxy
// entry / privates-map / destructor / task_dup helper functions, the
// __kmpc_omp_task_alloc call, shareds copy and privates initialization.
// Returns the pieces in a TaskResultTy for the caller to emit the actual
// task-spawning runtime call.
4750 CGOpenMPRuntime::TaskResultTy
4751 CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
4752 const OMPExecutableDirective &D,
4753 llvm::Value *TaskFunction, QualType SharedsTy,
4754 Address Shareds, const OMPTaskDataTy &Data) {
4755 ASTContext &C = CGM.getContext();
4756 llvm::SmallVector<PrivateDataTy, 4> Privates;
4757 // Aggregate privates and sort them by the alignment.
4758 auto I = Data.PrivateCopies.begin();
4759 for (const Expr *E : Data.PrivateVars) {
4760 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4761 Privates.emplace_back(
4763 PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4764 /*PrivateElemInit=*/nullptr));
// Firstprivates carry an element-init decl used when copying from shareds.
4767 I = Data.FirstprivateCopies.begin();
4768 auto IElemInitRef = Data.FirstprivateInits.begin();
4769 for (const Expr *E : Data.FirstprivateVars) {
4770 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4771 Privates.emplace_back(
4774 VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4775 cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
4779 I = Data.LastprivateCopies.begin();
4780 for (const Expr *E : Data.LastprivateVars) {
4781 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4782 Privates.emplace_back(
4784 PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4785 /*PrivateElemInit=*/nullptr));
// Stable sort keeps declaration order among privates of equal alignment.
4788 std::stable_sort(Privates.begin(), Privates.end(), stable_sort_comparator);
4789 QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
4790 // Build type kmp_routine_entry_t (if not built yet).
4791 emitKmpRoutineEntryT(KmpInt32Ty);
4792 // Build type kmp_task_t (if not built yet).
// Taskloop and plain task/target use differently-shaped kmp_task_t records;
// each is built once and cached in a Saved* member.
4793 if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
4794 if (SavedKmpTaskloopTQTy.isNull()) {
4795 SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4796 CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4798 KmpTaskTQTy = SavedKmpTaskloopTQTy;
4800 assert((D.getDirectiveKind() == OMPD_task ||
4801 isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
4802 isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
4803 "Expected taskloop, task or target directive");
4804 if (SavedKmpTaskTQTy.isNull()) {
4805 SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4806 CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4808 KmpTaskTQTy = SavedKmpTaskTQTy;
4810 const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
4811 // Build particular struct kmp_task_t for the given task.
4812 const RecordDecl *KmpTaskTWithPrivatesQTyRD =
4813 createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
4814 QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
4815 QualType KmpTaskTWithPrivatesPtrQTy =
4816 C.getPointerType(KmpTaskTWithPrivatesQTy);
4817 llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
4818 llvm::Type *KmpTaskTWithPrivatesPtrTy =
4819 KmpTaskTWithPrivatesTy->getPointerTo();
4820 llvm::Value *KmpTaskTWithPrivatesTySize =
4821 CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
4822 QualType SharedsPtrTy = C.getPointerType(SharedsTy);
4824 // Emit initial values for private copies (if any).
// TaskFunction's 4th parameter is the privates-map function pointer; its
// llvm type is taken from the outlined function itself.
4825 llvm::Value *TaskPrivatesMap = nullptr;
4826 llvm::Type *TaskPrivatesMapTy =
4827 std::next(cast<llvm::Function>(TaskFunction)->arg_begin(), 3)->getType();
4828 if (!Privates.empty()) {
4829 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4830 TaskPrivatesMap = emitTaskPrivateMappingFunction(
4831 CGM, Loc, Data.PrivateVars, Data.FirstprivateVars, Data.LastprivateVars,
4832 FI->getType(), Privates);
4833 TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4834 TaskPrivatesMap, TaskPrivatesMapTy);
4836 TaskPrivatesMap = llvm::ConstantPointerNull::get(
4837 cast<llvm::PointerType>(TaskPrivatesMapTy));
4839 // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
4841 llvm::Value *TaskEntry = emitProxyTaskFunction(
4842 CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4843 KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
4846 // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
4847 // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
4848 // kmp_routine_entry_t *task_entry);
4849 // Task flags. Format is taken from
4850 // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h,
4851 // description of kmp_tasking_flags struct.
4855 DestructorsFlag = 0x8,
4858 unsigned Flags = Data.Tied ? TiedFlag : 0;
4859 bool NeedsCleanup = false;
4860 if (!Privates.empty()) {
4861 NeedsCleanup = checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD);
4863 Flags = Flags | DestructorsFlag;
4865 if (Data.Priority.getInt())
4866 Flags = Flags | PriorityFlag;
// 'final' may be a dynamic condition (select at runtime) or a constant.
4867 llvm::Value *TaskFlags =
4868 Data.Final.getPointer()
4869 ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
4870 CGF.Builder.getInt32(FinalFlag),
4871 CGF.Builder.getInt32(/*C=*/0))
4872 : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
4873 TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
4874 llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
4875 llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
4876 getThreadID(CGF, Loc), TaskFlags,
4877 KmpTaskTWithPrivatesTySize, SharedsSize,
4878 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4879 TaskEntry, KmpRoutineEntryPtrTy)};
4880 llvm::Value *NewTask = CGF.EmitRuntimeCall(
4881 createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
4882 llvm::Value *NewTaskNewTaskTTy =
4883 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4884 NewTask, KmpTaskTWithPrivatesPtrTy);
4885 LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
4886 KmpTaskTWithPrivatesQTy);
4888 CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
4889 // Fill the data in the resulting kmp_task_t record.
4890 // Copy shareds if there are any.
4891 Address KmpTaskSharedsPtr = Address::invalid();
4892 if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
4894 Address(CGF.EmitLoadOfScalar(
4895 CGF.EmitLValueForField(
4896 TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
4899 CGF.getNaturalTypeAlignment(SharedsTy));
4900 LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
4901 LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
4902 CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
4904 // Emit initial values for private copies (if any).
4905 TaskResultTy Result;
4906 if (!Privates.empty()) {
4907 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
4908 SharedsTy, SharedsPtrTy, Data, Privates,
// Taskloops additionally need a task_dup function when there are
// lastprivates or any non-trivial private initializers.
4910 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
4911 (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
4912 Result.TaskDupFn = emitTaskDupFunction(
4913 CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
4914 KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
4915 /*WithLastIter=*/!Data.LastprivateVars.empty());
4918 // Fields of union "kmp_cmplrdata_t" for destructors and priority.
4919 enum { Priority = 0, Destructors = 1 };
4920 // Provide pointer to function with destructors for privates.
4921 auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
4922 const RecordDecl *KmpCmplrdataUD =
4923 (*FI)->getType()->getAsUnionType()->getDecl();
4925 llvm::Value *DestructorFn = emitDestructorsFunction(
4926 CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4927 KmpTaskTWithPrivatesQTy);
4928 LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
4929 LValue DestructorsLV = CGF.EmitLValueForField(
4930 Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
4931 CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4932 DestructorFn, KmpRoutineEntryPtrTy),
// Priority is stored in data2's Priority union member when requested.
4936 if (Data.Priority.getInt()) {
4937 LValue Data2LV = CGF.EmitLValueForField(
4938 TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
4939 LValue PriorityLV = CGF.EmitLValueForField(
4940 Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
4941 CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
4943 Result.NewTask = NewTask;
4944 Result.TaskEntry = TaskEntry;
4945 Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
4946 Result.TDBase = TDBase;
4947 Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
4951 void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
4952 const OMPExecutableDirective &D,
4953 llvm::Value *TaskFunction,
4954 QualType SharedsTy, Address Shareds,
4956 const OMPTaskDataTy &Data) {
4957 if (!CGF.HaveInsertPoint())
4960 TaskResultTy Result =
4961 emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
4962 llvm::Value *NewTask = Result.NewTask;
4963 llvm::Value *TaskEntry = Result.TaskEntry;
4964 llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
4965 LValue TDBase = Result.TDBase;
4966 const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
4967 ASTContext &C = CGM.getContext();
4968 // Process list of dependences.
4969 Address DependenciesArray = Address::invalid();
4970 unsigned NumDependencies = Data.Dependences.size();
4971 if (NumDependencies) {
4972 // Dependence kind for RTL.
4973 enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3 };
4974 enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
4975 RecordDecl *KmpDependInfoRD;
4977 C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
4978 llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
4979 if (KmpDependInfoTy.isNull()) {
4980 KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
4981 KmpDependInfoRD->startDefinition();
4982 addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
4983 addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
4984 addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
4985 KmpDependInfoRD->completeDefinition();
4986 KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
4988 KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4990 CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
4991 // Define type kmp_depend_info[<Dependences.size()>];
4992 QualType KmpDependInfoArrayTy = C.getConstantArrayType(
4993 KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
4994 ArrayType::Normal, /*IndexTypeQuals=*/0);
4995 // kmp_depend_info[<Dependences.size()>] deps;
4997 CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
4998 for (unsigned I = 0; I < NumDependencies; ++I) {
4999 const Expr *E = Data.Dependences[I].second;
5000 LValue Addr = CGF.EmitLValue(E);
5002 QualType Ty = E->getType();
5003 if (const auto *ASE =
5004 dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
5006 CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
5007 llvm::Value *UpAddr =
5008 CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
5009 llvm::Value *LowIntPtr =
5010 CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
5011 llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
5012 Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
5014 Size = CGF.getTypeSize(Ty);
5016 LValue Base = CGF.MakeAddrLValue(
5017 CGF.Builder.CreateConstArrayGEP(DependenciesArray, I, DependencySize),
5019 // deps[i].base_addr = &<Dependences[i].second>;
5020 LValue BaseAddrLVal = CGF.EmitLValueForField(
5021 Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
5022 CGF.EmitStoreOfScalar(
5023 CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
5025 // deps[i].len = sizeof(<Dependences[i].second>);
5026 LValue LenLVal = CGF.EmitLValueForField(
5027 Base, *std::next(KmpDependInfoRD->field_begin(), Len));
5028 CGF.EmitStoreOfScalar(Size, LenLVal);
5029 // deps[i].flags = <Dependences[i].first>;
5030 RTLDependenceKindTy DepKind;
5031 switch (Data.Dependences[I].first) {
5032 case OMPC_DEPEND_in:
5035 // Out and InOut dependencies must use the same code.
5036 case OMPC_DEPEND_out:
5037 case OMPC_DEPEND_inout:
5040 case OMPC_DEPEND_source:
5041 case OMPC_DEPEND_sink:
5042 case OMPC_DEPEND_unknown:
5043 llvm_unreachable("Unknown task dependence type");
5045 LValue FlagsLVal = CGF.EmitLValueForField(
5046 Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
5047 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
5050 DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5051 CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
5055 // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
5057 // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
5058 // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
5059 // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence
5060 // list is not empty
5061 llvm::Value *ThreadID = getThreadID(CGF, Loc);
5062 llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
5063 llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
5064 llvm::Value *DepTaskArgs[7];
5065 if (NumDependencies) {
5066 DepTaskArgs[0] = UpLoc;
5067 DepTaskArgs[1] = ThreadID;
5068 DepTaskArgs[2] = NewTask;
5069 DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
5070 DepTaskArgs[4] = DependenciesArray.getPointer();
5071 DepTaskArgs[5] = CGF.Builder.getInt32(0);
5072 DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5074 auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, NumDependencies,
5076 &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
5078 auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
5079 LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
5080 CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
5082 if (NumDependencies) {
5083 CGF.EmitRuntimeCall(
5084 createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps), DepTaskArgs);
5086 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
5089 // Check if parent region is untied and build return for untied task;
5091 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
5092 Region->emitUntiedSwitch(CGF);
5095 llvm::Value *DepWaitTaskArgs[6];
5096 if (NumDependencies) {
5097 DepWaitTaskArgs[0] = UpLoc;
5098 DepWaitTaskArgs[1] = ThreadID;
5099 DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
5100 DepWaitTaskArgs[3] = DependenciesArray.getPointer();
5101 DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
5102 DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5104 auto &&ElseCodeGen = [&TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
5105 NumDependencies, &DepWaitTaskArgs,
5106 Loc](CodeGenFunction &CGF, PrePostActionTy &) {
5107 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
5108 CodeGenFunction::RunCleanupsScope LocalScope(CGF);
5109 // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
5110 // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
5111 // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
5113 if (NumDependencies)
5114 CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
5116 // Call proxy_task_entry(gtid, new_task);
5117 auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
5118 Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
5120 llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
5121 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
5125 // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
5126 // kmp_task_t *new_task);
5127 // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
5128 // kmp_task_t *new_task);
5129 RegionCodeGenTy RCG(CodeGen);
5130 CommonActionTy Action(
5131 RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0), TaskArgs,
5132 RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0), TaskArgs);
5133 RCG.setAction(Action);
5138 emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
5140 RegionCodeGenTy ThenRCG(ThenCodeGen);
// NOTE(review): the embedded original line numbers in this excerpt are
// non-contiguous — some source lines are elided here; comments below describe
// only what is visible.
/// Emits the code for an OpenMP 'taskloop' directive: initializes the
/// kmp_task_t object via emitTaskInit(), fills in the loop bounds/stride and
/// reductions fields of the task record, then calls __kmpc_taskloop.
/// \param D The taskloop-based directive being emitted.
/// \param TaskFunction The outlined task entry function.
/// \param SharedsTy/Shareds Type and address of the captured shareds record.
/// \param Data Clause-derived task parameters (schedule, reductions, ...).
5145 void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
5146 const OMPLoopDirective &D,
5147 llvm::Value *TaskFunction,
5148 QualType SharedsTy, Address Shareds,
5150 const OMPTaskDataTy &Data) {
// No insertion point means the current block is unreachable; emit nothing.
5151 if (!CGF.HaveInsertPoint())
5153 TaskResultTy Result =
5154 emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
5155 // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
5157 // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
5158 // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
5159 // sched, kmp_uint64 grainsize, void *task_dup);
5160 llvm::Value *ThreadID = getThreadID(CGF, Loc);
5161 llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
// if_val: evaluated 'if' clause condition when present, otherwise constant 1.
5164 IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
5167 IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
// Initialize the lower-bound field of the task record from the loop's
// lower-bound variable initializer.
5170 LValue LBLVal = CGF.EmitLValueForField(
5172 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
5174 cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
5175 CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
5176 /*IsInitializer=*/true);
// Same for the upper bound.
5177 LValue UBLVal = CGF.EmitLValueForField(
5179 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
5181 cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
5182 CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
5183 /*IsInitializer=*/true);
// Same for the stride.
5184 LValue StLVal = CGF.EmitLValueForField(
5186 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
5188 cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
5189 CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
5190 /*IsInitializer=*/true);
5191 // Store reductions address.
5192 LValue RedLVal = CGF.EmitLValueForField(
5194 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
5195 if (Data.Reductions) {
5196 CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
// No task reductions: null out the field.
5198 CGF.EmitNullInitialization(RedLVal.getAddress(),
5199 CGF.getContext().VoidPtrTy);
// Scheduling kind encoding expected by __kmpc_taskloop's 'sched' argument.
5201 enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
5202 llvm::Value *TaskArgs[] = {
5207 LBLVal.getPointer(),
5208 UBLVal.getPointer(),
5209 CGF.EmitLoadOfScalar(StLVal, Loc),
5210 llvm::ConstantInt::getNullValue(
5211 CGF.IntTy), // Always 0 because taskgroup emitted by the compiler
// Data.Schedule is a PointerIntPair: pointer = grainsize/num_tasks value,
// int flag selects NumTasks vs Grainsize scheduling.
5212 llvm::ConstantInt::getSigned(
5213 CGF.IntTy, Data.Schedule.getPointer()
5214 ? Data.Schedule.getInt() ? NumTasks : Grainsize
5216 Data.Schedule.getPointer()
5217 ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
5219 : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
// task_dup callback, or null if no duplication function was generated.
5220 Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5221 Result.TaskDupFn, CGF.VoidPtrTy)
5222 : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
5223 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskloop), TaskArgs);
// NOTE(review): embedded original line numbers are non-contiguous — some
// source lines are elided in this excerpt.
5226 /// Emit reduction operation for each element of array (required for
5227 /// array sections) LHS op = RHS.
5228 /// \param Type Type of array.
5229 /// \param LHSVar Variable on the left side of the reduction operation
5230 /// (references element of array in original variable).
5231 /// \param RHSVar Variable on the right side of the reduction operation
5232 /// (references element of array in original variable).
5233 /// \param RedOpGen Generator of reduction operation with use of LHSVar and
/// \param XExpr/EExpr/UpExpr Optional expressions forwarded unchanged to
/// RedOpGen on every iteration (used by the atomic-reduction path).
5235 static void EmitOMPAggregateReduction(
5236 CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
5237 const VarDecl *RHSVar,
5238 const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
5239 const Expr *, const Expr *)> &RedOpGen,
5240 const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
5241 const Expr *UpExpr = nullptr) {
5242 // Perform element-by-element initialization.
5244 Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
5245 Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
5247 // Drill down to the base element type on both arrays.
5248 const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
5249 llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
5251 llvm::Value *RHSBegin = RHSAddr.getPointer();
5252 llvm::Value *LHSBegin = LHSAddr.getPointer();
5253 // Cast from pointer to array type to pointer to single element.
5254 llvm::Value *LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
5255 // The basic structure here is a while-do loop.
5256 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
5257 llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
// Skip the loop entirely for a zero-length section.
5258 llvm::Value *IsEmpty =
5259 CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
5260 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
5262 // Enter the loop body, making that address the current address.
5263 llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
5264 CGF.EmitBlock(BodyBB);
5266 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
// PHIs carry the current source/destination element pointers around the loop.
5268 llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
5269 RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
5270 RHSElementPHI->addIncoming(RHSBegin, EntryBB);
5271 Address RHSElementCurrent =
5272 Address(RHSElementPHI,
5273 RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
5275 llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
5276 LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
5277 LHSElementPHI->addIncoming(LHSBegin, EntryBB);
5278 Address LHSElementCurrent =
5279 Address(LHSElementPHI,
5280 LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Remap LHSVar/RHSVar to the current element addresses so RedOpGen operates
// on one element per iteration.
5283 CodeGenFunction::OMPPrivateScope Scope(CGF);
5284 Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; });
5285 Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; });
5287 RedOpGen(CGF, XExpr, EExpr, UpExpr);
5288 Scope.ForceCleanup();
5290 // Shift the address forward by one element.
5291 llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
5292 LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
5293 llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
5294 RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
5295 // Check whether we've reached the end.
5297 CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
5298 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
// Back-edges for the element-pointer PHIs come from the current block, which
// may differ from BodyBB if RedOpGen emitted control flow.
5299 LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
5300 RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());
5303 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
5306 /// Emit reduction combiner. If the combiner is a simple expression emit it as
5307 /// is, otherwise consider it as combiner of UDR decl and emit it as a call of
5308 /// UDR combiner function.
/// \param ReductionOp The combiner expression from the reduction clause.
5309 static void emitReductionCombiner(CodeGenFunction &CGF,
5310 const Expr *ReductionOp) {
// A user-defined-reduction combiner is represented as a CallExpr whose callee
// is an OpaqueValueExpr ultimately referring to an OMPDeclareReductionDecl.
5311 if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
5312 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
5313 if (const auto *DRE =
5314 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
5315 if (const auto *DRD =
5316 dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
// Reduction.first is the combiner function; bind it as the callee via an
// opaque-value mapping, then emit the call.
5317 std::pair<llvm::Function *, llvm::Function *> Reduction =
5318 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
5319 RValue Func = RValue::get(Reduction.first);
5320 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
5321 CGF.EmitIgnoredExpr(ReductionOp);
// Not a UDR: emit the combiner expression directly.
5324 CGF.EmitIgnoredExpr(ReductionOp);
// NOTE(review): embedded original line numbers are non-contiguous — some
// source lines are elided in this excerpt.
/// Emits the 'reduce_func(void *LHSArg, void *RHSArg)' helper passed to
/// __kmpc_reduce{_nowait}: it casts both arguments to arrays of per-item
/// pointers and applies each reduction combiner element-wise.
/// \param ArgsType LLVM type the void* arguments are cast to (pointer to the
/// RedList array type).
/// \return The generated internal-linkage function.
5327 llvm::Value *CGOpenMPRuntime::emitReductionFunction(
5328 CodeGenModule &CGM, SourceLocation Loc, llvm::Type *ArgsType,
5329 ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
5330 ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
5331 ASTContext &C = CGM.getContext();
5333 // void reduction_func(void *LHSArg, void *RHSArg);
5334 FunctionArgList Args;
5335 ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5336 ImplicitParamDecl::Other);
5337 ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5338 ImplicitParamDecl::Other);
5339 Args.push_back(&LHSArg);
5340 Args.push_back(&RHSArg);
5342 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5343 std::string Name = getName({"omp", "reduction", "reduction_func"});
5344 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
5345 llvm::GlobalValue::InternalLinkage, Name,
5347 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
5348 Fn->setDoesNotRecurse();
5349 CodeGenFunction CGF(CGM);
5350 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
5352 // Dst = (void*[n])(LHSArg);
5353 // Src = (void*[n])(RHSArg);
5354 Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5355 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
5356 ArgsType), CGF.getPointerAlign());
5357 Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5358 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
5359 ArgsType), CGF.getPointerAlign());
5362 // *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
// Remap every LHS/RHS variable to the address loaded from the corresponding
// RedList slot so the combiner expressions below act on the real operands.
5364 CodeGenFunction::OMPPrivateScope Scope(CGF);
5365 auto IPriv = Privates.begin();
5367 for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
5368 const auto *RHSVar =
5369 cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
5370 Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() {
5371 return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
5373 const auto *LHSVar =
5374 cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
5375 Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() {
5376 return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
5378 QualType PrivTy = (*IPriv)->getType();
5379 if (PrivTy->isVariablyModifiedType()) {
5380 // Get array size and emit VLA type.
// The VLA size was stashed as an extra void* slot in RedList; load it and
// bind it to the VLA's size expression before emitting the type.
5383 CGF.Builder.CreateConstArrayGEP(LHS, Idx, CGF.getPointerSize());
5384 llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
5385 const VariableArrayType *VLA =
5386 CGF.getContext().getAsVariableArrayType(PrivTy);
5387 const auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
5388 CodeGenFunction::OpaqueValueMapping OpaqueMap(
5389 CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
5390 CGF.EmitVariablyModifiedType(PrivTy);
// Second pass: emit each combiner now that all operands are remapped.
5394 IPriv = Privates.begin();
5395 auto ILHS = LHSExprs.begin();
5396 auto IRHS = RHSExprs.begin();
5397 for (const Expr *E : ReductionOps) {
5398 if ((*IPriv)->getType()->isArrayType()) {
5399 // Emit reduction for array section.
5400 const auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5401 const auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5402 EmitOMPAggregateReduction(
5403 CGF, (*IPriv)->getType(), LHSVar, RHSVar,
5404 [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5405 emitReductionCombiner(CGF, E);
5408 // Emit reduction for array subscript or single variable.
5409 emitReductionCombiner(CGF, E);
5415 Scope.ForceCleanup();
5416 CGF.FinishFunction();
/// Emits the combiner for a single reduction item: dispatches to the
/// element-wise aggregate loop for array-typed privates, or emits the
/// combiner expression directly otherwise.
/// \param ReductionOp Combiner expression (plain op or UDR call).
/// \param PrivateRef Private copy reference; its type selects the array path.
/// \param LHS/RHS References to the destination/source operands.
5420 void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
5421 const Expr *ReductionOp,
5422 const Expr *PrivateRef,
5423 const DeclRefExpr *LHS,
5424 const DeclRefExpr *RHS) {
5425 if (PrivateRef->getType()->isArrayType()) {
5426 // Emit reduction for array section.
5427 const auto *LHSVar = cast<VarDecl>(LHS->getDecl());
5428 const auto *RHSVar = cast<VarDecl>(RHS->getDecl());
5429 EmitOMPAggregateReduction(
5430 CGF, PrivateRef->getType(), LHSVar, RHSVar,
5431 [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5432 emitReductionCombiner(CGF, ReductionOp);
5435 // Emit reduction for array subscript or single variable.
5436 emitReductionCombiner(CGF, ReductionOp);
// NOTE(review): embedded original line numbers are non-contiguous — some
// source lines are elided in this excerpt; comments reflect the visible code.
/// Emits a full OpenMP reduction: builds the RedList of operand addresses,
/// the reduce_func helper, and a switch over the result of
/// __kmpc_reduce{_nowait} with a non-atomic branch (case 1) and an atomic
/// branch (case 2). If Options.SimpleReduction is set, only the plain
/// combiners are emitted with no runtime calls.
5440 void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
5441 ArrayRef<const Expr *> Privates,
5442 ArrayRef<const Expr *> LHSExprs,
5443 ArrayRef<const Expr *> RHSExprs,
5444 ArrayRef<const Expr *> ReductionOps,
5445 ReductionOptionsTy Options) {
5446 if (!CGF.HaveInsertPoint())
5449 bool WithNowait = Options.WithNowait;
5450 bool SimpleReduction = Options.SimpleReduction;
5452 // Next code should be emitted for reduction:
5454 // static kmp_critical_name lock = { 0 };
5456 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
5457 // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
5459 // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
5460 // *(Type<n>-1*)rhs[<n>-1]);
5464 // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
5465 // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
5466 // RedList, reduce_func, &<lock>)) {
5469 // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5471 // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5475 // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
5477 // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
5482 // if SimpleReduction is true, only the next code is generated:
5484 // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5487 ASTContext &C = CGM.getContext();
// Simple reduction: just run every combiner in order; no runtime involvement.
5489 if (SimpleReduction) {
5490 CodeGenFunction::RunCleanupsScope Scope(CGF);
5491 auto IPriv = Privates.begin();
5492 auto ILHS = LHSExprs.begin();
5493 auto IRHS = RHSExprs.begin();
5494 for (const Expr *E : ReductionOps) {
5495 emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
5496 cast<DeclRefExpr>(*IRHS));
5504 // 1. Build a list of reduction variables.
5505 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
5506 auto Size = RHSExprs.size();
5507 for (const Expr *E : Privates) {
5508 if (E->getType()->isVariablyModifiedType())
5509 // Reserve place for array size.
// RedList is a stack array of void* with one extra slot per VLA size.
5512 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
5513 QualType ReductionArrayTy =
5514 C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
5515 /*IndexTypeQuals=*/0);
5516 Address ReductionList =
5517 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
5518 auto IPriv = Privates.begin();
5520 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
5522 CGF.Builder.CreateConstArrayGEP(ReductionList, Idx, CGF.getPointerSize());
5523 CGF.Builder.CreateStore(
5524 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5525 CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
5527 if ((*IPriv)->getType()->isVariablyModifiedType()) {
5528 // Store array size.
// The size is smuggled through the void* slot via inttoptr; reduce_func
// reads it back with ptrtoint (see emitReductionFunction).
5530 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
5531 CGF.getPointerSize());
5532 llvm::Value *Size = CGF.Builder.CreateIntCast(
5534 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
5536 CGF.SizeTy, /*isSigned=*/false);
5537 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
5542 // 2. Emit reduce_func().
5543 llvm::Value *ReductionFn = emitReductionFunction(
5544 CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
5545 Privates, LHSExprs, RHSExprs, ReductionOps);
5547 // 3. Create static kmp_critical_name lock = { 0 };
5548 std::string Name = getName({"reduction"});
5549 llvm::Value *Lock = getCriticalRegionLock(Name);
5551 // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
5552 // RedList, reduce_func, &<lock>);
5553 llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
5554 llvm::Value *ThreadId = getThreadID(CGF, Loc);
5555 llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
5556 llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5557 ReductionList.getPointer(), CGF.VoidPtrTy);
5558 llvm::Value *Args[] = {
5559 IdentTLoc, // ident_t *<loc>
5560 ThreadId, // i32 <gtid>
5561 CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
5562 ReductionArrayTySize, // size_type sizeof(RedList)
5563 RL, // void *RedList
5564 ReductionFn, // void (*) (void *, void *) <reduce_func>
5565 Lock // kmp_critical_name *&<lock>
5567 llvm::Value *Res = CGF.EmitRuntimeCall(
5568 createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
5569 : OMPRTL__kmpc_reduce),
5572 // 5. Build switch(res)
5573 llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
5574 llvm::SwitchInst *SwInst =
5575 CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);
// Case 1: this thread won the reduction; combine non-atomically, then call
// __kmpc_end_reduce{_nowait} (attached via CommonActionTy below).
5579 // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5581 // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5583 llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
5584 SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
5585 CGF.EmitBlock(Case1BB);
5587 // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5588 llvm::Value *EndArgs[] = {
5589 IdentTLoc, // ident_t *<loc>
5590 ThreadId, // i32 <gtid>
5591 Lock // kmp_critical_name *&<lock>
5593 auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
5594 CodeGenFunction &CGF, PrePostActionTy &Action) {
5595 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
5596 auto IPriv = Privates.begin();
5597 auto ILHS = LHSExprs.begin();
5598 auto IRHS = RHSExprs.begin();
5599 for (const Expr *E : ReductionOps) {
5600 RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
5601 cast<DeclRefExpr>(*IRHS));
5607 RegionCodeGenTy RCG(CodeGen);
5608 CommonActionTy Action(
5609 nullptr, llvm::None,
5610 createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
5611 : OMPRTL__kmpc_end_reduce),
5613 RCG.setAction(Action);
5616 CGF.EmitBranch(DefaultBB);
// Case 2: another thread holds the partials; combine atomically into the
// original variables, per item.
5620 // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
5623 llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
5624 SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
5625 CGF.EmitBlock(Case2BB);
5627 auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
5628 CodeGenFunction &CGF, PrePostActionTy &Action) {
5629 auto ILHS = LHSExprs.begin();
5630 auto IRHS = RHSExprs.begin();
5631 auto IPriv = Privates.begin();
5632 for (const Expr *E : ReductionOps) {
5633 const Expr *XExpr = nullptr;
5634 const Expr *EExpr = nullptr;
5635 const Expr *UpExpr = nullptr;
5636 BinaryOperatorKind BO = BO_Comma;
// Pattern-match 'x = <update>' to extract the pieces needed for a simple
// atomic update (x, the rhs operand, and the binary opcode).
5637 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
5638 if (BO->getOpcode() == BO_Assign) {
5639 XExpr = BO->getLHS();
5640 UpExpr = BO->getRHS();
5643 // Try to emit update expression as a simple atomic.
5644 const Expr *RHSExpr = UpExpr;
5646 // Analyze RHS part of the whole expression.
5647 if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(
5648 RHSExpr->IgnoreParenImpCasts())) {
5649 // If this is a conditional operator, analyze its condition for
5650 // min/max reduction operator.
5651 RHSExpr = ACO->getCond();
5653 if (const auto *BORHS =
5654 dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
5655 EExpr = BORHS->getRHS();
5656 BO = BORHS->getOpcode();
5660 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5661 auto &&AtomicRedGen = [BO, VD,
5662 Loc](CodeGenFunction &CGF, const Expr *XExpr,
5663 const Expr *EExpr, const Expr *UpExpr) {
5664 LValue X = CGF.EmitLValue(XExpr);
5667 E = CGF.EmitAnyExpr(EExpr);
5668 CGF.EmitOMPAtomicSimpleUpdateExpr(
5669 X, E, BO, /*IsXLHSInRHSPart=*/true,
5670 llvm::AtomicOrdering::Monotonic, Loc,
// Fallback path when no native atomic is possible: materialize the old
// value of x into a temporary bound to VD, then re-evaluate the update.
5671 [&CGF, UpExpr, VD, Loc](RValue XRValue) {
5672 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
5673 PrivateScope.addPrivate(
5674 VD, [&CGF, VD, XRValue, Loc]() {
5675 Address LHSTemp = CGF.CreateMemTemp(VD->getType());
5676 CGF.emitOMPSimpleStore(
5677 CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
5678 VD->getType().getNonReferenceType(), Loc);
5681 (void)PrivateScope.Privatize();
5682 return CGF.EmitAnyExpr(UpExpr);
5685 if ((*IPriv)->getType()->isArrayType()) {
5686 // Emit atomic reduction for array section.
5687 const auto *RHSVar =
5688 cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5689 EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
5690 AtomicRedGen, XExpr, EExpr, UpExpr);
5692 // Emit atomic reduction for array subscript or single variable.
5693 AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
5696 // Emit as a critical region.
// When the update can't be expressed as a simple atomic, serialize the
// combiner inside a named critical region instead.
5697 auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
5698 const Expr *, const Expr *) {
5699 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
5700 std::string Name = RT.getName({"atomic_reduction"});
5701 RT.emitCriticalRegion(
5703 [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
5705 emitReductionCombiner(CGF, E);
5709 if ((*IPriv)->getType()->isArrayType()) {
5710 const auto *LHSVar =
5711 cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5712 const auto *RHSVar =
5713 cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5714 EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
5717 CritRedGen(CGF, nullptr, nullptr, nullptr);
5725 RegionCodeGenTy AtomicRCG(AtomicCodeGen);
5727 // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
5728 llvm::Value *EndArgs[] = {
5729 IdentTLoc, // ident_t *<loc>
5730 ThreadId, // i32 <gtid>
5731 Lock // kmp_critical_name *&<lock>
5733 CommonActionTy Action(nullptr, llvm::None,
5734 createRuntimeFunction(OMPRTL__kmpc_end_reduce),
5736 AtomicRCG.setAction(Action);
5742 CGF.EmitBranch(DefaultBB);
5743 CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
5746 /// Generates unique name for artificial threadprivate variables.
5747 /// Format is: <Prefix> "." <Decl_mangled_name> "_" "<Decl_start_loc_raw_enc>"
/// \param Prefix Category tag (e.g. "reduction", "reduction_size").
/// NOTE(review): embedded original line numbers are non-contiguous here; the
/// parameter list and return appear partially elided in this excerpt.
5748 static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
5750 SmallString<256> Buffer;
5751 llvm::raw_svector_ostream Out(Buffer);
5752 const clang::DeclRefExpr *DE;
5753 const VarDecl *D = ::getBaseDecl(Ref, DE);
5755 D = cast<VarDecl>(cast<DeclRefExpr>(Ref)->getDecl());
5756 D = D->getCanonicalDecl();
// Locals/params use the plain name; globals need the mangled name to be
// unique across translation units.
5757 std::string Name = CGM.getOpenMPRuntime().getName(
5758 {D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
// The raw source-location encoding disambiguates same-named declarations.
5759 Out << Prefix << Name << "_"
5760 << D->getCanonicalDecl()->getLocStart().getRawEncoding();
5764 /// Emits reduction initializer function:
5766 /// void @.red_init(void* %arg) {
5767 /// %0 = bitcast void* %arg to <type>*
5768 /// store <type> <init>, <type>* %0
/// \param RCG Reduction codegen helper describing all reduction items.
/// \param N Index of the reduction item this initializer serves.
/// \return The generated internal-linkage function.
5772 static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
5774 ReductionCodeGen &RCG, unsigned N) {
5775 ASTContext &C = CGM.getContext();
5776 FunctionArgList Args;
5777 ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5778 ImplicitParamDecl::Other);
5779 Args.emplace_back(&Param);
5780 const auto &FnInfo =
5781 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5782 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5783 std::string Name = CGM.getOpenMPRuntime().getName({"red_init", ""});
5784 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5785 Name, &CGM.getModule());
5786 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
5787 Fn->setDoesNotRecurse();
5788 CodeGenFunction CGF(CGM);
5789 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
// %arg is void**-typed storage holding the private item's address.
5790 Address PrivateAddr = CGF.EmitLoadOfPointer(
5791 CGF.GetAddrOfLocalVar(&Param),
5792 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5793 llvm::Value *Size = nullptr;
5794 // If the size of the reduction item is non-constant, load it from global
5795 // threadprivate variable.
5796 if (RCG.getSizes(N).second) {
5797 Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5798 CGF, CGM.getContext().getSizeType(),
5799 generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
5800 Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5801 CGM.getContext().getSizeType(), Loc);
5803 RCG.emitAggregateType(CGF, N, Size);
5805 // If initializer uses initializer from declare reduction construct, emit a
5806 // pointer to the address of the original reduction item (reuired by reduction
5808 if (RCG.usesReductionInitializer(N)) {
// The original (shared) item's address was stashed in an artificial
// threadprivate variable keyed by the same unique name.
5809 Address SharedAddr =
5810 CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5811 CGF, CGM.getContext().VoidPtrTy,
5812 generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
5813 SharedAddr = CGF.EmitLoadOfPointer(
5815 CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
5816 SharedLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
// Initializer does not reference the original item: pass a null lvalue.
5818 SharedLVal = CGF.MakeNaturalAlignAddrLValue(
5819 llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
5820 CGM.getContext().VoidPtrTy);
5822 // Emit the initializer:
5823 // %0 = bitcast void* %arg to <type>*
5824 // store <type> <init>, <type>* %0
5825 RCG.emitInitialization(CGF, N, PrivateAddr, SharedLVal,
5826 [](CodeGenFunction &) { return false; });
5827 CGF.FinishFunction();
5831 /// Emits reduction combiner function:
5833 /// void @.red_comb(void* %arg0, void* %arg1) {
5834 /// %lhs = bitcast void* %arg0 to <type>*
5835 /// %rhs = bitcast void* %arg1 to <type>*
5836 /// %2 = <ReductionOp>(<type>* %lhs, <type>* %rhs)
5837 /// store <type> %2, <type>* %lhs
/// \param RCG Reduction codegen helper; N selects the reduction item.
/// \param ReductionOp Combiner expression; LHS/RHS the operand references.
/// \param PrivateRef Private copy reference (selects array vs scalar path in
/// emitSingleReductionCombiner).
/// \return The generated internal-linkage function.
5841 static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
5843 ReductionCodeGen &RCG, unsigned N,
5844 const Expr *ReductionOp,
5845 const Expr *LHS, const Expr *RHS,
5846 const Expr *PrivateRef) {
5847 ASTContext &C = CGM.getContext();
5848 const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
5849 const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
5850 FunctionArgList Args;
5851 ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
5852 C.VoidPtrTy, ImplicitParamDecl::Other);
5853 ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5854 ImplicitParamDecl::Other);
5855 Args.emplace_back(&ParamInOut);
5856 Args.emplace_back(&ParamIn);
5857 const auto &FnInfo =
5858 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5859 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5860 std::string Name = CGM.getOpenMPRuntime().getName({"red_comb", ""});
5861 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5862 Name, &CGM.getModule());
5863 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
5864 Fn->setDoesNotRecurse();
5865 CodeGenFunction CGF(CGM);
5866 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
5867 llvm::Value *Size = nullptr;
5868 // If the size of the reduction item is non-constant, load it from global
5869 // threadprivate variable.
5870 if (RCG.getSizes(N).second) {
5871 Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5872 CGF, CGM.getContext().getSizeType(),
5873 generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
5874 Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5875 CGM.getContext().getSizeType(), Loc);
5877 RCG.emitAggregateType(CGF, N, Size);
5878 // Remap lhs and rhs variables to the addresses of the function arguments.
5879 // %lhs = bitcast void* %arg0 to <type>*
5880 // %rhs = bitcast void* %arg1 to <type>*
5881 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
5882 PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() {
5883 // Pull out the pointer to the variable.
5884 Address PtrAddr = CGF.EmitLoadOfPointer(
5885 CGF.GetAddrOfLocalVar(&ParamInOut),
5886 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5887 return CGF.Builder.CreateElementBitCast(
5888 PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
5890 PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() {
5891 // Pull out the pointer to the variable.
5892 Address PtrAddr = CGF.EmitLoadOfPointer(
5893 CGF.GetAddrOfLocalVar(&ParamIn),
5894 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5895 return CGF.Builder.CreateElementBitCast(
5896 PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
5898 PrivateScope.Privatize();
5899 // Emit the combiner body:
5900 // %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
5901 // store <type> %2, <type>* %lhs
5902 CGM.getOpenMPRuntime().emitSingleReductionCombiner(
5903 CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
5904 cast<DeclRefExpr>(RHS));
5905 CGF.FinishFunction();
5909 /// Emits reduction finalizer function:
5911 /// void @.red_fini(void* %arg) {
5912 /// %0 = bitcast void* %arg to <type>*
5913 /// <destroy>(<type>* %0)
/// \param RCG Reduction codegen helper; N selects the reduction item.
/// \return The generated function, or (per the early-exit below) nothing is
/// emitted when the item needs no cleanups.
5917 static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
5919 ReductionCodeGen &RCG, unsigned N) {
// Trivially-destructible items need no finalizer at all.
5920 if (!RCG.needCleanups(N))
5922 ASTContext &C = CGM.getContext();
5923 FunctionArgList Args;
5924 ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5925 ImplicitParamDecl::Other);
5926 Args.emplace_back(&Param);
5927 const auto &FnInfo =
5928 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5929 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5930 std::string Name = CGM.getOpenMPRuntime().getName({"red_fini", ""});
5931 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5932 Name, &CGM.getModule());
5933 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
5934 Fn->setDoesNotRecurse();
5935 CodeGenFunction CGF(CGM);
5936 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
// %arg is void**-typed storage holding the private item's address.
5937 Address PrivateAddr = CGF.EmitLoadOfPointer(
5938 CGF.GetAddrOfLocalVar(&Param),
5939 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5940 llvm::Value *Size = nullptr;
5941 // If the size of the reduction item is non-constant, load it from global
5942 // threadprivate variable.
5943 if (RCG.getSizes(N).second) {
5944 Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5945 CGF, CGM.getContext().getSizeType(),
5946 generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
5947 Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5948 CGM.getContext().getSizeType(), Loc);
5950 RCG.emitAggregateType(CGF, N, Size);
5951 // Emit the finalizer body:
5952 // <destroy>(<type>* %0)
5953 RCG.emitCleanups(CGF, N, PrivateAddr);
5954 CGF.FinishFunction();
// Emits the setup for OpenMP task reductions: builds an on-stack array of
// kmp_task_red_input_t descriptors (one per reduction item, each holding the
// shared address, size, and init/fini/comb callback pointers) and returns the
// taskgroup descriptor produced by __kmpc_task_reduction_init.
// NOTE(review): extraction artifact — leading integers are original line
// numbers and some lines are elided (the early 'return nullptr;' after the
// guard at 5961, the last RCG ctor argument at 5992, the GEP name argument at
// 6000, the flags-store address at 6047-6048, the 'else' around 6049, the
// tail of the Args array at 6055/6058, and closing braces).
5958 llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
5959 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
5960 ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
// Nothing to do without an insert point or without reduction items.
5961 if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
5964 // Build typedef struct:
5965 // kmp_task_red_input {
5966 // void *reduce_shar; // shared reduction item
5967 // size_t reduce_size; // size of data item
5968 // void *reduce_init; // data initialization routine
5969 // void *reduce_fini; // data finalization routine
5970 // void *reduce_comb; // data combiner routine
5971 // kmp_task_red_flags_t flags; // flags for additional info from compiler
5972 // } kmp_task_red_input_t;
5973 ASTContext &C = CGM.getContext();
5974 RecordDecl *RD = C.buildImplicitRecord("kmp_task_red_input_t");
5975 RD->startDefinition();
5976 const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5977 const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
5978 const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5979 const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5980 const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5981 const FieldDecl *FlagsFD = addFieldToRecordDecl(
5982 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
5983 RD->completeDefinition();
5984 QualType RDType = C.getRecordType(RD);
5985 unsigned Size = Data.ReductionVars.size();
5986 llvm::APInt ArraySize(/*numBits=*/64, Size);
5987 QualType ArrayRDType = C.getConstantArrayType(
5988 RDType, ArraySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
5989 // kmp_task_red_input_t .rd_input.[Size];
5990 Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
5991 ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionCopies,
// Fill one descriptor per reduction item.
5993 for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
5994 // kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
5995 llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
5996 llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
5997 llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
5998 TaskRedInput.getPointer(), Idxs,
5999 /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
6001 LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
6002 // ElemLVal.reduce_shar = &Shareds[Cnt];
6003 LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
6004 RCG.emitSharedLValue(CGF, Cnt);
6005 llvm::Value *CastedShared =
6006 CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer());
6007 CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
6008 RCG.emitAggregateType(CGF, Cnt);
6009 llvm::Value *SizeValInChars;
6010 llvm::Value *SizeVal;
6011 std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
6012 // We use delayed creation/initialization for VLAs, array sections and
6013 // custom reduction initializations. It is required because runtime does not
6014 // provide the way to pass the sizes of VLAs/array sections to
6015 // initializer/combiner/finalizer functions and does not pass the pointer to
6016 // original reduction item to the initializer. Instead threadprivate global
6017 // variables are used to store these values and use them in the functions.
// A non-null second size component marks a runtime-sized (VLA/section) item.
6018 bool DelayedCreation = !!SizeVal;
6019 SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
6020 /*isSigned=*/false);
6021 LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
6022 CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
6023 // ElemLVal.reduce_init = init;
6024 LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
6025 llvm::Value *InitAddr =
6026 CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
6027 CGF.EmitStoreOfScalar(InitAddr, InitLVal);
6028 DelayedCreation = DelayedCreation || RCG.usesReductionInitializer(Cnt);
6029 // ElemLVal.reduce_fini = fini;
6030 LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
// emitReduceFiniFunction returns nullptr when the item needs no cleanups.
6031 llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
6032 llvm::Value *FiniAddr = Fini
6033 ? CGF.EmitCastToVoidPtr(Fini)
6034 : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
6035 CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
6036 // ElemLVal.reduce_comb = comb;
6037 LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
6038 llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
6039 CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
6040 RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
6041 CGF.EmitStoreOfScalar(CombAddr, CombLVal);
6042 // ElemLVal.flags = 0;
6043 LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
// flags = 1 tells the runtime to use lazy (per-thread, on-demand) creation.
6044 if (DelayedCreation) {
6045 CGF.EmitStoreOfScalar(
6046 llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*IsSigned=*/true),
6049 CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
6051 // Build call void *__kmpc_task_reduction_init(int gtid, int num_data, void
6053 llvm::Value *Args[] = {
6054 CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
6056 llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
6057 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
// The returned value is the taskgroup reduction descriptor ("tg").
6059 return CGF.EmitRuntimeCall(
6060 createRuntimeFunction(OMPRTL__kmpc_task_reduction_init), Args);
// Stores per-item auxiliary data into artificial threadprivate globals so the
// separately-emitted init/comb/fini callbacks (which only receive void*
// arguments) can recover it: the runtime size for non-constant-sized items,
// and the original shared item's address when a custom initializer is used.
// NOTE(review): extraction artifact — the 'SourceLocation Loc' / 'unsigned N'
// parameter lines (6064/6066), the 'if (Sizes.second) {' guard at 6070, and
// the closing braces are elided from this view.
6063 void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
6065 ReductionCodeGen &RCG,
6067 auto Sizes = RCG.getSizes(N);
// NOTE(review): the elided guard (line 6070) presumably tests Sizes.second,
// so the wording "(Sizes.second = nullptr)" below reads inverted — confirm
// against the full source before relying on it.
6068 // Emit threadprivate global variable if the type is non-constant
6069 // (Sizes.second = nullptr).
6071 llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy,
6072 /*isSigned=*/false);
6073 Address SizeAddr = getAddrOfArtificialThreadPrivate(
6074 CGF, CGM.getContext().getSizeType(),
6075 generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
6076 CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
6078 // Store address of the original reduction item if custom initializer is used.
6079 if (RCG.usesReductionInitializer(N)) {
6080 Address SharedAddr = getAddrOfArtificialThreadPrivate(
6081 CGF, CGM.getContext().VoidPtrTy,
6082 generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
6083 CGF.Builder.CreateStore(
6084 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
6085 RCG.getSharedLValue(N).getPointer(), CGM.VoidPtrTy),
6086 SharedAddr, /*IsVolatile=*/false);
// Returns the address of the current thread's private copy of a task
// reduction item by calling __kmpc_task_reduction_get_th_data, preserving the
// shared lvalue's alignment on the result.
// NOTE(review): extraction artifact — the 'SourceLocation Loc' parameter line
// (6091), parts of the Args array (6098-6099, 6101), and the Address
// constructor wrapping the runtime call (6102) are elided from this view.
6090 Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
6092 llvm::Value *ReductionsPtr,
6093 LValue SharedLVal) {
6094 // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
6096 llvm::Value *Args[] = {
6097 CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
6100 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(SharedLVal.getPointer(),
6103 CGF.EmitRuntimeCall(
6104 createRuntimeFunction(OMPRTL__kmpc_task_reduction_get_th_data), Args),
6105 SharedLVal.getAlignment());
// Emits '#pragma omp taskwait' as a call to __kmpc_omp_taskwait and, when
// inside an OpenMP region, emits the untied-task switch point afterwards.
// NOTE(review): extraction artifact — the early 'return;' body after the
// guard (6111) and the closing brace are elided from this view.
6108 void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
6109 SourceLocation Loc) {
6110 if (!CGF.HaveInsertPoint())
6112 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
6114 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
6115 // Ignore return result until untied tasks are supported.
6116 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
// Untied tasks may be resumed by a different thread; record a resume point.
6117 if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
6118 Region->emitUntiedSwitch(CGF);
// Emits the body of a directive that needs no outlining (e.g. 'for',
// 'sections', 'atomic'): installs an InlinedOpenMPRegionRAII so the captured
// statement info reflects the inner region, then emits the body in place.
// NOTE(review): extraction artifact — the 'bool HasCancel' parameter line
// (6124), the early 'return;' (6126), and the closing brace are elided.
6121 void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
6122 OpenMPDirectiveKind InnerKind,
6123 const RegionCodeGenTy &CodeGen,
6125 if (!CGF.HaveInsertPoint())
6127 InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel);
6128 CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
6139 } // anonymous namespace
// Maps an OpenMP cancellation region kind to the runtime's RTCancelKind
// value passed as 'cncl_kind' to __kmpc_cancel / __kmpc_cancellationpoint.
// NOTE(review): extraction artifact — the 'else {' (6149), the closing brace,
// and the final 'return CancelKind;' are elided; the assert shows taskgroup
// is the only remaining possibility in the fall-through branch.
6141 static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
6142 RTCancelKind CancelKind = CancelNoreq;
6143 if (CancelRegion == OMPD_parallel)
6144 CancelKind = CancelParallel;
6145 else if (CancelRegion == OMPD_for)
6146 CancelKind = CancelLoop;
6147 else if (CancelRegion == OMPD_sections)
6148 CancelKind = CancelSections;
6150 assert(CancelRegion == OMPD_taskgroup);
6151 CancelKind = CancelTaskgroup;
// Emits '#pragma omp cancellation point': calls __kmpc_cancellationpoint and,
// if it returns non-zero, branches out of the construct through any active
// cleanups to the region's cancel destination.
// NOTE(review): extraction artifact — the early 'return;' (6160), the '// }'
// comment closer (6176), and the closing braces after 6186 are elided.
6156 void CGOpenMPRuntime::emitCancellationPointCall(
6157 CodeGenFunction &CGF, SourceLocation Loc,
6158 OpenMPDirectiveKind CancelRegion) {
6159 if (!CGF.HaveInsertPoint())
6161 // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
6162 // global_tid, kmp_int32 cncl_kind);
6163 if (auto *OMPRegionInfo =
6164 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
6165 // For 'cancellation point taskgroup', the task region info may not have a
6166 // cancel. This may instead happen in another adjacent task.
6167 if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
6168 llvm::Value *Args[] = {
6169 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
6170 CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
6171 // Ignore return result until untied tasks are supported.
6172 llvm::Value *Result = CGF.EmitRuntimeCall(
6173 createRuntimeFunction(OMPRTL__kmpc_cancellationpoint), Args);
6174 // if (__kmpc_cancellationpoint()) {
6175 // exit from construct;
6177 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
6178 llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
6179 llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
6180 CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
6181 CGF.EmitBlock(ExitBB);
6182 // exit from construct;
// Branch through cleanups so destructors/finalizers still run on cancel.
6183 CodeGenFunction::JumpDest CancelDest =
6184 CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
6185 CGF.EmitBranchThroughCleanup(CancelDest);
6186 CGF.EmitBlock(ContBB, /*IsFinished=*/true);
// Emits '#pragma omp cancel': wraps the __kmpc_cancel call in ThenGen so it
// can be guarded by an 'if' clause (emitOMPIfClause) or emitted directly
// (RegionCodeGenTy) when no condition is present.
// NOTE(review): extraction artifact — the 'const Expr *IfCond' parameter line
// (6192), the early 'return;' (6195), the lambda's closing '};' (6222-6223),
// the 'if (IfCond)'/'else' lines around 6224/6226, and the final braces are
// elided from this view.
6191 void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
6193 OpenMPDirectiveKind CancelRegion) {
6194 if (!CGF.HaveInsertPoint())
6196 // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
6197 // kmp_int32 cncl_kind);
6198 if (auto *OMPRegionInfo =
6199 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
// Captures by value: this lambda may run inside emitOMPIfClause's branches.
6200 auto &&ThenGen = [Loc, CancelRegion, OMPRegionInfo](CodeGenFunction &CGF,
6201 PrePostActionTy &) {
6202 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
6203 llvm::Value *Args[] = {
6204 RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
6205 CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
6206 // Ignore return result until untied tasks are supported.
6207 llvm::Value *Result = CGF.EmitRuntimeCall(
6208 RT.createRuntimeFunction(OMPRTL__kmpc_cancel), Args);
6209 // if (__kmpc_cancel()) {
6210 // exit from construct;
6212 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
6213 llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
6214 llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
6215 CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
6216 CGF.EmitBlock(ExitBB);
6217 // exit from construct;
6218 CodeGenFunction::JumpDest CancelDest =
6219 CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
6220 CGF.EmitBranchThroughCleanup(CancelDest);
6221 CGF.EmitBlock(ContBB, /*IsFinished=*/true);
6224 emitOMPIfClause(CGF, IfCond, ThenGen,
6225 [](CodeGenFunction &, PrePostActionTy &) {});
6227 RegionCodeGenTy ThenRCG(ThenGen);
// Public entry point for outlining a 'target' region; delegates to the helper
// so device-specific runtimes can override only this thin wrapper.
// NOTE(review): extraction artifact — the closing brace (6240) is elided.
6233 void CGOpenMPRuntime::emitTargetOutlinedFunction(
6234 const OMPExecutableDirective &D, StringRef ParentName,
6235 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
6236 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
6237 assert(!ParentName.empty() && "Invalid target region parent name!");
6238 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
6239 IsOffloadEntry, CodeGen);
// Outlines the captured statement of a 'target' directive into a function
// with a location-derived unique name, creates the region ID used by the
// offloading runtime, and registers the entry with the offload-entries
// manager.
// NOTE(review): extraction artifact — the DeviceID/FileID/Line declarations
// (6254-6257), the 'return;' after the IsOffloadEntry check (6278), the
// 'else' between the device and host ID branches (6295), and closing braces
// are elided from this view.
6242 void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
6243 const OMPExecutableDirective &D, StringRef ParentName,
6244 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
6245 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
6246 // Create a unique name for the entry function using the source location
6247 // information of the current target region. The name will be something like:
6249 // __omp_offloading_DD_FFFF_PP_lBB
6251 // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
6252 // mangled name of the function that encloses the target region and BB is the
6253 // line number of the target region.
6258 getTargetEntryUniqueInfo(CGM.getContext(), D.getLocStart(), DeviceID, FileID,
6260 SmallString<64> EntryFnName;
6262 llvm::raw_svector_ostream OS(EntryFnName);
6263 OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
6264 << llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
6267 const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
6269 CodeGenFunction CGF(CGM, true);
6270 CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
6271 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6273 OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);
6275 // If this target outline function is not an offload entry, we don't need to
6277 if (!IsOffloadEntry)
6280 // The target region ID is used by the runtime library to identify the current
6281 // target region, so it only has to be unique and not necessarily point to
6282 // anything. It could be the pointer to the outlined function that implements
6283 // the target region, but we aren't using that so that the compiler doesn't
6284 // need to keep that, and could therefore inline the host function if proven
6285 // worthwhile during optimization. On the other hand, if emitting code for the
6286 // device, the ID has to be the function address so that it can be retrieved
6287 // from the offloading entry and launched by the runtime library. We also mark
6288 // the outlined function to have external linkage in case we are emitting code
6289 // for the device, because these functions will be entry points to the device.
6291 if (CGM.getLangOpts().OpenMPIsDevice) {
6292 OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
6293 OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
6294 OutlinedFn->setDSOLocal(false);
// Host side: the ID is an otherwise-unused global whose address is unique.
6296 std::string Name = getName({EntryFnName, "region_id"});
6297 OutlinedFnID = new llvm::GlobalVariable(
6298 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
6299 llvm::GlobalValue::WeakAnyLinkage,
6300 llvm::Constant::getNullValue(CGM.Int8Ty), Name);
6303 // Register the information for the entry associated with this target region.
6304 OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
6305 DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
6306 OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
// Peels nested CompoundStmt wrappers by descending into the first child so
// callers can look for a directive directly nested in braces.
// NOTE(review): extraction artifact — the trailing 'return Body;' and closing
// brace (6313-6314) are elided. Also note only body_front() is inspected;
// siblings inside the compound statement are not searched.
6309 /// Discard all CompoundStmts intervening between two constructs.
6310 static const Stmt *ignoreCompoundStmts(const Stmt *Body) {
6311 while (const auto *CS = dyn_cast_or_null<CompoundStmt>(Body))
6312 Body = CS->body_front();
// NOTE(review): extraction artifact — several lines are elided (the signed
// cast arguments at 6344/6372, the 'return nullptr;' after 6381, closing
// braces). Host-only: asserts it never runs for device codegen.
6317 /// Emit the number of teams for a target directive. Inspect the num_teams
6318 /// clause associated with a teams construct combined or closely nested
6319 /// with the target directive.
6321 /// Emit a team of size one for directives such as 'target parallel' that
6322 /// have no associated teams construct.
6324 /// Otherwise, return nullptr.
6325 static llvm::Value *
6326 emitNumTeamsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
6327 CodeGenFunction &CGF,
6328 const OMPExecutableDirective &D) {
6329 assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
6330 "teams directive expected to be "
6331 "emitted only for the host!");
6333 CGBuilderTy &Bld = CGF.Builder;
6335 // If the target directive is combined with a teams directive:
6336 // Return the value in the num_teams clause, if any.
6337 // Otherwise, return 0 to denote the runtime default.
6338 if (isOpenMPTeamsDirective(D.getDirectiveKind())) {
6339 if (const auto *NumTeamsClause = D.getSingleClause<OMPNumTeamsClause>()) {
6340 CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
6341 llvm::Value *NumTeams = CGF.EmitScalarExpr(NumTeamsClause->getNumTeams(),
6342 /*IgnoreResultAssign*/ true);
6343 return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
6347 // The default value is 0.
6348 return Bld.getInt32(0);
6351 // If the target directive is combined with a parallel directive but not a
6352 // teams directive, start one team.
6353 if (isOpenMPParallelDirective(D.getDirectiveKind()))
6354 return Bld.getInt32(1);
6356 // If the current target region has a teams region enclosed, we need to get
6357 // the number of teams to pass to the runtime function call. This is done
6358 // by generating the expression in an inlined region. This is required because
6359 // the expression is captured in the enclosing target environment when the
6360 // teams directive is not combined with target.
6362 const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
6364 if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
6365 ignoreCompoundStmts(CS.getCapturedStmt()))) {
6366 if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
6367 if (const auto *NTE = TeamsDir->getSingleClause<OMPNumTeamsClause>()) {
// CGOpenMPInnerExprInfo lets the clause expression see the target captures.
6368 CGOpenMPInnerExprInfo CGInfo(CGF, CS);
6369 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6370 llvm::Value *NumTeams = CGF.EmitScalarExpr(NTE->getNumTeams());
6371 return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
6375 // If we have an enclosed teams directive but no num_teams clause we use
6376 // the default value 0.
6377 return Bld.getInt32(0);
6381 // No teams associated with the directive.
// NOTE(review): extraction artifact — elided lines include the signed cast
// arguments (6430, 6475), the NumThreadsVal assignment head (6439), parts of
// the CreateSelect ternary (6444/6447/6449), 'if (NumThreadsVal)' guard
// lines, the final 'return nullptr;' after 6484, and closing braces.
6385 /// Emit the number of threads for a target directive. Inspect the
6386 /// thread_limit clause associated with a teams construct combined or closely
6387 /// nested with the target directive.
6389 /// Emit the num_threads clause for directives such as 'target parallel' that
6390 /// have no associated teams construct.
6392 /// Otherwise, return nullptr.
6393 static llvm::Value *
6394 emitNumThreadsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
6395 CodeGenFunction &CGF,
6396 const OMPExecutableDirective &D) {
6397 assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
6398 "teams directive expected to be "
6399 "emitted only for the host!");
6401 CGBuilderTy &Bld = CGF.Builder;
6404 // If the target directive is combined with a teams directive:
6405 // Return the value in the thread_limit clause, if any.
6407 // If the target directive is combined with a parallel directive:
6408 // Return the value in the num_threads clause, if any.
6410 // If both clauses are set, select the minimum of the two.
6412 // If neither teams or parallel combined directives set the number of threads
6413 // in a team, return 0 to denote the runtime default.
6415 // If this is not a teams directive return nullptr.
6417 if (isOpenMPTeamsDirective(D.getDirectiveKind()) ||
6418 isOpenMPParallelDirective(D.getDirectiveKind())) {
6419 llvm::Value *DefaultThreadLimitVal = Bld.getInt32(0);
6420 llvm::Value *NumThreadsVal = nullptr;
6421 llvm::Value *ThreadLimitVal = nullptr;
6423 if (const auto *ThreadLimitClause =
6424 D.getSingleClause<OMPThreadLimitClause>()) {
6425 CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
6426 llvm::Value *ThreadLimit =
6427 CGF.EmitScalarExpr(ThreadLimitClause->getThreadLimit(),
6428 /*IgnoreResultAssign*/ true);
6429 ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty,
6433 if (const auto *NumThreadsClause =
6434 D.getSingleClause<OMPNumThreadsClause>()) {
6435 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
6436 llvm::Value *NumThreads =
6437 CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
6438 /*IgnoreResultAssign*/ true);
6440 Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*IsSigned=*/true);
6443 // Select the lesser of thread_limit and num_threads.
6445 ThreadLimitVal = ThreadLimitVal
6446 ? Bld.CreateSelect(Bld.CreateICmpSLT(NumThreadsVal,
6448 NumThreadsVal, ThreadLimitVal)
6451 // Set default value passed to the runtime if either teams or a target
6452 // parallel type directive is found but no clause is specified.
6453 if (!ThreadLimitVal)
6454 ThreadLimitVal = DefaultThreadLimitVal;
6456 return ThreadLimitVal;
6459 // If the current target region has a teams region enclosed, we need to get
6460 // the thread limit to pass to the runtime function call. This is done
6461 // by generating the expression in an inlined region. This is required because
6462 // the expression is captured in the enclosing target environment when the
6463 // teams directive is not combined with target.
6465 const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
6467 if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
6468 ignoreCompoundStmts(CS.getCapturedStmt()))) {
6469 if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
6470 if (const auto *TLE = TeamsDir->getSingleClause<OMPThreadLimitClause>()) {
6471 CGOpenMPInnerExprInfo CGInfo(CGF, CS);
6472 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6473 llvm::Value *ThreadLimit = CGF.EmitScalarExpr(TLE->getThreadLimit());
6474 return CGF.Builder.CreateIntCast(ThreadLimit, CGF.Int32Ty,
6478 // If we have an enclosed teams directive but no thread_limit clause we
6479 // use the default value 0.
6480 return CGF.Builder.getInt32(0);
6484 // No teams associated with the directive.
6489 LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
6491 // Utility to handle information from clauses associated with a given
6492 // construct that use mappable expressions (e.g. 'map' clause, 'to' clause).
6493 // It provides a convenient interface to obtain the information and generate
6494 // code for that information.
6495 class MappableExprsHandler {
6497 /// Values for bit flags used to specify the mapping type for
6499 enum OpenMPOffloadMappingFlags : uint64_t {
6502 /// Allocate memory on the device and move data from host to device.
6504 /// Allocate memory on the device and move data from device to host.
6505 OMP_MAP_FROM = 0x02,
6506 /// Always perform the requested mapping action on the element, even
6507 /// if it was already mapped before.
6508 OMP_MAP_ALWAYS = 0x04,
6509 /// Delete the element from the device environment, ignoring the
6510 /// current reference count associated with the element.
6511 OMP_MAP_DELETE = 0x08,
6512 /// The element being mapped is a pointer-pointee pair; both the
6513 /// pointer and the pointee should be mapped.
6514 OMP_MAP_PTR_AND_OBJ = 0x10,
6515 /// This flags signals that the base address of an entry should be
6516 /// passed to the target kernel as an argument.
6517 OMP_MAP_TARGET_PARAM = 0x20,
6518 /// Signal that the runtime library has to return the device pointer
6519 /// in the current position for the data being mapped. Used when we have the
6520 /// use_device_ptr clause.
6521 OMP_MAP_RETURN_PARAM = 0x40,
6522 /// This flag signals that the reference being passed is a pointer to
6524 OMP_MAP_PRIVATE = 0x80,
6525 /// Pass the element to the device by value.
6526 OMP_MAP_LITERAL = 0x100,
6528 OMP_MAP_IMPLICIT = 0x200,
6529 /// The 16 MSBs of the flags indicate whether the entry is member of some
6531 OMP_MAP_MEMBER_OF = 0xffff000000000000,
6532 LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ OMP_MAP_MEMBER_OF),
6535 /// Class that associates information with a base pointer to be passed to the
6536 /// runtime library.
6537 class BasePointerInfo {
6538 /// The base pointer.
6539 llvm::Value *Ptr = nullptr;
6540 /// The base declaration that refers to this device pointer, or null if
6542 const ValueDecl *DevPtrDecl = nullptr;
6545 BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
6546 : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
6547 llvm::Value *operator*() const { return Ptr; }
6548 const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
6549 void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
6552 using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
6553 using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
6554 using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;
6556 /// Map between a struct and its lowest & highest elements which have been
6558 /// [ValueDecl *] --> {LE(FieldIndex, Pointer),
6559 /// HE(FieldIndex, Pointer)}
6560 struct StructRangeInfoTy {
6561 std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> LowestElem = {
6562 0, Address::invalid()};
6563 std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> HighestElem = {
6564 0, Address::invalid()};
6565 Address Base = Address::invalid();
6569 /// Kind that defines how a device pointer has to be returned.
6571 OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
6572 OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
6573 OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
6574 bool ReturnDevicePointer = false;
6575 bool IsImplicit = false;
6577 MapInfo() = default;
6579 OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
6580 OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
6581 bool ReturnDevicePointer, bool IsImplicit)
6582 : Components(Components), MapType(MapType),
6583 MapTypeModifier(MapTypeModifier),
6584 ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit) {}
6587 /// If use_device_ptr is used on a pointer which is a struct member and there
6588 /// is no map information about it, then emission of that entry is deferred
6589 /// until the whole struct has been processed.
6590 struct DeferredDevicePtrEntryTy {
6591 const Expr *IE = nullptr;
6592 const ValueDecl *VD = nullptr;
6594 DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD)
6598 /// Directive from where the map clauses were extracted.
6599 const OMPExecutableDirective &CurDir;
6601 /// Function the directive is being generated for.
6602 CodeGenFunction &CGF;
6604 /// Set of all first private variables in the current directive.
6605 llvm::SmallPtrSet<const VarDecl *, 8> FirstPrivateDecls;
6607 /// Map between device pointer declarations and their expression components.
6608 /// The key value for declarations in 'this' is null.
6611 SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
// Computes the byte size of the storage a map clause expression refers to.
// Array sections are special-cased: their size is length * element size
// rather than the size of the (builtin) section expression's type.
// NOTE(review): extraction artifact — the 'else {' lines, a closing brace,
// the 'return ElemSize;' for the single-element case (around 6646), the
// 'LengthVal =' assignment head at 6649, and the final brace are elided.
6614 llvm::Value *getExprTypeSize(const Expr *E) const {
6615 QualType ExprTy = E->getType().getCanonicalType();
6617 // Reference types are ignored for mapping purposes.
6618 if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
6619 ExprTy = RefTy->getPointeeType().getCanonicalType();
6621 // Given that an array section is considered a built-in type, we need to
6622 // do the calculation based on the length of the section instead of relying
6623 // on CGF.getTypeSize(E->getType()).
6624 if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
6625 QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
6626 OAE->getBase()->IgnoreParenImpCasts())
6627 .getCanonicalType();
6629 // If there is no length associated with the expression, that means we
6630 // are using the whole length of the base.
6631 if (!OAE->getLength() && OAE->getColonLoc().isValid())
6632 return CGF.getTypeSize(BaseTy);
6634 llvm::Value *ElemSize;
6635 if (const auto *PTy = BaseTy->getAs<PointerType>()) {
6636 ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
6638 const auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
6639 assert(ATy && "Expecting array type if not a pointer type.");
6640 ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
6643 // If we don't have a length at this point, that is because we have an
6644 // array section with a single element.
6645 if (!OAE->getLength())
// Section with an explicit length: size = length * element size (NUW mul).
6648 llvm::Value *LengthVal = CGF.EmitScalarExpr(OAE->getLength());
6650 CGF.Builder.CreateIntCast(LengthVal, CGF.SizeTy, /*isSigned=*/false);
6651 return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
6653 return CGF.getTypeSize(ExprTy);
// NOTE(review): extraction artifact — the 'switch (MapType) {' opener (6666),
// the OMPC_MAP_to case and the 'break;'s between cases, the switch's closing
// brace, the 'if (AddPtrFlag)' guard before 6691, and the final 'return
// Bits;' are elided. The fall-through-looking cases below are separated by
// elided breaks in the full source — do not read them as intentional
// fallthrough without checking.
6656 /// Return the corresponding bits for a given map clause modifier. Add
6657 /// a flag marking the map as a pointer if requested. Add a flag marking the
6658 /// map as the first one of a series of maps that relate to the same map
6660 OpenMPOffloadMappingFlags getMapTypeBits(OpenMPMapClauseKind MapType,
6661 OpenMPMapClauseKind MapTypeModifier,
6662 bool IsImplicit, bool AddPtrFlag,
6663 bool AddIsTargetParamFlag) const {
6664 OpenMPOffloadMappingFlags Bits =
6665 IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE;
6667 case OMPC_MAP_alloc:
6668 case OMPC_MAP_release:
6669 // alloc and release is the default behavior in the runtime library, i.e.
6670 // if we don't pass any bits alloc/release that is what the runtime is
6671 // going to do. Therefore, we don't need to signal anything for these two
6678 Bits |= OMP_MAP_FROM;
6680 case OMPC_MAP_tofrom:
6681 Bits |= OMP_MAP_TO | OMP_MAP_FROM;
6683 case OMPC_MAP_delete:
6684 Bits |= OMP_MAP_DELETE;
6686 case OMPC_MAP_always:
6687 case OMPC_MAP_unknown:
6688 llvm_unreachable("Unexpected map type!");
6691 Bits |= OMP_MAP_PTR_AND_OBJ;
6692 if (AddIsTargetParamFlag)
6693 Bits |= OMP_MAP_TARGET_PARAM;
6694 if (MapTypeModifier == OMPC_MAP_always)
6695 Bits |= OMP_MAP_ALWAYS;
// NOTE(review): extraction artifact — elided lines include the '!OASE' early
// 'return false;' after 6704, the 'return false;' after 6709, the 'if
// (!Length) {' opener before 6718, the 'return true;' after 6725, the
// closing brace of that branch, and the final function brace.
6699 /// Return true if the provided expression is a final array section. A
6700 /// final array section, is one whose length can't be proved to be one.
6701 bool isFinalArraySectionExpression(const Expr *E) const {
6702 const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
6704 // It is not an array section and therefore not a unity-size one.
6708 // An array section with no colon always refers to a single element.
6709 if (OASE->getColonLoc().isInvalid())
6712 const Expr *Length = OASE->getLength();
6714 // If we don't have a length we have to check if the array has size 1
6715 // for this dimension. Also, we should always expect a length if the
6716 // base type is pointer.
6718 QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
6719 OASE->getBase()->IgnoreParenImpCasts())
6720 .getCanonicalType();
6721 if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
6722 return ATy->getSize().getSExtValue() != 1;
6723 // If we don't have a constant dimension length, we have to consider
6724 // the current section as having any size, so it is not necessarily
6725 // unitary. If it happens to be unity size, that's the user's fault.
6729 // Check if the length evaluates to 1.
6730 llvm::APSInt ConstLength;
6731 if (!Length->EvaluateAsInt(ConstLength, CGF.getContext()))
6732 return true; // Can have more than size 1.
6734 return ConstLength.getSExtValue() != 1;
  /// Generate the base pointers, section pointers, sizes and map type
  /// bits for the provided map type, map modifier, and expression components.
  /// \a IsFirstComponentList should be set to true if the provided set of
  /// components is the first associated with a capture.
  /// Results are appended to \a BasePointers / \a Pointers / \a Sizes /
  /// \a Types; \a PartialStruct accumulates the field range when individual
  /// members of a struct are mapped.
  void generateInfoForComponentList(
      OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
      OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
      MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
      MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
      StructRangeInfoTy &PartialStruct, bool IsFirstComponentList,
      bool IsImplicit) const {
    // The following summarizes what has to be generated for each map and the
    // types below. The generated information is expressed in this order:
    // base pointer, section pointer, size, flags
    // (to add to the ones that come from the map type and modifier).
    //
    // &d, &d, sizeof(double), TARGET_PARAM | TO | FROM
    // &i, &i, 100*sizeof(int), TARGET_PARAM | TO | FROM
    // &i(=&i[0]), &i[1], 23*sizeof(int), TARGET_PARAM | TO | FROM
    // &p, &p, sizeof(float*), TARGET_PARAM | TO | FROM
    // p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM
    // &s, &s, sizeof(S2), TARGET_PARAM | TO | FROM
    // &s, &(s.i), sizeof(int), TARGET_PARAM | TO | FROM
    // &s, &(s.s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
    // &s, &(s.p), sizeof(double*), TARGET_PARAM | TO | FROM
    //
    // map(to: s.p[:22])
    // &s, &(s.p), sizeof(double*), TARGET_PARAM (*)
    // &s, &(s.p), sizeof(double*), MEMBER_OF(1) (**)
    // &(s.p), &(s.p[0]), 22*sizeof(double),
    //   MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
    // (*) alloc space for struct members, only this is a target parameter
    // (**) map the pointer (nothing to be mapped in this example) (the compiler
    //      optimizes this entry out, same in the examples below)
    // (***) map the pointee (map: to)
    //
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM | TO | FROM
    //
    // map(from: s.ps->s.i)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    //
    // map(to: s.ps->ps)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | TO
    //
    // map(s.ps->ps->ps)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
    //
    // map(to: s.ps->ps->s.f[:22])
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
    //
    // &ps, &ps, sizeof(S2*), TARGET_PARAM | TO | FROM
    // ps, &(ps->i), sizeof(int), TARGET_PARAM | TO | FROM
    // ps, &(ps->s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
    // ps, &(ps->p), sizeof(double*), TARGET_PARAM | FROM
    //
    // map(to: ps->p[:22])
    // ps, &(ps->p), sizeof(double*), TARGET_PARAM
    // ps, &(ps->p), sizeof(double*), MEMBER_OF(1)
    // &(ps->p), &(ps->p[0]), 22*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | TO
    //
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM | TO | FROM
    //
    // map(from: ps->ps->s.i)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    //
    // map(from: ps->ps->ps)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    //
    // map(ps->ps->ps->ps)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
    //
    // map(to: ps->ps->ps->s.f[:22])
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
    //
    // map(to: s.f[:22]) map(from: s.p[:33])
    // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1) +
    //     sizeof(double*) (**), TARGET_PARAM
    // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | TO
    // &s, &(s.p), sizeof(double*), MEMBER_OF(1)
    // &(s.p), &(s.p[0]), 33*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    // (*) allocate contiguous space needed to fit all mapped members even if
    //     we allocate space for members not mapped (in this example,
    //     s.f[22..49] and s.s are not mapped, yet we must allocate space for
    //     them as well because they fall between &s.f[0] and &s.p)
    //
    // map(from: s.f[:22]) map(to: ps->p[:33])
    // &s, &(s.f[0]), 22*sizeof(float), TARGET_PARAM | FROM
    // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->p), sizeof(double*), MEMBER_OF(2) (*)
    // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(2) | PTR_AND_OBJ | TO
    // (*) the struct this entry pertains to is the 2nd element in the list of
    //     arguments, hence MEMBER_OF(2)
    //
    // map(from: s.f[:22], s.s) map(to: ps->p[:33])
    // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1), TARGET_PARAM
    // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | FROM
    // &s, &(s.s), sizeof(struct S1), MEMBER_OF(1) | FROM
    // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->p), sizeof(double*), MEMBER_OF(4) (*)
    // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(4) | PTR_AND_OBJ | TO
    // (*) the struct this entry pertains to is the 4th element in the list
    //     of arguments, hence MEMBER_OF(4)

    // Track if the map information being generated is the first for a capture.
    bool IsCaptureFirstInfo = IsFirstComponentList;
    bool IsLink = false; // Is this variable a "declare target link"?

    // Scan the components from the base to the complete expression.
    auto CI = Components.rbegin();
    auto CE = Components.rend();

    // Track if the map information being generated is the first for a list of
    // components.
    bool IsExpressionFirstInfo = true;
    Address BP = Address::invalid();

    if (isa<MemberExpr>(I->getAssociatedExpression())) {
      // The base is the 'this' pointer. The content of the pointer is going
      // to be the base of the field being mapped.
      BP = CGF.LoadCXXThisAddress();
      // The base is the reference to the variable.
      BP = CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();
      if (const auto *VD =
              dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
        if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
                OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
          if (*Res == OMPDeclareTargetDeclAttr::MT_Link) {
            // "declare target link" variables are mapped through their
            // runtime-created link pointer, not their own address.
            BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);

      // If the variable is a pointer and is being dereferenced (i.e. is not
      // the last component), the base has to be the pointer itself, not its
      // reference. References are ignored for mapping purposes.
          I->getAssociatedDeclaration()->getType().getNonReferenceType();
      if (Ty->isAnyPointerType() && std::next(I) != CE) {
        BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());

        // We do not need to generate individual map information for the
        // pointer, it can be associated with the combined storage.

    // Track whether a component of the list should be marked as MEMBER_OF some
    // combined entry (for partial structs). Only the first PTR_AND_OBJ entry
    // in a component list should be marked as MEMBER_OF, all subsequent entries
    // do not belong to the base struct. E.g.
    //
    // s.ps->ps->ps->f[:]
    //
    // ps(1) is a member pointer, ps(2) is a pointee of ps(1), so it is a
    // PTR_AND_OBJ entry; the PTR is ps(1), so MEMBER_OF the base struct. ps(3)
    // is the pointee of ps(2) which is not member of struct s, so it should not
    // be marked as such (it is still PTR_AND_OBJ).
    // The variable is initialized to false so that PTR_AND_OBJ entries which
    // are not struct members are not considered (e.g. array of pointers to
    // data).
    bool ShouldBeMemberOf = false;

    // Variable keeping track of whether or not we have encountered a component
    // in the component list which is a member expression. Useful when we have a
    // pointer or a final array section, in which case it is the previous
    // component in the list which tells us whether we have a member expression.
    //
    // While processing the final array section "[:]" it is "f" which tells us
    // whether we are dealing with a member of a declared struct.
    const MemberExpr *EncounteredME = nullptr;

    for (; I != CE; ++I) {
      // If the current component is member of a struct (parent struct) mark it.
      if (!EncounteredME) {
        EncounteredME = dyn_cast<MemberExpr>(I->getAssociatedExpression());
        // If we encounter a PTR_AND_OBJ entry from now on it should be marked
        // as MEMBER_OF the parent struct.
          ShouldBeMemberOf = true;

      auto Next = std::next(I);

      // We need to generate the addresses and sizes if this is the last
      // component, if the component is a pointer or if it is an array section
      // whose length can't be proved to be one. If this is a pointer, it
      // becomes the base address for the following components.
      //
      // A final array section, is one whose length can't be proved to be one.
      bool IsFinalArraySection =
          isFinalArraySectionExpression(I->getAssociatedExpression());

      // Get information on whether the element is a pointer. Have to do a
      // special treatment for array sections given that they are built-in
      // types.
          dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
          (OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
                       ->isAnyPointerType()) ||
          I->getAssociatedExpression()->getType()->isAnyPointerType();

      if (Next == CE || IsPointer || IsFinalArraySection) {
        // If this is not the last component, we expect the pointer to be
        // associated with an array expression or member expression.
        assert((Next == CE ||
                isa<MemberExpr>(Next->getAssociatedExpression()) ||
                isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
                isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) &&
               "Unexpected expression");

            CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();
        llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());

        // If this component is a pointer inside the base struct then we don't
        // need to create any entry for it - it will be combined with the object
        // it is pointing to into a single PTR_AND_OBJ entry.
        bool IsMemberPointer =
            IsPointer && EncounteredME &&
            (dyn_cast<MemberExpr>(I->getAssociatedExpression()) ==
        if (!IsMemberPointer) {
          BasePointers.push_back(BP.getPointer());
          Pointers.push_back(LB.getPointer());
          Sizes.push_back(Size);

          // We need to add a pointer flag for each map that comes from the
          // same expression except for the first one. We also need to signal
          // this map is the first one that relates with the current capture
          // (there is a set of entries for each capture).
          OpenMPOffloadMappingFlags Flags = getMapTypeBits(
              MapType, MapTypeModifier, IsImplicit,
              !IsExpressionFirstInfo || IsLink, IsCaptureFirstInfo && !IsLink);

          if (!IsExpressionFirstInfo) {
            // If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
            // then we reset the TO/FROM/ALWAYS/DELETE flags.
              Flags &= ~(OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS |

            if (ShouldBeMemberOf) {
              // Set placeholder value MEMBER_OF=FFFF to indicate that the flag
              // should be later updated with the correct value of MEMBER_OF.
              Flags |= OMP_MAP_MEMBER_OF;
              // From now on, all subsequent PTR_AND_OBJ entries should not be
              // marked as MEMBER_OF.
              ShouldBeMemberOf = false;

          Types.push_back(Flags);

        // If we have encountered a member expression so far, keep track of the
        // mapped member. If the parent is "*this", then the value declaration
        // is nullptr.
        if (EncounteredME) {
          const auto *FD = dyn_cast<FieldDecl>(EncounteredME->getMemberDecl());
          unsigned FieldIndex = FD->getFieldIndex();

          // Update info about the lowest and highest elements for this struct
          // so emitCombinedEntry can compute a covering range later.
          if (!PartialStruct.Base.isValid()) {
            PartialStruct.LowestElem = {FieldIndex, LB};
            PartialStruct.HighestElem = {FieldIndex, LB};
            PartialStruct.Base = BP;
          } else if (FieldIndex < PartialStruct.LowestElem.first) {
            PartialStruct.LowestElem = {FieldIndex, LB};
          } else if (FieldIndex > PartialStruct.HighestElem.first) {
            PartialStruct.HighestElem = {FieldIndex, LB};

        // If we have a final array section, we are done with this expression.
        if (IsFinalArraySection)

        // The pointer becomes the base for the next element.

        IsExpressionFirstInfo = false;
        IsCaptureFirstInfo = false;
  /// Return the adjusted map modifiers if the declaration a capture refers to
  /// appears in a first-private clause. This is expected to be used only with
  /// directives that start with 'target'.
  /// Returns PRIVATE | TO for known firstprivate captures, TO | FROM
  /// otherwise.
  MappableExprsHandler::OpenMPOffloadMappingFlags
  getMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap) const {
    assert(Cap.capturesVariable() && "Expected capture by reference only!");

    // A first private variable captured by reference will use only the
    // 'private ptr' and 'map to' flag. Return the right flags if the captured
    // declaration is known as first-private in this handler.
    if (FirstPrivateDecls.count(Cap.getCapturedVar()))
      return MappableExprsHandler::OMP_MAP_PRIVATE |
             MappableExprsHandler::OMP_MAP_TO;
    // Default: map the captured reference both to and from the device.
    return MappableExprsHandler::OMP_MAP_TO |
           MappableExprsHandler::OMP_MAP_FROM;
  /// Encode \a Position (0-based index of the combined entry in the argument
  /// list) as a MEMBER_OF flag value; the runtime expects it as Position + 1.
  static OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position) {
    // Member of is given by the 16 MSB of the flag, so rotate by 48 bits.
    return static_cast<OpenMPOffloadMappingFlags>(((uint64_t)Position + 1)
  /// Replace the MEMBER_OF placeholder in \a Flags with the concrete
  /// \a MemberOfFlag computed for the combined entry.
  static void setCorrectMemberOfFlag(OpenMPOffloadMappingFlags &Flags,
                                     OpenMPOffloadMappingFlags MemberOfFlag) {
    // If the entry is PTR_AND_OBJ but has not been marked with the special
    // placeholder value 0xFFFF in the MEMBER_OF field, then it should not be
    // marked as MEMBER_OF.
    if ((Flags & OMP_MAP_PTR_AND_OBJ) &&
        ((Flags & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF))

    // Reset the placeholder value to prepare the flag for the assignment of
    // the proper MEMBER_OF value.
    Flags &= ~OMP_MAP_MEMBER_OF;
    Flags |= MemberOfFlag;
  /// Build a handler for the given directive, caching the firstprivate and
  /// is_device_ptr clause information needed during map-info generation.
  MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
      : CurDir(Dir), CGF(CGF) {
    // Extract firstprivate clause information.
    for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
      for (const auto *D : C->varlists())
        FirstPrivateDecls.insert(
            cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
    // Extract device pointer clause information.
    for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
      for (auto L : C->component_lists())
        DevPointersMap[L.first].push_back(L.second);
  /// Generate code for the combined entry if we have a partially mapped struct
  /// and take care of the mapping flags of the arguments corresponding to
  /// individual struct members.
  void emitCombinedEntry(MapBaseValuesArrayTy &BasePointers,
                         MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
                         MapFlagsArrayTy &Types, MapFlagsArrayTy &CurTypes,
                         const StructRangeInfoTy &PartialStruct) const {
    // Base is the base of the struct
    BasePointers.push_back(PartialStruct.Base.getPointer());
    // Pointer is the address of the lowest element
    llvm::Value *LB = PartialStruct.LowestElem.second.getPointer();
    Pointers.push_back(LB);
    // Size is (addr of {highest+1} element) - (addr of lowest element)
    llvm::Value *HB = PartialStruct.HighestElem.second.getPointer();
    llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(HB, /*Idx0=*/1);
    // Cast both ends to i8* so the pointer difference is in bytes.
    llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
    llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
    llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
    llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.SizeTy,
                                                  /*isSigned=*/false);
    Sizes.push_back(Size);
    // Map type is always TARGET_PARAM
    Types.push_back(OMP_MAP_TARGET_PARAM);
    // Remove TARGET_PARAM flag from the first element: the combined entry
    // above is now the single target parameter for this struct.
    (*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM;

    // All other current entries will be MEMBER_OF the combined entry
    // (except for PTR_AND_OBJ entries which do not have a placeholder value
    // 0xFFFF in the MEMBER_OF field).
    OpenMPOffloadMappingFlags MemberOfFlag =
        getMemberOfFlag(BasePointers.size() - 1);
    for (auto &M : CurTypes)
      setCorrectMemberOfFlag(M, MemberOfFlag);
  /// Generate all the base pointers, section pointers, sizes and map
  /// types for the extracted mappable expressions. Also, for each item that
  /// relates with a device pointer, a pair of the relevant declaration and
  /// index where it occurs is appended to the device pointers info array.
  void generateAllInfo(MapBaseValuesArrayTy &BasePointers,
                       MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
                       MapFlagsArrayTy &Types) const {
    // We have to process the component lists that relate with the same
    // declaration in a single chunk so that we can generate the map flags
    // correctly. Therefore, we organize all lists in a map.
    llvm::MapVector<const ValueDecl *, SmallVector<MapInfo, 8>> Info;

    // Helper function to fill the information map for the different supported
    // clauses. A null declaration keys the 'this' bucket.
    auto &&InfoGen = [&Info](
        const ValueDecl *D,
        OMPClauseMappableExprCommon::MappableExprComponentListRef L,
        OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier,
        bool ReturnDevicePointer, bool IsImplicit) {
      const ValueDecl *VD =
          D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
      Info[VD].emplace_back(L, MapType, MapModifier, ReturnDevicePointer,
                            IsImplicit);
    };

    // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
    for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
      for (const auto &L : C->component_lists()) {
        InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier(),
                /*ReturnDevicePointer=*/false, C->isImplicit());
    // 'to' and 'from' clauses (target update) reuse the map machinery with
    // fixed map kinds.
    for (const auto *C : this->CurDir.getClausesOfKind<OMPToClause>())
      for (const auto &L : C->component_lists()) {
        InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown,
                /*ReturnDevicePointer=*/false, C->isImplicit());
    for (const auto *C : this->CurDir.getClausesOfKind<OMPFromClause>())
      for (const auto &L : C->component_lists()) {
        InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown,
                /*ReturnDevicePointer=*/false, C->isImplicit());

    // Look at the use_device_ptr clause information and mark the existing map
    // entries as such. If there is no map information for an entry in the
    // use_device_ptr list, we create one with map type 'alloc' and zero size
    // section. It is the user fault if that was not mapped before. If there is
    // no map information and the pointer is a struct member, then we defer the
    // emission of that entry until the whole struct has been processed.
    llvm::MapVector<const ValueDecl *, SmallVector<DeferredDevicePtrEntryTy, 4>>
        DeferredInfo;

    // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
    for (const auto *C :
         this->CurDir.getClausesOfKind<OMPUseDevicePtrClause>()) {
      for (const auto &L : C->component_lists()) {
        assert(!L.second.empty() && "Not expecting empty list of components!");
        const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
        VD = cast<ValueDecl>(VD->getCanonicalDecl());
        const Expr *IE = L.second.back().getAssociatedExpression();
        // If the first component is a member expression, we have to look into
        // 'this', which maps to null in the map of map information. Otherwise
        // look directly for the information.
        auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);

        // We potentially have map information for this declaration already.
        // Look for the first set of components that refer to it.
        if (It != Info.end()) {
          auto CI = std::find_if(
              It->second.begin(), It->second.end(), [VD](const MapInfo &MI) {
                return MI.Components.back().getAssociatedDeclaration() == VD;
          // If we found a map entry, signal that the pointer has to be returned
          // and move on to the next declaration.
          if (CI != It->second.end()) {
            CI->ReturnDevicePointer = true;

        // We didn't find any match in our map information - generate a zero
        // size array section - if the pointer is a struct member we defer this
        // action until the whole struct has been processed.
        // FIXME: MSVC 2013 seems to require this-> to find member CGF.
        if (isa<MemberExpr>(IE)) {
          // Insert the pointer into Info to be processed by
          // generateInfoForComponentList. Because it is a member pointer
          // without a pointee, no entry will be generated for it, therefore
          // we need to generate one after the whole struct has been processed.
          // Nonetheless, generateInfoForComponentList must be called to take
          // the pointer into account for the calculation of the range of the
          // partial struct.
          InfoGen(nullptr, L.second, OMPC_MAP_unknown, OMPC_MAP_unknown,
                  /*ReturnDevicePointer=*/false, C->isImplicit());
          DeferredInfo[nullptr].emplace_back(IE, VD);
          llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
              this->CGF.EmitLValue(IE), IE->getExprLoc());
          BasePointers.emplace_back(Ptr, VD);
          Pointers.push_back(Ptr);
          Sizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
          Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);

    for (const auto &M : Info) {
      // We need to know when we generate information for the first component
      // associated with a capture, because the mapping flags depend on it.
      bool IsFirstComponentList = true;

      // Temporary versions of arrays, appended to the output arrays once the
      // whole declaration has been processed.
      MapBaseValuesArrayTy CurBasePointers;
      MapValuesArrayTy CurPointers;
      MapValuesArrayTy CurSizes;
      MapFlagsArrayTy CurTypes;
      StructRangeInfoTy PartialStruct;

      for (const MapInfo &L : M.second) {
        assert(!L.Components.empty() &&
               "Not expecting declaration with no component lists.");

        // Remember the current base pointer index.
        unsigned CurrentBasePointersIdx = CurBasePointers.size();
        // FIXME: MSVC 2013 seems to require this-> to find the member method.
        this->generateInfoForComponentList(
            L.MapType, L.MapTypeModifier, L.Components, CurBasePointers,
            CurPointers, CurSizes, CurTypes, PartialStruct,
            IsFirstComponentList, L.IsImplicit);

        // If this entry relates with a device pointer, set the relevant
        // declaration and add the 'return pointer' flag.
        if (L.ReturnDevicePointer) {
          assert(CurBasePointers.size() > CurrentBasePointersIdx &&
                 "Unexpected number of mapped base pointers.");

          const ValueDecl *RelevantVD =
              L.Components.back().getAssociatedDeclaration();
          assert(RelevantVD &&
                 "No relevant declaration related with device pointer??");

          CurBasePointers[CurrentBasePointersIdx].setDevicePtrDecl(RelevantVD);
          CurTypes[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
        IsFirstComponentList = false;

      // Append any pending zero-length pointers which are struct members and
      // used with use_device_ptr.
      auto CI = DeferredInfo.find(M.first);
      if (CI != DeferredInfo.end()) {
        for (const DeferredDevicePtrEntryTy &L : CI->second) {
          llvm::Value *BasePtr = this->CGF.EmitLValue(L.IE).getPointer();
          llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
              this->CGF.EmitLValue(L.IE), L.IE->getExprLoc());
          CurBasePointers.emplace_back(BasePtr, L.VD);
          CurPointers.push_back(Ptr);
          CurSizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
          // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
          // value MEMBER_OF=FFFF so that the entry is later updated with the
          // correct value of MEMBER_OF.
          CurTypes.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |

      // If there is an entry in PartialStruct it means we have a struct with
      // individual members mapped. Emit an extra combined entry.
      if (PartialStruct.Base.isValid())
        emitCombinedEntry(BasePointers, Pointers, Sizes, Types, CurTypes,

      // We need to append the results of this capture to what we already have.
      BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
      Pointers.append(CurPointers.begin(), CurPointers.end());
      Sizes.append(CurSizes.begin(), CurSizes.end());
      Types.append(CurTypes.begin(), CurTypes.end());
  /// Generate the base pointers, section pointers, sizes and map types
  /// associated to a given capture.
  void generateInfoForCapture(const CapturedStmt::Capture *Cap,
                              MapBaseValuesArrayTy &BasePointers,
                              MapValuesArrayTy &Pointers,
                              MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
                              StructRangeInfoTy &PartialStruct) const {
    assert(!Cap->capturesVariableArrayType() &&
           "Not expecting to generate map info for a variable array type!");

    // We need to know when we are generating information for the first
    // component associated with a capture, because the mapping flags depend
    // on it.
    bool IsFirstComponentList = true;

    // 'this' captures key the component-list lookup with a null declaration.
    const ValueDecl *VD = Cap->capturesThis()
                              : Cap->getCapturedVar()->getCanonicalDecl();

    // If this declaration appears in a is_device_ptr clause we just have to
    // pass the pointer by value. If it is a reference to a declaration, we just
    // pass its value.
    if (DevPointersMap.count(VD)) {
      BasePointers.emplace_back(Arg, VD);
      Pointers.push_back(Arg);
      Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
      Types.push_back(OMP_MAP_LITERAL | OMP_MAP_TARGET_PARAM);

    // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
    for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
      for (const auto &L : C->decl_component_lists(VD)) {
        assert(L.first == VD &&
               "We got information for the wrong declaration??");
        assert(!L.second.empty() &&
               "Not expecting declaration with no component lists.");
        generateInfoForComponentList(C->getMapType(), C->getMapTypeModifier(),
                                     L.second, BasePointers, Pointers, Sizes,
                                     Types, PartialStruct, IsFirstComponentList,
        IsFirstComponentList = false;
  /// Generate the base pointers, section pointers, sizes and map types
  /// associated with the declare target link variables.
  void generateInfoForDeclareTargetLink(MapBaseValuesArrayTy &BasePointers,
                                        MapValuesArrayTy &Pointers,
                                        MapValuesArrayTy &Sizes,
                                        MapFlagsArrayTy &Types) const {
    // Map other list items in the map clause which are not captured variables
    // but "declare target link" global variables.
    for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>()) {
      for (const auto &L : C->component_lists()) {
        // Only variables with the 'declare target link' attribute are handled
        // here; everything else is covered by the capture-based paths.
        const auto *VD = dyn_cast<VarDecl>(L.first);
        llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
            OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
        if (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link)
        StructRangeInfoTy PartialStruct;
        generateInfoForComponentList(
            C->getMapType(), C->getMapTypeModifier(), L.second, BasePointers,
            Pointers, Sizes, Types, PartialStruct,
            /*IsFirstComponentList=*/true, C->isImplicit());
        assert(!PartialStruct.Base.isValid() &&
               "No partial structs for declare target link expected.");
  /// Generate the default map information for a given capture \a CI,
  /// record field declaration \a RI and captured value \a CV.
  void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
                              const FieldDecl &RI, llvm::Value *CV,
                              MapBaseValuesArrayTy &CurBasePointers,
                              MapValuesArrayTy &CurPointers,
                              MapValuesArrayTy &CurSizes,
                              MapFlagsArrayTy &CurMapTypes) const {
    // Do the default mapping.
    if (CI.capturesThis()) {
      // Map the whole object 'this' points to, tofrom by default.
      CurBasePointers.push_back(CV);
      CurPointers.push_back(CV);
      const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
      CurSizes.push_back(CGF.getTypeSize(PtrTy->getPointeeType()));
      // Default map type.
      CurMapTypes.push_back(OMP_MAP_TO | OMP_MAP_FROM);
    } else if (CI.capturesVariableByCopy()) {
      CurBasePointers.push_back(CV);
      CurPointers.push_back(CV);
      if (!RI.getType()->isAnyPointerType()) {
        // We have to signal to the runtime captures passed by value that are
        // not pointers.
        CurMapTypes.push_back(OMP_MAP_LITERAL);
        CurSizes.push_back(CGF.getTypeSize(RI.getType()));
        // Pointers are implicitly mapped with a zero size and no flags
        // (other than first map that is added for all implicit maps).
        CurMapTypes.push_back(OMP_MAP_NONE);
        CurSizes.push_back(llvm::Constant::getNullValue(CGF.SizeTy));
      assert(CI.capturesVariable() && "Expected captured reference.");
      CurBasePointers.push_back(CV);
      CurPointers.push_back(CV);

      const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
      QualType ElementType = PtrTy->getPointeeType();
      // Map the referenced object, not the reference itself.
      CurSizes.push_back(CGF.getTypeSize(ElementType));
      // The map flags depend on whether the capture is firstprivate: PRIVATE |
      // TO for firstprivate captures, TO | FROM otherwise (see
      // getMapModifiersForPrivateClauses).
      CurMapTypes.push_back(getMapModifiersForPrivateClauses(CI));
    // Every default map produces a single argument which is a target parameter.
    CurMapTypes.back() |= OMP_MAP_TARGET_PARAM;

    // Add flag stating this is an implicit map.
    CurMapTypes.back() |= OMP_MAP_IMPLICIT;
/// Device IDs reserved by the OpenMP offloading runtime interface.
enum OpenMPOffloadingReservedDeviceIDs {
  /// Device ID if the device was not defined, runtime should get it
  /// from environment variables in the spec.
  OMP_DEVICEID_UNDEF = -1,
7490 } // anonymous namespace
7492 /// Emit the arrays used to pass the captures and map information to the
7493 /// offloading runtime library. If there is no map or capture information,
7494 /// return nullptr by reference.
7495 /// \param Info [out] receives the emitted base-pointer/pointer/size/map-type
7496 /// arrays and the pointer count; sizes become a private constant global when
7497 /// all are compile-time constants, otherwise a runtime-filled stack array.
// NOTE(review): listing is elided (gaps in source numbers, e.g. 7515->7519,
// 7535->7537, 7596->7600); several statements and closing braces are not
// visible here -- confirm against the full file before editing.
7496 emitOffloadingArrays(CodeGenFunction &CGF,
7497 MappableExprsHandler::MapBaseValuesArrayTy &BasePointers,
7498 MappableExprsHandler::MapValuesArrayTy &Pointers,
7499 MappableExprsHandler::MapValuesArrayTy &Sizes,
7500 MappableExprsHandler::MapFlagsArrayTy &MapTypes,
7501 CGOpenMPRuntime::TargetDataInfo &Info) {
7502 CodeGenModule &CGM = CGF.CGM;
7503 ASTContext &Ctx = CGF.getContext();
7505 // Reset the array information.
7506 Info.clearArrayInfo();
7507 Info.NumberOfPtrs = BasePointers.size();
7509 if (Info.NumberOfPtrs) {
7510 // Detect if we have any capture size requiring runtime evaluation of the
7511 // size so that a constant array could be eventually used.
7512 bool hasRuntimeEvaluationCaptureSize = false;
7513 for (llvm::Value *S : Sizes)
7514 if (!isa<llvm::Constant>(S)) {
7515 hasRuntimeEvaluationCaptureSize = true;
7519 llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
7520 QualType PointerArrayType =
7521 Ctx.getConstantArrayType(Ctx.VoidPtrTy, PointerNumAP, ArrayType::Normal,
7522 /*IndexTypeQuals=*/0);
// Stack temporaries for the base-pointer and pointer arrays; filled per
// element in the loop below.
7524 Info.BasePointersArray =
7525 CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
7526 Info.PointersArray =
7527 CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
7529 // If we don't have any VLA types or other types that require runtime
7530 // evaluation, we can use a constant array for the map sizes, otherwise we
7531 // need to fill up the arrays as we do for the pointers.
7532 if (hasRuntimeEvaluationCaptureSize) {
7533 QualType SizeArrayType = Ctx.getConstantArrayType(
7534 Ctx.getSizeType(), PointerNumAP, ArrayType::Normal,
7535 /*IndexTypeQuals=*/0);
7537 CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
7539 // We expect all the sizes to be constant, so we collect them to create
7540 // a constant array.
7541 SmallVector<llvm::Constant *, 16> ConstSizes;
7542 for (llvm::Value *S : Sizes)
7543 ConstSizes.push_back(cast<llvm::Constant>(S));
7545 auto *SizesArrayInit = llvm::ConstantArray::get(
7546 llvm::ArrayType::get(CGM.SizeTy, ConstSizes.size()), ConstSizes);
7547 std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
7548 auto *SizesArrayGbl = new llvm::GlobalVariable(
7549 CGM.getModule(), SizesArrayInit->getType(),
7550 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
7551 SizesArrayInit, Name);
7552 SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
7553 Info.SizesArray = SizesArrayGbl;
7556 // The map types are always constant so we don't need to generate code to
7557 // fill arrays. Instead, we create an array constant.
7558 SmallVector<uint64_t, 4> Mapping(MapTypes.size(), 0);
7559 llvm::copy(MapTypes, Mapping.begin());
7560 llvm::Constant *MapTypesArrayInit =
7561 llvm::ConstantDataArray::get(CGF.Builder.getContext(), Mapping);
7562 std::string MaptypesName =
7563 CGM.getOpenMPRuntime().getName({"offload_maptypes"});
7564 auto *MapTypesArrayGbl = new llvm::GlobalVariable(
7565 CGM.getModule(), MapTypesArrayInit->getType(),
7566 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
7567 MapTypesArrayInit, MaptypesName);
7568 MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
7569 Info.MapTypesArray = MapTypesArrayGbl;
// Populate the base-pointer/pointer (and, if needed, size) arrays one
// element at a time.
7571 for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
7572 llvm::Value *BPVal = *BasePointers[I];
7573 llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
7574 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
7575 Info.BasePointersArray, 0, I);
7576 BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
7577 BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
7578 Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
7579 CGF.Builder.CreateStore(BPVal, BPAddr);
// Record where device pointers live so 'use_device_ptr' privatization can
// find them later.
7581 if (Info.requiresDevicePointerInfo())
7582 if (const ValueDecl *DevVD = BasePointers[I].getDevicePtrDecl())
7583 Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr)
7585 llvm::Value *PVal = Pointers[I];
7586 llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
7587 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
7588 Info.PointersArray, 0, I);
7589 P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
7590 P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
7591 Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
7592 CGF.Builder.CreateStore(PVal, PAddr);
7594 if (hasRuntimeEvaluationCaptureSize) {
7595 llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
7596 llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs),
7600 Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
7601 CGF.Builder.CreateStore(
7602 CGF.Builder.CreateIntCast(Sizes[I], CGM.SizeTy, /*isSigned=*/true),
7608 /// Emit the arguments to be passed to the runtime library based on the
7609 /// arrays of pointers, sizes and map types.
7610 /// When no pointers were emitted (\p Info.NumberOfPtrs == 0) the arguments
7611 /// decay to typed null pointers so the runtime call is still well formed.
// NOTE(review): listing is elided (source numbers jump 7621->7625, 7629->7634);
// some GEP arguments and the else-branch header are not visible here.
7610 static void emitOffloadingArraysArgument(
7611 CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
7612 llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
7613 llvm::Value *&MapTypesArrayArg, CGOpenMPRuntime::TargetDataInfo &Info) {
7614 CodeGenModule &CGM = CGF.CGM;
7615 if (Info.NumberOfPtrs) {
// Decay each [N x T] array to a pointer to its first element.
7616 BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7617 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
7618 Info.BasePointersArray,
7619 /*Idx0=*/0, /*Idx1=*/0);
7620 PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7621 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
7625 SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7626 llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs), Info.SizesArray,
7627 /*Idx0=*/0, /*Idx1=*/0);
7628 MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7629 llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
7634 BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
7635 PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
7636 SizesArrayArg = llvm::ConstantPointerNull::get(CGM.SizeTy->getPointerTo());
7638 llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
// Emits the host-side launch sequence for a 'target' construct: fills the
// offloading arrays, calls __tgt_target{_teams}[_nowait], and falls back to
// the host-outlined function if offloading fails or is disabled (if-clause /
// no device code). With a 'depend' clause the launch is wrapped in a task.
// NOTE(review): listing is elided (many source-number gaps, e.g. 7647->7650,
// 7735-7736, 7740-7742, 7746-7747, 7758-7760, 7894->7898); conditions,
// argument-list entries and closing braces are missing from this view.
7642 void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
7643 const OMPExecutableDirective &D,
7644 llvm::Value *OutlinedFn,
7645 llvm::Value *OutlinedFnID,
7646 const Expr *IfCond, const Expr *Device) {
7647 if (!CGF.HaveInsertPoint())
7650 assert(OutlinedFn && "Invalid outlined function!");
7652 const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>();
7653 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
7654 const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
7655 auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
7656 PrePostActionTy &) {
7657 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
7659 emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen);
7661 CodeGenFunction::OMPTargetDataInfo InputInfo;
7662 llvm::Value *MapTypesArray = nullptr;
7663 // Fill up the pointer arrays and transfer execution to the device.
7664 auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
7665 &MapTypesArray, &CS, RequiresOuterTask,
7666 &CapturedVars](CodeGenFunction &CGF, PrePostActionTy &) {
7667 // On top of the arrays that were filled up, the target offloading call
7668 // takes as arguments the device id as well as the host pointer. The host
7669 // pointer is used by the runtime library to identify the current target
7670 // region, so it only has to be unique and not necessarily point to
7671 // anything. It could be the pointer to the outlined function that
7672 // implements the target region, but we aren't using that so that the
7673 // compiler doesn't need to keep that, and could therefore inline the host
7674 // function if proven worthwhile during optimization.
7676 // From this point on, we need to have an ID of the target region defined.
7677 assert(OutlinedFnID && "Invalid outlined function ID!");
7679 // Emit device ID if any.
7680 llvm::Value *DeviceID;
7682 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
7683 CGF.Int64Ty, /*isSigned=*/true);
7685 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
7688 // Emit the number of elements in the offloading arrays.
7689 llvm::Value *PointerNum =
7690 CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
7692 // Return value of the runtime offloading call.
7693 llvm::Value *Return;
7695 llvm::Value *NumTeams = emitNumTeamsForTargetDirective(*this, CGF, D);
7696 llvm::Value *NumThreads = emitNumThreadsForTargetDirective(*this, CGF, D);
7698 bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
7699 // The target region is an outlined function launched by the runtime
7700 // via calls __tgt_target() or __tgt_target_teams().
7702 // __tgt_target() launches a target region with one team and one thread,
7703 // executing a serial region. This master thread may in turn launch
7704 // more threads within its team upon encountering a parallel region,
7705 // however, no additional teams can be launched on the device.
7707 // __tgt_target_teams() launches a target region with one or more teams,
7708 // each with one or more threads. This call is required for target
7709 // constructs such as:
7711 // 'target' / 'teams'
7712 // 'target teams distribute parallel for'
7713 // 'target parallel'
7716 // Note that on the host and CPU targets, the runtime implementation of
7717 // these calls simply call the outlined function without forking threads.
7718 // The outlined functions themselves have runtime calls to
7719 // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
7720 // the compiler in emitTeamsCall() and emitParallelCall().
7722 // In contrast, on the NVPTX target, the implementation of
7723 // __tgt_target_teams() launches a GPU kernel with the requested number
7724 // of teams and threads so no additional calls to the runtime are required.
7726 // If we have NumTeams defined this means that we have an enclosed teams
7727 // region. Therefore we also expect to have NumThreads defined. These two
7728 // values should be defined in the presence of a teams directive,
7729 // regardless of having any clauses associated. If the user is using teams
7730 // but no clauses, these two values will be the default that should be
7731 // passed to the runtime library - a 32-bit integer with the value zero.
7732 assert(NumThreads && "Thread limit expression should be available along "
7733 "with number of teams.");
7734 llvm::Value *OffloadingArgs[] = {DeviceID,
7737 InputInfo.BasePointersArray.getPointer(),
7738 InputInfo.PointersArray.getPointer(),
7739 InputInfo.SizesArray.getPointer(),
7743 Return = CGF.EmitRuntimeCall(
7744 createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_teams_nowait
7745 : OMPRTL__tgt_target_teams),
7748 llvm::Value *OffloadingArgs[] = {DeviceID,
7751 InputInfo.BasePointersArray.getPointer(),
7752 InputInfo.PointersArray.getPointer(),
7753 InputInfo.SizesArray.getPointer(),
7755 Return = CGF.EmitRuntimeCall(
7756 createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_nowait
7757 : OMPRTL__tgt_target),
7761 // Check the error code and execute the host version if required.
7762 llvm::BasicBlock *OffloadFailedBlock =
7763 CGF.createBasicBlock("omp_offload.failed");
7764 llvm::BasicBlock *OffloadContBlock =
7765 CGF.createBasicBlock("omp_offload.cont");
// A nonzero return from __tgt_target* means the offload did not run; fall
// back to the host version of the region.
7766 llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
7767 CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
7769 CGF.EmitBlock(OffloadFailedBlock);
7770 if (RequiresOuterTask) {
// Re-capture: the task-based path may have clobbered CapturedVars.
7771 CapturedVars.clear();
7772 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
7774 emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars);
7775 CGF.EmitBranch(OffloadContBlock);
7777 CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
7780 // Notify that the host version must be executed.
7781 auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars,
7782 RequiresOuterTask](CodeGenFunction &CGF,
7783 PrePostActionTy &) {
7784 if (RequiresOuterTask) {
7785 CapturedVars.clear();
7786 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
7788 emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars);
7791 auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
7792 &CapturedVars, RequiresOuterTask,
7793 &CS](CodeGenFunction &CGF, PrePostActionTy &) {
7794 // Fill up the arrays with all the captured variables.
7795 MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
7796 MappableExprsHandler::MapValuesArrayTy Pointers;
7797 MappableExprsHandler::MapValuesArrayTy Sizes;
7798 MappableExprsHandler::MapFlagsArrayTy MapTypes;
7800 // Get mappable expression information.
7801 MappableExprsHandler MEHandler(D, CGF);
// Walk captures, capture-record fields and captured values in lockstep.
7803 auto RI = CS.getCapturedRecordDecl()->field_begin();
7804 auto CV = CapturedVars.begin();
7805 for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
7806 CE = CS.capture_end();
7807 CI != CE; ++CI, ++RI, ++CV) {
7808 MappableExprsHandler::MapBaseValuesArrayTy CurBasePointers;
7809 MappableExprsHandler::MapValuesArrayTy CurPointers;
7810 MappableExprsHandler::MapValuesArrayTy CurSizes;
7811 MappableExprsHandler::MapFlagsArrayTy CurMapTypes;
7812 MappableExprsHandler::StructRangeInfoTy PartialStruct;
7814 // VLA sizes are passed to the outlined region by copy and do not have map
7815 // information associated.
7816 if (CI->capturesVariableArrayType()) {
7817 CurBasePointers.push_back(*CV);
7818 CurPointers.push_back(*CV);
7819 CurSizes.push_back(CGF.getTypeSize(RI->getType()));
7820 // Copy to the device as an argument. No need to retrieve it.
7821 CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
7822 MappableExprsHandler::OMP_MAP_TARGET_PARAM);
7824 // If we have any information in the map clause, we use it, otherwise we
7825 // just do a default mapping.
7826 MEHandler.generateInfoForCapture(CI, *CV, CurBasePointers, CurPointers,
7827 CurSizes, CurMapTypes, PartialStruct);
7828 if (CurBasePointers.empty())
7829 MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers,
7830 CurPointers, CurSizes, CurMapTypes);
7832 // We expect to have at least an element of information for this capture.
7833 assert(!CurBasePointers.empty() &&
7834 "Non-existing map pointer for capture!");
7835 assert(CurBasePointers.size() == CurPointers.size() &&
7836 CurBasePointers.size() == CurSizes.size() &&
7837 CurBasePointers.size() == CurMapTypes.size() &&
7838 "Inconsistent map information sizes!");
7840 // If there is an entry in PartialStruct it means we have a struct with
7841 // individual members mapped. Emit an extra combined entry.
7842 if (PartialStruct.Base.isValid())
7843 MEHandler.emitCombinedEntry(BasePointers, Pointers, Sizes, MapTypes,
7844 CurMapTypes, PartialStruct);
7846 // We need to append the results of this capture to what we already have.
7847 BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
7848 Pointers.append(CurPointers.begin(), CurPointers.end());
7849 Sizes.append(CurSizes.begin(), CurSizes.end());
7850 MapTypes.append(CurMapTypes.begin(), CurMapTypes.end());
7852 // Map other list items in the map clause which are not captured variables
7853 // but "declare target link" global variables.
7854 MEHandler.generateInfoForDeclareTargetLink(BasePointers, Pointers, Sizes,
7857 TargetDataInfo Info;
7858 // Fill up the arrays and create the arguments.
7859 emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
7860 emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
7861 Info.PointersArray, Info.SizesArray,
7862 Info.MapTypesArray, Info);
7863 InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
7864 InputInfo.BasePointersArray =
7865 Address(Info.BasePointersArray, CGM.getPointerAlign());
7866 InputInfo.PointersArray =
7867 Address(Info.PointersArray, CGM.getPointerAlign());
7868 InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
7869 MapTypesArray = Info.MapTypesArray;
7870 if (RequiresOuterTask)
7871 CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
7873 emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
7876 auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
7877 CodeGenFunction &CGF, PrePostActionTy &) {
7878 if (RequiresOuterTask) {
7879 CodeGenFunction::OMPTargetDataInfo InputInfo;
7880 CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
7882 emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
7886 // If we have a target function ID it means that we need to support
7887 // offloading, otherwise, just execute on the host. We need to execute on
7888 // host regardless of the conditional in the if clause if, e.g., the user
7889 // does not specify target triples.
7892 emitOMPIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
7894 RegionCodeGenTy ThenRCG(TargetThenGen);
7898 RegionCodeGenTy ElseRCG(TargetElseGen);
// Recursively scans a statement tree for target-execution directives and
// emits the corresponding device functions (one EmitOMPTarget*DeviceFunction
// call per directive kind). Non-target executable directives recurse into
// their innermost captured statement; all other statements recurse into
// their children.
// NOTE(review): listing is elided (many gaps, e.g. 7928->7930, missing
// 'break;' lines between cases and several case labels such as OMPD_target);
// the visible switch is incomplete -- verify against the full file.
7903 void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
7904 StringRef ParentName) {
7908 // Codegen OMP target directives that offload compute to the device.
7909 bool RequiresDeviceCodegen =
7910 isa<OMPExecutableDirective>(S) &&
7911 isOpenMPTargetExecutionDirective(
7912 cast<OMPExecutableDirective>(S)->getDirectiveKind());
7914 if (RequiresDeviceCodegen) {
7915 const auto &E = *cast<OMPExecutableDirective>(S);
7919 getTargetEntryUniqueInfo(CGM.getContext(), E.getLocStart(), DeviceID,
7922 // Is this a target region that should not be emitted as an entry point? If
7923 // so just signal we are done with this target region.
7924 if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID,
7928 switch (E.getDirectiveKind()) {
7930 CodeGenFunction::EmitOMPTargetDeviceFunction(CGM, ParentName,
7931 cast<OMPTargetDirective>(E));
7933 case OMPD_target_parallel:
7934 CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
7935 CGM, ParentName, cast<OMPTargetParallelDirective>(E));
7937 case OMPD_target_teams:
7938 CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
7939 CGM, ParentName, cast<OMPTargetTeamsDirective>(E));
7941 case OMPD_target_teams_distribute:
7942 CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
7943 CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(E));
7945 case OMPD_target_teams_distribute_simd:
7946 CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
7947 CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(E));
7949 case OMPD_target_parallel_for:
7950 CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
7951 CGM, ParentName, cast<OMPTargetParallelForDirective>(E));
7953 case OMPD_target_parallel_for_simd:
7954 CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
7955 CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(E));
7957 case OMPD_target_simd:
7958 CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
7959 CGM, ParentName, cast<OMPTargetSimdDirective>(E));
7961 case OMPD_target_teams_distribute_parallel_for:
7962 CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
7964 cast<OMPTargetTeamsDistributeParallelForDirective>(E));
7966 case OMPD_target_teams_distribute_parallel_for_simd:
7968 EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
7970 cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
// Non-target directive kinds fall through to llvm_unreachable below: a
// directive classified as a target-execution directive must be one of the
// cases handled above.
7974 case OMPD_parallel_for:
7975 case OMPD_parallel_sections:
7977 case OMPD_parallel_for_simd:
7979 case OMPD_cancellation_point:
7981 case OMPD_threadprivate:
7989 case OMPD_taskyield:
7992 case OMPD_taskgroup:
7996 case OMPD_target_data:
7997 case OMPD_target_exit_data:
7998 case OMPD_target_enter_data:
7999 case OMPD_distribute:
8000 case OMPD_distribute_simd:
8001 case OMPD_distribute_parallel_for:
8002 case OMPD_distribute_parallel_for_simd:
8003 case OMPD_teams_distribute:
8004 case OMPD_teams_distribute_simd:
8005 case OMPD_teams_distribute_parallel_for:
8006 case OMPD_teams_distribute_parallel_for_simd:
8007 case OMPD_target_update:
8008 case OMPD_declare_simd:
8009 case OMPD_declare_target:
8010 case OMPD_end_declare_target:
8011 case OMPD_declare_reduction:
8013 case OMPD_taskloop_simd:
8015 llvm_unreachable("Unknown target directive for OpenMP device codegen.")
8020 if (const auto *E = dyn_cast<OMPExecutableDirective>(S)) {
8021 if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
8024 scanForTargetRegionsFunctions(
8025 E->getInnermostCapturedStmt()->getCapturedStmt(), ParentName);
8029 // If this is a lambda function, look into its body.
8030 if (const auto *L = dyn_cast<LambdaExpr>(S))
8033 // Keep looking for target regions recursively.
8034 for (const Stmt *II : S->children())
8035 scanForTargetRegionsFunctions(II, ParentName);
// Device-side handling of a function-decl global: scan its body for target
// regions, then report whether normal emission of the function itself should
// be skipped (true = skip). Host compilation always returns early.
8038 bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
8039 const auto *FD = cast<FunctionDecl>(GD.getDecl());
8041 // If emitting code for the host, we do not process FD here. Instead we do
8042 // the normal code generation.
8043 if (!CGM.getLangOpts().OpenMPIsDevice)
8046 // Try to detect target regions in the function.
8047 scanForTargetRegionsFunctions(FD->getBody(), CGM.getMangledName(GD));
8049 // Do not emit the function if it is not marked as declare target and was
8050 // not already emitted as a target function.
8050 return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD) &&
8051 AlreadyEmittedTargetFunctions.count(FD->getCanonicalDecl()) == 0;
// Device-side handling of a variable global: scan its ctors/dtors for target
// regions and report whether emission of the variable should be skipped
// (true = skip: not declare-target, or declare-target 'link').
8054 bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
8055 if (!CGM.getLangOpts().OpenMPIsDevice)
8058 // Check if there are Ctors/Dtors in this declaration and look for target
8059 // regions in it. We use the complete variant to produce the kernel name
8061 QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
8062 if (const auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
8063 for (const CXXConstructorDecl *Ctor : RD->ctors()) {
8064 StringRef ParentName =
8065 CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
8066 scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
8068 if (const CXXDestructorDecl *Dtor = RD->getDestructor()) {
8069 StringRef ParentName =
8070 CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
8071 scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
8075 // Do not emit the variable if it is not marked as declare target, or if it
8076 // is declare target 'link' (the link pointer is emitted instead).
8076 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
8077 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
8078 cast<VarDecl>(GD.getDecl()));
8079 return !Res || *Res == OMPDeclareTargetDeclAttr::MT_Link;
// Registers a declare-target global variable with the offload entries
// manager, computing its entry name/size/flags/linkage according to the
// declare-target map type ('to' vs 'link').
// NOTE(review): listing is elided (gaps 8086->8089, 8113->8116, etc.); the
// switch header on the attribute map type and some braces are not visible.
8082 void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
8083 llvm::Constant *Addr) {
8084 if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
8085 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
8086 OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
8089 llvm::GlobalValue::LinkageTypes Linkage;
8091 case OMPDeclareTargetDeclAttr::MT_To:
8092 Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
8093 VarName = CGM.getMangledName(VD);
8094 VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
8095 Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
8096 // Temp solution to prevent optimizations of the internal variables.
8097 if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
// Emit an internal constant "<name>_ref" holding the variable's address
// and mark it compiler-used so the variable is kept alive.
8098 std::string RefName = getName({VarName, "ref"});
8099 if (!CGM.GetGlobalValue(RefName)) {
8100 llvm::Constant *AddrRef =
8101 getOrCreateInternalVariable(Addr->getType(), RefName);
8102 auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
8103 GVAddrRef->setConstant(/*Val=*/true);
8104 GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
8105 GVAddrRef->setInitializer(Addr);
8106 CGM.addCompilerUsedGlobal(GVAddrRef);
8110 case OMPDeclareTargetDeclAttr::MT_Link:
8111 Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
8112 if (CGM.getLangOpts().OpenMPIsDevice) {
8113 VarName = Addr->getName();
8116 VarName = getAddrOfDeclareTargetLink(VD).getName();
8118 cast<llvm::Constant>(getAddrOfDeclareTargetLink(VD).getPointer());
// 'link' entries register the pointer, hence pointer size and weak linkage.
8120 VarSize = CGM.getPointerSize();
8121 Linkage = llvm::GlobalValue::WeakAnyLinkage;
8124 OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
8125 VarName, Addr, VarSize, Flags, Linkage);
// Dispatches a global to the function or variable target-emission handler.
8129 bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
8130 if (isa<FunctionDecl>(GD.getDecl()))
8131 return emitTargetFunctions(GD);
8133 return emitTargetGlobalVariable(GD);
// RAII: on device compilations, saves ShouldMarkAsGlobal and clears it for
// the scope's duration (restored in the destructor below).
8136 CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
8139 if (CGM.getLangOpts().OpenMPIsDevice) {
8140 SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
8141 CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
// RAII destructor: restores the ShouldMarkAsGlobal flag saved by the ctor.
8145 CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
8146 if (CGM.getLangOpts().OpenMPIsDevice)
8147 CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
// On device compilations, records a function as emitted for the target and
// returns whether it was already recorded (insert() dedup). Declare-target
// functions with an already-defined LLVM body report accordingly.
8150 bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
8151 if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
8154 const auto *D = cast<FunctionDecl>(GD.getDecl());
8155 const FunctionDecl *FD = D->getCanonicalDecl();
8156 // Do not emit the function if it is marked as declare target, as it was
8157 // already handled by the declare-target machinery.
8158 if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
8159 if (D->hasBody() && AlreadyEmittedTargetFunctions.count(FD) == 0) {
8160 if (auto *F = dyn_cast_or_null<llvm::Function>(
8161 CGM.GetGlobalValue(CGM.getMangledName(GD))))
8162 return !F->isDeclaration();
8168 return !AlreadyEmittedTargetFunctions.insert(FD).second;
8171 llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
8172 // If we have offloading in the current module, we need to emit the entries
8173 // now and register the offloading descriptor.
8174 createOffloadEntriesAndInfoMetadata();
8176 // Create and register the offloading binary descriptors. This is the main
8177 // entity that captures all the information about offloading in the current
8178 // compilation unit.
8179 return createOffloadingBinaryDescriptorRegistration();
// Emits the host-side call to __kmpc_fork_teams(loc, n, microtask, vars...)
// that launches the outlined teams region with the given captured variables.
8182 void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
8183 const OMPExecutableDirective &D,
8185 llvm::Value *OutlinedFn,
8186 ArrayRef<llvm::Value *> CapturedVars) {
8187 if (!CGF.HaveInsertPoint())
8190 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
8191 CodeGenFunction::RunCleanupsScope Scope(CGF);
8193 // Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn);
8194 llvm::Value *Args[] = {
8196 CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
8197 CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
8198 llvm::SmallVector<llvm::Value *, 16> RealArgs;
8199 RealArgs.append(std::begin(Args), std::end(Args));
8200 RealArgs.append(CapturedVars.begin(), CapturedVars.end());
8202 llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams);
8203 CGF.EmitRuntimeCall(RTLFn, RealArgs);
// Emits __kmpc_push_num_teams with the evaluated num_teams / thread_limit
// clause expressions; a null expression contributes the default value 0.
8206 void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
8207 const Expr *NumTeams,
8208 const Expr *ThreadLimit,
8209 SourceLocation Loc) {
8210 if (!CGF.HaveInsertPoint())
8213 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
8215 llvm::Value *NumTeamsVal =
8217 ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
8218 CGF.CGM.Int32Ty, /* isSigned = */ true)
8219 : CGF.Builder.getInt32(0);
8221 llvm::Value *ThreadLimitVal =
8223 ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
8224 CGF.CGM.Int32Ty, /* isSigned = */ true)
8225 : CGF.Builder.getInt32(0);
8227 // Build call __kmpc_push_num_teams(&loc, global_tid, num_teams, thread_limit)
8228 llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
8230 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_teams),
// Emits the paired __tgt_target_data_begin / __tgt_target_data_end calls for
// a 'target data' region, with the region body emitted in between. When
// device-pointer privatization is needed the body is emitted twice (with and
// without privatization) under the if-clause conditional.
// NOTE(review): listing is elided (gaps such as 8240->8242, 8284->8287,
// 8320->8324, 8330->8335, 8340->8342); some calls and braces are missing.
8234 void CGOpenMPRuntime::emitTargetDataCalls(
8235 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
8236 const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
8237 if (!CGF.HaveInsertPoint())
8240 // Action used to replace the default codegen action and turn privatization
8242 PrePostActionTy NoPrivAction;
8244 // Generate the code for the opening of the data environment. Capture all the
8245 // arguments of the runtime call by reference because they are used in the
8246 // closing of the region.
8247 auto &&BeginThenGen = [this, &D, Device, &Info,
8248 &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
8249 // Fill up the arrays with all the mapped variables.
8250 MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
8251 MappableExprsHandler::MapValuesArrayTy Pointers;
8252 MappableExprsHandler::MapValuesArrayTy Sizes;
8253 MappableExprsHandler::MapFlagsArrayTy MapTypes;
8255 // Get map clause information.
8256 MappableExprsHandler MCHandler(D, CGF);
8257 MCHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);
8259 // Fill up the arrays and create the arguments.
8260 emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
8262 llvm::Value *BasePointersArrayArg = nullptr;
8263 llvm::Value *PointersArrayArg = nullptr;
8264 llvm::Value *SizesArrayArg = nullptr;
8265 llvm::Value *MapTypesArrayArg = nullptr;
8266 emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
8267 SizesArrayArg, MapTypesArrayArg, Info);
8269 // Emit device ID if any.
8270 llvm::Value *DeviceID = nullptr;
8272 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
8273 CGF.Int64Ty, /*isSigned=*/true);
8275 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
8278 // Emit the number of elements in the offloading arrays.
8279 llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
8281 llvm::Value *OffloadingArgs[] = {
8282 DeviceID, PointerNum, BasePointersArrayArg,
8283 PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
8284 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_begin),
8287 // If device pointer privatization is required, emit the body of the region
8288 // here. It will have to be duplicated: with and without privatization.
8289 if (!Info.CaptureDeviceAddrMap.empty())
8293 // Generate code for the closing of the data region.
8294 auto &&EndThenGen = [this, Device, &Info](CodeGenFunction &CGF,
8295 PrePostActionTy &) {
8296 assert(Info.isValid() && "Invalid data environment closing arguments.");
8298 llvm::Value *BasePointersArrayArg = nullptr;
8299 llvm::Value *PointersArrayArg = nullptr;
8300 llvm::Value *SizesArrayArg = nullptr;
8301 llvm::Value *MapTypesArrayArg = nullptr;
8302 emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
8303 SizesArrayArg, MapTypesArrayArg, Info);
8305 // Emit device ID if any.
8306 llvm::Value *DeviceID = nullptr;
8308 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
8309 CGF.Int64Ty, /*isSigned=*/true);
8311 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
8314 // Emit the number of elements in the offloading arrays.
8315 llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
8317 llvm::Value *OffloadingArgs[] = {
8318 DeviceID, PointerNum, BasePointersArrayArg,
8319 PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
8320 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_end),
8324 // If we need device pointer privatization, we need to emit the body of the
8325 // region with no privatization in the 'else' branch of the conditional.
8326 // Otherwise, we don't have to do anything.
8327 auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
8328 PrePostActionTy &) {
8329 if (!Info.CaptureDeviceAddrMap.empty()) {
8330 CodeGen.setAction(NoPrivAction);
8335 // We don't have to do anything to close the region if the if clause evaluates
8337 auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
8340 emitOMPIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
8342 RegionCodeGenTy RCG(BeginThenGen);
8346 // If we don't require privatization of device pointers, we emit the body in
8347 // between the runtime calls. This avoids duplicating the body code.
8348 if (Info.CaptureDeviceAddrMap.empty()) {
8349 CodeGen.setAction(NoPrivAction);
8354 emitOMPIfClause(CGF, IfCond, EndThenGen, EndElseGen);
8356 RegionCodeGenTy RCG(EndThenGen);
// Emits the single runtime call for a standalone data-movement directive
// ('target enter data' / 'target exit data' / 'target update'), selecting
// __tgt_target_data_{begin,end,update}[_nowait] by directive kind.
// NOTE(review): listing is elided (gaps such as 8364->8367, 8388->8390,
// 8404->8406 for the 'break;' lines, 8412->8416); the visible switch is
// missing its breaks and several case labels -- verify against full file.
8361 void CGOpenMPRuntime::emitTargetDataStandAloneCall(
8362 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
8363 const Expr *Device) {
8364 if (!CGF.HaveInsertPoint())
8367 assert((isa<OMPTargetEnterDataDirective>(D) ||
8368 isa<OMPTargetExitDataDirective>(D) ||
8369 isa<OMPTargetUpdateDirective>(D)) &&
8370 "Expecting either target enter, exit data, or update directives.");
8372 CodeGenFunction::OMPTargetDataInfo InputInfo;
8373 llvm::Value *MapTypesArray = nullptr;
8374 // Generate the code for the opening of the data environment.
8375 auto &&ThenGen = [this, &D, Device, &InputInfo,
8376 &MapTypesArray](CodeGenFunction &CGF, PrePostActionTy &) {
8377 // Emit device ID if any.
8378 llvm::Value *DeviceID = nullptr;
8380 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
8381 CGF.Int64Ty, /*isSigned=*/true);
8383 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
8386 // Emit the number of elements in the offloading arrays.
8387 llvm::Constant *PointerNum =
8388 CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
8390 llvm::Value *OffloadingArgs[] = {DeviceID,
8392 InputInfo.BasePointersArray.getPointer(),
8393 InputInfo.PointersArray.getPointer(),
8394 InputInfo.SizesArray.getPointer(),
8397 // Select the right runtime function call for each expected standalone
8399 const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
8400 OpenMPRTLFunction RTLFn;
8401 switch (D.getDirectiveKind()) {
8402 case OMPD_target_enter_data:
8403 RTLFn = HasNowait ? OMPRTL__tgt_target_data_begin_nowait
8404 : OMPRTL__tgt_target_data_begin;
8406 case OMPD_target_exit_data:
8407 RTLFn = HasNowait ? OMPRTL__tgt_target_data_end_nowait
8408 : OMPRTL__tgt_target_data_end;
8410 case OMPD_target_update:
8411 RTLFn = HasNowait ? OMPRTL__tgt_target_data_update_nowait
8412 : OMPRTL__tgt_target_data_update;
// All other directive kinds are excluded by the assert above and fall
// through to llvm_unreachable.
8416 case OMPD_parallel_for:
8417 case OMPD_parallel_sections:
8419 case OMPD_parallel_for_simd:
8421 case OMPD_cancellation_point:
8423 case OMPD_threadprivate:
8431 case OMPD_taskyield:
8434 case OMPD_taskgroup:
8438 case OMPD_target_data:
8439 case OMPD_distribute:
8440 case OMPD_distribute_simd:
8441 case OMPD_distribute_parallel_for:
8442 case OMPD_distribute_parallel_for_simd:
8443 case OMPD_teams_distribute:
8444 case OMPD_teams_distribute_simd:
8445 case OMPD_teams_distribute_parallel_for:
8446 case OMPD_teams_distribute_parallel_for_simd:
8447 case OMPD_declare_simd:
8448 case OMPD_declare_target:
8449 case OMPD_end_declare_target:
8450 case OMPD_declare_reduction:
8452 case OMPD_taskloop_simd:
8454 case OMPD_target_simd:
8455 case OMPD_target_teams_distribute:
8456 case OMPD_target_teams_distribute_simd:
8457 case OMPD_target_teams_distribute_parallel_for:
8458 case OMPD_target_teams_distribute_parallel_for_simd:
8459 case OMPD_target_teams:
8460 case OMPD_target_parallel:
8461 case OMPD_target_parallel_for:
8462 case OMPD_target_parallel_for_simd:
8464 llvm_unreachable("Unexpected standalone target data directive.")
8467 CGF.EmitRuntimeCall(createRuntimeFunction(RTLFn), OffloadingArgs);
8470 auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray](
8471 CodeGenFunction &CGF, PrePostActionTy &) {
8472 // Fill up the arrays with all the mapped variables.
8473 MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
8474 MappableExprsHandler::MapValuesArrayTy Pointers;
8475 MappableExprsHandler::MapValuesArrayTy Sizes;
8476 MappableExprsHandler::MapFlagsArrayTy MapTypes;
8478 // Get map clause information.
8479 MappableExprsHandler MEHandler(D, CGF);
8480 MEHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);
8482 TargetDataInfo Info;
8483 // Fill up the arrays and create the arguments.
8484 emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
8485 emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
8486 Info.PointersArray, Info.SizesArray,
8487 Info.MapTypesArray, Info);
8488 InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
8489 InputInfo.BasePointersArray =
8490 Address(Info.BasePointersArray, CGM.getPointerAlign());
8491 InputInfo.PointersArray =
8492 Address(Info.PointersArray, CGM.getPointerAlign());
8493 InputInfo.SizesArray =
8494 Address(Info.SizesArray, CGM.getPointerAlign());
8495 MapTypesArray = Info.MapTypesArray;
8496 if (D.hasClausesOfKind<OMPDependClause>())
8497 CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
8499 emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
8503 emitOMPIfClause(CGF, IfCond, TargetThenGen,
8504 [](CodeGenFunction &CGF, PrePostActionTy &) {});
8506 RegionCodeGenTy ThenRCG(TargetThenGen);
8512 /// Kind of parameter in a function with 'declare simd' directive.
8513 enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector };
8514 /// Attribute set of the parameter.
8515 struct ParamAttrTy {
// Defaults to Vector: a parameter with no uniform/linear/aligned clause.
8516 ParamKindTy Kind = Vector;
// For Linear: the constant step. For LinearWithVarStride: the position of
// the parameter that supplies the stride (see emitDeclareSimdFunction).
8517 llvm::APSInt StrideOrArg;
// Alignment from an 'aligned' clause (explicit or the target default).
8518 llvm::APSInt Alignment;
// Computes the bit-size of the "characteristic data type" (CDT) of FD, used
// to derive the vector length (VLEN) for 'declare simd' vector variants.
// NOTE(review): interior lines are elided in this listing (e.g. the early
// return for a null return type and the record/union fallback to int).
8522 static unsigned evaluateCDTSize(const FunctionDecl *FD,
8523 ArrayRef<ParamAttrTy> ParamAttrs) {
8524 // Every vector variant of a SIMD-enabled function has a vector length (VLEN).
8525 // If OpenMP clause "simdlen" is used, the VLEN is the value of the argument
8526 // of that clause. The VLEN value must be power of 2.
8527 // In other case the notion of the function`s "characteristic data type" (CDT)
8528 // is used to compute the vector length.
8529 // CDT is defined in the following order:
8530 // a) For non-void function, the CDT is the return type.
8531 // b) If the function has any non-uniform, non-linear parameters, then the
8532 // CDT is the type of the first such parameter.
8533 // c) If the CDT determined by a) or b) above is struct, union, or class
8534 // type which is pass-by-value (except for the type that maps to the
8535 // built-in complex data type), the characteristic data type is int.
8536 // d) If none of the above three cases is applicable, the CDT is int.
8537 // The VLEN is then determined based on the CDT and the size of vector
8538 // register of that ISA for which current vector version is generated. The
8539 // VLEN is computed using the formula below:
8540 // VLEN = sizeof(vector_register) / sizeof(CDT),
8541 // where vector register size specified in section 3.2.1 Registers and the
8542 // Stack Frame of original AMD64 ABI document.
8543 QualType RetType = FD->getReturnType();
8544 if (RetType.isNull())
8546 ASTContext &C = FD->getASTContext();
// Case a): a non-void return type is the CDT.
8548 if (!RetType.isNull() && !RetType->isVoidType()) {
// Case b): otherwise scan parameters; for a C++ method, slot 0 is the
// implicit 'this', so real parameters are shifted by one.
8551 unsigned Offset = 0;
8552 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
8553 if (ParamAttrs[Offset].Kind == Vector)
8554 CDT = C.getPointerType(C.getRecordType(MD->getParent()));
8558 for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
8559 if (ParamAttrs[I + Offset].Kind == Vector) {
8560 CDT = FD->getParamDecl(I)->getType();
8568 CDT = CDT->getCanonicalTypeUnqualified();
// Case c)/d): record/union types fall back to int (elided here).
8569 if (CDT->isRecordType() || CDT->isUnionType())
8571 return C.getTypeSize(CDT);
// Attaches x86 vector-variant mangled names ("_ZGV<isa><mask><vlen><params>_
// <name>") as function attributes on Fn, one per (mask, ISA) combination,
// following the Intel vector function ABI.
// NOTE(review): the ISAData initializer rows and several 'break's are elided
// in this listing (numbering gaps).
8575 emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
8576 const llvm::APSInt &VLENVal,
8577 ArrayRef<ParamAttrTy> ParamAttrs,
8578 OMPDeclareSimdDeclAttr::BranchStateTy State) {
8581 unsigned VecRegSize;
8583 ISADataTy ISAData[] = {
// The branch state selects which mask variants to emit: 'N' = not-in-branch
// (unmasked), 'M' = in-branch (masked); undefined emits both.
8597 llvm::SmallVector<char, 2> Masked;
8599 case OMPDeclareSimdDeclAttr::BS_Undefined:
8600 Masked.push_back('N');
8601 Masked.push_back('M');
8603 case OMPDeclareSimdDeclAttr::BS_Notinbranch:
8604 Masked.push_back('N');
8606 case OMPDeclareSimdDeclAttr::BS_Inbranch:
8607 Masked.push_back('M');
8610 for (char Mask : Masked) {
8611 for (const ISADataTy &Data : ISAData) {
8612 SmallString<256> Buffer;
8613 llvm::raw_svector_ostream Out(Buffer);
8614 Out << "_ZGV" << Data.ISA << Mask;
// No explicit simdlen: VLEN = vector register size / CDT size.
8616 Out << llvm::APSInt::getUnsigned(Data.VecRegSize /
8617 evaluateCDTSize(FD, ParamAttrs));
// Encode each parameter's kind into the mangled name.
8621 for (const ParamAttrTy &ParamAttr : ParamAttrs) {
8622 switch (ParamAttr.Kind){
8623 case LinearWithVarStride:
// 's' + position of the parameter holding the variable stride.
8624 Out << 's' << ParamAttr.StrideOrArg;
8628 if (!!ParamAttr.StrideOrArg)
8629 Out << ParamAttr.StrideOrArg;
8638 if (!!ParamAttr.Alignment)
8639 Out << 'a' << ParamAttr.Alignment;
8641 Out << '_' << Fn->getName();
8642 Fn->addFnAttr(Out.str());
// Processes all 'declare simd' attributes on FD: classifies every parameter
// as uniform / linear / aligned / vector, evaluates simdlen, and (on x86)
// emits the corresponding vector-variant attributes on Fn.
// FIX(review): the two null-checked casts in the linear-step handling below
// used cast<>, which asserts (never returns null) on a type mismatch, making
// the guards dead and crashing on a linear step that is not a DeclRefExpr to
// a ParmVarDecl. Changed to dyn_cast<> so the checks are meaningful.
// NOTE(review): this listing has numbering gaps; some closing braces and
// declarations (e.g. 'unsigned Pos;') are elided.
8647 void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
8648 llvm::Function *Fn) {
8649 ASTContext &C = CGM.getContext();
8650 FD = FD->getMostRecentDecl();
8651 // Map params to their positions in function decl.
8652 llvm::DenseMap<const Decl *, unsigned> ParamPositions;
// For a C++ method, reserve slot 0 for the implicit 'this' (keyed by FD).
8653 if (isa<CXXMethodDecl>(FD))
8654 ParamPositions.try_emplace(FD, 0);
8655 unsigned ParamPos = ParamPositions.size();
8656 for (const ParmVarDecl *P : FD->parameters()) {
8657 ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
8661 for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
8662 llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
8663 // Mark uniform parameters.
8664 for (const Expr *E : Attr->uniforms()) {
8665 E = E->IgnoreParenImpCasts();
8667 if (isa<CXXThisExpr>(E)) {
8668 Pos = ParamPositions[FD];
8670 const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
8671 ->getCanonicalDecl();
8672 Pos = ParamPositions[PVD];
8674 ParamAttrs[Pos].Kind = Uniform;
8676 // Get alignment info.
// NI walks alignments() in lockstep with aligneds(); a null alignment expr
// means "use the target's default simd alignment for the parameter type".
8677 auto NI = Attr->alignments_begin();
8678 for (const Expr *E : Attr->aligneds()) {
8679 E = E->IgnoreParenImpCasts();
8682 if (isa<CXXThisExpr>(E)) {
8683 Pos = ParamPositions[FD];
8684 ParmTy = E->getType();
8686 const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
8687 ->getCanonicalDecl();
8688 Pos = ParamPositions[PVD];
8689 ParmTy = PVD->getType();
8691 ParamAttrs[Pos].Alignment =
8693 ? (*NI)->EvaluateKnownConstInt(C)
8694 : llvm::APSInt::getUnsigned(
8695 C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy))
8699 // Mark linear parameters.
8700 auto SI = Attr->steps_begin();
8701 auto MI = Attr->modifiers_begin();
8702 for (const Expr *E : Attr->linears()) {
8703 E = E->IgnoreParenImpCasts();
8705 if (isa<CXXThisExpr>(E)) {
8706 Pos = ParamPositions[FD];
8708 const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
8709 ->getCanonicalDecl();
8710 Pos = ParamPositions[PVD];
8712 ParamAttrTy &ParamAttr = ParamAttrs[Pos];
8713 ParamAttr.Kind = Linear;
// Non-constant step: only a reference to another parameter is supported;
// record that parameter's position as the variable stride.
8715 if (!(*SI)->EvaluateAsInt(ParamAttr.StrideOrArg, C,
8716 Expr::SE_AllowSideEffects)) {
8717 if (const auto *DRE =
8718 dyn_cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
8719 if (const auto *StridePVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
8720 ParamAttr.Kind = LinearWithVarStride;
8721 ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
8722 ParamPositions[StridePVD->getCanonicalDecl()]);
8730 llvm::APSInt VLENVal;
8731 if (const Expr *VLEN = Attr->getSimdlen())
8732 VLENVal = VLEN->EvaluateKnownConstInt(C);
8733 OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
// Vector-variant mangling is only implemented for x86/x86-64.
8734 if (CGM.getTriple().getArch() == llvm::Triple::x86 ||
8735 CGM.getTriple().getArch() == llvm::Triple::x86_64)
8736 emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
// Also process earlier redeclarations, which may carry their own attributes.
8738 FD = FD->getPreviousDecl();
8743 /// Cleanup action for doacross support.
// EH-scope cleanup that emits the __kmpc_doacross_fini call (loc + gtid) when
// the doacross region is left, on both normal and exceptional paths.
8744 class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
// Number of arguments of the fini runtime call: ident_t *loc, kmp_int32 gtid.
8746 static const int DoacrossFinArgs = 2;
8750 llvm::Value *Args[DoacrossFinArgs];
8753 DoacrossCleanupTy(llvm::Value *RTLFn, ArrayRef<llvm::Value *> CallArgs)
8755 assert(CallArgs.size() == DoacrossFinArgs);
// Arguments are copied because the cleanup outlives the caller's storage.
8756 std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
8758 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
8759 if (!CGF.HaveInsertPoint())
8761 CGF.EmitRuntimeCall(RTLFn, Args);
// Emits __kmpc_doacross_init for an ordered(n) loop nest: builds an array of
// kmp_dim {lo, up, st} descriptors (one per collapsed loop), fills the upper
// bound from NumIterations and stride 1, calls the init runtime entry, and
// pushes a cleanup that emits __kmpc_doacross_fini on scope exit.
// NOTE(review): numbering gaps — some declarations (e.g. RecordDecl *RD) and
// closing braces are elided in this listing.
8766 void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
8767 const OMPLoopDirective &D,
8768 ArrayRef<Expr *> NumIterations) {
8769 if (!CGF.HaveInsertPoint())
8772 ASTContext &C = CGM.getContext();
8773 QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
// Build (once, cached in KmpDimTy) the implicit record matching the runtime's
// struct kmp_dim layout.
8775 if (KmpDimTy.isNull()) {
8776 // Build struct kmp_dim { // loop bounds info casted to kmp_int64
8777 // kmp_int64 lo; // lower
8778 // kmp_int64 up; // upper
8779 // kmp_int64 st; // stride
8781 RD = C.buildImplicitRecord("kmp_dim");
8782 RD->startDefinition();
8783 addFieldToRecordDecl(C, RD, Int64Ty);
8784 addFieldToRecordDecl(C, RD, Int64Ty);
8785 addFieldToRecordDecl(C, RD, Int64Ty);
8786 RD->completeDefinition();
8787 KmpDimTy = C.getRecordType(RD);
8789 RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
// One kmp_dim per collapsed loop, zero-initialized (lo = 0).
8791 llvm::APInt Size(/*numBits=*/32, NumIterations.size());
8793 C.getConstantArrayType(KmpDimTy, Size, ArrayType::Normal, 0);
8795 Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
8796 CGF.EmitNullInitialization(DimsAddr, ArrayTy);
8797 enum { LowerFD = 0, UpperFD, StrideFD };
8798 // Fill dims with data.
8799 for (unsigned I = 0, E = NumIterations.size(); I < E; ++I) {
8801 CGF.MakeAddrLValue(CGF.Builder.CreateConstArrayGEP(
8802 DimsAddr, I, C.getTypeSizeInChars(KmpDimTy)),
8804 // dims.upper = num_iterations;
8805 LValue UpperLVal = CGF.EmitLValueForField(
8806 DimsLVal, *std::next(RD->field_begin(), UpperFD));
8807 llvm::Value *NumIterVal =
8808 CGF.EmitScalarConversion(CGF.EmitScalarExpr(NumIterations[I]),
8809 D.getNumIterations()->getType(), Int64Ty,
8810 D.getNumIterations()->getExprLoc());
8811 CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
// dims.stride = 1 (the runtime expects a normalized unit stride here).
8813 LValue StrideLVal = CGF.EmitLValueForField(
8814 DimsLVal, *std::next(RD->field_begin(), StrideFD));
8815 CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
8819 // Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
8820 // kmp_int32 num_dims, struct kmp_dim * dims);
8821 llvm::Value *Args[] = {
8822 emitUpdateLocation(CGF, D.getLocStart()),
8823 getThreadID(CGF, D.getLocStart()),
8824 llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
8825 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
8827 .CreateConstArrayGEP(DimsAddr, 0, C.getTypeSizeInChars(KmpDimTy))
8831 llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_init);
8832 CGF.EmitRuntimeCall(RTLFn, Args);
// Matching fini is deferred to scope exit via an EH cleanup so it also runs
// on the exceptional path.
8833 llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
8834 emitUpdateLocation(CGF, D.getLocEnd()), getThreadID(CGF, D.getLocEnd())};
8835 llvm::Value *FiniRTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_fini);
8836 CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
8837 llvm::makeArrayRef(FiniArgs));
// Emits the runtime call for an 'ordered depend(source)' / 'depend(sink)'
// construct: stores the loop iteration vector from clause C into a temporary
// kmp_int64 array and calls __kmpc_doacross_post (source) or
// __kmpc_doacross_wait (sink).
8840 void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
8841 const OMPDependClause *C) {
8843 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
8844 llvm::APInt Size(/*numBits=*/32, C->getNumLoops());
8845 QualType ArrayTy = CGM.getContext().getConstantArrayType(
8846 Int64Ty, Size, ArrayType::Normal, 0);
8847 Address CntAddr = CGF.CreateMemTemp(ArrayTy, ".cnt.addr");
// Convert each per-loop counter expression to kmp_int64 and store it into
// the corresponding slot of the vector.
8848 for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I) {
8849 const Expr *CounterVal = C->getLoopData(I);
8851 llvm::Value *CntVal = CGF.EmitScalarConversion(
8852 CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
8853 CounterVal->getExprLoc());
8854 CGF.EmitStoreOfScalar(
8856 CGF.Builder.CreateConstArrayGEP(
8857 CntAddr, I, CGM.getContext().getTypeSizeInChars(Int64Ty)),
8858 /*Volatile=*/false, Int64Ty);
// Args: ident_t *loc, kmp_int32 gtid, kmp_int64 *vec.
8860 llvm::Value *Args[] = {
8861 emitUpdateLocation(CGF, C->getLocStart()),
8862 getThreadID(CGF, C->getLocStart()),
8864 .CreateConstArrayGEP(CntAddr, 0,
8865 CGM.getContext().getTypeSizeInChars(Int64Ty))
8868 if (C->getDependencyKind() == OMPC_DEPEND_source) {
8869 RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
8871 assert(C->getDependencyKind() == OMPC_DEPEND_sink);
8872 RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_wait);
8874 CGF.EmitRuntimeCall(RTLFn, Args);
// Emits a call to Callee with an artificial debug location derived from Loc.
// Uses the nounwind call form when the callee is a known non-throwing
// llvm::Function; otherwise falls back to a regular runtime call.
8877 void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
8878 llvm::Value *Callee,
8879 ArrayRef<llvm::Value *> Args) const {
8880 assert(Loc.isValid() && "Outlined function call location must be valid.");
// RAII: the artificial debug location applies for the rest of this scope.
8881 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
8883 if (auto *Fn = dyn_cast<llvm::Function>(Callee)) {
8884 if (Fn->doesNotThrow()) {
8885 CGF.EmitNounwindRuntimeCall(Fn, Args);
8889 CGF.EmitRuntimeCall(Callee, Args);
// Default lowering of an outlined-function invocation: a plain emitCall.
// Virtual so target-specific runtimes (e.g. NVPTX) can override the calling
// convention — TODO confirm against the class declaration, not visible here.
8892 void CGOpenMPRuntime::emitOutlinedFunctionCall(
8893 CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
8894 ArrayRef<llvm::Value *> Args) const {
8895 emitCall(CGF, Loc, OutlinedFn, Args);
// Host runtime: native and target parameters share storage, so simply return
// the native parameter's local address (TargetParam is intentionally unused).
8898 Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
8899 const VarDecl *NativeParam,
8900 const VarDecl *TargetParam) const {
8901 return CGF.GetAddrOfLocalVar(NativeParam);
// Base implementation provides no special storage for local variables;
// an invalid Address tells the caller to use ordinary alloca-based storage.
8904 Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
8905 const VarDecl *VD) {
8906 return Address::invalid();
// CGOpenMPSIMDRuntime stubs (outlining + parallel call): in SIMD-only mode
// (-fopenmp-simd) these constructs are never code-generated, so each method
// aborts via llvm_unreachable. Sema is expected to reject them earlier.
8909 llvm::Value *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
8910 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
8911 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
8912 llvm_unreachable("Not supported in SIMD-only mode");
8915 llvm::Value *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
8916 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
8917 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
8918 llvm_unreachable("Not supported in SIMD-only mode");
8921 llvm::Value *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
8922 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
8923 const VarDecl *PartIDVar, const VarDecl *TaskTVar,
8924 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
8925 bool Tied, unsigned &NumberOfParts) {
8926 llvm_unreachable("Not supported in SIMD-only mode");
8929 void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
8931 llvm::Value *OutlinedFn,
8932 ArrayRef<llvm::Value *> CapturedVars,
8933 const Expr *IfCond) {
8934 llvm_unreachable("Not supported in SIMD-only mode");
// CGOpenMPSIMDRuntime stubs (synchronization regions & barrier): unsupported
// in SIMD-only mode; each aborts via llvm_unreachable.
8937 void CGOpenMPSIMDRuntime::emitCriticalRegion(
8938 CodeGenFunction &CGF, StringRef CriticalName,
8939 const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
8941 llvm_unreachable("Not supported in SIMD-only mode");
8944 void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
8945 const RegionCodeGenTy &MasterOpGen,
8946 SourceLocation Loc) {
8947 llvm_unreachable("Not supported in SIMD-only mode");
8950 void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
8951 SourceLocation Loc) {
8952 llvm_unreachable("Not supported in SIMD-only mode");
8955 void CGOpenMPSIMDRuntime::emitTaskgroupRegion(
8956 CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen,
8957 SourceLocation Loc) {
8958 llvm_unreachable("Not supported in SIMD-only mode");
8961 void CGOpenMPSIMDRuntime::emitSingleRegion(
8962 CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen,
8963 SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars,
8964 ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs,
8965 ArrayRef<const Expr *> AssignmentOps) {
8966 llvm_unreachable("Not supported in SIMD-only mode");
8969 void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF,
8970 const RegionCodeGenTy &OrderedOpGen,
8973 llvm_unreachable("Not supported in SIMD-only mode");
8976 void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF,
8978 OpenMPDirectiveKind Kind,
8980 bool ForceSimpleCall) {
8981 llvm_unreachable("Not supported in SIMD-only mode");
// CGOpenMPSIMDRuntime stubs (loop worksharing init/next/finish): unsupported
// in SIMD-only mode; each aborts via llvm_unreachable.
8984 void CGOpenMPSIMDRuntime::emitForDispatchInit(
8985 CodeGenFunction &CGF, SourceLocation Loc,
8986 const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
8987 bool Ordered, const DispatchRTInput &DispatchValues) {
8988 llvm_unreachable("Not supported in SIMD-only mode");
8991 void CGOpenMPSIMDRuntime::emitForStaticInit(
8992 CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
8993 const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
8994 llvm_unreachable("Not supported in SIMD-only mode");
8997 void CGOpenMPSIMDRuntime::emitDistributeStaticInit(
8998 CodeGenFunction &CGF, SourceLocation Loc,
8999 OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) {
9000 llvm_unreachable("Not supported in SIMD-only mode");
9003 void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
9007 llvm_unreachable("Not supported in SIMD-only mode");
9010 void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF,
9012 OpenMPDirectiveKind DKind) {
9013 llvm_unreachable("Not supported in SIMD-only mode");
9016 llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
9018 unsigned IVSize, bool IVSigned,
9019 Address IL, Address LB,
9020 Address UB, Address ST) {
9021 llvm_unreachable("Not supported in SIMD-only mode");
// CGOpenMPSIMDRuntime stubs (num_threads/proc_bind clauses, threadprivate,
// flush): unsupported in SIMD-only mode; each aborts via llvm_unreachable.
9024 void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
9025 llvm::Value *NumThreads,
9026 SourceLocation Loc) {
9027 llvm_unreachable("Not supported in SIMD-only mode");
9030 void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF,
9031 OpenMPProcBindClauseKind ProcBind,
9032 SourceLocation Loc) {
9033 llvm_unreachable("Not supported in SIMD-only mode");
9036 Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
9039 SourceLocation Loc) {
9040 llvm_unreachable("Not supported in SIMD-only mode");
9043 llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition(
9044 const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit,
9045 CodeGenFunction *CGF) {
9046 llvm_unreachable("Not supported in SIMD-only mode");
9049 Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
9050 CodeGenFunction &CGF, QualType VarType, StringRef Name) {
9051 llvm_unreachable("Not supported in SIMD-only mode");
9054 void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
9055 ArrayRef<const Expr *> Vars,
9056 SourceLocation Loc) {
9057 llvm_unreachable("Not supported in SIMD-only mode");
// CGOpenMPSIMDRuntime stubs (tasks & reductions). All task-related entry
// points abort; emitReduction is the one supported operation here — only
// simple (serial) reductions, forwarded to the base-class implementation.
9060 void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
9061 const OMPExecutableDirective &D,
9062 llvm::Value *TaskFunction,
9063 QualType SharedsTy, Address Shareds,
9065 const OMPTaskDataTy &Data) {
9066 llvm_unreachable("Not supported in SIMD-only mode");
9069 void CGOpenMPSIMDRuntime::emitTaskLoopCall(
9070 CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
9071 llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
9072 const Expr *IfCond, const OMPTaskDataTy &Data) {
9073 llvm_unreachable("Not supported in SIMD-only mode");
9076 void CGOpenMPSIMDRuntime::emitReduction(
9077 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
9078 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
9079 ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
9080 assert(Options.SimpleReduction && "Only simple reduction is expected.");
9081 CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
9082 ReductionOps, Options);
9085 llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
9086 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
9087 ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
9088 llvm_unreachable("Not supported in SIMD-only mode");
9091 void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
9093 ReductionCodeGen &RCG,
9095 llvm_unreachable("Not supported in SIMD-only mode");
9098 Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
9100 llvm::Value *ReductionsPtr,
9101 LValue SharedLVal) {
9102 llvm_unreachable("Not supported in SIMD-only mode");
// CGOpenMPSIMDRuntime stubs (taskwait, cancellation, target offloading):
// unsupported in SIMD-only mode; the visible ones abort via llvm_unreachable.
// NOTE(review): the bodies of emitTargetGlobal and emitRegistrationFunction
// are elided by numbering gaps in this listing — verify against the original.
9105 void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
9106 SourceLocation Loc) {
9107 llvm_unreachable("Not supported in SIMD-only mode");
9110 void CGOpenMPSIMDRuntime::emitCancellationPointCall(
9111 CodeGenFunction &CGF, SourceLocation Loc,
9112 OpenMPDirectiveKind CancelRegion) {
9113 llvm_unreachable("Not supported in SIMD-only mode");
9116 void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF,
9117 SourceLocation Loc, const Expr *IfCond,
9118 OpenMPDirectiveKind CancelRegion) {
9119 llvm_unreachable("Not supported in SIMD-only mode");
9122 void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
9123 const OMPExecutableDirective &D, StringRef ParentName,
9124 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
9125 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
9126 llvm_unreachable("Not supported in SIMD-only mode");
9129 void CGOpenMPSIMDRuntime::emitTargetCall(CodeGenFunction &CGF,
9130 const OMPExecutableDirective &D,
9131 llvm::Value *OutlinedFn,
9132 llvm::Value *OutlinedFnID,
9133 const Expr *IfCond, const Expr *Device) {
9134 llvm_unreachable("Not supported in SIMD-only mode");
9137 bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) {
9138 llvm_unreachable("Not supported in SIMD-only mode");
9141 bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
9142 llvm_unreachable("Not supported in SIMD-only mode");
9145 bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) {
9149 llvm::Function *CGOpenMPSIMDRuntime::emitRegistrationFunction() {
// CGOpenMPSIMDRuntime stubs (teams, target data, doacross, parameter
// translation): unsupported in SIMD-only mode; each aborts via
// llvm_unreachable.
9153 void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
9154 const OMPExecutableDirective &D,
9156 llvm::Value *OutlinedFn,
9157 ArrayRef<llvm::Value *> CapturedVars) {
9158 llvm_unreachable("Not supported in SIMD-only mode");
9161 void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
9162 const Expr *NumTeams,
9163 const Expr *ThreadLimit,
9164 SourceLocation Loc) {
9165 llvm_unreachable("Not supported in SIMD-only mode");
9168 void CGOpenMPSIMDRuntime::emitTargetDataCalls(
9169 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
9170 const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
9171 llvm_unreachable("Not supported in SIMD-only mode");
9174 void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
9175 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
9176 const Expr *Device) {
9177 llvm_unreachable("Not supported in SIMD-only mode");
9180 void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
9181 const OMPLoopDirective &D,
9182 ArrayRef<Expr *> NumIterations) {
9183 llvm_unreachable("Not supported in SIMD-only mode");
9186 void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
9187 const OMPDependClause *C) {
9188 llvm_unreachable("Not supported in SIMD-only mode");
9192 CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
9193 const VarDecl *NativeParam) const {
9194 llvm_unreachable("Not supported in SIMD-only mode");
9198 CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
9199 const VarDecl *NativeParam,
9200 const VarDecl *TargetParam) const {
9201 llvm_unreachable("Not supported in SIMD-only mode");