1 //===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This provides a class for OpenMP runtime code generation.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "CGCXXABI.h"
15 #include "CGCleanup.h"
16 #include "CGOpenMPRuntime.h"
17 #include "CodeGenFunction.h"
18 #include "clang/CodeGen/ConstantInitBuilder.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/StmtOpenMP.h"
21 #include "llvm/ADT/ArrayRef.h"
22 #include "llvm/ADT/BitmaskEnum.h"
23 #include "llvm/Bitcode/BitcodeReader.h"
24 #include "llvm/IR/CallSite.h"
25 #include "llvm/IR/DerivedTypes.h"
26 #include "llvm/IR/GlobalValue.h"
27 #include "llvm/IR/Value.h"
28 #include "llvm/Support/Format.h"
29 #include "llvm/Support/raw_ostream.h"
30 #include <cassert>
31
32 using namespace clang;
33 using namespace CodeGen;
34
35 namespace {
36 /// \brief Base class for handling code generation inside OpenMP regions.
37 class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
38 public:
39   /// \brief Kinds of OpenMP regions used in codegen.
40   enum CGOpenMPRegionKind {
41     /// \brief Region with outlined function for standalone 'parallel'
42     /// directive.
43     ParallelOutlinedRegion,
44     /// \brief Region with outlined function for standalone 'task' directive.
45     TaskOutlinedRegion,
46     /// \brief Region for constructs that do not require function outlining,
47     /// like the 'for', 'sections', or 'atomic' directives.
48     InlinedRegion,
49     /// \brief Region with outlined function for standalone 'target' directive.
50     TargetRegion,
51   };
52
53   CGOpenMPRegionInfo(const CapturedStmt &CS,
54                      const CGOpenMPRegionKind RegionKind,
55                      const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
56                      bool HasCancel)
57       : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
58         CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
59
60   CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
61                      const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
62                      bool HasCancel)
63       : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
64         Kind(Kind), HasCancel(HasCancel) {}
65
66   /// \brief Get a variable or parameter for storing global thread id
67   /// inside OpenMP construct.
68   virtual const VarDecl *getThreadIDVariable() const = 0;
69
70   /// \brief Emit the captured statement body.
71   void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
72
73   /// \brief Get an LValue for the current ThreadID variable.
74   /// \return LValue for thread id variable. This LValue always has type int32*.
75   virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
76
77   virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}
78
79   CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
80
81   OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
82
83   bool hasCancel() const { return HasCancel; }
84
85   static bool classof(const CGCapturedStmtInfo *Info) {
86     return Info->getKind() == CR_OpenMP;
87   }
88
89   ~CGOpenMPRegionInfo() override = default;
90
91 protected:
92   CGOpenMPRegionKind RegionKind;
93   RegionCodeGenTy CodeGen;
94   OpenMPDirectiveKind Kind;
95   bool HasCancel;
96 };
97
98 /// \brief API for captured statement code generation in OpenMP constructs.
99 class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
100 public:
101   CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
102                              const RegionCodeGenTy &CodeGen,
103                              OpenMPDirectiveKind Kind, bool HasCancel,
104                              StringRef HelperName)
105       : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
106                            HasCancel),
107         ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
108     assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
109   }
110
111   /// \brief Get a variable or parameter for storing global thread id
112   /// inside OpenMP construct.
113   const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
114
115   /// \brief Get the name of the capture helper.
116   StringRef getHelperName() const override { return HelperName; }
117
118   static bool classof(const CGCapturedStmtInfo *Info) {
119     return CGOpenMPRegionInfo::classof(Info) &&
120            cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
121                ParallelOutlinedRegion;
122   }
123
124 private:
125   /// \brief A variable or parameter storing global thread id for OpenMP
126   /// constructs.
127   const VarDecl *ThreadIDVar;
128   StringRef HelperName;
129 };
130
131 /// \brief API for captured statement code generation in OpenMP constructs.
132 class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
133 public:
134   class UntiedTaskActionTy final : public PrePostActionTy {
135     bool Untied;
136     const VarDecl *PartIDVar;
137     const RegionCodeGenTy UntiedCodeGen;
138     llvm::SwitchInst *UntiedSwitch = nullptr;
139
140   public:
141     UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
142                        const RegionCodeGenTy &UntiedCodeGen)
143         : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
144     void Enter(CodeGenFunction &CGF) override {
145       if (Untied) {
146         // Emit task switching point.
147         auto PartIdLVal = CGF.EmitLoadOfPointerLValue(
148             CGF.GetAddrOfLocalVar(PartIDVar),
149             PartIDVar->getType()->castAs<PointerType>());
150         auto *Res = CGF.EmitLoadOfScalar(PartIdLVal, SourceLocation());
151         auto *DoneBB = CGF.createBasicBlock(".untied.done.");
152         UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
153         CGF.EmitBlock(DoneBB);
154         CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
155         CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
156         UntiedSwitch->addCase(CGF.Builder.getInt32(0),
157                               CGF.Builder.GetInsertBlock());
158         emitUntiedSwitch(CGF);
159       }
160     }
161     void emitUntiedSwitch(CodeGenFunction &CGF) const {
162       if (Untied) {
163         auto PartIdLVal = CGF.EmitLoadOfPointerLValue(
164             CGF.GetAddrOfLocalVar(PartIDVar),
165             PartIDVar->getType()->castAs<PointerType>());
166         CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
167                               PartIdLVal);
168         UntiedCodeGen(CGF);
169         CodeGenFunction::JumpDest CurPoint =
170             CGF.getJumpDestInCurrentScope(".untied.next.");
171         CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
172         CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
173         UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
174                               CGF.Builder.GetInsertBlock());
175         CGF.EmitBranchThroughCleanup(CurPoint);
176         CGF.EmitBlock(CurPoint.getBlock());
177       }
178     }
179     unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
180   };
181   CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
182                                  const VarDecl *ThreadIDVar,
183                                  const RegionCodeGenTy &CodeGen,
184                                  OpenMPDirectiveKind Kind, bool HasCancel,
185                                  const UntiedTaskActionTy &Action)
186       : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
187         ThreadIDVar(ThreadIDVar), Action(Action) {
188     assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
189   }
190
191   /// \brief Get a variable or parameter for storing global thread id
192   /// inside OpenMP construct.
193   const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
194
195   /// \brief Get an LValue for the current ThreadID variable.
196   LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
197
198   /// \brief Get the name of the capture helper.
199   StringRef getHelperName() const override { return ".omp_outlined."; }
200
201   void emitUntiedSwitch(CodeGenFunction &CGF) override {
202     Action.emitUntiedSwitch(CGF);
203   }
204
205   static bool classof(const CGCapturedStmtInfo *Info) {
206     return CGOpenMPRegionInfo::classof(Info) &&
207            cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
208                TaskOutlinedRegion;
209   }
210
211 private:
212   /// \brief A variable or parameter storing global thread id for OpenMP
213   /// constructs.
214   const VarDecl *ThreadIDVar;
215   /// Action for emitting code for untied tasks.
216   const UntiedTaskActionTy &Action;
217 };
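// Editor's note (illustrative, not part of the original source): the untied
// machinery above exists because an untied task may be resumed by a different
// thread after every task scheduling point, e.g.
//
//   #pragma omp task untied
//   {
//     step1();
//     #pragma omp taskyield   // scheduling point: the task may stop here
//     step2();                // ...and be resumed later, possibly elsewhere
//   }
//
// Each scheduling point adds a case to UntiedSwitch, keyed by the part id
// stored through PartIDVar, so the outlined task function can jump back to
// the point where execution previously stopped.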
218
219 /// \brief API for inlined captured statement code generation in OpenMP
220 /// constructs.
221 class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
222 public:
223   CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
224                             const RegionCodeGenTy &CodeGen,
225                             OpenMPDirectiveKind Kind, bool HasCancel)
226       : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
227         OldCSI(OldCSI),
228         OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
229
230   /// \brief Retrieve the value of the context parameter.
231   llvm::Value *getContextValue() const override {
232     if (OuterRegionInfo)
233       return OuterRegionInfo->getContextValue();
234     llvm_unreachable("No context value for inlined OpenMP region");
235   }
236
237   void setContextValue(llvm::Value *V) override {
238     if (OuterRegionInfo) {
239       OuterRegionInfo->setContextValue(V);
240       return;
241     }
242     llvm_unreachable("No context value for inlined OpenMP region");
243   }
244
245   /// \brief Lookup the captured field decl for a variable.
246   const FieldDecl *lookup(const VarDecl *VD) const override {
247     if (OuterRegionInfo)
248       return OuterRegionInfo->lookup(VD);
249     // If there is no outer outlined region, there is no need to look the
250     // variable up in the list of captured variables; we can use the original one.
251     return nullptr;
252   }
253
254   FieldDecl *getThisFieldDecl() const override {
255     if (OuterRegionInfo)
256       return OuterRegionInfo->getThisFieldDecl();
257     return nullptr;
258   }
259
260   /// \brief Get a variable or parameter for storing global thread id
261   /// inside OpenMP construct.
262   const VarDecl *getThreadIDVariable() const override {
263     if (OuterRegionInfo)
264       return OuterRegionInfo->getThreadIDVariable();
265     return nullptr;
266   }
267
268   /// \brief Get an LValue for the current ThreadID variable.
269   LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
270     if (OuterRegionInfo)
271       return OuterRegionInfo->getThreadIDVariableLValue(CGF);
272     llvm_unreachable("No LValue for inlined OpenMP construct");
273   }
274
275   /// \brief Get the name of the capture helper.
276   StringRef getHelperName() const override {
277     if (auto *OuterRegionInfo = getOldCSI())
278       return OuterRegionInfo->getHelperName();
279     llvm_unreachable("No helper name for inlined OpenMP construct");
280   }
281
282   void emitUntiedSwitch(CodeGenFunction &CGF) override {
283     if (OuterRegionInfo)
284       OuterRegionInfo->emitUntiedSwitch(CGF);
285   }
286
287   CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
288
289   static bool classof(const CGCapturedStmtInfo *Info) {
290     return CGOpenMPRegionInfo::classof(Info) &&
291            cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
292   }
293
294   ~CGOpenMPInlinedRegionInfo() override = default;
295
296 private:
297   /// \brief CodeGen info about outer OpenMP region.
298   CodeGenFunction::CGCapturedStmtInfo *OldCSI;
299   CGOpenMPRegionInfo *OuterRegionInfo;
300 };
301
302 /// \brief API for captured statement code generation in OpenMP target
303 /// constructs. For these captures, implicit parameters are used instead of the
304 /// captured fields. The name of the target region has to be unique in a given
305 /// application, so it is provided by the client, because only the client has
306 /// the information needed to generate it.
307 class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
308 public:
309   CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
310                            const RegionCodeGenTy &CodeGen, StringRef HelperName)
311       : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
312                            /*HasCancel=*/false),
313         HelperName(HelperName) {}
314
315   /// \brief This is unused for target regions because each starts executing
316   /// with a single thread.
317   const VarDecl *getThreadIDVariable() const override { return nullptr; }
318
319   /// \brief Get the name of the capture helper.
320   StringRef getHelperName() const override { return HelperName; }
321
322   static bool classof(const CGCapturedStmtInfo *Info) {
323     return CGOpenMPRegionInfo::classof(Info) &&
324            cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
325   }
326
327 private:
328   StringRef HelperName;
329 };
330
331 static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
332   llvm_unreachable("No codegen for expressions");
333 }
334 /// \brief API for generation of expressions captured in an innermost OpenMP
335 /// region.
336 class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
337 public:
338   CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
339       : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
340                                   OMPD_unknown,
341                                   /*HasCancel=*/false),
342         PrivScope(CGF) {
343     // Make sure the globals captured in the provided statement are local by
344     // using the privatization logic. We assume the same variable is not
345     // captured more than once.
346     for (auto &C : CS.captures()) {
347       if (!C.capturesVariable() && !C.capturesVariableByCopy())
348         continue;
349
350       const VarDecl *VD = C.getCapturedVar();
351       if (VD->isLocalVarDeclOrParm())
352         continue;
353
354       DeclRefExpr DRE(const_cast<VarDecl *>(VD),
355                       /*RefersToEnclosingVariableOrCapture=*/false,
356                       VD->getType().getNonReferenceType(), VK_LValue,
357                       SourceLocation());
358       PrivScope.addPrivate(VD, [&CGF, &DRE]() -> Address {
359         return CGF.EmitLValue(&DRE).getAddress();
360       });
361     }
362     (void)PrivScope.Privatize();
363   }
364
365   /// \brief Lookup the captured field decl for a variable.
366   const FieldDecl *lookup(const VarDecl *VD) const override {
367     if (auto *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
368       return FD;
369     return nullptr;
370   }
371
372   /// \brief Emit the captured statement body.
373   void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
374     llvm_unreachable("No body for expressions");
375   }
376
377   /// \brief Get a variable or parameter for storing global thread id
378   /// inside OpenMP construct.
379   const VarDecl *getThreadIDVariable() const override {
380     llvm_unreachable("No thread id for expressions");
381   }
382
383   /// \brief Get the name of the capture helper.
384   StringRef getHelperName() const override {
385     llvm_unreachable("No helper name for expressions");
386   }
387
388   static bool classof(const CGCapturedStmtInfo *Info) { return false; }
389
390 private:
391   /// Private scope to capture global variables.
392   CodeGenFunction::OMPPrivateScope PrivScope;
393 };
394
395 /// \brief RAII for emitting code of OpenMP constructs.
396 class InlinedOpenMPRegionRAII {
397   CodeGenFunction &CGF;
398   llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
399   FieldDecl *LambdaThisCaptureField = nullptr;
400
401 public:
402   /// \brief Constructs a region for combined constructs.
403   /// \param CodeGen Code generation sequence for combined directives. Includes
404   /// a list of functions used for code generation of implicitly inlined
405   /// regions.
406   InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
407                           OpenMPDirectiveKind Kind, bool HasCancel)
408       : CGF(CGF) {
409     // Start emission for the construct.
410     CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
411         CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
412     std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
413     LambdaThisCaptureField = CGF.LambdaThisCaptureField;
414     CGF.LambdaThisCaptureField = nullptr;
415   }
416
417   ~InlinedOpenMPRegionRAII() {
418     // Restore original CapturedStmtInfo only if we're done with code emission.
419     auto *OldCSI =
420         cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
421     delete CGF.CapturedStmtInfo;
422     CGF.CapturedStmtInfo = OldCSI;
423     std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
424     CGF.LambdaThisCaptureField = LambdaThisCaptureField;
425   }
426 };
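// Editor's sketch of the intended usage pattern (an assumption inferred from
// the class above, not a verbatim excerpt from this file):
//
//   {
//     InlinedOpenMPRegionRAII Region(CGF, CodeGen, OMPD_for,
//                                    /*HasCancel=*/false);
//     CGF.CapturedStmtInfo->EmitBody(CGF, Body);
//   } // the destructor restores the previous CapturedStmtInfo and the saved
//     // lambda capture fields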
427
428 /// \brief Values for bit flags used in the ident_t to describe the fields.
429 /// All enumerator elements are named and described in accordance with the code
430 /// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
431 enum OpenMPLocationFlags : unsigned {
432   /// \brief Use trampoline for internal microtask.
433   OMP_IDENT_IMD = 0x01,
434   /// \brief Use c-style ident structure.
435   OMP_IDENT_KMPC = 0x02,
436   /// \brief Atomic reduction option for kmpc_reduce.
437   OMP_ATOMIC_REDUCE = 0x10,
438   /// \brief Explicit 'barrier' directive.
439   OMP_IDENT_BARRIER_EXPL = 0x20,
440   /// \brief Implicit barrier in code.
441   OMP_IDENT_BARRIER_IMPL = 0x40,
442   /// \brief Implicit barrier in 'for' directive.
443   OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
444   /// \brief Implicit barrier in 'sections' directive.
445   OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
446   /// \brief Implicit barrier in 'single' directive.
447   OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
448   /// Call of __kmp_for_static_init for static loop.
449   OMP_IDENT_WORK_LOOP = 0x200,
450   /// Call of __kmp_for_static_init for sections.
451   OMP_IDENT_WORK_SECTIONS = 0x400,
452   /// Call of __kmp_for_static_init for distribute.
453   OMP_IDENT_WORK_DISTRIBUTE = 0x800,
454   LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
455 };
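// Editor's note (hedged illustration, not part of the original source): these
// flags are OR'ed together into the 'flags' field of the ident_t handed to the
// runtime. For example, the location used for the implicit barrier at the end
// of a worksharing 'for' would carry roughly
//
//   unsigned Flags = OMP_IDENT_KMPC | OMP_IDENT_BARRIER_IMPL_FOR;
//
// whereas an explicit '#pragma omp barrier' would use OMP_IDENT_BARRIER_EXPL
// instead of one of the implicit-barrier values.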
456
457 /// \brief Describes the ident_t structure that encodes a source location.
458 /// All descriptions are taken from
459 /// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
460 /// Original structure:
461 /// typedef struct ident {
462 ///    kmp_int32 reserved_1;   /**<  might be used in Fortran;
463 ///                                  see above  */
464 ///    kmp_int32 flags;        /**<  also f.flags; KMP_IDENT_xxx flags;
465 ///                                  KMP_IDENT_KMPC identifies this union
466 ///                                  member  */
467 ///    kmp_int32 reserved_2;   /**<  not really used in Fortran any more;
468 ///                                  see above */
469 ///#if USE_ITT_BUILD
470 ///                            /*  but currently used for storing
471 ///                                region-specific ITT */
472 ///                            /*  contextual information. */
473 ///#endif /* USE_ITT_BUILD */
474 ///    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for
475 ///                                 C++  */
476 ///    char const *psource;    /**< String describing the source location.
477 ///                            The string is composed of semi-colon separated
478 ///                            fields which describe the source file,
479 ///                            the function and a pair of line numbers that
480 ///                            delimit the construct.
481 ///                             */
482 /// } ident_t;
483 enum IdentFieldIndex {
484   /// \brief might be used in Fortran
485   IdentField_Reserved_1,
486   /// \brief OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
487   IdentField_Flags,
488   /// \brief Not really used in Fortran any more
489   IdentField_Reserved_2,
490   /// \brief Source[4] in Fortran, do not use for C++
491   IdentField_Reserved_3,
492   /// \brief String describing the source location. The string is composed of
493   /// semi-colon separated fields which describe the source file, the function
494   /// and a pair of line numbers that delimit the construct.
495   IdentField_PSource
496 };
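// Editor's note (illustrative assumption): the psource field holds a
// semicolon-separated location string. With debug info it looks roughly like
//
//   ";<source-file>;<function-name>;<line>;<column>;;"
//
// and when no location information is available the runtime receives a
// default ident whose psource is ";unknown;unknown;0;0;;".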
497
498 /// \brief Schedule types for 'omp for' loops (these enumerators are taken from
499 /// the enum sched_type in kmp.h).
500 enum OpenMPSchedType {
501   /// \brief Lower bound for default (unordered) versions.
502   OMP_sch_lower = 32,
503   OMP_sch_static_chunked = 33,
504   OMP_sch_static = 34,
505   OMP_sch_dynamic_chunked = 35,
506   OMP_sch_guided_chunked = 36,
507   OMP_sch_runtime = 37,
508   OMP_sch_auto = 38,
509   /// static with chunk adjustment (e.g., simd)
510   OMP_sch_static_balanced_chunked = 45,
511   /// \brief Lower bound for 'ordered' versions.
512   OMP_ord_lower = 64,
513   OMP_ord_static_chunked = 65,
514   OMP_ord_static = 66,
515   OMP_ord_dynamic_chunked = 67,
516   OMP_ord_guided_chunked = 68,
517   OMP_ord_runtime = 69,
518   OMP_ord_auto = 70,
519   OMP_sch_default = OMP_sch_static,
520   /// \brief dist_schedule types
521   OMP_dist_sch_static_chunked = 91,
522   OMP_dist_sch_static = 92,
523   /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
524   /// Set if the monotonic schedule modifier was present.
525   OMP_sch_modifier_monotonic = (1 << 29),
526   /// Set if the nonmonotonic schedule modifier was present.
527   OMP_sch_modifier_nonmonotonic = (1 << 30),
528 };
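// Editor's sketch (assumption, not part of the original source): how common
// schedule clauses correspond to these enumerators.
//
//   #pragma omp for schedule(static)              -> OMP_sch_static
//   #pragma omp for schedule(static, 16)          -> OMP_sch_static_chunked
//   #pragma omp for schedule(dynamic, 4)          -> OMP_sch_dynamic_chunked
//   #pragma omp for ordered schedule(runtime)     -> OMP_ord_runtime
//   #pragma omp distribute dist_schedule(static)  -> OMP_dist_sch_static
//
// The OpenMP 4.5 monotonic/nonmonotonic modifiers are encoded by OR'ing
// OMP_sch_modifier_monotonic or OMP_sch_modifier_nonmonotonic into the value.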
529
530 enum OpenMPRTLFunction {
531   /// \brief Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
532   /// kmpc_micro microtask, ...);
533   OMPRTL__kmpc_fork_call,
534   /// \brief Call to void *__kmpc_threadprivate_cached(ident_t *loc,
535   /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
536   OMPRTL__kmpc_threadprivate_cached,
537   /// \brief Call to void __kmpc_threadprivate_register( ident_t *,
538   /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
539   OMPRTL__kmpc_threadprivate_register,
540   // Call to kmp_int32 __kmpc_global_thread_num(ident_t *loc);
541   OMPRTL__kmpc_global_thread_num,
542   // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
543   // kmp_critical_name *crit);
544   OMPRTL__kmpc_critical,
545   // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
546   // global_tid, kmp_critical_name *crit, uintptr_t hint);
547   OMPRTL__kmpc_critical_with_hint,
548   // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
549   // kmp_critical_name *crit);
550   OMPRTL__kmpc_end_critical,
551   // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
552   // global_tid);
553   OMPRTL__kmpc_cancel_barrier,
554   // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
555   OMPRTL__kmpc_barrier,
556   // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
557   OMPRTL__kmpc_for_static_fini,
558   // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
559   // global_tid);
560   OMPRTL__kmpc_serialized_parallel,
561   // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
562   // global_tid);
563   OMPRTL__kmpc_end_serialized_parallel,
564   // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
565   // kmp_int32 num_threads);
566   OMPRTL__kmpc_push_num_threads,
567   // Call to void __kmpc_flush(ident_t *loc);
568   OMPRTL__kmpc_flush,
569   // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
570   OMPRTL__kmpc_master,
571   // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
572   OMPRTL__kmpc_end_master,
573   // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
574   // int end_part);
575   OMPRTL__kmpc_omp_taskyield,
576   // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
577   OMPRTL__kmpc_single,
578   // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
579   OMPRTL__kmpc_end_single,
580   // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
581   // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
582   // kmp_routine_entry_t *task_entry);
583   OMPRTL__kmpc_omp_task_alloc,
584   // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
585   // new_task);
586   OMPRTL__kmpc_omp_task,
587   // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
588   // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
589   // kmp_int32 didit);
590   OMPRTL__kmpc_copyprivate,
591   // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
592   // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
593   // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
594   OMPRTL__kmpc_reduce,
595   // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
596   // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
597   // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
598   // *lck);
599   OMPRTL__kmpc_reduce_nowait,
600   // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
601   // kmp_critical_name *lck);
602   OMPRTL__kmpc_end_reduce,
603   // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
604   // kmp_critical_name *lck);
605   OMPRTL__kmpc_end_reduce_nowait,
606   // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
607   // kmp_task_t * new_task);
608   OMPRTL__kmpc_omp_task_begin_if0,
609   // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
610   // kmp_task_t * new_task);
611   OMPRTL__kmpc_omp_task_complete_if0,
612   // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
613   OMPRTL__kmpc_ordered,
614   // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
615   OMPRTL__kmpc_end_ordered,
616   // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
617   // global_tid);
618   OMPRTL__kmpc_omp_taskwait,
619   // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
620   OMPRTL__kmpc_taskgroup,
621   // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
622   OMPRTL__kmpc_end_taskgroup,
623   // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
624   // int proc_bind);
625   OMPRTL__kmpc_push_proc_bind,
626   // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
627   // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
628   // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
629   OMPRTL__kmpc_omp_task_with_deps,
630   // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
631   // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
632   // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
633   OMPRTL__kmpc_omp_wait_deps,
634   // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
635   // global_tid, kmp_int32 cncl_kind);
636   OMPRTL__kmpc_cancellationpoint,
637   // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
638   // kmp_int32 cncl_kind);
639   OMPRTL__kmpc_cancel,
640   // Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
641   // kmp_int32 num_teams, kmp_int32 thread_limit);
642   OMPRTL__kmpc_push_num_teams,
643   // Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
644   // microtask, ...);
645   OMPRTL__kmpc_fork_teams,
646   // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
647   // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
648   // sched, kmp_uint64 grainsize, void *task_dup);
649   OMPRTL__kmpc_taskloop,
650   // Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
651   // num_dims, struct kmp_dim *dims);
652   OMPRTL__kmpc_doacross_init,
653   // Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
654   OMPRTL__kmpc_doacross_fini,
655   // Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
656   // *vec);
657   OMPRTL__kmpc_doacross_post,
658   // Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
659   // *vec);
660   OMPRTL__kmpc_doacross_wait,
661   // Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
662   // *data);
663   OMPRTL__kmpc_task_reduction_init,
664   // Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
665   // *d);
666   OMPRTL__kmpc_task_reduction_get_th_data,
667
668   //
669   // Offloading related calls
670   //
671   // Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
672   // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
673   // *arg_types);
674   OMPRTL__tgt_target,
675   // Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
676   // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
677   // *arg_types);
678   OMPRTL__tgt_target_nowait,
679   // Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
680   // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
681   // *arg_types, int32_t num_teams, int32_t thread_limit);
682   OMPRTL__tgt_target_teams,
683   // Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
684   // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
685   // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
686   OMPRTL__tgt_target_teams_nowait,
687   // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
688   OMPRTL__tgt_register_lib,
689   // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
690   OMPRTL__tgt_unregister_lib,
691   // Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
692   // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
693   OMPRTL__tgt_target_data_begin,
694   // Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
695   // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
696   // *arg_types);
697   OMPRTL__tgt_target_data_begin_nowait,
698   // Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
699   // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
700   OMPRTL__tgt_target_data_end,
701   // Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
702   // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
703   // *arg_types);
704   OMPRTL__tgt_target_data_end_nowait,
705   // Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
706   // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
707   OMPRTL__tgt_target_data_update,
708   // Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
709   // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
710   // *arg_types);
711   OMPRTL__tgt_target_data_update_nowait,
712 };
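// Editor's illustration (an assumption about the generated code, not a
// verbatim excerpt): a simple parallel region such as
//
//   #pragma omp parallel
//   { body(); }
//
// is lowered into an outlined ".omp_outlined." microtask plus a call of
// roughly the form
//
//   __kmpc_fork_call(&loc, /*argc=*/N, (kmpc_micro).omp_outlined., <captures>);
//
// while a parallel region whose 'if' clause evaluates to false falls back to
// the __kmpc_serialized_parallel / __kmpc_end_serialized_parallel pair.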
713
714 /// A basic class for pre/post actions used in advanced codegen sequences for
715 /// OpenMP regions.
716 class CleanupTy final : public EHScopeStack::Cleanup {
717   PrePostActionTy *Action;
718
719 public:
720   explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
721   void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
722     if (!CGF.HaveInsertPoint())
723       return;
724     Action->Exit(CGF);
725   }
726 };
727
728 } // anonymous namespace
729
730 void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
731   CodeGenFunction::RunCleanupsScope Scope(CGF);
732   if (PrePostAction) {
733     CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
734     Callback(CodeGen, CGF, *PrePostAction);
735   } else {
736     PrePostActionTy Action;
737     Callback(CodeGen, CGF, Action);
738   }
739 }
740
741 /// Check if the combiner is a call to a UDR combiner and, if so, return the
742 /// UDR decl used for the reduction.
743 static const OMPDeclareReductionDecl *
744 getReductionInit(const Expr *ReductionOp) {
745   if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
746     if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
747       if (auto *DRE =
748               dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
749         if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
750           return DRD;
751   return nullptr;
752 }
753
754 static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
755                                              const OMPDeclareReductionDecl *DRD,
756                                              const Expr *InitOp,
757                                              Address Private, Address Original,
758                                              QualType Ty) {
759   if (DRD->getInitializer()) {
760     std::pair<llvm::Function *, llvm::Function *> Reduction =
761         CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
762     auto *CE = cast<CallExpr>(InitOp);
763     auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
764     const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
765     const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
766     auto *LHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
767     auto *RHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
768     CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
769     PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
770                             [=]() -> Address { return Private; });
771     PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
772                             [=]() -> Address { return Original; });
773     (void)PrivateScope.Privatize();
774     RValue Func = RValue::get(Reduction.second);
775     CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
776     CGF.EmitIgnoredExpr(InitOp);
777   } else {
778     llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
779     auto *GV = new llvm::GlobalVariable(
780         CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
781         llvm::GlobalValue::PrivateLinkage, Init, ".init");
782     LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
783     RValue InitRVal;
784     switch (CGF.getEvaluationKind(Ty)) {
785     case TEK_Scalar:
786       InitRVal = CGF.EmitLoadOfLValue(LV, SourceLocation());
787       break;
788     case TEK_Complex:
789       InitRVal =
790           RValue::getComplex(CGF.EmitLoadOfComplex(LV, SourceLocation()));
791       break;
792     case TEK_Aggregate:
793       InitRVal = RValue::getAggregate(LV.getAddress());
794       break;
795     }
796     OpaqueValueExpr OVE(SourceLocation(), Ty, VK_RValue);
797     CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
798     CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
799                          /*IsInitializer=*/false);
800   }
801 }
802
803 /// \brief Emit element-by-element initialization of an array of complex types.
804 /// \param DestAddr Address of the destination array.
805 /// \param Type Type of the array.
806 /// \param Init Initializer expression for each array element.
807 /// \param SrcAddr Address of the original (source) array.
808 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
809                                  QualType Type, bool EmitDeclareReductionInit,
810                                  const Expr *Init,
811                                  const OMPDeclareReductionDecl *DRD,
812                                  Address SrcAddr = Address::invalid()) {
813   // Perform element-by-element initialization.
814   QualType ElementTy;
815
816   // Drill down to the base element type on both arrays.
817   auto ArrayTy = Type->getAsArrayTypeUnsafe();
818   auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
819   DestAddr =
820       CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
821   if (DRD)
822     SrcAddr =
823         CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
824
825   llvm::Value *SrcBegin = nullptr;
826   if (DRD)
827     SrcBegin = SrcAddr.getPointer();
828   auto DestBegin = DestAddr.getPointer();
829   // Compute the address just past the last element of the destination array.
830   auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
831   // The basic structure here is a while-do loop.
832   auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
833   auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
834   auto IsEmpty =
835       CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
836   CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
837
838   // Enter the loop body, making that address the current address.
839   auto EntryBB = CGF.Builder.GetInsertBlock();
840   CGF.EmitBlock(BodyBB);
841
842   CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
843
844   llvm::PHINode *SrcElementPHI = nullptr;
845   Address SrcElementCurrent = Address::invalid();
846   if (DRD) {
847     SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
848                                           "omp.arraycpy.srcElementPast");
849     SrcElementPHI->addIncoming(SrcBegin, EntryBB);
850     SrcElementCurrent =
851         Address(SrcElementPHI,
852                 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
853   }
854   llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
855       DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
856   DestElementPHI->addIncoming(DestBegin, EntryBB);
857   Address DestElementCurrent =
858       Address(DestElementPHI,
859               DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
860
861   // Emit copy.
862   {
863     CodeGenFunction::RunCleanupsScope InitScope(CGF);
864     if (EmitDeclareReductionInit) {
865       emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
866                                        SrcElementCurrent, ElementTy);
867     } else
868       CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
869                            /*IsInitializer=*/false);
870   }
871
872   if (DRD) {
873     // Shift the address forward by one element.
874     auto SrcElementNext = CGF.Builder.CreateConstGEP1_32(
875         SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
876     SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
877   }
878
879   // Shift the address forward by one element.
880   auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
881       DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
882   // Check whether we've reached the end.
883   auto Done =
884       CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
885   CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
886   DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
887
888   // Done.
889   CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
890 }
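// Editor's sketch (assumption): the blocks emitted above correspond roughly to
// the following element-by-element loop, with the source pointer only present
// for declare-reduction initializers:
//
//   T *DestElem = DestBegin, *SrcElem = SrcBegin;
//   if (DestBegin != DestEnd) {
//     do {
//       init(*DestElem /*, *SrcElem */);
//       ++DestElem; /* ++SrcElem; */
//     } while (DestElem != DestEnd);
//   }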
891
892 LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
893   return CGF.EmitOMPSharedLValue(E);
894 }
895
896 LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
897                                             const Expr *E) {
898   if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
899     return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
900   return LValue();
901 }
902
903 void ReductionCodeGen::emitAggregateInitialization(
904     CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
905     const OMPDeclareReductionDecl *DRD) {
906   // Emit a VarDecl with copy init for arrays.
907   // Get the address of the original variable captured in the current
908   // captured region.
909   auto *PrivateVD =
910       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
911   bool EmitDeclareReductionInit =
912       DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
913   EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
914                        EmitDeclareReductionInit,
915                        EmitDeclareReductionInit ? ClausesData[N].ReductionOp
916                                                 : PrivateVD->getInit(),
917                        DRD, SharedLVal.getAddress());
918 }
919
920 ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
921                                    ArrayRef<const Expr *> Privates,
922                                    ArrayRef<const Expr *> ReductionOps) {
923   ClausesData.reserve(Shareds.size());
924   SharedAddresses.reserve(Shareds.size());
925   Sizes.reserve(Shareds.size());
926   BaseDecls.reserve(Shareds.size());
927   auto IPriv = Privates.begin();
928   auto IRed = ReductionOps.begin();
929   for (const auto *Ref : Shareds) {
930     ClausesData.emplace_back(Ref, *IPriv, *IRed);
931     std::advance(IPriv, 1);
932     std::advance(IRed, 1);
933   }
934 }
935
936 void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
937   assert(SharedAddresses.size() == N &&
938          "Number of generated lvalues must be exactly N.");
939   LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
940   LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
941   SharedAddresses.emplace_back(First, Second);
942 }
943
944 void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
945   auto *PrivateVD =
946       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
947   QualType PrivateType = PrivateVD->getType();
948   bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
949   if (!PrivateType->isVariablyModifiedType()) {
950     Sizes.emplace_back(
951         CGF.getTypeSize(
952             SharedAddresses[N].first.getType().getNonReferenceType()),
953         nullptr);
954     return;
955   }
956   llvm::Value *Size;
957   llvm::Value *SizeInChars;
958   llvm::Type *ElemType =
959       cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
960           ->getElementType();
961   auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
962   if (AsArraySection) {
963     Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(),
964                                      SharedAddresses[N].first.getPointer());
965     Size = CGF.Builder.CreateNUWAdd(
966         Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
967     SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
968   } else {
969     SizeInChars = CGF.getTypeSize(
970         SharedAddresses[N].first.getType().getNonReferenceType());
971     Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
972   }
973   Sizes.emplace_back(SizeInChars, Size);
974   CodeGenFunction::OpaqueValueMapping OpaqueMap(
975       CGF,
976       cast<OpaqueValueExpr>(
977           CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
978       RValue::get(Size));
979   CGF.EmitVariablyModifiedType(PrivateType);
980 }
981
982 void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
983                                          llvm::Value *Size) {
984   auto *PrivateVD =
985       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
986   QualType PrivateType = PrivateVD->getType();
987   if (!PrivateType->isVariablyModifiedType()) {
988     assert(!Size && !Sizes[N].second &&
989            "Size should be nullptr for non-variably modified reduction "
990            "items.");
991     return;
992   }
993   CodeGenFunction::OpaqueValueMapping OpaqueMap(
994       CGF,
995       cast<OpaqueValueExpr>(
996           CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
997       RValue::get(Size));
998   CGF.EmitVariablyModifiedType(PrivateType);
999 }
1000
1001 void ReductionCodeGen::emitInitialization(
1002     CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
1003     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
1004   assert(SharedAddresses.size() > N && "No variable was generated");
1005   auto *PrivateVD =
1006       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1007   auto *DRD = getReductionInit(ClausesData[N].ReductionOp);
1008   QualType PrivateType = PrivateVD->getType();
1009   PrivateAddr = CGF.Builder.CreateElementBitCast(
1010       PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1011   QualType SharedType = SharedAddresses[N].first.getType();
1012   SharedLVal = CGF.MakeAddrLValue(
1013       CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(),
1014                                        CGF.ConvertTypeForMem(SharedType)),
1015       SharedType, SharedAddresses[N].first.getBaseInfo(),
1016       CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
1017   if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
1018     emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
1019   } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1020     emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
1021                                      PrivateAddr, SharedLVal.getAddress(),
1022                                      SharedLVal.getType());
1023   } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
1024              !CGF.isTrivialInitializer(PrivateVD->getInit())) {
1025     CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
1026                          PrivateVD->getType().getQualifiers(),
1027                          /*IsInitializer=*/false);
1028   }
1029 }
1030
1031 bool ReductionCodeGen::needCleanups(unsigned N) {
1032   auto *PrivateVD =
1033       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1034   QualType PrivateType = PrivateVD->getType();
1035   QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1036   return DTorKind != QualType::DK_none;
1037 }
1038
1039 void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
1040                                     Address PrivateAddr) {
1041   auto *PrivateVD =
1042       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1043   QualType PrivateType = PrivateVD->getType();
1044   QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1045   if (needCleanups(N)) {
1046     PrivateAddr = CGF.Builder.CreateElementBitCast(
1047         PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1048     CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
1049   }
1050 }
1051
1052 static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1053                           LValue BaseLV) {
1054   BaseTy = BaseTy.getNonReferenceType();
1055   while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1056          !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1057     if (auto *PtrTy = BaseTy->getAs<PointerType>())
1058       BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
1059     else {
1060       LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
1061       BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
1062     }
1063     BaseTy = BaseTy->getPointeeType();
1064   }
1065   return CGF.MakeAddrLValue(
1066       CGF.Builder.CreateElementBitCast(BaseLV.getAddress(),
1067                                        CGF.ConvertTypeForMem(ElTy)),
1068       BaseLV.getType(), BaseLV.getBaseInfo(),
1069       CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
1070 }
1071
1072 static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1073                           llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
1074                           llvm::Value *Addr) {
1075   Address Tmp = Address::invalid();
1076   Address TopTmp = Address::invalid();
1077   Address MostTopTmp = Address::invalid();
1078   BaseTy = BaseTy.getNonReferenceType();
1079   while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1080          !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1081     Tmp = CGF.CreateMemTemp(BaseTy);
1082     if (TopTmp.isValid())
1083       CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
1084     else
1085       MostTopTmp = Tmp;
1086     TopTmp = Tmp;
1087     BaseTy = BaseTy->getPointeeType();
1088   }
1089   llvm::Type *Ty = BaseLVType;
1090   if (Tmp.isValid())
1091     Ty = Tmp.getElementType();
1092   Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
1093   if (Tmp.isValid()) {
1094     CGF.Builder.CreateStore(Addr, Tmp);
1095     return MostTopTmp;
1096   }
1097   return Address(Addr, BaseLVAlignment);
1098 }
1099
1100 Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
1101                                                Address PrivateAddr) {
1102   const DeclRefExpr *DE;
1103   const VarDecl *OrigVD = nullptr;
1104   if (auto *OASE = dyn_cast<OMPArraySectionExpr>(ClausesData[N].Ref)) {
1105     auto *Base = OASE->getBase()->IgnoreParenImpCasts();
1106     while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
1107       Base = TempOASE->getBase()->IgnoreParenImpCasts();
1108     while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1109       Base = TempASE->getBase()->IgnoreParenImpCasts();
1110     DE = cast<DeclRefExpr>(Base);
1111     OrigVD = cast<VarDecl>(DE->getDecl());
1112   } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(ClausesData[N].Ref)) {
1113     auto *Base = ASE->getBase()->IgnoreParenImpCasts();
1114     while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1115       Base = TempASE->getBase()->IgnoreParenImpCasts();
1116     DE = cast<DeclRefExpr>(Base);
1117     OrigVD = cast<VarDecl>(DE->getDecl());
1118   }
1119   if (OrigVD) {
1120     BaseDecls.emplace_back(OrigVD);
1121     auto OriginalBaseLValue = CGF.EmitLValue(DE);
1122     LValue BaseLValue =
1123         loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
1124                     OriginalBaseLValue);
1125     llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
1126         BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
1127     llvm::Value *PrivatePointer =
1128         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1129             PrivateAddr.getPointer(),
1130             SharedAddresses[N].first.getAddress().getType());
1131     llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
1132     return castToBase(CGF, OrigVD->getType(),
1133                       SharedAddresses[N].first.getType(),
1134                       OriginalBaseLValue.getAddress().getType(),
1135                       OriginalBaseLValue.getAlignment(), Ptr);
1136   }
1137   BaseDecls.emplace_back(
1138       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
1139   return PrivateAddr;
1140 }
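// Editor's note (illustrative assumption): the adjustment above matters when a
// reduction is declared over an array section that does not start at the base
// of the original variable, e.g.
//
//   int a[100];
//   #pragma omp parallel for reduction(+ : a[10:50])
//
// The private buffer only covers a[10]..a[59], so the private pointer is
// shifted back by the distance from &a[10] to &a[0]; the resulting pointer can
// then stand in for 'a' itself inside the region.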
1141
1142 bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
1143   auto *DRD = getReductionInit(ClausesData[N].ReductionOp);
1144   return DRD && DRD->getInitializer();
1145 }
1146
1147 LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
1148   return CGF.EmitLoadOfPointerLValue(
1149       CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1150       getThreadIDVariable()->getType()->castAs<PointerType>());
1151 }
1152
1153 void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
1154   if (!CGF.HaveInsertPoint())
1155     return;
1156   // 1.2.2 OpenMP Language Terminology
1157   // Structured block - An executable statement with a single entry at the
1158   // top and a single exit at the bottom.
1159   // The point of exit cannot be a branch out of the structured block.
1160   // longjmp() and throw() must not violate the entry/exit criteria.
1161   CGF.EHStack.pushTerminate();
1162   CodeGen(CGF);
1163   CGF.EHStack.popTerminate();
1164 }
1165
1166 LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1167     CodeGenFunction &CGF) {
1168   return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1169                             getThreadIDVariable()->getType(),
1170                             AlignmentSource::Decl);
1171 }
1172
1173 CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
1174     : CGM(CGM), OffloadEntriesInfoManager(CGM) {
1175   IdentTy = llvm::StructType::create(
1176       "ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */,
1177       CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */,
1178       CGM.Int8PtrTy /* psource */);
1179   KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
1180
1181   loadOffloadInfoMetadata();
1182 }
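// Editor's note (assumption about the resulting IR, for illustration): the
// ident_t type created above appears in the generated module roughly as
//
//   %ident_t = type { i32, i32, i32, i32, i8* }
//
// i.e. four 32-bit fields followed by the psource string pointer, matching the
// IdentFieldIndex layout documented earlier in this file.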
1183
1184 void CGOpenMPRuntime::clear() {
1185   InternalVars.clear();
1186 }
1187
1188 static llvm::Function *
1189 emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
1190                           const Expr *CombinerInitializer, const VarDecl *In,
1191                           const VarDecl *Out, bool IsCombiner) {
1192   // void .omp_combiner.(Ty *in, Ty *out);
1193   auto &C = CGM.getContext();
1194   QualType PtrTy = C.getPointerType(Ty).withRestrict();
1195   FunctionArgList Args;
1196   ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
1197                                /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1198   ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
1199                               /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1200   Args.push_back(&OmpOutParm);
1201   Args.push_back(&OmpInParm);
1202   auto &FnInfo =
1203       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1204   auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
1205   auto *Fn = llvm::Function::Create(
1206       FnTy, llvm::GlobalValue::InternalLinkage,
1207       IsCombiner ? ".omp_combiner." : ".omp_initializer.", &CGM.getModule());
1208   CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
1209   Fn->removeFnAttr(llvm::Attribute::NoInline);
1210   Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
1211   Fn->addFnAttr(llvm::Attribute::AlwaysInline);
1212   CodeGenFunction CGF(CGM);
1213   // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
1214   // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
1215   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args);
1216   CodeGenFunction::OMPPrivateScope Scope(CGF);
1217   Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
1218   Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() -> Address {
1219     return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
1220         .getAddress();
1221   });
1222   Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
1223   Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() -> Address {
1224     return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
1225         .getAddress();
1226   });
1227   (void)Scope.Privatize();
1228   if (!IsCombiner && Out->hasInit() &&
1229       !CGF.isTrivialInitializer(Out->getInit())) {
1230     CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
1231                          Out->getType().getQualifiers(),
1232                          /*IsInitializer=*/true);
1233   }
1234   if (CombinerInitializer)
1235     CGF.EmitIgnoredExpr(CombinerInitializer);
1236   Scope.ForceCleanup();
1237   CGF.FinishFunction();
1238   return Fn;
1239 }
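// Editor's illustration (an assumption, not part of the original file): a
// user-defined reduction such as
//
//   #pragma omp declare reduction(merge : T : omp_out = combine(omp_out, omp_in)) \
//       initializer(omp_priv = T())
//
// is turned by the helper above into two internal functions, '.omp_combiner.'
// (which evaluates the combiner with omp_in/omp_out mapped onto its pointer
// parameters) and '.omp_initializer.' (likewise for omp_priv/omp_orig); both
// are later retrieved through getUserDefinedReduction().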
1240
1241 void CGOpenMPRuntime::emitUserDefinedReduction(
1242     CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
1243   if (UDRMap.count(D) > 0)
1244     return;
1245   auto &C = CGM.getContext();
1246   if (!In || !Out) {
1247     In = &C.Idents.get("omp_in");
1248     Out = &C.Idents.get("omp_out");
1249   }
1250   llvm::Function *Combiner = emitCombinerOrInitializer(
1251       CGM, D->getType(), D->getCombiner(), cast<VarDecl>(D->lookup(In).front()),
1252       cast<VarDecl>(D->lookup(Out).front()),
1253       /*IsCombiner=*/true);
1254   llvm::Function *Initializer = nullptr;
1255   if (auto *Init = D->getInitializer()) {
1256     if (!Priv || !Orig) {
1257       Priv = &C.Idents.get("omp_priv");
1258       Orig = &C.Idents.get("omp_orig");
1259     }
1260     Initializer = emitCombinerOrInitializer(
1261         CGM, D->getType(),
1262         D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
1263                                                                      : nullptr,
1264         cast<VarDecl>(D->lookup(Orig).front()),
1265         cast<VarDecl>(D->lookup(Priv).front()),
1266         /*IsCombiner=*/false);
1267   }
1268   UDRMap.insert(std::make_pair(D, std::make_pair(Combiner, Initializer)));
1269   if (CGF) {
1270     auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
1271     Decls.second.push_back(D);
1272   }
1273 }
1274
1275 std::pair<llvm::Function *, llvm::Function *>
1276 CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
1277   auto I = UDRMap.find(D);
1278   if (I != UDRMap.end())
1279     return I->second;
1280   emitUserDefinedReduction(/*CGF=*/nullptr, D);
1281   return UDRMap.lookup(D);
1282 }
1283
1284 // Layout information for ident_t.
1285 static CharUnits getIdentAlign(CodeGenModule &CGM) {
1286   return CGM.getPointerAlign();
1287 }
1288 static CharUnits getIdentSize(CodeGenModule &CGM) {
1289   assert((4 * CGM.getPointerSize()).isMultipleOf(CGM.getPointerAlign()));
1290   return CharUnits::fromQuantity(16) + CGM.getPointerSize();
1291 }
1292 static CharUnits getOffsetOfIdentField(IdentFieldIndex Field) {
1293   // All the fields except the last are i32, so this works beautifully.
1294   return unsigned(Field) * CharUnits::fromQuantity(4);
1295 }
1296 static Address createIdentFieldGEP(CodeGenFunction &CGF, Address Addr,
1297                                    IdentFieldIndex Field,
1298                                    const llvm::Twine &Name = "") {
1299   auto Offset = getOffsetOfIdentField(Field);
1300   return CGF.Builder.CreateStructGEP(Addr, Field, Offset, Name);
1301 }
1302
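// Outlines the body of a 'parallel' or 'teams' region into a helper with the
// kmpc_micro signature. For '#pragma omp parallel' the emitted function is
// roughly
//   void .omp_outlined.(kmp_int32 *.global_tid., kmp_int32 *.bound_tid.,
//                       <captured vars>...);
// which __kmpc_fork_call later invokes on every thread of the team.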
1303 static llvm::Value *emitParallelOrTeamsOutlinedFunction(
1304     CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
1305     const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
1306     const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
1307   assert(ThreadIDVar->getType()->isPointerType() &&
1308          "thread id variable must be of type kmp_int32 *");
1309   CodeGenFunction CGF(CGM, true);
1310   bool HasCancel = false;
1311   if (auto *OPD = dyn_cast<OMPParallelDirective>(&D))
1312     HasCancel = OPD->hasCancel();
1313   else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
1314     HasCancel = OPSD->hasCancel();
1315   else if (auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
1316     HasCancel = OPFD->hasCancel();
1317   else if (auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
1318     HasCancel = OPFD->hasCancel();
1319   else if (auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
1320     HasCancel = OPFD->hasCancel();
1321   else if (auto *OPFD = dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
1322     HasCancel = OPFD->hasCancel();
1323   else if (auto *OPFD =
1324                dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
1325     HasCancel = OPFD->hasCancel();
1326   CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
1327                                     HasCancel, OutlinedHelperName);
1328   CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1329   return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
1330 }
1331
1332 llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
1333     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1334     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1335   const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1336   return emitParallelOrTeamsOutlinedFunction(
1337       CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1338 }
1339
1340 llvm::Value *CGOpenMPRuntime::emitTeamsOutlinedFunction(
1341     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1342     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1343   const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1344   return emitParallelOrTeamsOutlinedFunction(
1345       CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1346 }
1347
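// Outlines the body of a task region. Unlike parallel regions, the thread id
// is passed by value (kmp_int32 rather than kmp_int32 *, see the assert
// below). For untied tasks, each scheduling point re-enqueues the task via
// roughly
//   call i32 @__kmpc_omp_task(%ident_t* <loc>, i32 %gtid, i8* <new_task>)
// as emitted by the UntiedCodeGen lambda.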
1348 llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
1349     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1350     const VarDecl *PartIDVar, const VarDecl *TaskTVar,
1351     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1352     bool Tied, unsigned &NumberOfParts) {
1353   auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
1354                                               PrePostActionTy &) {
1355     auto *ThreadID = getThreadID(CGF, D.getLocStart());
1356     auto *UpLoc = emitUpdateLocation(CGF, D.getLocStart());
1357     llvm::Value *TaskArgs[] = {
1358         UpLoc, ThreadID,
1359         CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
1360                                     TaskTVar->getType()->castAs<PointerType>())
1361             .getPointer()};
1362     CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
1363   };
1364   CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
1365                                                             UntiedCodeGen);
1366   CodeGen.setAction(Action);
1367   assert(!ThreadIDVar->getType()->isPointerType() &&
1368          "thread id variable must be of type kmp_int32 for tasks");
1369   auto *CS = cast<CapturedStmt>(D.getAssociatedStmt());
1370   auto *TD = dyn_cast<OMPTaskDirective>(&D);
1371   CodeGenFunction CGF(CGM, true);
1372   CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
1373                                         InnermostKind,
1374                                         TD ? TD->hasCancel() : false, Action);
1375   CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1376   auto *Res = CGF.GenerateCapturedStmtFunction(*CS);
1377   if (!Tied)
1378     NumberOfParts = Action.getNumberOfParts();
1379   return Res;
1380 }
1381
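// Lazily creates the module-level default ident_t for the given flags. The
// emitted IR is roughly
//   @.str = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00"
//   @<default.loc> = private unnamed_addr constant %ident_t
//                      { i32 0, i32 <Flags>, i32 0, i32 0, i8* @.str }
// where <default.loc> stands for the unnamed private global created below.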
1382 Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
1383   CharUnits Align = getIdentAlign(CGM);
1384   llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
1385   if (!Entry) {
1386     if (!DefaultOpenMPPSource) {
1387       // Initialize the default psource string used in the psource field of all
1388       // default ident_t objects. Format is ";file;function;line;column;;".
1389       // Taken from
1390       // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
1391       DefaultOpenMPPSource =
1392           CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
1393       DefaultOpenMPPSource =
1394           llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
1395     }
1396
1397     ConstantInitBuilder builder(CGM);
1398     auto fields = builder.beginStruct(IdentTy);
1399     fields.addInt(CGM.Int32Ty, 0);
1400     fields.addInt(CGM.Int32Ty, Flags);
1401     fields.addInt(CGM.Int32Ty, 0);
1402     fields.addInt(CGM.Int32Ty, 0);
1403     fields.add(DefaultOpenMPPSource);
1404     auto DefaultOpenMPLocation =
1405       fields.finishAndCreateGlobal("", Align, /*isConstant*/ true,
1406                                    llvm::GlobalValue::PrivateLinkage);
1407     DefaultOpenMPLocation->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1408
1409     OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
1410   }
1411   return Address(Entry, Align);
1412 }
1413
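// Builds the ident_t * argument describing the current source location. With
// debug info enabled, each function gets a private ".kmpc_loc.addr" alloca
// that is memcpy'd from the default location and whose psource field is then
// rewritten to roughly ";<file>;<function>;<line>;<column>;;". Without debug
// info the shared default location is returned instead.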
1414 llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
1415                                                  SourceLocation Loc,
1416                                                  unsigned Flags) {
1417   Flags |= OMP_IDENT_KMPC;
1418   // If no debug info is generated - return global default location.
1419   if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
1420       Loc.isInvalid())
1421     return getOrCreateDefaultLocation(Flags).getPointer();
1422
1423   assert(CGF.CurFn && "No function in current CodeGenFunction.");
1424
1425   Address LocValue = Address::invalid();
1426   auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1427   if (I != OpenMPLocThreadIDMap.end())
1428     LocValue = Address(I->second.DebugLoc, getIdentAlign(CGF.CGM));
1429
1430   // OpenMPLocThreadIDMap may have a null DebugLoc and a non-null ThreadID if
1431   // getThreadID was called before this routine.
1432   if (!LocValue.isValid()) {
1433     // Generate "ident_t .kmpc_loc.addr;"
1434     Address AI = CGF.CreateTempAlloca(IdentTy, getIdentAlign(CGF.CGM),
1435                                       ".kmpc_loc.addr");
1436     auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1437     Elem.second.DebugLoc = AI.getPointer();
1438     LocValue = AI;
1439
1440     CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1441     CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
1442     CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
1443                              CGM.getSize(getIdentSize(CGF.CGM)));
1444   }
1445
1446   // char **psource = &.kmpc_loc_<flags>.addr.psource;
1447   Address PSource = createIdentFieldGEP(CGF, LocValue, IdentField_PSource);
1448
1449   auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
1450   if (OMPDebugLoc == nullptr) {
1451     SmallString<128> Buffer2;
1452     llvm::raw_svector_ostream OS2(Buffer2);
1453     // Build debug location
1454     PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1455     OS2 << ";" << PLoc.getFilename() << ";";
1456     if (const FunctionDecl *FD =
1457             dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl)) {
1458       OS2 << FD->getQualifiedNameAsString();
1459     }
1460     OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
1461     OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
1462     OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
1463   }
1464   // *psource = ";<File>;<Function>;<Line>;<Column>;;";
1465   CGF.Builder.CreateStore(OMPDebugLoc, PSource);
1466
1467   // Our callers always pass this to a runtime function, so for
1468   // convenience, go ahead and return a naked pointer.
1469   return LocValue.getPointer();
1470 }
1471
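// Returns the OpenMP global thread id for the current function. Preference
// order: a value already cached for this function, then the thread id variable
// of an enclosing outlined region, and finally a call emitted at the function
// entry, roughly
//   %gtid = call i32 @__kmpc_global_thread_num(%ident_t* <loc>)
// whose result is cached in OpenMPLocThreadIDMap for subsequent uses.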
1472 llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
1473                                           SourceLocation Loc) {
1474   assert(CGF.CurFn && "No function in current CodeGenFunction.");
1475
1476   llvm::Value *ThreadID = nullptr;
1477   // Check whether we've already cached a load of the thread id in this
1478   // function.
1479   auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1480   if (I != OpenMPLocThreadIDMap.end()) {
1481     ThreadID = I->second.ThreadID;
1482     if (ThreadID != nullptr)
1483       return ThreadID;
1484   }
1485   // If exceptions are enabled, do not use the parameter, to avoid a crash.
1486   if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
1487       !CGF.getLangOpts().CXXExceptions ||
1488       CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1489     if (auto *OMPRegionInfo =
1490             dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
1491       if (OMPRegionInfo->getThreadIDVariable()) {
1492         // Check if this is an outlined function with a thread id argument.
1493         auto LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
1494         ThreadID = CGF.EmitLoadOfLValue(LVal, Loc).getScalarVal();
1495         // If the value was loaded in the entry block, cache it and use it
1496         // everywhere in the function.
1497         if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1498           auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1499           Elem.second.ThreadID = ThreadID;
1500         }
1501         return ThreadID;
1502       }
1503     }
1504   }
1505
1506   // This is not an outlined function region - need to call kmp_int32
1507   // __kmpc_global_thread_num(ident_t *loc).
1508   // Generate thread id value and cache this value for use across the
1509   // function.
1510   CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1511   CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
1512   auto *Call = CGF.Builder.CreateCall(
1513       createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
1514       emitUpdateLocation(CGF, Loc));
1515   Call->setCallingConv(CGF.getRuntimeCC());
1516   auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1517   Elem.second.ThreadID = Call;
1518   return Call;
1519 }
1520
1521 void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
1522   assert(CGF.CurFn && "No function in current CodeGenFunction.");
1523   if (OpenMPLocThreadIDMap.count(CGF.CurFn))
1524     OpenMPLocThreadIDMap.erase(CGF.CurFn);
1525   if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1526     for(auto *D : FunctionUDRMap[CGF.CurFn]) {
1527       UDRMap.erase(D);
1528     }
1529     FunctionUDRMap.erase(CGF.CurFn);
1530   }
1531 }
1532
1533 llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
1536   return llvm::PointerType::getUnqual(IdentTy);
1537 }
1538
1539 llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
1540   if (!Kmpc_MicroTy) {
1541     // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1542     llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1543                                  llvm::PointerType::getUnqual(CGM.Int32Ty)};
1544     Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1545   }
1546   return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1547 }
1548
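// Lazily declares the requested libomp/libomptarget entry point with a
// matching LLVM signature. For example, OMPRTL__kmpc_fork_call yields roughly
//   declare void @__kmpc_fork_call(%ident_t*, i32, void (i32*, i32*, ...)*, ...)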
1549 llvm::Constant *
1550 CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
1551   llvm::Constant *RTLFn = nullptr;
1552   switch (static_cast<OpenMPRTLFunction>(Function)) {
1553   case OMPRTL__kmpc_fork_call: {
1554     // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
1555     // microtask, ...);
1556     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1557                                 getKmpc_MicroPointerTy()};
1558     llvm::FunctionType *FnTy =
1559         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1560     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
1561     break;
1562   }
1563   case OMPRTL__kmpc_global_thread_num: {
1564     // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
1565     llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1566     llvm::FunctionType *FnTy =
1567         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1568     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
1569     break;
1570   }
1571   case OMPRTL__kmpc_threadprivate_cached: {
1572     // Build void *__kmpc_threadprivate_cached(ident_t *loc,
1573     // kmp_int32 global_tid, void *data, size_t size, void ***cache);
1574     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1575                                 CGM.VoidPtrTy, CGM.SizeTy,
1576                                 CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
1577     llvm::FunctionType *FnTy =
1578         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
1579     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
1580     break;
1581   }
1582   case OMPRTL__kmpc_critical: {
1583     // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
1584     // kmp_critical_name *crit);
1585     llvm::Type *TypeParams[] = {
1586         getIdentTyPointerTy(), CGM.Int32Ty,
1587         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1588     llvm::FunctionType *FnTy =
1589         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1590     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
1591     break;
1592   }
1593   case OMPRTL__kmpc_critical_with_hint: {
1594     // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
1595     // kmp_critical_name *crit, uintptr_t hint);
1596     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1597                                 llvm::PointerType::getUnqual(KmpCriticalNameTy),
1598                                 CGM.IntPtrTy};
1599     llvm::FunctionType *FnTy =
1600         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1601     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
1602     break;
1603   }
1604   case OMPRTL__kmpc_threadprivate_register: {
1605     // Build void __kmpc_threadprivate_register(ident_t *, void *data,
1606     // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
1607     // typedef void *(*kmpc_ctor)(void *);
1608     auto KmpcCtorTy =
1609         llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
1610                                 /*isVarArg*/ false)->getPointerTo();
1611     // typedef void *(*kmpc_cctor)(void *, void *);
1612     llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1613     auto KmpcCopyCtorTy =
1614         llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
1615                                 /*isVarArg*/ false)->getPointerTo();
1616     // typedef void (*kmpc_dtor)(void *);
1617     auto KmpcDtorTy =
1618         llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
1619             ->getPointerTo();
1620     llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
1621                               KmpcCopyCtorTy, KmpcDtorTy};
1622     auto FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
1623                                         /*isVarArg*/ false);
1624     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
1625     break;
1626   }
1627   case OMPRTL__kmpc_end_critical: {
1628     // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
1629     // kmp_critical_name *crit);
1630     llvm::Type *TypeParams[] = {
1631         getIdentTyPointerTy(), CGM.Int32Ty,
1632         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1633     llvm::FunctionType *FnTy =
1634         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1635     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
1636     break;
1637   }
1638   case OMPRTL__kmpc_cancel_barrier: {
1639     // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
1640     // global_tid);
1641     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1642     llvm::FunctionType *FnTy =
1643         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1644     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
1645     break;
1646   }
1647   case OMPRTL__kmpc_barrier: {
1648     // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1649     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1650     llvm::FunctionType *FnTy =
1651         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1652     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1653     break;
1654   }
1655   case OMPRTL__kmpc_for_static_fini: {
1656     // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
1657     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1658     llvm::FunctionType *FnTy =
1659         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1660     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
1661     break;
1662   }
1663   case OMPRTL__kmpc_push_num_threads: {
1664     // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
1665     // kmp_int32 num_threads)
1666     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1667                                 CGM.Int32Ty};
1668     llvm::FunctionType *FnTy =
1669         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1670     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
1671     break;
1672   }
1673   case OMPRTL__kmpc_serialized_parallel: {
1674     // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1675     // global_tid);
1676     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1677     llvm::FunctionType *FnTy =
1678         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1679     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1680     break;
1681   }
1682   case OMPRTL__kmpc_end_serialized_parallel: {
1683     // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1684     // global_tid);
1685     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1686     llvm::FunctionType *FnTy =
1687         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1688     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1689     break;
1690   }
1691   case OMPRTL__kmpc_flush: {
1692     // Build void __kmpc_flush(ident_t *loc);
1693     llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1694     llvm::FunctionType *FnTy =
1695         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1696     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
1697     break;
1698   }
1699   case OMPRTL__kmpc_master: {
1700     // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
1701     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1702     llvm::FunctionType *FnTy =
1703         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1704     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
1705     break;
1706   }
1707   case OMPRTL__kmpc_end_master: {
1708     // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
1709     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1710     llvm::FunctionType *FnTy =
1711         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1712     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
1713     break;
1714   }
1715   case OMPRTL__kmpc_omp_taskyield: {
1716     // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
1717     // int end_part);
1718     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1719     llvm::FunctionType *FnTy =
1720         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1721     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
1722     break;
1723   }
1724   case OMPRTL__kmpc_single: {
1725     // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
1726     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1727     llvm::FunctionType *FnTy =
1728         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1729     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
1730     break;
1731   }
1732   case OMPRTL__kmpc_end_single: {
1733     // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
1734     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1735     llvm::FunctionType *FnTy =
1736         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1737     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
1738     break;
1739   }
1740   case OMPRTL__kmpc_omp_task_alloc: {
1741     // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
1742     // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1743     // kmp_routine_entry_t *task_entry);
1744     assert(KmpRoutineEntryPtrTy != nullptr &&
1745            "Type kmp_routine_entry_t must be created.");
1746     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1747                                 CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
1748     // Return void * and then cast to particular kmp_task_t type.
1749     llvm::FunctionType *FnTy =
1750         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1751     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
1752     break;
1753   }
1754   case OMPRTL__kmpc_omp_task: {
1755     // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1756     // *new_task);
1757     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1758                                 CGM.VoidPtrTy};
1759     llvm::FunctionType *FnTy =
1760         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1761     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
1762     break;
1763   }
1764   case OMPRTL__kmpc_copyprivate: {
1765     // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
1766     // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
1767     // kmp_int32 didit);
1768     llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1769     auto *CpyFnTy =
1770         llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
1771     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
1772                                 CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
1773                                 CGM.Int32Ty};
1774     llvm::FunctionType *FnTy =
1775         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1776     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
1777     break;
1778   }
1779   case OMPRTL__kmpc_reduce: {
1780     // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
1781     // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
1782     // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
1783     llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1784     auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1785                                                /*isVarArg=*/false);
1786     llvm::Type *TypeParams[] = {
1787         getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1788         CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1789         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1790     llvm::FunctionType *FnTy =
1791         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1792     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
1793     break;
1794   }
1795   case OMPRTL__kmpc_reduce_nowait: {
1796     // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
1797     // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
1798     // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
1799     // *lck);
1800     llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1801     auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1802                                                /*isVarArg=*/false);
1803     llvm::Type *TypeParams[] = {
1804         getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1805         CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1806         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1807     llvm::FunctionType *FnTy =
1808         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1809     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
1810     break;
1811   }
1812   case OMPRTL__kmpc_end_reduce: {
1813     // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
1814     // kmp_critical_name *lck);
1815     llvm::Type *TypeParams[] = {
1816         getIdentTyPointerTy(), CGM.Int32Ty,
1817         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1818     llvm::FunctionType *FnTy =
1819         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1820     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
1821     break;
1822   }
1823   case OMPRTL__kmpc_end_reduce_nowait: {
1824     // Build void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
1825     // kmp_critical_name *lck);
1826     llvm::Type *TypeParams[] = {
1827         getIdentTyPointerTy(), CGM.Int32Ty,
1828         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1829     llvm::FunctionType *FnTy =
1830         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1831     RTLFn =
1832         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
1833     break;
1834   }
1835   case OMPRTL__kmpc_omp_task_begin_if0: {
1836     // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
1837     // kmp_task_t *new_task);
1838     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1839                                 CGM.VoidPtrTy};
1840     llvm::FunctionType *FnTy =
1841         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1842     RTLFn =
1843         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
1844     break;
1845   }
1846   case OMPRTL__kmpc_omp_task_complete_if0: {
1847     // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
1848     // kmp_task_t *new_task);
1849     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1850                                 CGM.VoidPtrTy};
1851     llvm::FunctionType *FnTy =
1852         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1853     RTLFn = CGM.CreateRuntimeFunction(FnTy,
1854                                       /*Name=*/"__kmpc_omp_task_complete_if0");
1855     break;
1856   }
1857   case OMPRTL__kmpc_ordered: {
1858     // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
1859     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1860     llvm::FunctionType *FnTy =
1861         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1862     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
1863     break;
1864   }
1865   case OMPRTL__kmpc_end_ordered: {
1866     // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
1867     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1868     llvm::FunctionType *FnTy =
1869         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1870     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
1871     break;
1872   }
1873   case OMPRTL__kmpc_omp_taskwait: {
1874     // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
1875     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1876     llvm::FunctionType *FnTy =
1877         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1878     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
1879     break;
1880   }
1881   case OMPRTL__kmpc_taskgroup: {
1882     // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
1883     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1884     llvm::FunctionType *FnTy =
1885         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1886     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
1887     break;
1888   }
1889   case OMPRTL__kmpc_end_taskgroup: {
1890     // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
1891     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1892     llvm::FunctionType *FnTy =
1893         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1894     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
1895     break;
1896   }
1897   case OMPRTL__kmpc_push_proc_bind: {
1898     // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
1899     // int proc_bind)
1900     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1901     llvm::FunctionType *FnTy =
1902         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1903     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
1904     break;
1905   }
1906   case OMPRTL__kmpc_omp_task_with_deps: {
1907     // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
1908     // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
1909     // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
1910     llvm::Type *TypeParams[] = {
1911         getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
1912         CGM.VoidPtrTy,         CGM.Int32Ty, CGM.VoidPtrTy};
1913     llvm::FunctionType *FnTy =
1914         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1915     RTLFn =
1916         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
1917     break;
1918   }
1919   case OMPRTL__kmpc_omp_wait_deps: {
1920     // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
1921     // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
1922     // kmp_depend_info_t *noalias_dep_list);
1923     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1924                                 CGM.Int32Ty,           CGM.VoidPtrTy,
1925                                 CGM.Int32Ty,           CGM.VoidPtrTy};
1926     llvm::FunctionType *FnTy =
1927         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1928     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
1929     break;
1930   }
1931   case OMPRTL__kmpc_cancellationpoint: {
1932     // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
1933     // global_tid, kmp_int32 cncl_kind)
1934     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1935     llvm::FunctionType *FnTy =
1936         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1937     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
1938     break;
1939   }
1940   case OMPRTL__kmpc_cancel: {
1941     // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
1942     // kmp_int32 cncl_kind)
1943     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1944     llvm::FunctionType *FnTy =
1945         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1946     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
1947     break;
1948   }
1949   case OMPRTL__kmpc_push_num_teams: {
1950     // Build void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
1951     // kmp_int32 num_teams, kmp_int32 num_threads);
1952     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1953         CGM.Int32Ty};
1954     llvm::FunctionType *FnTy =
1955         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1956     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
1957     break;
1958   }
1959   case OMPRTL__kmpc_fork_teams: {
1960     // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
1961     // microtask, ...);
1962     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1963                                 getKmpc_MicroPointerTy()};
1964     llvm::FunctionType *FnTy =
1965         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1966     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
1967     break;
1968   }
1969   case OMPRTL__kmpc_taskloop: {
1970     // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
1971     // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
1972     // sched, kmp_uint64 grainsize, void *task_dup);
1973     llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
1974                                 CGM.IntTy,
1975                                 CGM.VoidPtrTy,
1976                                 CGM.IntTy,
1977                                 CGM.Int64Ty->getPointerTo(),
1978                                 CGM.Int64Ty->getPointerTo(),
1979                                 CGM.Int64Ty,
1980                                 CGM.IntTy,
1981                                 CGM.IntTy,
1982                                 CGM.Int64Ty,
1983                                 CGM.VoidPtrTy};
1984     llvm::FunctionType *FnTy =
1985         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1986     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
1987     break;
1988   }
1989   case OMPRTL__kmpc_doacross_init: {
1990     // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
1991     // num_dims, struct kmp_dim *dims);
1992     llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
1993                                 CGM.Int32Ty,
1994                                 CGM.Int32Ty,
1995                                 CGM.VoidPtrTy};
1996     llvm::FunctionType *FnTy =
1997         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1998     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
1999     break;
2000   }
2001   case OMPRTL__kmpc_doacross_fini: {
2002     // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
2003     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2004     llvm::FunctionType *FnTy =
2005         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2006     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
2007     break;
2008   }
2009   case OMPRTL__kmpc_doacross_post: {
2010     // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
2011     // *vec);
2012     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2013                                 CGM.Int64Ty->getPointerTo()};
2014     llvm::FunctionType *FnTy =
2015         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2016     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
2017     break;
2018   }
2019   case OMPRTL__kmpc_doacross_wait: {
2020     // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
2021     // *vec);
2022     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2023                                 CGM.Int64Ty->getPointerTo()};
2024     llvm::FunctionType *FnTy =
2025         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2026     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
2027     break;
2028   }
2029   case OMPRTL__kmpc_task_reduction_init: {
2030     // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
2031     // *data);
2032     llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
2033     llvm::FunctionType *FnTy =
2034         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2035     RTLFn =
2036         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
2037     break;
2038   }
2039   case OMPRTL__kmpc_task_reduction_get_th_data: {
2040     // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
2041     // *d);
2042     llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2043     llvm::FunctionType *FnTy =
2044         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2045     RTLFn = CGM.CreateRuntimeFunction(
2046         FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
2047     break;
2048   }
2049   case OMPRTL__tgt_target: {
2050     // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
2051     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2052     // *arg_types);
2053     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2054                                 CGM.VoidPtrTy,
2055                                 CGM.Int32Ty,
2056                                 CGM.VoidPtrPtrTy,
2057                                 CGM.VoidPtrPtrTy,
2058                                 CGM.SizeTy->getPointerTo(),
2059                                 CGM.Int64Ty->getPointerTo()};
2060     llvm::FunctionType *FnTy =
2061         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2062     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
2063     break;
2064   }
2065   case OMPRTL__tgt_target_nowait: {
2066     // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
2067     // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2068     // int64_t *arg_types);
2069     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2070                                 CGM.VoidPtrTy,
2071                                 CGM.Int32Ty,
2072                                 CGM.VoidPtrPtrTy,
2073                                 CGM.VoidPtrPtrTy,
2074                                 CGM.SizeTy->getPointerTo(),
2075                                 CGM.Int64Ty->getPointerTo()};
2076     llvm::FunctionType *FnTy =
2077         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2078     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
2079     break;
2080   }
2081   case OMPRTL__tgt_target_teams: {
2082     // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
2083     // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2084     // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2085     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2086                                 CGM.VoidPtrTy,
2087                                 CGM.Int32Ty,
2088                                 CGM.VoidPtrPtrTy,
2089                                 CGM.VoidPtrPtrTy,
2090                                 CGM.SizeTy->getPointerTo(),
2091                                 CGM.Int64Ty->getPointerTo(),
2092                                 CGM.Int32Ty,
2093                                 CGM.Int32Ty};
2094     llvm::FunctionType *FnTy =
2095         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2096     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
2097     break;
2098   }
2099   case OMPRTL__tgt_target_teams_nowait: {
2100     // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
2101     // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
2102     // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2103     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2104                                 CGM.VoidPtrTy,
2105                                 CGM.Int32Ty,
2106                                 CGM.VoidPtrPtrTy,
2107                                 CGM.VoidPtrPtrTy,
2108                                 CGM.SizeTy->getPointerTo(),
2109                                 CGM.Int64Ty->getPointerTo(),
2110                                 CGM.Int32Ty,
2111                                 CGM.Int32Ty};
2112     llvm::FunctionType *FnTy =
2113         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2114     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
2115     break;
2116   }
2117   case OMPRTL__tgt_register_lib: {
2118     // Build void __tgt_register_lib(__tgt_bin_desc *desc);
2119     QualType ParamTy =
2120         CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2121     llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2122     llvm::FunctionType *FnTy =
2123         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2124     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
2125     break;
2126   }
2127   case OMPRTL__tgt_unregister_lib: {
2128     // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
2129     QualType ParamTy =
2130         CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2131     llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2132     llvm::FunctionType *FnTy =
2133         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2134     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
2135     break;
2136   }
2137   case OMPRTL__tgt_target_data_begin: {
2138     // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
2139     // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2140     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2141                                 CGM.Int32Ty,
2142                                 CGM.VoidPtrPtrTy,
2143                                 CGM.VoidPtrPtrTy,
2144                                 CGM.SizeTy->getPointerTo(),
2145                                 CGM.Int64Ty->getPointerTo()};
2146     llvm::FunctionType *FnTy =
2147         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2148     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
2149     break;
2150   }
2151   case OMPRTL__tgt_target_data_begin_nowait: {
2152     // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
2153     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2154     // *arg_types);
2155     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2156                                 CGM.Int32Ty,
2157                                 CGM.VoidPtrPtrTy,
2158                                 CGM.VoidPtrPtrTy,
2159                                 CGM.SizeTy->getPointerTo(),
2160                                 CGM.Int64Ty->getPointerTo()};
2161     auto *FnTy =
2162         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2163     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
2164     break;
2165   }
2166   case OMPRTL__tgt_target_data_end: {
2167     // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
2168     // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2169     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2170                                 CGM.Int32Ty,
2171                                 CGM.VoidPtrPtrTy,
2172                                 CGM.VoidPtrPtrTy,
2173                                 CGM.SizeTy->getPointerTo(),
2174                                 CGM.Int64Ty->getPointerTo()};
2175     llvm::FunctionType *FnTy =
2176         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2177     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
2178     break;
2179   }
2180   case OMPRTL__tgt_target_data_end_nowait: {
2181     // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
2182     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2183     // *arg_types);
2184     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2185                                 CGM.Int32Ty,
2186                                 CGM.VoidPtrPtrTy,
2187                                 CGM.VoidPtrPtrTy,
2188                                 CGM.SizeTy->getPointerTo(),
2189                                 CGM.Int64Ty->getPointerTo()};
2190     auto *FnTy =
2191         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2192     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
2193     break;
2194   }
2195   case OMPRTL__tgt_target_data_update: {
2196     // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
2197     // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2198     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2199                                 CGM.Int32Ty,
2200                                 CGM.VoidPtrPtrTy,
2201                                 CGM.VoidPtrPtrTy,
2202                                 CGM.SizeTy->getPointerTo(),
2203                                 CGM.Int64Ty->getPointerTo()};
2204     llvm::FunctionType *FnTy =
2205         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2206     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
2207     break;
2208   }
2209   case OMPRTL__tgt_target_data_update_nowait: {
2210     // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
2211     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2212     // *arg_types);
2213     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2214                                 CGM.Int32Ty,
2215                                 CGM.VoidPtrPtrTy,
2216                                 CGM.VoidPtrPtrTy,
2217                                 CGM.SizeTy->getPointerTo(),
2218                                 CGM.Int64Ty->getPointerTo()};
2219     auto *FnTy =
2220         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2221     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
2222     break;
2223   }
2224   }
2225   assert(RTLFn && "Unable to find OpenMP runtime function");
2226   return RTLFn;
2227 }
2228
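// The static-init entry point is selected by induction-variable width and
// signedness; a signed 32-bit IV, for instance, maps to roughly
//   void @__kmpc_for_static_init_4(%ident_t*, i32, i32, i32*, i32*, i32*,
//                                  i32*, i32, i32)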
2229 llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
2230                                                              bool IVSigned) {
2231   assert((IVSize == 32 || IVSize == 64) &&
2232          "IV size is not compatible with the omp runtime");
2233   auto Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
2234                                        : "__kmpc_for_static_init_4u")
2235                            : (IVSigned ? "__kmpc_for_static_init_8"
2236                                        : "__kmpc_for_static_init_8u");
2237   auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2238   auto PtrTy = llvm::PointerType::getUnqual(ITy);
2239   llvm::Type *TypeParams[] = {
2240     getIdentTyPointerTy(),                     // loc
2241     CGM.Int32Ty,                               // tid
2242     CGM.Int32Ty,                               // schedtype
2243     llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2244     PtrTy,                                     // p_lower
2245     PtrTy,                                     // p_upper
2246     PtrTy,                                     // p_stride
2247     ITy,                                       // incr
2248     ITy                                        // chunk
2249   };
2250   llvm::FunctionType *FnTy =
2251       llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2252   return CGM.CreateRuntimeFunction(FnTy, Name);
2253 }
2254
2255 llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
2256                                                             bool IVSigned) {
2257   assert((IVSize == 32 || IVSize == 64) &&
2258          "IV size is not compatible with the omp runtime");
2259   auto Name =
2260       IVSize == 32
2261           ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
2262           : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
2263   auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2264   llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
2265                                CGM.Int32Ty,           // tid
2266                                CGM.Int32Ty,           // schedtype
2267                                ITy,                   // lower
2268                                ITy,                   // upper
2269                                ITy,                   // stride
2270                                ITy                    // chunk
2271   };
2272   llvm::FunctionType *FnTy =
2273       llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2274   return CGM.CreateRuntimeFunction(FnTy, Name);
2275 }
2276
2277 llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
2278                                                             bool IVSigned) {
2279   assert((IVSize == 32 || IVSize == 64) &&
2280          "IV size is not compatible with the omp runtime");
2281   auto Name =
2282       IVSize == 32
2283           ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
2284           : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
2285   llvm::Type *TypeParams[] = {
2286       getIdentTyPointerTy(), // loc
2287       CGM.Int32Ty,           // tid
2288   };
2289   llvm::FunctionType *FnTy =
2290       llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2291   return CGM.CreateRuntimeFunction(FnTy, Name);
2292 }
2293
2294 llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
2295                                                             bool IVSigned) {
2296   assert((IVSize == 32 || IVSize == 64) &&
2297          "IV size is not compatible with the omp runtime");
2298   auto Name =
2299       IVSize == 32
2300           ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
2301           : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
2302   auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2303   auto PtrTy = llvm::PointerType::getUnqual(ITy);
2304   llvm::Type *TypeParams[] = {
2305     getIdentTyPointerTy(),                     // loc
2306     CGM.Int32Ty,                               // tid
2307     llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2308     PtrTy,                                     // p_lower
2309     PtrTy,                                     // p_upper
2310     PtrTy                                      // p_stride
2311   };
2312   llvm::FunctionType *FnTy =
2313       llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2314   return CGM.CreateRuntimeFunction(FnTy, Name);
2315 }
2316
2317 llvm::Constant *
2318 CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
2319   assert(!CGM.getLangOpts().OpenMPUseTLS ||
2320          !CGM.getContext().getTargetInfo().isTLSSupported());
2321   // Lookup the entry, lazily creating it if necessary.
2322   return getOrCreateInternalVariable(CGM.Int8PtrPtrTy,
2323                                      Twine(CGM.getMangledName(VD)) + ".cache.");
2324 }
2325
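// When TLS cannot be used, the per-thread address of a threadprivate variable
// is obtained from the runtime; on a 64-bit target the call is roughly
//   %addr = call i8* @__kmpc_threadprivate_cached(%ident_t* <loc>, i32 %gtid,
//                        i8* <&var>, i64 <sizeof(var)>, i8*** <&var.cache.>)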
2326 Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
2327                                                 const VarDecl *VD,
2328                                                 Address VDAddr,
2329                                                 SourceLocation Loc) {
2330   if (CGM.getLangOpts().OpenMPUseTLS &&
2331       CGM.getContext().getTargetInfo().isTLSSupported())
2332     return VDAddr;
2333
2334   auto VarTy = VDAddr.getElementType();
2335   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2336                          CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2337                                                        CGM.Int8PtrTy),
2338                          CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
2339                          getOrCreateThreadPrivateCache(VD)};
2340   return Address(CGF.EmitRuntimeCall(
2341       createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2342                  VDAddr.getAlignment());
2343 }
2344
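// Makes sure the runtime is initialized (via __kmpc_global_thread_num) and
// registers the constructor/copy-constructor/destructor triple for the
// variable, roughly
//   call void @__kmpc_threadprivate_register(%ident_t* <loc>, i8* <&var>,
//                                            <ctor>, <cctor>, <dtor>)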
2345 void CGOpenMPRuntime::emitThreadPrivateVarInit(
2346     CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
2347     llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
2348   // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
2349   // library.
2350   auto OMPLoc = emitUpdateLocation(CGF, Loc);
2351   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
2352                       OMPLoc);
2353   // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
2354   // to register constructor/destructor for variable.
2355   llvm::Value *Args[] = {OMPLoc,
2356                          CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2357                                                        CGM.VoidPtrTy),
2358                          Ctor, CopyCtor, Dtor};
2359   CGF.EmitRuntimeCall(
2360       createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
2361 }
2362
2363 llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
2364     const VarDecl *VD, Address VDAddr, SourceLocation Loc,
2365     bool PerformInit, CodeGenFunction *CGF) {
2366   if (CGM.getLangOpts().OpenMPUseTLS &&
2367       CGM.getContext().getTargetInfo().isTLSSupported())
2368     return nullptr;
2369
2370   VD = VD->getDefinition(CGM.getContext());
2371   if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
2372     ThreadPrivateWithDefinition.insert(VD);
2373     QualType ASTTy = VD->getType();
2374
2375     llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
2376     auto Init = VD->getAnyInitializer();
2377     if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2378       // Generate a function that re-emits the declaration's initializer
2379       // into the threadprivate copy of the variable VD.
2380       CodeGenFunction CtorCGF(CGM);
2381       FunctionArgList Args;
2382       ImplicitParamDecl Dst(CGM.getContext(), CGM.getContext().VoidPtrTy,
2383                             ImplicitParamDecl::Other);
2384       Args.push_back(&Dst);
2385
2386       auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2387           CGM.getContext().VoidPtrTy, Args);
2388       auto FTy = CGM.getTypes().GetFunctionType(FI);
2389       auto Fn = CGM.CreateGlobalInitOrDestructFunction(
2390           FTy, ".__kmpc_global_ctor_.", FI, Loc);
2391       CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
2392                             Args, SourceLocation());
2393       auto ArgVal = CtorCGF.EmitLoadOfScalar(
2394           CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2395           CGM.getContext().VoidPtrTy, Dst.getLocation());
2396       Address Arg = Address(ArgVal, VDAddr.getAlignment());
2397       Arg = CtorCGF.Builder.CreateElementBitCast(Arg,
2398                                              CtorCGF.ConvertTypeForMem(ASTTy));
2399       CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
2400                                /*IsInitializer=*/true);
2401       ArgVal = CtorCGF.EmitLoadOfScalar(
2402           CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2403           CGM.getContext().VoidPtrTy, Dst.getLocation());
2404       CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
2405       CtorCGF.FinishFunction();
2406       Ctor = Fn;
2407     }
2408     if (VD->getType().isDestructedType() != QualType::DK_none) {
2409       // Generate a function that emits a destructor call for the threadprivate
2410       // copy of the variable VD.
2411       CodeGenFunction DtorCGF(CGM);
2412       FunctionArgList Args;
2413       ImplicitParamDecl Dst(CGM.getContext(), CGM.getContext().VoidPtrTy,
2414                             ImplicitParamDecl::Other);
2415       Args.push_back(&Dst);
2416
2417       auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2418           CGM.getContext().VoidTy, Args);
2419       auto FTy = CGM.getTypes().GetFunctionType(FI);
2420       auto Fn = CGM.CreateGlobalInitOrDestructFunction(
2421           FTy, ".__kmpc_global_dtor_.", FI, Loc);
2422       auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2423       DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
2424                             SourceLocation());
2425       // Create a scope with an artificial location for the body of this function.
2426       auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2427       auto ArgVal = DtorCGF.EmitLoadOfScalar(
2428           DtorCGF.GetAddrOfLocalVar(&Dst),
2429           /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
2430       DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
2431                           DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2432                           DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2433       DtorCGF.FinishFunction();
2434       Dtor = Fn;
2435     }
2436     // Do not emit init function if it is not required.
2437     if (!Ctor && !Dtor)
2438       return nullptr;
2439
2440     llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
2441     auto CopyCtorTy =
2442         llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
2443                                 /*isVarArg=*/false)->getPointerTo();
2444     // Copying constructor for the threadprivate variable.
2445     // Must be NULL: the parameter is reserved by the runtime, which currently
2446     // asserts if anything other than NULL is passed here.
2447     CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
2448     if (Ctor == nullptr) {
2449       auto CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
2450                                             /*isVarArg=*/false)->getPointerTo();
2451       Ctor = llvm::Constant::getNullValue(CtorTy);
2452     }
2453     if (Dtor == nullptr) {
2454       auto DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
2455                                             /*isVarArg=*/false)->getPointerTo();
2456       Dtor = llvm::Constant::getNullValue(DtorTy);
2457     }
2458     if (!CGF) {
2459       auto InitFunctionTy =
2460           llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
2461       auto InitFunction = CGM.CreateGlobalInitOrDestructFunction(
2462           InitFunctionTy, ".__omp_threadprivate_init_.",
2463           CGM.getTypes().arrangeNullaryFunction());
2464       CodeGenFunction InitCGF(CGM);
2465       FunctionArgList ArgList;
2466       InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
2467                             CGM.getTypes().arrangeNullaryFunction(), ArgList,
2468                             Loc);
2469       emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2470       InitCGF.FinishFunction();
2471       return InitFunction;
2472     }
2473     emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2474   }
2475   return nullptr;
2476 }
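
// For illustration, a rough sketch of what the code above emits for a C++
// threadprivate with a non-trivial constructor and destructor, e.g.
//   static S Obj;               // S is any class with a ctor/dtor
//   #pragma omp threadprivate(Obj)
// It builds the .__kmpc_global_ctor_. and .__kmpc_global_dtor_. helpers and an
// initializer that performs, roughly,
//   __kmpc_global_thread_num(&loc);
//   __kmpc_threadprivate_register(&loc, &Obj, ctor, /*cctor=*/NULL, dtor);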
2477
2478 Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
2479                                                           QualType VarType,
2480                                                           StringRef Name) {
2481   llvm::Twine VarName(Name, ".artificial.");
2482   llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
2483   llvm::Value *GAddr = getOrCreateInternalVariable(VarLVType, VarName);
2484   llvm::Value *Args[] = {
2485       emitUpdateLocation(CGF, SourceLocation()),
2486       getThreadID(CGF, SourceLocation()),
2487       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
2488       CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
2489                                 /*IsSigned=*/false),
2490       getOrCreateInternalVariable(CGM.VoidPtrPtrTy, VarName + ".cache.")};
2491   return Address(
2492       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2493           CGF.EmitRuntimeCall(
2494               createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2495           VarLVType->getPointerTo(/*AddrSpace=*/0)),
2496       CGM.getPointerAlign());
2497 }
2498
2499 /// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen
2500 /// function. Here is the logic:
2501 /// if (Cond) {
2502 ///   ThenGen();
2503 /// } else {
2504 ///   ElseGen();
2505 /// }
2506 void CGOpenMPRuntime::emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
2507                                       const RegionCodeGenTy &ThenGen,
2508                                       const RegionCodeGenTy &ElseGen) {
2509   CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
2510
2511   // If the condition constant folds and can be elided, try to avoid emitting
2512   // the condition and the dead arm of the if/else.
2513   bool CondConstant;
2514   if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
2515     if (CondConstant)
2516       ThenGen(CGF);
2517     else
2518       ElseGen(CGF);
2519     return;
2520   }
2521
2522   // Otherwise, the condition did not fold, or we couldn't elide it.  Just
2523   // emit the conditional branch.
2524   auto ThenBlock = CGF.createBasicBlock("omp_if.then");
2525   auto ElseBlock = CGF.createBasicBlock("omp_if.else");
2526   auto ContBlock = CGF.createBasicBlock("omp_if.end");
2527   CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
2528
2529   // Emit the 'then' code.
2530   CGF.EmitBlock(ThenBlock);
2531   ThenGen(CGF);
2532   CGF.EmitBranch(ContBlock);
2533   // Emit the 'else' code if present.
2534   // There is no need to emit a line number for the unconditional branch.
2535   (void)ApplyDebugLocation::CreateEmpty(CGF);
2536   CGF.EmitBlock(ElseBlock);
2537   ElseGen(CGF);
2538   // There is no need to emit a line number for the unconditional branch.
2539   (void)ApplyDebugLocation::CreateEmpty(CGF);
2540   CGF.EmitBranch(ContBlock);
2541   // Emit the continuation block for code after the if.
2542   CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
2543 }
2544
2545 void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
2546                                        llvm::Value *OutlinedFn,
2547                                        ArrayRef<llvm::Value *> CapturedVars,
2548                                        const Expr *IfCond) {
2549   if (!CGF.HaveInsertPoint())
2550     return;
2551   auto *RTLoc = emitUpdateLocation(CGF, Loc);
2552   auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
2553                                                      PrePostActionTy &) {
2554     // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
2555     auto &RT = CGF.CGM.getOpenMPRuntime();
2556     llvm::Value *Args[] = {
2557         RTLoc,
2558         CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
2559         CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
2560     llvm::SmallVector<llvm::Value *, 16> RealArgs;
2561     RealArgs.append(std::begin(Args), std::end(Args));
2562     RealArgs.append(CapturedVars.begin(), CapturedVars.end());
2563
2564     auto RTLFn = RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
2565     CGF.EmitRuntimeCall(RTLFn, RealArgs);
2566   };
2567   auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
2568                                                           PrePostActionTy &) {
2569     auto &RT = CGF.CGM.getOpenMPRuntime();
2570     auto ThreadID = RT.getThreadID(CGF, Loc);
2571     // Build calls:
2572     // __kmpc_serialized_parallel(&Loc, GTid);
2573     llvm::Value *Args[] = {RTLoc, ThreadID};
2574     CGF.EmitRuntimeCall(
2575         RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);
2576
2577     // OutlinedFn(&GTid, &zero, CapturedStruct);
2578     auto ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
2579     Address ZeroAddr =
2580         CGF.CreateTempAlloca(CGF.Int32Ty, CharUnits::fromQuantity(4),
2581                              /*Name*/ ".zero.addr");
2582     CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2583     llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2584     OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2585     OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2586     OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2587     RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2588
2589     // __kmpc_end_serialized_parallel(&Loc, GTid);
2590     llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
2591     CGF.EmitRuntimeCall(
2592         RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
2593         EndArgs);
2594   };
2595   if (IfCond)
2596     emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
2597   else {
2598     RegionCodeGenTy ThenRCG(ThenGen);
2599     ThenRCG(CGF);
2600   }
2601 }
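
// For illustration, a rough sketch of the lowering implemented above for
//   #pragma omp parallel if (Cond)
//   { Body(); }
// is:
//   if (Cond) {
//     __kmpc_fork_call(&loc, /*nargs=*/N, microtask, captured vars...);
//   } else {
//     __kmpc_serialized_parallel(&loc, gtid);
//     outlined(&gtid, &zero, captured vars...);
//     __kmpc_end_serialized_parallel(&loc, gtid);
//   }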
2602
2603 // If we're inside an (outlined) parallel region, use the region info's
2604 // thread-ID variable (it is passed as the first argument of the outlined
2605 // function, "kmp_int32 *gtid"). Otherwise, if we're not inside a parallel
2606 // region but in regular serial code, get the thread ID by calling kmp_int32
2607 // __kmpc_global_thread_num(ident_t *loc), stash it in a temporary and return
2608 // the address of that temporary.
2609 Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
2610                                              SourceLocation Loc) {
2611   if (auto *OMPRegionInfo =
2612           dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2613     if (OMPRegionInfo->getThreadIDVariable())
2614       return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
2615
2616   auto ThreadID = getThreadID(CGF, Loc);
2617   auto Int32Ty =
2618       CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
2619   auto ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
2620   CGF.EmitStoreOfScalar(ThreadID,
2621                         CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
2622
2623   return ThreadIDTemp;
2624 }
2625
2626 llvm::Constant *
2627 CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
2628                                              const llvm::Twine &Name) {
2629   SmallString<256> Buffer;
2630   llvm::raw_svector_ostream Out(Buffer);
2631   Out << Name;
2632   auto RuntimeName = Out.str();
2633   auto &Elem = *InternalVars.insert(std::make_pair(RuntimeName, nullptr)).first;
2634   if (Elem.second) {
2635     assert(Elem.second->getType()->getPointerElementType() == Ty &&
2636            "OMP internal variable has different type than requested");
2637     return &*Elem.second;
2638   }
2639
2640   return Elem.second = new llvm::GlobalVariable(
2641              CGM.getModule(), Ty, /*IsConstant*/ false,
2642              llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
2643              Elem.first());
2644 }
2645
2646 llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
2647   llvm::Twine Name(".gomp_critical_user_", CriticalName);
2648   return getOrCreateInternalVariable(KmpCriticalNameTy, Name.concat(".var"));
2649 }
2650
2651 namespace {
2652 /// Common pre(post)-action for different OpenMP constructs.
2653 class CommonActionTy final : public PrePostActionTy {
2654   llvm::Value *EnterCallee;
2655   ArrayRef<llvm::Value *> EnterArgs;
2656   llvm::Value *ExitCallee;
2657   ArrayRef<llvm::Value *> ExitArgs;
2658   bool Conditional;
2659   llvm::BasicBlock *ContBlock = nullptr;
2660
2661 public:
2662   CommonActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
2663                  llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
2664                  bool Conditional = false)
2665       : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
2666         ExitArgs(ExitArgs), Conditional(Conditional) {}
2667   void Enter(CodeGenFunction &CGF) override {
2668     llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
2669     if (Conditional) {
2670       llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
2671       auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
2672       ContBlock = CGF.createBasicBlock("omp_if.end");
2673       // Generate the branch (If-stmt)
2674       CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
2675       CGF.EmitBlock(ThenBlock);
2676     }
2677   }
2678   void Done(CodeGenFunction &CGF) {
2679     // Emit the rest of blocks/branches
2680     CGF.EmitBranch(ContBlock);
2681     CGF.EmitBlock(ContBlock, true);
2682   }
2683   void Exit(CodeGenFunction &CGF) override {
2684     CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
2685   }
2686 };
2687 } // anonymous namespace
2688
2689 void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
2690                                          StringRef CriticalName,
2691                                          const RegionCodeGenTy &CriticalOpGen,
2692                                          SourceLocation Loc, const Expr *Hint) {
2693   // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
2694   // CriticalOpGen();
2695   // __kmpc_end_critical(ident_t *, gtid, Lock);
2696   // Prepare arguments and build a call to __kmpc_critical
2697   if (!CGF.HaveInsertPoint())
2698     return;
2699   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2700                          getCriticalRegionLock(CriticalName)};
2701   llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
2702                                                 std::end(Args));
2703   if (Hint) {
2704     EnterArgs.push_back(CGF.Builder.CreateIntCast(
2705         CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
2706   }
2707   CommonActionTy Action(
2708       createRuntimeFunction(Hint ? OMPRTL__kmpc_critical_with_hint
2709                                  : OMPRTL__kmpc_critical),
2710       EnterArgs, createRuntimeFunction(OMPRTL__kmpc_end_critical), Args);
2711   CriticalOpGen.setAction(Action);
2712   emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
2713 }
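
// For illustration, a rough sketch of the sequence emitted above for
//   #pragma omp critical (Name)
// is:
//   __kmpc_critical(&loc, gtid, &.gomp_critical_user_Name.var);
//   <critical body>;
//   __kmpc_end_critical(&loc, gtid, &.gomp_critical_user_Name.var);
// with __kmpc_critical_with_hint(&loc, gtid, lock, hint) used on entry instead
// when a 'hint' clause is present.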
2714
2715 void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
2716                                        const RegionCodeGenTy &MasterOpGen,
2717                                        SourceLocation Loc) {
2718   if (!CGF.HaveInsertPoint())
2719     return;
2720   // if(__kmpc_master(ident_t *, gtid)) {
2721   //   MasterOpGen();
2722   //   __kmpc_end_master(ident_t *, gtid);
2723   // }
2724   // Prepare arguments and build a call to __kmpc_master
2725   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2726   CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
2727                         createRuntimeFunction(OMPRTL__kmpc_end_master), Args,
2728                         /*Conditional=*/true);
2729   MasterOpGen.setAction(Action);
2730   emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
2731   Action.Done(CGF);
2732 }
2733
2734 void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
2735                                         SourceLocation Loc) {
2736   if (!CGF.HaveInsertPoint())
2737     return;
2738   // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
2739   llvm::Value *Args[] = {
2740       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2741       llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
2742   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args);
2743   if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2744     Region->emitUntiedSwitch(CGF);
2745 }
2746
2747 void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
2748                                           const RegionCodeGenTy &TaskgroupOpGen,
2749                                           SourceLocation Loc) {
2750   if (!CGF.HaveInsertPoint())
2751     return;
2752   // __kmpc_taskgroup(ident_t *, gtid);
2753   // TaskgroupOpGen();
2754   // __kmpc_end_taskgroup(ident_t *, gtid);
2755   // Prepare arguments and build a call to __kmpc_taskgroup
2756   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2757   CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
2758                         createRuntimeFunction(OMPRTL__kmpc_end_taskgroup),
2759                         Args);
2760   TaskgroupOpGen.setAction(Action);
2761   emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
2762 }
2763
2764 /// Given an array of pointers to variables, project the address of a
2765 /// given variable.
2766 static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
2767                                       unsigned Index, const VarDecl *Var) {
2768   // Pull out the pointer to the variable.
2769   Address PtrAddr =
2770       CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize());
2771   llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
2772
2773   Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
2774   Addr = CGF.Builder.CreateElementBitCast(
2775       Addr, CGF.ConvertTypeForMem(Var->getType()));
2776   return Addr;
2777 }
2778
2779 static llvm::Value *emitCopyprivateCopyFunction(
2780     CodeGenModule &CGM, llvm::Type *ArgsType,
2781     ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
2782     ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) {
2783   auto &C = CGM.getContext();
2784   // void copy_func(void *LHSArg, void *RHSArg);
2785   FunctionArgList Args;
2786   ImplicitParamDecl LHSArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
2787   ImplicitParamDecl RHSArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
2788   Args.push_back(&LHSArg);
2789   Args.push_back(&RHSArg);
2790   auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2791   auto *Fn = llvm::Function::Create(
2792       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2793       ".omp.copyprivate.copy_func", &CGM.getModule());
2794   CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
2795   CodeGenFunction CGF(CGM);
2796   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
2797   // Dest = (void*[n])(LHSArg);
2798   // Src = (void*[n])(RHSArg);
2799   Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2800       CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
2801       ArgsType), CGF.getPointerAlign());
2802   Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2803       CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
2804       ArgsType), CGF.getPointerAlign());
2805   // *(Type0*)Dst[0] = *(Type0*)Src[0];
2806   // *(Type1*)Dst[1] = *(Type1*)Src[1];
2807   // ...
2808   // *(Typen*)Dst[n] = *(Typen*)Src[n];
2809   for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
2810     auto DestVar = cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
2811     Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
2812
2813     auto SrcVar = cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
2814     Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
2815
2816     auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
2817     QualType Type = VD->getType();
2818     CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
2819   }
2820   CGF.FinishFunction();
2821   return Fn;
2822 }
2823
2824 void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
2825                                        const RegionCodeGenTy &SingleOpGen,
2826                                        SourceLocation Loc,
2827                                        ArrayRef<const Expr *> CopyprivateVars,
2828                                        ArrayRef<const Expr *> SrcExprs,
2829                                        ArrayRef<const Expr *> DstExprs,
2830                                        ArrayRef<const Expr *> AssignmentOps) {
2831   if (!CGF.HaveInsertPoint())
2832     return;
2833   assert(CopyprivateVars.size() == SrcExprs.size() &&
2834          CopyprivateVars.size() == DstExprs.size() &&
2835          CopyprivateVars.size() == AssignmentOps.size());
2836   auto &C = CGM.getContext();
2837   // int32 did_it = 0;
2838   // if(__kmpc_single(ident_t *, gtid)) {
2839   //   SingleOpGen();
2840   //   __kmpc_end_single(ident_t *, gtid);
2841   //   did_it = 1;
2842   // }
2843   // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2844   // <copy_func>, did_it);
2845
2846   Address DidIt = Address::invalid();
2847   if (!CopyprivateVars.empty()) {
2848     // int32 did_it = 0;
2849     auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2850     DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
2851     CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
2852   }
2853   // Prepare arguments and build a call to __kmpc_single
2854   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2855   CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
2856                         createRuntimeFunction(OMPRTL__kmpc_end_single), Args,
2857                         /*Conditional=*/true);
2858   SingleOpGen.setAction(Action);
2859   emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
2860   if (DidIt.isValid()) {
2861     // did_it = 1;
2862     CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
2863   }
2864   Action.Done(CGF);
2865   // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2866   // <copy_func>, did_it);
2867   if (DidIt.isValid()) {
2868     llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
2869     auto CopyprivateArrayTy =
2870         C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
2871                                /*IndexTypeQuals=*/0);
2872     // Create a list of all private variables for copyprivate.
2873     Address CopyprivateList =
2874         CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
2875     for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
2876       Address Elem = CGF.Builder.CreateConstArrayGEP(
2877           CopyprivateList, I, CGF.getPointerSize());
2878       CGF.Builder.CreateStore(
2879           CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2880               CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
2881           Elem);
2882     }
2883     // Build the function that copies private values from the single region to
2884     // all other threads in the corresponding parallel region.
2885     auto *CpyFn = emitCopyprivateCopyFunction(
2886         CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
2887         CopyprivateVars, SrcExprs, DstExprs, AssignmentOps);
2888     auto *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
2889     Address CL =
2890       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
2891                                                       CGF.VoidPtrTy);
2892     auto *DidItVal = CGF.Builder.CreateLoad(DidIt);
2893     llvm::Value *Args[] = {
2894         emitUpdateLocation(CGF, Loc), // ident_t *<loc>
2895         getThreadID(CGF, Loc),        // i32 <gtid>
2896         BufSize,                      // size_t <buf_size>
2897         CL.getPointer(),              // void *<copyprivate list>
2898         CpyFn,                        // void (*) (void *, void *) <copy_func>
2899         DidItVal                      // i32 did_it
2900     };
2901     CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args);
2902   }
2903 }
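
// For illustration, the sequence sketched in the comments above corresponds to
// a directive such as
//   #pragma omp single copyprivate(X)
// where the threads that skipped the single region receive the value of X from
// the thread that executed it, via the generated .omp.copyprivate.copy_func
// helper passed to __kmpc_copyprivate.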
2904
2905 void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
2906                                         const RegionCodeGenTy &OrderedOpGen,
2907                                         SourceLocation Loc, bool IsThreads) {
2908   if (!CGF.HaveInsertPoint())
2909     return;
2910   // __kmpc_ordered(ident_t *, gtid);
2911   // OrderedOpGen();
2912   // __kmpc_end_ordered(ident_t *, gtid);
2913   // Prepare arguments and build a call to __kmpc_ordered
2914   if (IsThreads) {
2915     llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2916     CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
2917                           createRuntimeFunction(OMPRTL__kmpc_end_ordered),
2918                           Args);
2919     OrderedOpGen.setAction(Action);
2920     emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2921     return;
2922   }
2923   emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2924 }
2925
2926 void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
2927                                       OpenMPDirectiveKind Kind, bool EmitChecks,
2928                                       bool ForceSimpleCall) {
2929   if (!CGF.HaveInsertPoint())
2930     return;
2931   // Build call __kmpc_cancel_barrier(loc, thread_id);
2932   // Build call __kmpc_barrier(loc, thread_id);
2933   unsigned Flags;
2934   if (Kind == OMPD_for)
2935     Flags = OMP_IDENT_BARRIER_IMPL_FOR;
2936   else if (Kind == OMPD_sections)
2937     Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
2938   else if (Kind == OMPD_single)
2939     Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
2940   else if (Kind == OMPD_barrier)
2941     Flags = OMP_IDENT_BARRIER_EXPL;
2942   else
2943     Flags = OMP_IDENT_BARRIER_IMPL;
2944   // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
2945   // thread_id);
2946   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2947                          getThreadID(CGF, Loc)};
2948   if (auto *OMPRegionInfo =
2949           dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
2950     if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
2951       auto *Result = CGF.EmitRuntimeCall(
2952           createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
2953       if (EmitChecks) {
2954         // if (__kmpc_cancel_barrier()) {
2955         //   exit from construct;
2956         // }
2957         auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
2958         auto *ContBB = CGF.createBasicBlock(".cancel.continue");
2959         auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
2960         CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
2961         CGF.EmitBlock(ExitBB);
2962         //   exit from construct;
2963         auto CancelDestination =
2964             CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
2965         CGF.EmitBranchThroughCleanup(CancelDestination);
2966         CGF.EmitBlock(ContBB, /*IsFinished=*/true);
2967       }
2968       return;
2969     }
2970   }
2971   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
2972 }
2973
2974 /// \brief Map the OpenMP loop schedule to the runtime enumeration.
2975 static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
2976                                           bool Chunked, bool Ordered) {
2977   switch (ScheduleKind) {
2978   case OMPC_SCHEDULE_static:
2979     return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
2980                    : (Ordered ? OMP_ord_static : OMP_sch_static);
2981   case OMPC_SCHEDULE_dynamic:
2982     return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
2983   case OMPC_SCHEDULE_guided:
2984     return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
2985   case OMPC_SCHEDULE_runtime:
2986     return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
2987   case OMPC_SCHEDULE_auto:
2988     return Ordered ? OMP_ord_auto : OMP_sch_auto;
2989   case OMPC_SCHEDULE_unknown:
2990     assert(!Chunked && "chunk was specified but schedule kind not known");
2991     return Ordered ? OMP_ord_static : OMP_sch_static;
2992   }
2993   llvm_unreachable("Unexpected runtime schedule");
2994 }
2995
2996 /// \brief Map the OpenMP distribute schedule to the runtime enumeration.
2997 static OpenMPSchedType
2998 getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
2999   // Only 'static' is allowed for dist_schedule.
3000   return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
3001 }
3002
3003 bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
3004                                          bool Chunked) const {
3005   auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
3006   return Schedule == OMP_sch_static;
3007 }
3008
3009 bool CGOpenMPRuntime::isStaticNonchunked(
3010     OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
3011   auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
3012   return Schedule == OMP_dist_sch_static;
3013 }
3014
3015
3016 bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
3017   auto Schedule =
3018       getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
3019   assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
3020   return Schedule != OMP_sch_static;
3021 }
3022
3023 static int addMonoNonMonoModifier(OpenMPSchedType Schedule,
3024                                   OpenMPScheduleClauseModifier M1,
3025                                   OpenMPScheduleClauseModifier M2) {
3026   int Modifier = 0;
3027   switch (M1) {
3028   case OMPC_SCHEDULE_MODIFIER_monotonic:
3029     Modifier = OMP_sch_modifier_monotonic;
3030     break;
3031   case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3032     Modifier = OMP_sch_modifier_nonmonotonic;
3033     break;
3034   case OMPC_SCHEDULE_MODIFIER_simd:
3035     if (Schedule == OMP_sch_static_chunked)
3036       Schedule = OMP_sch_static_balanced_chunked;
3037     break;
3038   case OMPC_SCHEDULE_MODIFIER_last:
3039   case OMPC_SCHEDULE_MODIFIER_unknown:
3040     break;
3041   }
3042   switch (M2) {
3043   case OMPC_SCHEDULE_MODIFIER_monotonic:
3044     Modifier = OMP_sch_modifier_monotonic;
3045     break;
3046   case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3047     Modifier = OMP_sch_modifier_nonmonotonic;
3048     break;
3049   case OMPC_SCHEDULE_MODIFIER_simd:
3050     if (Schedule == OMP_sch_static_chunked)
3051       Schedule = OMP_sch_static_balanced_chunked;
3052     break;
3053   case OMPC_SCHEDULE_MODIFIER_last:
3054   case OMPC_SCHEDULE_MODIFIER_unknown:
3055     break;
3056   }
3057   return Schedule | Modifier;
3058 }
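
// For illustration, a rough sketch of how the modifiers combine above:
//   schedule(nonmonotonic: dynamic, 4) -> OMP_sch_dynamic_chunked
//                                         | OMP_sch_modifier_nonmonotonic
//   schedule(simd: static, 4)          -> OMP_sch_static_balanced_chunked
// The combined value is what gets passed as the schedule type argument of the
// dispatch/static init runtime calls below.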
3059
3060 void CGOpenMPRuntime::emitForDispatchInit(
3061     CodeGenFunction &CGF, SourceLocation Loc,
3062     const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
3063     bool Ordered, const DispatchRTInput &DispatchValues) {
3064   if (!CGF.HaveInsertPoint())
3065     return;
3066   OpenMPSchedType Schedule = getRuntimeSchedule(
3067       ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
3068   assert(Ordered ||
3069          (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
3070           Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
3071           Schedule != OMP_sch_static_balanced_chunked));
3072   // Call __kmpc_dispatch_init(
3073   //          ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
3074   //          kmp_int[32|64] lower, kmp_int[32|64] upper,
3075   //          kmp_int[32|64] stride, kmp_int[32|64] chunk);
3076
3077   // If the chunk was not specified in the clause, use the default value 1.
3078   llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
3079                                             : CGF.Builder.getIntN(IVSize, 1);
3080   llvm::Value *Args[] = {
3081       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3082       CGF.Builder.getInt32(addMonoNonMonoModifier(
3083           Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
3084       DispatchValues.LB,                                // Lower
3085       DispatchValues.UB,                                // Upper
3086       CGF.Builder.getIntN(IVSize, 1),                   // Stride
3087       Chunk                                             // Chunk
3088   };
3089   CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
3090 }
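
// For illustration, a rough sketch of the call emitted above for
//   #pragma omp for schedule(dynamic, 4)
// with a 32-bit signed induction variable:
//   __kmpc_dispatch_init_4(&loc, gtid, OMP_sch_dynamic_chunked,
//                          /*lower=*/LB, /*upper=*/UB, /*stride=*/1,
//                          /*chunk=*/4);
// The iterations are then handed out by __kmpc_dispatch_next_4 (see
// emitForNext below).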
3091
3092 static void emitForStaticInitCall(
3093     CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
3094     llvm::Constant *ForStaticInitFunction, OpenMPSchedType Schedule,
3095     OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
3096     const CGOpenMPRuntime::StaticRTInput &Values) {
3097   if (!CGF.HaveInsertPoint())
3098     return;
3099
3100   assert(!Values.Ordered);
3101   assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
3102          Schedule == OMP_sch_static_balanced_chunked ||
3103          Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
3104          Schedule == OMP_dist_sch_static ||
3105          Schedule == OMP_dist_sch_static_chunked);
3106
3107   // Call __kmpc_for_static_init(
3108   //          ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
3109   //          kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
3110   //          kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
3111   //          kmp_int[32|64] incr, kmp_int[32|64] chunk);
3112   llvm::Value *Chunk = Values.Chunk;
3113   if (Chunk == nullptr) {
3114     assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
3115             Schedule == OMP_dist_sch_static) &&
3116            "expected static non-chunked schedule");
3117     // If the chunk was not specified in the clause, use the default value 1.
3118     Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
3119   } else {
3120     assert((Schedule == OMP_sch_static_chunked ||
3121             Schedule == OMP_sch_static_balanced_chunked ||
3122             Schedule == OMP_ord_static_chunked ||
3123             Schedule == OMP_dist_sch_static_chunked) &&
3124            "expected static chunked schedule");
3125   }
3126   llvm::Value *Args[] = {
3127       UpdateLocation,
3128       ThreadId,
3129       CGF.Builder.getInt32(addMonoNonMonoModifier(Schedule, M1,
3130                                                   M2)), // Schedule type
3131       Values.IL.getPointer(),                           // &isLastIter
3132       Values.LB.getPointer(),                           // &LB
3133       Values.UB.getPointer(),                           // &UB
3134       Values.ST.getPointer(),                           // &Stride
3135       CGF.Builder.getIntN(Values.IVSize, 1),            // Incr
3136       Chunk                                             // Chunk
3137   };
3138   CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
3139 }
3140
3141 void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
3142                                         SourceLocation Loc,
3143                                         OpenMPDirectiveKind DKind,
3144                                         const OpenMPScheduleTy &ScheduleKind,
3145                                         const StaticRTInput &Values) {
3146   OpenMPSchedType ScheduleNum = getRuntimeSchedule(
3147       ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
3148   assert(isOpenMPWorksharingDirective(DKind) &&
3149          "Expected loop-based or sections-based directive.");
3150   auto *UpdatedLocation = emitUpdateLocation(CGF, Loc,
3151                                              isOpenMPLoopDirective(DKind)
3152                                                  ? OMP_IDENT_WORK_LOOP
3153                                                  : OMP_IDENT_WORK_SECTIONS);
3154   auto *ThreadId = getThreadID(CGF, Loc);
3155   auto *StaticInitFunction =
3156       createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3157   emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3158                         ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
3159 }
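
// For illustration, a rough sketch of the call emitted above for
//   #pragma omp for schedule(static)
// with a 32-bit signed induction variable:
//   __kmpc_for_static_init_4(&loc, gtid, OMP_sch_static, &isLastIter, &LB, &UB,
//                            &Stride, /*incr=*/1, /*chunk=*/1);
// Each thread then runs its [LB, UB] subrange and calls __kmpc_for_static_fini
// (see emitForStaticFinish below).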
3160
3161 void CGOpenMPRuntime::emitDistributeStaticInit(
3162     CodeGenFunction &CGF, SourceLocation Loc,
3163     OpenMPDistScheduleClauseKind SchedKind,
3164     const CGOpenMPRuntime::StaticRTInput &Values) {
3165   OpenMPSchedType ScheduleNum =
3166       getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
3167   auto *UpdatedLocation =
3168       emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
3169   auto *ThreadId = getThreadID(CGF, Loc);
3170   auto *StaticInitFunction =
3171       createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3172   emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3173                         ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
3174                         OMPC_SCHEDULE_MODIFIER_unknown, Values);
3175 }
3176
3177 void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
3178                                           SourceLocation Loc,
3179                                           OpenMPDirectiveKind DKind) {
3180   if (!CGF.HaveInsertPoint())
3181     return;
3182   // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
3183   llvm::Value *Args[] = {
3184       emitUpdateLocation(CGF, Loc,
3185                          isOpenMPDistributeDirective(DKind)
3186                              ? OMP_IDENT_WORK_DISTRIBUTE
3187                              : isOpenMPLoopDirective(DKind)
3188                                    ? OMP_IDENT_WORK_LOOP
3189                                    : OMP_IDENT_WORK_SECTIONS),
3190       getThreadID(CGF, Loc)};
3191   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
3192                       Args);
3193 }
3194
3195 void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
3196                                                  SourceLocation Loc,
3197                                                  unsigned IVSize,
3198                                                  bool IVSigned) {
3199   if (!CGF.HaveInsertPoint())
3200     return;
3201   // Call __kmpc_dispatch_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
3202   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3203   CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
3204 }
3205
3206 llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
3207                                           SourceLocation Loc, unsigned IVSize,
3208                                           bool IVSigned, Address IL,
3209                                           Address LB, Address UB,
3210                                           Address ST) {
3211   // Call __kmpc_dispatch_next(
3212   //          ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
3213   //          kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
3214   //          kmp_int[32|64] *p_stride);
3215   llvm::Value *Args[] = {
3216       emitUpdateLocation(CGF, Loc),
3217       getThreadID(CGF, Loc),
3218       IL.getPointer(), // &isLastIter
3219       LB.getPointer(), // &Lower
3220       UB.getPointer(), // &Upper
3221       ST.getPointer()  // &Stride
3222   };
3223   llvm::Value *Call =
3224       CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
3225   return CGF.EmitScalarConversion(
3226       Call, CGF.getContext().getIntTypeForBitwidth(32, /* Signed */ true),
3227       CGF.getContext().BoolTy, Loc);
3228 }
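
// For illustration, the call emitted above typically drives a dispatch loop of
// the form (rough sketch, 32-bit signed case):
//   while (__kmpc_dispatch_next_4(&loc, gtid, &isLastIter, &LB, &UB, &Stride)) {
//     for (IV = LB; IV <= UB; ++IV)
//       <loop body>;
//   }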
3229
3230 void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
3231                                            llvm::Value *NumThreads,
3232                                            SourceLocation Loc) {
3233   if (!CGF.HaveInsertPoint())
3234     return;
3235   // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
3236   llvm::Value *Args[] = {
3237       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3238       CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
3239   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
3240                       Args);
3241 }
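
// For illustration, a 'num_threads(N)' clause on a parallel directive emits,
// roughly,
//   __kmpc_push_num_threads(&loc, gtid, N);
// immediately before the corresponding __kmpc_fork_call.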
3242
3243 void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
3244                                          OpenMPProcBindClauseKind ProcBind,
3245                                          SourceLocation Loc) {
3246   if (!CGF.HaveInsertPoint())
3247     return;
3248   // Constants for proc bind value accepted by the runtime.
3249   enum ProcBindTy {
3250     ProcBindFalse = 0,
3251     ProcBindTrue,
3252     ProcBindMaster,
3253     ProcBindClose,
3254     ProcBindSpread,
3255     ProcBindIntel,
3256     ProcBindDefault
3257   } RuntimeProcBind;
3258   switch (ProcBind) {
3259   case OMPC_PROC_BIND_master:
3260     RuntimeProcBind = ProcBindMaster;
3261     break;
3262   case OMPC_PROC_BIND_close:
3263     RuntimeProcBind = ProcBindClose;
3264     break;
3265   case OMPC_PROC_BIND_spread:
3266     RuntimeProcBind = ProcBindSpread;
3267     break;
3268   case OMPC_PROC_BIND_unknown:
3269     llvm_unreachable("Unsupported proc_bind value.");
3270   }
3271   // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
3272   llvm::Value *Args[] = {
3273       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3274       llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
3275   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
3276 }
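
// For illustration, 'proc_bind(close)' emits, roughly,
//   __kmpc_push_proc_bind(&loc, gtid, /*ProcBindClose=*/3);
// before the corresponding fork, using the ProcBindTy values defined above.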
3277
3278 void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
3279                                 SourceLocation Loc) {
3280   if (!CGF.HaveInsertPoint())
3281     return;
3282   // Build call void __kmpc_flush(ident_t *loc)
3283   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
3284                       emitUpdateLocation(CGF, Loc));
3285 }
3286
3287 namespace {
3288 /// \brief Indexes of fields for type kmp_task_t.
3289 enum KmpTaskTFields {
3290   /// \brief List of shared variables.
3291   KmpTaskTShareds,
3292   /// \brief Task routine.
3293   KmpTaskTRoutine,
3294   /// \brief Partition id for the untied tasks.
3295   KmpTaskTPartId,
3296   /// Function with call of destructors for private variables.
3297   Data1,
3298   /// Task priority.
3299   Data2,
3300   /// (Taskloops only) Lower bound.
3301   KmpTaskTLowerBound,
3302   /// (Taskloops only) Upper bound.
3303   KmpTaskTUpperBound,
3304   /// (Taskloops only) Stride.
3305   KmpTaskTStride,
3306   /// (Taskloops only) Is last iteration flag.
3307   KmpTaskTLastIter,
3308   /// (Taskloops only) Reduction data.
3309   KmpTaskTReductions,
3310 };
3311 } // anonymous namespace
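
// For illustration, a rough sketch (field names are illustrative) of the
// kmp_task_t record the indexes above refer to:
//   struct kmp_task_t {
//     void *shareds;               // KmpTaskTShareds
//     kmp_routine_entry_t routine; // KmpTaskTRoutine
//     kmp_int32 part_id;           // KmpTaskTPartId
//     kmp_cmplrdata_t data1;       // Destructor thunk
//     kmp_cmplrdata_t data2;       // Priority
//     // Taskloop-only fields:
//     kmp_uint64 lb, ub;
//     kmp_int64 st;
//     kmp_int32 liter;
//     void *reductions;
//   };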
3312
3313 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
3314   // FIXME: Add other entries type when they become supported.
3315   return OffloadEntriesTargetRegion.empty();
3316 }
3317
3318 /// \brief Initialize target region entry.
3319 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3320     initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3321                                     StringRef ParentName, unsigned LineNum,
3322                                     unsigned Order) {
3323   assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3324                                              "only required for the device "
3325                                              "code generation.");
3326   OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
3327       OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
3328                                    /*Flags=*/0);
3329   ++OffloadingEntriesNum;
3330 }
3331
3332 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3333     registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3334                                   StringRef ParentName, unsigned LineNum,
3335                                   llvm::Constant *Addr, llvm::Constant *ID,
3336                                   int32_t Flags) {
3337   // If we are emitting code for a target, the entry is already initialized;
3338   // it only has to be registered.
3339   if (CGM.getLangOpts().OpenMPIsDevice) {
3340     assert(hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum) &&
3341            "Entry must exist.");
3342     auto &Entry =
3343         OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
3344     assert(Entry.isValid() && "Entry not initialized!");
3345     Entry.setAddress(Addr);
3346     Entry.setID(ID);
3347     Entry.setFlags(Flags);
3348     return;
3349   } else {
3350     OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum++, Addr, ID, Flags);
3351     OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
3352   }
3353 }
3354
3355 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
3356     unsigned DeviceID, unsigned FileID, StringRef ParentName,
3357     unsigned LineNum) const {
3358   auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
3359   if (PerDevice == OffloadEntriesTargetRegion.end())
3360     return false;
3361   auto PerFile = PerDevice->second.find(FileID);
3362   if (PerFile == PerDevice->second.end())
3363     return false;
3364   auto PerParentName = PerFile->second.find(ParentName);
3365   if (PerParentName == PerFile->second.end())
3366     return false;
3367   auto PerLine = PerParentName->second.find(LineNum);
3368   if (PerLine == PerParentName->second.end())
3369     return false;
3370   // Fail if this entry is already registered.
3371   if (PerLine->second.getAddress() || PerLine->second.getID())
3372     return false;
3373   return true;
3374 }
3375
3376 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
3377     const OffloadTargetRegionEntryInfoActTy &Action) {
3378   // Scan all target region entries and perform the provided action.
3379   for (auto &D : OffloadEntriesTargetRegion)
3380     for (auto &F : D.second)
3381       for (auto &P : F.second)
3382         for (auto &L : P.second)
3383           Action(D.first, F.first, P.first(), L.first, L.second);
3384 }
3385
3386 /// \brief Create a Ctor/Dtor-like function whose body is emitted through
3387 /// \a Codegen. This is used to emit the two functions that register and
3388 /// unregister the descriptor of the current compilation unit.
3389 static llvm::Function *
3390 createOffloadingBinaryDescriptorFunction(CodeGenModule &CGM, StringRef Name,
3391                                          const RegionCodeGenTy &Codegen) {
3392   auto &C = CGM.getContext();
3393   FunctionArgList Args;
3394   ImplicitParamDecl DummyPtr(C, C.VoidPtrTy, ImplicitParamDecl::Other);
3395   Args.push_back(&DummyPtr);
3396
3397   CodeGenFunction CGF(CGM);
3398   auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3399   auto FTy = CGM.getTypes().GetFunctionType(FI);
3400   auto *Fn =
3401       CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, SourceLocation());
3402   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FI, Args, SourceLocation());
3403   Codegen(CGF);
3404   CGF.FinishFunction();
3405   return Fn;
3406 }
3407
3408 llvm::Function *
3409 CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
3410
3411   // If we don't have entries or if we are emitting code for the device, we
3412   // don't need to do anything.
3413   if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
3414     return nullptr;
3415
3416   auto &M = CGM.getModule();
3417   auto &C = CGM.getContext();
3418
3419   // Get list of devices we care about
3420   auto &Devices = CGM.getLangOpts().OMPTargetTriples;
3421
3422   // We should be creating an offloading descriptor only if there are devices
3423   // specified.
3424   assert(!Devices.empty() && "No OpenMP offloading devices??");
3425
3426   // Create the external variables that will point to the begin and end of the
3427   // host entries section. These will be defined by the linker.
3428   auto *OffloadEntryTy =
3429       CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
3430   llvm::GlobalVariable *HostEntriesBegin = new llvm::GlobalVariable(
3431       M, OffloadEntryTy, /*isConstant=*/true,
3432       llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
3433       ".omp_offloading.entries_begin");
3434   llvm::GlobalVariable *HostEntriesEnd = new llvm::GlobalVariable(
3435       M, OffloadEntryTy, /*isConstant=*/true,
3436       llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
3437       ".omp_offloading.entries_end");
3438
3439   // Create all device images
3440   auto *DeviceImageTy = cast<llvm::StructType>(
3441       CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
3442   ConstantInitBuilder DeviceImagesBuilder(CGM);
3443   auto DeviceImagesEntries = DeviceImagesBuilder.beginArray(DeviceImageTy);
3444
3445   for (unsigned i = 0; i < Devices.size(); ++i) {
3446     StringRef T = Devices[i].getTriple();
3447     auto *ImgBegin = new llvm::GlobalVariable(
3448         M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
3449         /*Initializer=*/nullptr,
3450         Twine(".omp_offloading.img_start.") + Twine(T));
3451     auto *ImgEnd = new llvm::GlobalVariable(
3452         M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
3453         /*Initializer=*/nullptr, Twine(".omp_offloading.img_end.") + Twine(T));
3454
3455     auto Dev = DeviceImagesEntries.beginStruct(DeviceImageTy);
3456     Dev.add(ImgBegin);
3457     Dev.add(ImgEnd);
3458     Dev.add(HostEntriesBegin);
3459     Dev.add(HostEntriesEnd);
3460     Dev.finishAndAddTo(DeviceImagesEntries);
3461   }
3462
3463   // Create device images global array.
3464   llvm::GlobalVariable *DeviceImages =
3465     DeviceImagesEntries.finishAndCreateGlobal(".omp_offloading.device_images",
3466                                               CGM.getPointerAlign(),
3467                                               /*isConstant=*/true);
3468   DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3469
3470   // Zero indices used to build the constant GEP expression below.
3471   llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
3472                              llvm::Constant::getNullValue(CGM.Int32Ty)};
3473
3474   // Create the target region descriptor.
3475   auto *BinaryDescriptorTy = cast<llvm::StructType>(
3476       CGM.getTypes().ConvertTypeForMem(getTgtBinaryDescriptorQTy()));
3477   ConstantInitBuilder DescBuilder(CGM);
3478   auto DescInit = DescBuilder.beginStruct(BinaryDescriptorTy);
3479   DescInit.addInt(CGM.Int32Ty, Devices.size());
3480   DescInit.add(llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
3481                                                     DeviceImages,
3482                                                     Index));
3483   DescInit.add(HostEntriesBegin);
3484   DescInit.add(HostEntriesEnd);
3485
3486   auto *Desc = DescInit.finishAndCreateGlobal(".omp_offloading.descriptor",
3487                                               CGM.getPointerAlign(),
3488                                               /*isConstant=*/true);
3489
3490   // Emit code to register or unregister the descriptor at program startup or
3491   // shutdown, respectively.
3492
3493   // Create a variable to drive the registration and unregistration of the
3494   // descriptor, so we can reuse the logic that emits Ctors and Dtors.
3495   auto *IdentInfo = &C.Idents.get(".omp_offloading.reg_unreg_var");
3496   ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(), SourceLocation(),
3497                                 IdentInfo, C.CharTy, ImplicitParamDecl::Other);
3498
3499   auto *UnRegFn = createOffloadingBinaryDescriptorFunction(
3500       CGM, ".omp_offloading.descriptor_unreg",
3501       [&](CodeGenFunction &CGF, PrePostActionTy &) {
3502         CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
3503                             Desc);
3504       });
3505   auto *RegFn = createOffloadingBinaryDescriptorFunction(
3506       CGM, ".omp_offloading.descriptor_reg",
3507       [&](CodeGenFunction &CGF, PrePostActionTy &) {
3508         CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_lib),
3509                             Desc);
3510         CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
3511       });
3512   if (CGM.supportsCOMDAT()) {
3513     // It is sufficient to call registration function only once, so create a
3514     // COMDAT group for registration/unregistration functions and associated
3515     // data. That would reduce startup time and code size. Registration
3516     // function serves as a COMDAT group key.
3517     auto ComdatKey = M.getOrInsertComdat(RegFn->getName());
3518     RegFn->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
3519     RegFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
3520     RegFn->setComdat(ComdatKey);
3521     UnRegFn->setComdat(ComdatKey);
3522     DeviceImages->setComdat(ComdatKey);
3523     Desc->setComdat(ComdatKey);
3524   }
3525   return RegFn;
3526 }
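
// For illustration, a rough sketch of the records built above (layouts come
// from getTgtDeviceImageQTy/getTgtBinaryDescriptorQTy):
//   struct __tgt_device_image {
//     void *ImageStart, *ImageEnd;
//     __tgt_offload_entry *EntriesBegin, *EntriesEnd;
//   };
//   struct __tgt_bin_desc {
//     int32_t NumDeviceImages;
//     __tgt_device_image *DeviceImages;
//     __tgt_offload_entry *HostEntriesBegin, *HostEntriesEnd;
//   };
// The registration function calls __tgt_register_lib(&Desc) at startup and
// arranges for __tgt_unregister_lib(&Desc) to run at program shutdown.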
3527
3528 void CGOpenMPRuntime::createOffloadEntry(llvm::Constant *ID,
3529                                          llvm::Constant *Addr, uint64_t Size,
3530                                          int32_t Flags) {
3531   StringRef Name = Addr->getName();
3532   auto *TgtOffloadEntryType = cast<llvm::StructType>(
3533       CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy()));
3534   llvm::LLVMContext &C = CGM.getModule().getContext();
3535   llvm::Module &M = CGM.getModule();
3536
3537   // Make sure the address has the right type.
3538   llvm::Constant *AddrPtr = llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy);
3539
3540   // Create constant string with the name.
3541   llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
3542
3543   llvm::GlobalVariable *Str =
3544       new llvm::GlobalVariable(M, StrPtrInit->getType(), /*isConstant=*/true,
3545                                llvm::GlobalValue::InternalLinkage, StrPtrInit,
3546                                ".omp_offloading.entry_name");
3547   Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3548   llvm::Constant *StrPtr = llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy);
3549
3550   // We can't have any padding between symbols, so we need to have 1-byte
3551   // alignment.
3552   auto Align = CharUnits::fromQuantity(1);
3553
3554   // Create the entry struct.
3555   ConstantInitBuilder EntryBuilder(CGM);
3556   auto EntryInit = EntryBuilder.beginStruct(TgtOffloadEntryType);
3557   EntryInit.add(AddrPtr);
3558   EntryInit.add(StrPtr);
3559   EntryInit.addInt(CGM.SizeTy, Size);
3560   EntryInit.addInt(CGM.Int32Ty, Flags);
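       // Trailing zero fills the 'reserved' field of __tgt_offload_entry.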
3561   EntryInit.addInt(CGM.Int32Ty, 0);
3562   llvm::GlobalVariable *Entry =
3563     EntryInit.finishAndCreateGlobal(".omp_offloading.entry",
3564                                     Align,
3565                                     /*constant*/ true,
3566                                     llvm::GlobalValue::ExternalLinkage);
3567
3568   // The entry has to be created in the section that the linker expects.
3569   Entry->setSection(".omp_offloading.entries");
3570 }
3571
3572 void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
3573   // Emit the offloading entries and metadata so that the device codegen side
3574   // can easily figure out what to emit. The produced metadata looks like
3575   // this:
3576   //
3577   // !omp_offload.info = !{!1, ...}
3578   //
3579   // Right now we only generate metadata for functions that contain target
3580   // regions.
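       //
       // For example (illustrative, not exact textual IR), each target region
       // operand added below has the shape:
       //   !{i32 0 /*kind*/, i32 DeviceID, i32 FileID, !"ParentName", i32 Line, i32 Order}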
3581
3582   // If we do not have entries, we don't need to do anything.
3583   if (OffloadEntriesInfoManager.empty())
3584     return;
3585
3586   llvm::Module &M = CGM.getModule();
3587   llvm::LLVMContext &C = M.getContext();
3588   SmallVector<OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
3589       OrderedEntries(OffloadEntriesInfoManager.size());
3590
3591   // Create the offloading info metadata node.
3592   llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
3593
3594   // Auxiliary lambdas to create metadata values and strings.
3595   auto getMDInt = [&](unsigned v) {
3596     return llvm::ConstantAsMetadata::get(
3597         llvm::ConstantInt::get(llvm::Type::getInt32Ty(C), v));
3598   };
3599
3600   auto getMDString = [&](StringRef v) { return llvm::MDString::get(C, v); };
3601
3602   // Create a function that emits the metadata for each target region entry.
3603   auto &&TargetRegionMetadataEmitter = [&](
3604       unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned Line,
3605       OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
3606     llvm::SmallVector<llvm::Metadata *, 32> Ops;
3607     // Generate metadata for target regions. Each entry of this metadata
3608     // contains:
3609     // - Entry 0 -> Kind of this type of metadata (0).
3610     // - Entry 1 -> Device ID of the file where the entry was identified.
3611     // - Entry 2 -> File ID of the file where the entry was identified.
3612     // - Entry 3 -> Mangled name of the function where the entry was identified.
3613     // - Entry 4 -> Line in the file where the entry was identified.
3614     // - Entry 5 -> Order the entry was created.
3615     // The first element of the metadata node is the kind.
3616     Ops.push_back(getMDInt(E.getKind()));
3617     Ops.push_back(getMDInt(DeviceID));
3618     Ops.push_back(getMDInt(FileID));
3619     Ops.push_back(getMDString(ParentName));
3620     Ops.push_back(getMDInt(Line));
3621     Ops.push_back(getMDInt(E.getOrder()));
3622
3623     // Save this entry in the right position of the ordered entries array.
3624     OrderedEntries[E.getOrder()] = &E;
3625
3626     // Add metadata to the named metadata node.
3627     MD->addOperand(llvm::MDNode::get(C, Ops));
3628   };
3629
3630   OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
3631       TargetRegionMetadataEmitter);
3632
3633   for (auto *E : OrderedEntries) {
3634     assert(E && "All ordered entries must exist!");
3635     if (auto *CE =
3636             dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
3637                 E)) {
3638       assert(CE->getID() && CE->getAddress() &&
3639              "Entry ID and Addr are invalid!");
3640       createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0);
3641     } else
3642       llvm_unreachable("Unsupported entry kind.");
3643   }
3644 }
3645
3646 /// \brief Loads all the offload entries information from the host IR
3647 /// metadata.
3648 void CGOpenMPRuntime::loadOffloadInfoMetadata() {
3649   // If we are in target mode, load the metadata from the host IR. This code has
3650   // to match the metadata creation in createOffloadEntriesAndInfoMetadata().
3651
3652   if (!CGM.getLangOpts().OpenMPIsDevice)
3653     return;
3654
3655   if (CGM.getLangOpts().OMPHostIRFile.empty())
3656     return;
3657
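       // Loading is best-effort: if the host IR file cannot be read or parsed,
       // bail out silently without preloading any offload entry info.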
3658   auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
3659   if (Buf.getError())
3660     return;
3661
3662   llvm::LLVMContext C;
3663   auto ME = expectedToErrorOrAndEmitErrors(
3664       C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
3665
3666   if (ME.getError())
3667     return;
3668
3669   llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
3670   if (!MD)
3671     return;
3672
3673   for (auto I : MD->operands()) {
3674     llvm::MDNode *MN = cast<llvm::MDNode>(I);
3675
3676     auto getMDInt = [&](unsigned Idx) {
3677       llvm::ConstantAsMetadata *V =
3678           cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
3679       return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
3680     };
3681
3682     auto getMDString = [&](unsigned Idx) {
3683       llvm::MDString *V = cast<llvm::MDString>(MN->getOperand(Idx));
3684       return V->getString();
3685     };
3686
3687     switch (getMDInt(0)) {
3688     default:
3689       llvm_unreachable("Unexpected metadata!");
3690       break;
3691     case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
3692         OFFLOAD_ENTRY_INFO_TARGET_REGION:
3693       OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
3694           /*DeviceID=*/getMDInt(1), /*FileID=*/getMDInt(2),
3695           /*ParentName=*/getMDString(3), /*Line=*/getMDInt(4),
3696           /*Order=*/getMDInt(5));
3697       break;
3698     }
3699   }
3700 }
3701
3702 void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
3703   if (!KmpRoutineEntryPtrTy) {
3704     // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
3705     auto &C = CGM.getContext();
3706     QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
3707     FunctionProtoType::ExtProtoInfo EPI;
3708     KmpRoutineEntryPtrQTy = C.getPointerType(
3709         C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
3710     KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
3711   }
3712 }
3713
3714 static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
3715                                        QualType FieldTy) {
3716   auto *Field = FieldDecl::Create(
3717       C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
3718       C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
3719       /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
3720   Field->setAccess(AS_public);
3721   DC->addDecl(Field);
3722   return Field;
3723 }
3724
3725 QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
3726
3727   // Make sure the type of the entry is already created. This is the type we
3728   // have to create:
3729   // struct __tgt_offload_entry{
3730   //   void      *addr;       // Pointer to the offload entry info.
3731   //                          // (function or global)
3732   //   char      *name;       // Name of the function or global.
3733   //   size_t     size;       // Size of the entry info (0 if it is a function).
3734   //   int32_t    flags;      // Flags associated with the entry, e.g. 'link'.
3735   //   int32_t    reserved;   // Reserved, to use by the runtime library.
3736   // };
3737   if (TgtOffloadEntryQTy.isNull()) {
3738     ASTContext &C = CGM.getContext();
3739     auto *RD = C.buildImplicitRecord("__tgt_offload_entry");
3740     RD->startDefinition();
3741     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3742     addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
3743     addFieldToRecordDecl(C, RD, C.getSizeType());
3744     addFieldToRecordDecl(
3745         C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3746     addFieldToRecordDecl(
3747         C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3748     RD->completeDefinition();
3749     TgtOffloadEntryQTy = C.getRecordType(RD);
3750   }
3751   return TgtOffloadEntryQTy;
3752 }
3753
3754 QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
3755   // This is the type we need to build:
3756   // struct __tgt_device_image{
3757   // void   *ImageStart;       // Pointer to the target code start.
3758   // void   *ImageEnd;         // Pointer to the target code end.
3759   // // We also add the host entries to the device image, as it may be useful
3760   // // for the target runtime to have access to that information.
3761   // __tgt_offload_entry  *EntriesBegin;   // Begin of the table with all
3762   //                                       // the entries.
3763   // __tgt_offload_entry  *EntriesEnd;     // End of the table with all the
3764   //                                       // entries (non inclusive).
3765   // };
3766   if (TgtDeviceImageQTy.isNull()) {
3767     ASTContext &C = CGM.getContext();
3768     auto *RD = C.buildImplicitRecord("__tgt_device_image");
3769     RD->startDefinition();
3770     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3771     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3772     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
3773     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
3774     RD->completeDefinition();
3775     TgtDeviceImageQTy = C.getRecordType(RD);
3776   }
3777   return TgtDeviceImageQTy;
3778 }
3779
3780 QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
3781   // struct __tgt_bin_desc{
3782   //   int32_t              NumDevices;      // Number of devices supported.
3783   //   __tgt_device_image   *DeviceImages;   // Array of device images
3784   //                                         // (one per device).
3785   //   __tgt_offload_entry  *EntriesBegin;   // Begin of the table with all the
3786   //                                         // entries.
3787   //   __tgt_offload_entry  *EntriesEnd;     // End of the table with all the
3788   //                                         // entries (non inclusive).
3789   // };
3790   if (TgtBinaryDescriptorQTy.isNull()) {
3791     ASTContext &C = CGM.getContext();
3792     auto *RD = C.buildImplicitRecord("__tgt_bin_desc");
3793     RD->startDefinition();
3794     addFieldToRecordDecl(
3795         C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3796     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtDeviceImageQTy()));
3797     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
3798     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
3799     RD->completeDefinition();
3800     TgtBinaryDescriptorQTy = C.getRecordType(RD);
3801   }
3802   return TgtBinaryDescriptorQTy;
3803 }
3804
3805 namespace {
3806 struct PrivateHelpersTy {
3807   PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
3808                    const VarDecl *PrivateElemInit)
3809       : Original(Original), PrivateCopy(PrivateCopy),
3810         PrivateElemInit(PrivateElemInit) {}
3811   const VarDecl *Original;
3812   const VarDecl *PrivateCopy;
3813   const VarDecl *PrivateElemInit;
3814 };
3815 typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
3816 } // anonymous namespace
3817
3818 static RecordDecl *
3819 createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
3820   if (!Privates.empty()) {
3821     auto &C = CGM.getContext();
3822     // Build struct .kmp_privates_t. {
3823     //         /*  private vars  */
3824     //       };
3825     auto *RD = C.buildImplicitRecord(".kmp_privates.t");
3826     RD->startDefinition();
3827     for (auto &&Pair : Privates) {
3828       auto *VD = Pair.second.Original;
3829       auto Type = VD->getType();
3830       Type = Type.getNonReferenceType();
3831       auto *FD = addFieldToRecordDecl(C, RD, Type);
3832       if (VD->hasAttrs()) {
3833         for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
3834              E(VD->getAttrs().end());
3835              I != E; ++I)
3836           FD->addAttr(*I);
3837       }
3838     }
3839     RD->completeDefinition();
3840     return RD;
3841   }
3842   return nullptr;
3843 }
3844
3845 static RecordDecl *
3846 createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
3847                          QualType KmpInt32Ty,
3848                          QualType KmpRoutineEntryPointerQTy) {
3849   auto &C = CGM.getContext();
3850   // Build struct kmp_task_t {
3851   //         void *              shareds;
3852   //         kmp_routine_entry_t routine;
3853   //         kmp_int32           part_id;
3854   //         kmp_cmplrdata_t data1;
3855   //         kmp_cmplrdata_t data2;
3856   // For taskloops additional fields:
3857   //         kmp_uint64          lb;
3858   //         kmp_uint64          ub;
3859   //         kmp_int64           st;
3860   //         kmp_int32           liter;
3861   //         void *              reductions;
3862   //       };
3863   auto *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
3864   UD->startDefinition();
3865   addFieldToRecordDecl(C, UD, KmpInt32Ty);
3866   addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
3867   UD->completeDefinition();
3868   QualType KmpCmplrdataTy = C.getRecordType(UD);
3869   auto *RD = C.buildImplicitRecord("kmp_task_t");
3870   RD->startDefinition();
3871   addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3872   addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
3873   addFieldToRecordDecl(C, RD, KmpInt32Ty);
3874   addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
3875   addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
3876   if (isOpenMPTaskLoopDirective(Kind)) {
3877     QualType KmpUInt64Ty =
3878         CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
3879     QualType KmpInt64Ty =
3880         CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
3881     addFieldToRecordDecl(C, RD, KmpUInt64Ty);
3882     addFieldToRecordDecl(C, RD, KmpUInt64Ty);
3883     addFieldToRecordDecl(C, RD, KmpInt64Ty);
3884     addFieldToRecordDecl(C, RD, KmpInt32Ty);
3885     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3886   }
3887   RD->completeDefinition();
3888   return RD;
3889 }
3890
3891 static RecordDecl *
3892 createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
3893                                      ArrayRef<PrivateDataTy> Privates) {
3894   auto &C = CGM.getContext();
3895   // Build struct kmp_task_t_with_privates {
3896   //         kmp_task_t task_data;
3897   //         .kmp_privates_t. privates;
3898   //       };
3899   auto *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
3900   RD->startDefinition();
3901   addFieldToRecordDecl(C, RD, KmpTaskTQTy);
3902   if (auto *PrivateRD = createPrivatesRecordDecl(CGM, Privates)) {
3903     addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
3904   }
3905   RD->completeDefinition();
3906   return RD;
3907 }
3908
3909 /// \brief Emit a proxy function which accepts kmp_task_t as the second
3910 /// argument.
3911 /// \code
3912 /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
3913 ///   TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
3914 ///   For taskloops:
3915 ///   tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3916 ///   tt->reductions, tt->shareds);
3917 ///   return 0;
3918 /// }
3919 /// \endcode
3920 static llvm::Value *
3921 emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
3922                       OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
3923                       QualType KmpTaskTWithPrivatesPtrQTy,
3924                       QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
3925                       QualType SharedsPtrTy, llvm::Value *TaskFunction,
3926                       llvm::Value *TaskPrivatesMap) {
3927   auto &C = CGM.getContext();
3928   FunctionArgList Args;
3929   ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
3930                             ImplicitParamDecl::Other);
3931   ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3932                                 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
3933                                 ImplicitParamDecl::Other);
3934   Args.push_back(&GtidArg);
3935   Args.push_back(&TaskTypeArg);
3936   auto &TaskEntryFnInfo =
3937       CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
3938   auto *TaskEntryTy = CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
3939   auto *TaskEntry =
3940       llvm::Function::Create(TaskEntryTy, llvm::GlobalValue::InternalLinkage,
3941                              ".omp_task_entry.", &CGM.getModule());
3942   CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskEntry, TaskEntryFnInfo);
3943   CodeGenFunction CGF(CGM);
3944   CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args);
3945
3946   // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
3947   // tt,
3948   // For taskloops:
3949   // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3950   // tt->task_data.shareds);
3951   auto *GtidParam = CGF.EmitLoadOfScalar(
3952       CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
3953   LValue TDBase = CGF.EmitLoadOfPointerLValue(
3954       CGF.GetAddrOfLocalVar(&TaskTypeArg),
3955       KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3956   auto *KmpTaskTWithPrivatesQTyRD =
3957       cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
3958   LValue Base =
3959       CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
3960   auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
3961   auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
3962   auto PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
3963   auto *PartidParam = PartIdLVal.getPointer();
3964
3965   auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
3966   auto SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
3967   auto *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3968       CGF.EmitLoadOfLValue(SharedsLVal, Loc).getScalarVal(),
3969       CGF.ConvertTypeForMem(SharedsPtrTy));
3970
3971   auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
3972   llvm::Value *PrivatesParam;
3973   if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
3974     auto PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
3975     PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3976         PrivatesLVal.getPointer(), CGF.VoidPtrTy);
3977   } else
3978     PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
3979
3980   llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
3981                                TaskPrivatesMap,
3982                                CGF.Builder
3983                                    .CreatePointerBitCastOrAddrSpaceCast(
3984                                        TDBase.getAddress(), CGF.VoidPtrTy)
3985                                    .getPointer()};
3986   SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
3987                                           std::end(CommonArgs));
3988   if (isOpenMPTaskLoopDirective(Kind)) {
3989     auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
3990     auto LBLVal = CGF.EmitLValueForField(Base, *LBFI);
3991     auto *LBParam = CGF.EmitLoadOfLValue(LBLVal, Loc).getScalarVal();
3992     auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
3993     auto UBLVal = CGF.EmitLValueForField(Base, *UBFI);
3994     auto *UBParam = CGF.EmitLoadOfLValue(UBLVal, Loc).getScalarVal();
3995     auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
3996     auto StLVal = CGF.EmitLValueForField(Base, *StFI);
3997     auto *StParam = CGF.EmitLoadOfLValue(StLVal, Loc).getScalarVal();
3998     auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
3999     auto LILVal = CGF.EmitLValueForField(Base, *LIFI);
4000     auto *LIParam = CGF.EmitLoadOfLValue(LILVal, Loc).getScalarVal();
4001     auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
4002     auto RLVal = CGF.EmitLValueForField(Base, *RFI);
4003     auto *RParam = CGF.EmitLoadOfLValue(RLVal, Loc).getScalarVal();
4004     CallArgs.push_back(LBParam);
4005     CallArgs.push_back(UBParam);
4006     CallArgs.push_back(StParam);
4007     CallArgs.push_back(LIParam);
4008     CallArgs.push_back(RParam);
4009   }
4010   CallArgs.push_back(SharedsParam);
4011
4012   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
4013                                                   CallArgs);
4014   CGF.EmitStoreThroughLValue(
4015       RValue::get(CGF.Builder.getInt32(/*C=*/0)),
4016       CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
4017   CGF.FinishFunction();
4018   return TaskEntry;
4019 }
4020
4021 static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
4022                                             SourceLocation Loc,
4023                                             QualType KmpInt32Ty,
4024                                             QualType KmpTaskTWithPrivatesPtrQTy,
4025                                             QualType KmpTaskTWithPrivatesQTy) {
4026   auto &C = CGM.getContext();
4027   FunctionArgList Args;
4028   ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
4029                             ImplicitParamDecl::Other);
4030   ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4031                                 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
4032                                 ImplicitParamDecl::Other);
4033   Args.push_back(&GtidArg);
4034   Args.push_back(&TaskTypeArg);
4035   auto &DestructorFnInfo =
4036       CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
4037   auto *DestructorFnTy = CGM.getTypes().GetFunctionType(DestructorFnInfo);
4038   auto *DestructorFn =
4039       llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
4040                              ".omp_task_destructor.", &CGM.getModule());
4041   CGM.SetInternalFunctionAttributes(/*D=*/nullptr, DestructorFn,
4042                                     DestructorFnInfo);
4043   CodeGenFunction CGF(CGM);
4044   CGF.disableDebugInfo();
4045   CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
4046                     Args);
4047
4048   LValue Base = CGF.EmitLoadOfPointerLValue(
4049       CGF.GetAddrOfLocalVar(&TaskTypeArg),
4050       KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4051   auto *KmpTaskTWithPrivatesQTyRD =
4052       cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
4053   auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4054   Base = CGF.EmitLValueForField(Base, *FI);
4055   for (auto *Field :
4056        cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
4057     if (auto DtorKind = Field->getType().isDestructedType()) {
4058       auto FieldLValue = CGF.EmitLValueForField(Base, Field);
4059       CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
4060     }
4061   }
4062   CGF.FinishFunction();
4063   return DestructorFn;
4064 }
4065
4066 /// \brief Emit a privates mapping function for correct handling of private and
4067 /// firstprivate variables.
4068 /// \code
4069 /// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
4070 /// **noalias priv1,...,  <tyn> **noalias privn) {
4071 ///   *priv1 = &.privates.priv1;
4072 ///   ...;
4073 ///   *privn = &.privates.privn;
4074 /// }
4075 /// \endcode
4076 static llvm::Value *
4077 emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
4078                                ArrayRef<const Expr *> PrivateVars,
4079                                ArrayRef<const Expr *> FirstprivateVars,
4080                                ArrayRef<const Expr *> LastprivateVars,
4081                                QualType PrivatesQTy,
4082                                ArrayRef<PrivateDataTy> Privates) {
4083   auto &C = CGM.getContext();
4084   FunctionArgList Args;
4085   ImplicitParamDecl TaskPrivatesArg(
4086       C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4087       C.getPointerType(PrivatesQTy).withConst().withRestrict(),
4088       ImplicitParamDecl::Other);
4089   Args.push_back(&TaskPrivatesArg);
4090   llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos;
4091   unsigned Counter = 1;
4092   for (auto *E: PrivateVars) {
4093     Args.push_back(ImplicitParamDecl::Create(
4094         C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4095         C.getPointerType(C.getPointerType(E->getType()))
4096             .withConst()
4097             .withRestrict(),
4098         ImplicitParamDecl::Other));
4099     auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4100     PrivateVarsPos[VD] = Counter;
4101     ++Counter;
4102   }
4103   for (auto *E : FirstprivateVars) {
4104     Args.push_back(ImplicitParamDecl::Create(
4105         C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4106         C.getPointerType(C.getPointerType(E->getType()))
4107             .withConst()
4108             .withRestrict(),
4109         ImplicitParamDecl::Other));
4110     auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4111     PrivateVarsPos[VD] = Counter;
4112     ++Counter;
4113   }
4114   for (auto *E: LastprivateVars) {
4115     Args.push_back(ImplicitParamDecl::Create(
4116         C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4117         C.getPointerType(C.getPointerType(E->getType()))
4118             .withConst()
4119             .withRestrict(),
4120         ImplicitParamDecl::Other));
4121     auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4122     PrivateVarsPos[VD] = Counter;
4123     ++Counter;
4124   }
4125   auto &TaskPrivatesMapFnInfo =
4126       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4127   auto *TaskPrivatesMapTy =
4128       CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
4129   auto *TaskPrivatesMap = llvm::Function::Create(
4130       TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage,
4131       ".omp_task_privates_map.", &CGM.getModule());
4132   CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskPrivatesMap,
4133                                     TaskPrivatesMapFnInfo);
4134   TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
4135   TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
4136   TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
4137   CodeGenFunction CGF(CGM);
4138   CGF.disableDebugInfo();
4139   CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
4140                     TaskPrivatesMapFnInfo, Args);
4141
4142   // *privi = &.privates.privi;
4143   LValue Base = CGF.EmitLoadOfPointerLValue(
4144       CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
4145       TaskPrivatesArg.getType()->castAs<PointerType>());
4146   auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
4147   Counter = 0;
4148   for (auto *Field : PrivatesQTyRD->fields()) {
4149     auto FieldLVal = CGF.EmitLValueForField(Base, Field);
4150     auto *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
4151     auto RefLVal = CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
4152     auto RefLoadLVal = CGF.EmitLoadOfPointerLValue(
4153         RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
4154     CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal);
4155     ++Counter;
4156   }
4157   CGF.FinishFunction();
4158   return TaskPrivatesMap;
4159 }
4160
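     /// Comparator for sorting task privates by descending alignment, so that
     /// the most strictly aligned fields come first in the generated
     /// .kmp_privates.t record.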
4161 static bool stable_sort_comparator(const PrivateDataTy P1,
4162                                    const PrivateDataTy P2) {
4163   return P1.first > P2.first;
4164 }
4165
4166 /// Emit initialization for private variables in task-based directives.
4167 static void emitPrivatesInit(CodeGenFunction &CGF,
4168                              const OMPExecutableDirective &D,
4169                              Address KmpTaskSharedsPtr, LValue TDBase,
4170                              const RecordDecl *KmpTaskTWithPrivatesQTyRD,
4171                              QualType SharedsTy, QualType SharedsPtrTy,
4172                              const OMPTaskDataTy &Data,
4173                              ArrayRef<PrivateDataTy> Privates, bool ForDup) {
4174   auto &C = CGF.getContext();
4175   auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4176   LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
4177   LValue SrcBase;
4178   if (!Data.FirstprivateVars.empty()) {
4179     SrcBase = CGF.MakeAddrLValue(
4180         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4181             KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
4182         SharedsTy);
4183   }
4184   CodeGenFunction::CGCapturedStmtInfo CapturesInfo(
4185       cast<CapturedStmt>(*D.getAssociatedStmt()));
4186   FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
4187   for (auto &&Pair : Privates) {
4188     auto *VD = Pair.second.PrivateCopy;
4189     auto *Init = VD->getAnyInitializer();
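         // When emitting the task duplication function (ForDup), only re-run
         // non-trivial constructor initializers; other initial values are
         // assumed to be copied along with the rest of the task storage.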
4190     if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
4191                              !CGF.isTrivialInitializer(Init)))) {
4192       LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
4193       if (auto *Elem = Pair.second.PrivateElemInit) {
4194         auto *OriginalVD = Pair.second.Original;
4195         auto *SharedField = CapturesInfo.lookup(OriginalVD);
4196         auto SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
4197         SharedRefLValue = CGF.MakeAddrLValue(
4198             Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
4199             SharedRefLValue.getType(),
4200             LValueBaseInfo(AlignmentSource::Decl),
4201             SharedRefLValue.getTBAAInfo());
4202         QualType Type = OriginalVD->getType();
4203         if (Type->isArrayType()) {
4204           // Initialize firstprivate array.
4205           if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
4206             // Perform simple memcpy.
4207             CGF.EmitAggregateAssign(PrivateLValue.getAddress(),
4208                                     SharedRefLValue.getAddress(), Type);
4209           } else {
4210             // Initialize firstprivate array using element-by-element
4211             // initialization.
4212             CGF.EmitOMPAggregateAssign(
4213                 PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
4214                 [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
4215                                                   Address SrcElement) {
4216                   // Clean up any temporaries needed by the initialization.
4217                   CodeGenFunction::OMPPrivateScope InitScope(CGF);
4218                   InitScope.addPrivate(
4219                       Elem, [SrcElement]() -> Address { return SrcElement; });
4220                   (void)InitScope.Privatize();
4221                   // Emit initialization for single element.
4222                   CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
4223                       CGF, &CapturesInfo);
4224                   CGF.EmitAnyExprToMem(Init, DestElement,
4225                                        Init->getType().getQualifiers(),
4226                                        /*IsInitializer=*/false);
4227                 });
4228           }
4229         } else {
4230           CodeGenFunction::OMPPrivateScope InitScope(CGF);
4231           InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
4232             return SharedRefLValue.getAddress();
4233           });
4234           (void)InitScope.Privatize();
4235           CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
4236           CGF.EmitExprAsInit(Init, VD, PrivateLValue,
4237                              /*capturedByInit=*/false);
4238         }
4239       } else
4240         CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
4241     }
4242     ++FI;
4243   }
4244 }
4245
4246 /// Check if the task duplication function is required for taskloops.
4247 static bool checkInitIsRequired(CodeGenFunction &CGF,
4248                                 ArrayRef<PrivateDataTy> Privates) {
4249   bool InitRequired = false;
4250   for (auto &&Pair : Privates) {
4251     auto *VD = Pair.second.PrivateCopy;
4252     auto *Init = VD->getAnyInitializer();
4253     InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
4254                                     !CGF.isTrivialInitializer(Init));
4255   }
4256   return InitRequired;
4257 }
4258
4259
4260 /// Emit task_dup function (for initialization of
4261 /// private/firstprivate/lastprivate vars and last_iter flag)
4262 /// \code
4263 /// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
4264 /// lastpriv) {
4265 /// // setup lastprivate flag
4266 ///    task_dst->last = lastpriv;
4267 /// // could be constructor calls here...
4268 /// }
4269 /// \endcode
4270 static llvm::Value *
4271 emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
4272                     const OMPExecutableDirective &D,
4273                     QualType KmpTaskTWithPrivatesPtrQTy,
4274                     const RecordDecl *KmpTaskTWithPrivatesQTyRD,
4275                     const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
4276                     QualType SharedsPtrTy, const OMPTaskDataTy &Data,
4277                     ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
4278   auto &C = CGM.getContext();
4279   FunctionArgList Args;
4280   ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4281                            KmpTaskTWithPrivatesPtrQTy,
4282                            ImplicitParamDecl::Other);
4283   ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4284                            KmpTaskTWithPrivatesPtrQTy,
4285                            ImplicitParamDecl::Other);
4286   ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
4287                                 ImplicitParamDecl::Other);
4288   Args.push_back(&DstArg);
4289   Args.push_back(&SrcArg);
4290   Args.push_back(&LastprivArg);
4291   auto &TaskDupFnInfo =
4292       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4293   auto *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
4294   auto *TaskDup =
4295       llvm::Function::Create(TaskDupTy, llvm::GlobalValue::InternalLinkage,
4296                              ".omp_task_dup.", &CGM.getModule());
4297   CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskDup, TaskDupFnInfo);
4298   CodeGenFunction CGF(CGM);
4299   CGF.disableDebugInfo();
4300   CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args);
4301
4302   LValue TDBase = CGF.EmitLoadOfPointerLValue(
4303       CGF.GetAddrOfLocalVar(&DstArg),
4304       KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4305   // task_dst->liter = lastpriv;
4306   if (WithLastIter) {
4307     auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4308     LValue Base = CGF.EmitLValueForField(
4309         TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4310     LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
4311     llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
4312         CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
4313     CGF.EmitStoreOfScalar(Lastpriv, LILVal);
4314   }
4315
4316   // Emit initial values for private copies (if any).
4317   assert(!Privates.empty());
4318   Address KmpTaskSharedsPtr = Address::invalid();
4319   if (!Data.FirstprivateVars.empty()) {
4320     LValue TDBase = CGF.EmitLoadOfPointerLValue(
4321         CGF.GetAddrOfLocalVar(&SrcArg),
4322         KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4323     LValue Base = CGF.EmitLValueForField(
4324         TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4325     KmpTaskSharedsPtr = Address(
4326         CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
4327                                  Base, *std::next(KmpTaskTQTyRD->field_begin(),
4328                                                   KmpTaskTShareds)),
4329                              Loc),
4330         CGF.getNaturalTypeAlignment(SharedsTy));
4331   }
4332   emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
4333                    SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
4334   CGF.FinishFunction();
4335   return TaskDup;
4336 }
4337
4338 /// Checks if destructor function is required to be generated.
4339 /// \return true if cleanups are required, false otherwise.
4340 static bool
4341 checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
4342   bool NeedsCleanup = false;
4343   auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4344   auto *PrivateRD = cast<RecordDecl>(FI->getType()->getAsTagDecl());
4345   for (auto *FD : PrivateRD->fields()) {
4346     NeedsCleanup = NeedsCleanup || FD->getType().isDestructedType();
4347     if (NeedsCleanup)
4348       break;
4349   }
4350   return NeedsCleanup;
4351 }
4352
4353 CGOpenMPRuntime::TaskResultTy
4354 CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
4355                               const OMPExecutableDirective &D,
4356                               llvm::Value *TaskFunction, QualType SharedsTy,
4357                               Address Shareds, const OMPTaskDataTy &Data) {
4358   auto &C = CGM.getContext();
4359   llvm::SmallVector<PrivateDataTy, 4> Privates;
4360   // Aggregate privates and sort them by the alignment.
4361   auto I = Data.PrivateCopies.begin();
4362   for (auto *E : Data.PrivateVars) {
4363     auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4364     Privates.push_back(std::make_pair(
4365         C.getDeclAlign(VD),
4366         PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4367                          /*PrivateElemInit=*/nullptr)));
4368     ++I;
4369   }
4370   I = Data.FirstprivateCopies.begin();
4371   auto IElemInitRef = Data.FirstprivateInits.begin();
4372   for (auto *E : Data.FirstprivateVars) {
4373     auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4374     Privates.push_back(std::make_pair(
4375         C.getDeclAlign(VD),
4376         PrivateHelpersTy(
4377             VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4378             cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl()))));
4379     ++I;
4380     ++IElemInitRef;
4381   }
4382   I = Data.LastprivateCopies.begin();
4383   for (auto *E : Data.LastprivateVars) {
4384     auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4385     Privates.push_back(std::make_pair(
4386         C.getDeclAlign(VD),
4387         PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4388                          /*PrivateElemInit=*/nullptr)));
4389     ++I;
4390   }
4391   std::stable_sort(Privates.begin(), Privates.end(), stable_sort_comparator);
4392   auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
4393   // Build type kmp_routine_entry_t (if not built yet).
4394   emitKmpRoutineEntryT(KmpInt32Ty);
4395   // Build type kmp_task_t (if not built yet).
4396   if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
4397     if (SavedKmpTaskloopTQTy.isNull()) {
4398       SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4399           CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4400     }
4401     KmpTaskTQTy = SavedKmpTaskloopTQTy;
4402   } else {
4403     assert(D.getDirectiveKind() == OMPD_task &&
4404            "Expected taskloop or task directive");
4405     if (SavedKmpTaskTQTy.isNull()) {
4406       SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4407           CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4408     }
4409     KmpTaskTQTy = SavedKmpTaskTQTy;
4410   }
4411   auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
4412   // Build particular struct kmp_task_t for the given task.
4413   auto *KmpTaskTWithPrivatesQTyRD =
4414       createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
4415   auto KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
4416   QualType KmpTaskTWithPrivatesPtrQTy =
4417       C.getPointerType(KmpTaskTWithPrivatesQTy);
4418   auto *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
4419   auto *KmpTaskTWithPrivatesPtrTy = KmpTaskTWithPrivatesTy->getPointerTo();
4420   auto *KmpTaskTWithPrivatesTySize = CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
4421   QualType SharedsPtrTy = C.getPointerType(SharedsTy);
4422
4423   // Emit initial values for private copies (if any).
4424   llvm::Value *TaskPrivatesMap = nullptr;
4425   auto *TaskPrivatesMapTy =
4426       std::next(cast<llvm::Function>(TaskFunction)->arg_begin(), 3)->getType();
4427   if (!Privates.empty()) {
4428     auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4429     TaskPrivatesMap = emitTaskPrivateMappingFunction(
4430         CGM, Loc, Data.PrivateVars, Data.FirstprivateVars, Data.LastprivateVars,
4431         FI->getType(), Privates);
4432     TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4433         TaskPrivatesMap, TaskPrivatesMapTy);
4434   } else {
4435     TaskPrivatesMap = llvm::ConstantPointerNull::get(
4436         cast<llvm::PointerType>(TaskPrivatesMapTy));
4437   }
4438   // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
4439   // kmp_task_t *tt);
4440   auto *TaskEntry = emitProxyTaskFunction(
4441       CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4442       KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
4443       TaskPrivatesMap);
4444
4445   // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
4446   // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
4447   // kmp_routine_entry_t *task_entry);
4448   // Task flags. The format is taken from the description of the
4449   // kmp_tasking_flags struct in
4450   // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h.
4451   enum {
4452     TiedFlag = 0x1,
4453     FinalFlag = 0x2,
4454     DestructorsFlag = 0x8,
4455     PriorityFlag = 0x20
4456   };
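       // For illustration: a tied task that needs destructors and has a priority
       // clause ends up with TiedFlag | DestructorsFlag | PriorityFlag == 0x29.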
4457   unsigned Flags = Data.Tied ? TiedFlag : 0;
4458   bool NeedsCleanup = false;
4459   if (!Privates.empty()) {
4460     NeedsCleanup = checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD);
4461     if (NeedsCleanup)
4462       Flags = Flags | DestructorsFlag;
4463   }
4464   if (Data.Priority.getInt())
4465     Flags = Flags | PriorityFlag;
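       // The 'final' clause may be a compile-time constant (getInt) or a
       // run-time expression (getPointer); emit a select only in the latter case.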
4466   auto *TaskFlags =
4467       Data.Final.getPointer()
4468           ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
4469                                      CGF.Builder.getInt32(FinalFlag),
4470                                      CGF.Builder.getInt32(/*C=*/0))
4471           : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
4472   TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
4473   auto *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
4474   llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
4475                               getThreadID(CGF, Loc), TaskFlags,
4476                               KmpTaskTWithPrivatesTySize, SharedsSize,
4477                               CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4478                                   TaskEntry, KmpRoutineEntryPtrTy)};
4479   auto *NewTask = CGF.EmitRuntimeCall(
4480       createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
4481   auto *NewTaskNewTaskTTy = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4482       NewTask, KmpTaskTWithPrivatesPtrTy);
4483   LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
4484                                                KmpTaskTWithPrivatesQTy);
4485   LValue TDBase =
4486       CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
4487   // Fill the data in the resulting kmp_task_t record.
4488   // Copy shareds if there are any.
4489   Address KmpTaskSharedsPtr = Address::invalid();
4490   if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
4491     KmpTaskSharedsPtr =
4492         Address(CGF.EmitLoadOfScalar(
4493                     CGF.EmitLValueForField(
4494                         TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
4495                                            KmpTaskTShareds)),
4496                     Loc),
4497                 CGF.getNaturalTypeAlignment(SharedsTy));
4498     CGF.EmitAggregateCopy(KmpTaskSharedsPtr, Shareds, SharedsTy);
4499   }
4500   // Emit initial values for private copies (if any).
4501   TaskResultTy Result;
4502   if (!Privates.empty()) {
4503     emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
4504                      SharedsTy, SharedsPtrTy, Data, Privates,
4505                      /*ForDup=*/false);
4506     if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
4507         (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
4508       Result.TaskDupFn = emitTaskDupFunction(
4509           CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
4510           KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
4511           /*WithLastIter=*/!Data.LastprivateVars.empty());
4512     }
4513   }
4514   // Fields of union "kmp_cmplrdata_t" for destructors and priority.
4515   enum { Priority = 0, Destructors = 1 };
4516   // Provide pointer to function with destructors for privates.
4517   auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
4518   auto *KmpCmplrdataUD = (*FI)->getType()->getAsUnionType()->getDecl();
4519   if (NeedsCleanup) {
4520     llvm::Value *DestructorFn = emitDestructorsFunction(
4521         CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4522         KmpTaskTWithPrivatesQTy);
4523     LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
4524     LValue DestructorsLV = CGF.EmitLValueForField(
4525         Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
4526     CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4527                               DestructorFn, KmpRoutineEntryPtrTy),
4528                           DestructorsLV);
4529   }
4530   // Set priority.
4531   if (Data.Priority.getInt()) {
4532     LValue Data2LV = CGF.EmitLValueForField(
4533         TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
4534     LValue PriorityLV = CGF.EmitLValueForField(
4535         Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
4536     CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
4537   }
4538   Result.NewTask = NewTask;
4539   Result.TaskEntry = TaskEntry;
4540   Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
4541   Result.TDBase = TDBase;
4542   Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
4543   return Result;
4544 }
4545
4546 void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
4547                                    const OMPExecutableDirective &D,
4548                                    llvm::Value *TaskFunction,
4549                                    QualType SharedsTy, Address Shareds,
4550                                    const Expr *IfCond,
4551                                    const OMPTaskDataTy &Data) {
4552   if (!CGF.HaveInsertPoint())
4553     return;
4554
4555   TaskResultTy Result =
4556       emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
4557   llvm::Value *NewTask = Result.NewTask;
4558   llvm::Value *TaskEntry = Result.TaskEntry;
4559   llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
4560   LValue TDBase = Result.TDBase;
4561   RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
4562   auto &C = CGM.getContext();
4563   // Process list of dependences.
4564   Address DependenciesArray = Address::invalid();
4565   unsigned NumDependencies = Data.Dependences.size();
4566   if (NumDependencies) {
4567     // Dependence kind for RTL.
4568     enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3 };
4569     enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
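         // The record built below mirrors (roughly) the runtime's kmp_depend_info:
         //   { intptr_t base_addr; size_t len; <bool-sized unsigned> flags; }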
4570     RecordDecl *KmpDependInfoRD;
4571     QualType FlagsTy =
4572         C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
4573     llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
4574     if (KmpDependInfoTy.isNull()) {
4575       KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
4576       KmpDependInfoRD->startDefinition();
4577       addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
4578       addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
4579       addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
4580       KmpDependInfoRD->completeDefinition();
4581       KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
4582     } else
4583       KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4584     CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
4585     // Define type kmp_depend_info[<Dependences.size()>];
4586     QualType KmpDependInfoArrayTy = C.getConstantArrayType(
4587         KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
4588         ArrayType::Normal, /*IndexTypeQuals=*/0);
4589     // kmp_depend_info[<Dependences.size()>] deps;
4590     DependenciesArray =
4591         CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
4592     for (unsigned i = 0; i < NumDependencies; ++i) {
4593       const Expr *E = Data.Dependences[i].second;
4594       auto Addr = CGF.EmitLValue(E);
4595       llvm::Value *Size;
4596       QualType Ty = E->getType();
4597       if (auto *ASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
4598         LValue UpAddrLVal =
4599             CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
4600         llvm::Value *UpAddr =
4601             CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
4602         llvm::Value *LowIntPtr =
4603             CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
4604         llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
4605         Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
4606       } else
4607         Size = CGF.getTypeSize(Ty);
4608       auto Base = CGF.MakeAddrLValue(
4609           CGF.Builder.CreateConstArrayGEP(DependenciesArray, i, DependencySize),
4610           KmpDependInfoTy);
4611       // deps[i].base_addr = &<Dependences[i].second>;
4612       auto BaseAddrLVal = CGF.EmitLValueForField(
4613           Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
4614       CGF.EmitStoreOfScalar(
4615           CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
4616           BaseAddrLVal);
4617       // deps[i].len = sizeof(<Dependences[i].second>);
4618       auto LenLVal = CGF.EmitLValueForField(
4619           Base, *std::next(KmpDependInfoRD->field_begin(), Len));
4620       CGF.EmitStoreOfScalar(Size, LenLVal);
4621       // deps[i].flags = <Dependences[i].first>;
4622       RTLDependenceKindTy DepKind;
4623       switch (Data.Dependences[i].first) {
4624       case OMPC_DEPEND_in:
4625         DepKind = DepIn;
4626         break;
4627       // Out and InOut dependencies must use the same code.
4628       case OMPC_DEPEND_out:
4629       case OMPC_DEPEND_inout:
4630         DepKind = DepInOut;
4631         break;
4632       case OMPC_DEPEND_source:
4633       case OMPC_DEPEND_sink:
4634       case OMPC_DEPEND_unknown:
4635         llvm_unreachable("Unknown task dependence type");
4636       }
4637       auto FlagsLVal = CGF.EmitLValueForField(
4638           Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
4639       CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
4640                             FlagsLVal);
4641     }
4642     DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4643         CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
4644         CGF.VoidPtrTy);
4645   }
4646
4647   // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
4648   // libcall.
4649   // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
4650   // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
4651   // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if the
4652   // dependence list is not empty.
4653   auto *ThreadID = getThreadID(CGF, Loc);
4654   auto *UpLoc = emitUpdateLocation(CGF, Loc);
4655   llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
4656   llvm::Value *DepTaskArgs[7];
4657   if (NumDependencies) {
4658     DepTaskArgs[0] = UpLoc;
4659     DepTaskArgs[1] = ThreadID;
4660     DepTaskArgs[2] = NewTask;
4661     DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
4662     DepTaskArgs[4] = DependenciesArray.getPointer();
4663     DepTaskArgs[5] = CGF.Builder.getInt32(0);
4664     DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4665   }
4666   auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, NumDependencies,
4667                         &TaskArgs,
4668                         &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
4669     if (!Data.Tied) {
4670       auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
4671       auto PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
4672       CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
4673     }
4674     if (NumDependencies) {
4675       CGF.EmitRuntimeCall(
4676           createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps), DepTaskArgs);
4677     } else {
4678       CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
4679                           TaskArgs);
4680     }
4681     // If the parent region is untied, build a return for the untied task.
4682     if (auto *Region =
4683             dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
4684       Region->emitUntiedSwitch(CGF);
4685   };
4686
4687   llvm::Value *DepWaitTaskArgs[6];
4688   if (NumDependencies) {
4689     DepWaitTaskArgs[0] = UpLoc;
4690     DepWaitTaskArgs[1] = ThreadID;
4691     DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
4692     DepWaitTaskArgs[3] = DependenciesArray.getPointer();
4693     DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
4694     DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4695   }
4696   auto &&ElseCodeGen = [&TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
4697                         NumDependencies, &DepWaitTaskArgs,
4698                         Loc](CodeGenFunction &CGF, PrePostActionTy &) {
4699     auto &RT = CGF.CGM.getOpenMPRuntime();
4700     CodeGenFunction::RunCleanupsScope LocalScope(CGF);
4701     // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
4702     // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
4703     // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
4704     // is specified.
4705     if (NumDependencies)
4706       CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
4707                           DepWaitTaskArgs);
4708     // Call proxy_task_entry(gtid, new_task);
4709     auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
4710                       Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
4711       Action.Enter(CGF);
4712       llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
4713       CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
4714                                                           OutlinedFnArgs);
4715     };
4716
4717     // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
4718     // kmp_task_t *new_task);
4719     // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
4720     // kmp_task_t *new_task);
4721     RegionCodeGenTy RCG(CodeGen);
4722     CommonActionTy Action(
4723         RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0), TaskArgs,
4724         RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0), TaskArgs);
4725     RCG.setAction(Action);
4726     RCG(CGF);
4727   };
4728
4729   if (IfCond)
4730     emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
4731   else {
4732     RegionCodeGenTy ThenRCG(ThenCodeGen);
4733     ThenRCG(CGF);
4734   }
4735 }
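// Illustrative sketch (not emitted verbatim): for a construct such as
//   #pragma omp task if(cond) depend(in: x)
// the code generated above roughly corresponds to
//   if (cond)
//     __kmpc_omp_task_with_deps(loc, gtid, new_task, ndeps, dep_list, 0, NULL);
//   else {
//     __kmpc_omp_wait_deps(loc, gtid, ndeps, dep_list, 0, NULL);
//     __kmpc_omp_task_begin_if0(loc, gtid, new_task);
//     proxy_task_entry(gtid, new_task);
//     __kmpc_omp_task_complete_if0(loc, gtid, new_task);
//   }
// Without a depend clause the 'then' branch calls __kmpc_omp_task() instead.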
4736
4737 void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
4738                                        const OMPLoopDirective &D,
4739                                        llvm::Value *TaskFunction,
4740                                        QualType SharedsTy, Address Shareds,
4741                                        const Expr *IfCond,
4742                                        const OMPTaskDataTy &Data) {
4743   if (!CGF.HaveInsertPoint())
4744     return;
4745   TaskResultTy Result =
4746       emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
4747   // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
4748   // libcall.
4749   // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
4750   // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
4751   // sched, kmp_uint64 grainsize, void *task_dup);
4752   llvm::Value *ThreadID = getThreadID(CGF, Loc);
4753   llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
4754   llvm::Value *IfVal;
4755   if (IfCond) {
4756     IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
4757                                       /*isSigned=*/true);
4758   } else
4759     IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
4760
4761   LValue LBLVal = CGF.EmitLValueForField(
4762       Result.TDBase,
4763       *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
4764   auto *LBVar =
4765       cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
4766   CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
4767                        /*IsInitializer=*/true);
4768   LValue UBLVal = CGF.EmitLValueForField(
4769       Result.TDBase,
4770       *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
4771   auto *UBVar =
4772       cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
4773   CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
4774                        /*IsInitializer=*/true);
4775   LValue StLVal = CGF.EmitLValueForField(
4776       Result.TDBase,
4777       *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
4778   auto *StVar =
4779       cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
4780   CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
4781                        /*IsInitializer=*/true);
4782   // Store reductions address.
4783   LValue RedLVal = CGF.EmitLValueForField(
4784       Result.TDBase,
4785       *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
4786   if (Data.Reductions)
4787     CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
4788   else {
4789     CGF.EmitNullInitialization(RedLVal.getAddress(),
4790                                CGF.getContext().VoidPtrTy);
4791   }
4792   enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
4793   llvm::Value *TaskArgs[] = {
4794       UpLoc,
4795       ThreadID,
4796       Result.NewTask,
4797       IfVal,
4798       LBLVal.getPointer(),
4799       UBLVal.getPointer(),
4800       CGF.EmitLoadOfScalar(StLVal, SourceLocation()),
4801       llvm::ConstantInt::getNullValue(
4802           CGF.IntTy), // Always 0 because taskgroup emitted by the compiler
4803       llvm::ConstantInt::getSigned(
4804           CGF.IntTy, Data.Schedule.getPointer()
4805                          ? Data.Schedule.getInt() ? NumTasks : Grainsize
4806                          : NoSchedule),
4807       Data.Schedule.getPointer()
4808           ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
4809                                       /*isSigned=*/false)
4810           : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
4811       Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4812                              Result.TaskDupFn, CGF.VoidPtrTy)
4813                        : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
4814   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskloop), TaskArgs);
4815 }
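// Illustrative mapping (a sketch derived from the NoSchedule/Grainsize/NumTasks
// enum above) of the 'sched' and 'grainsize' arguments passed to __kmpc_taskloop:
//   #pragma omp taskloop               -> sched = 0 (NoSchedule), grainsize = 0
//   #pragma omp taskloop grainsize(G)  -> sched = 1 (Grainsize),  grainsize = G
//   #pragma omp taskloop num_tasks(N)  -> sched = 2 (NumTasks),   grainsize = N
// The 'nogroup' argument is always 0 here because the enclosing taskgroup is
// emitted by the compiler, as noted in the TaskArgs initializer.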
4816
4817 /// \brief Emit reduction operation for each element of array (required for
4818 /// array sections) LHS op = RHS.
4819 /// \param Type Type of array.
4820 /// \param LHSVar Variable on the left side of the reduction operation
4821 /// (references element of array in original variable).
4822 /// \param RHSVar Variable on the right side of the reduction operation
4823 /// (references element of array in original variable).
4824 /// \param RedOpGen Generator of reduction operation with use of LHSVar and
4825 /// RHSVar.
4826 static void EmitOMPAggregateReduction(
4827     CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
4828     const VarDecl *RHSVar,
4829     const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
4830                                   const Expr *, const Expr *)> &RedOpGen,
4831     const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
4832     const Expr *UpExpr = nullptr) {
4833   // Perform the element-by-element reduction.
4834   QualType ElementTy;
4835   Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
4836   Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
4837
4838   // Drill down to the base element type on both arrays.
4839   auto ArrayTy = Type->getAsArrayTypeUnsafe();
4840   auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
4841
4842   auto RHSBegin = RHSAddr.getPointer();
4843   auto LHSBegin = LHSAddr.getPointer();
4844   // Cast from pointer to array type to pointer to single element.
4845   auto LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
4846   // The basic structure here is a while-do loop.
4847   auto BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
4848   auto DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
4849   auto IsEmpty =
4850       CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
4851   CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
4852
4853   // Enter the loop body, making that address the current address.
4854   auto EntryBB = CGF.Builder.GetInsertBlock();
4855   CGF.EmitBlock(BodyBB);
4856
4857   CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
4858
4859   llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
4860       RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
4861   RHSElementPHI->addIncoming(RHSBegin, EntryBB);
4862   Address RHSElementCurrent =
4863       Address(RHSElementPHI,
4864               RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
4865
4866   llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
4867       LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
4868   LHSElementPHI->addIncoming(LHSBegin, EntryBB);
4869   Address LHSElementCurrent =
4870       Address(LHSElementPHI,
4871               LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
4872
4873   // Emit copy.
4874   CodeGenFunction::OMPPrivateScope Scope(CGF);
4875   Scope.addPrivate(LHSVar, [=]() -> Address { return LHSElementCurrent; });
4876   Scope.addPrivate(RHSVar, [=]() -> Address { return RHSElementCurrent; });
4877   Scope.Privatize();
4878   RedOpGen(CGF, XExpr, EExpr, UpExpr);
4879   Scope.ForceCleanup();
4880
4881   // Shift the address forward by one element.
4882   auto LHSElementNext = CGF.Builder.CreateConstGEP1_32(
4883       LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
4884   auto RHSElementNext = CGF.Builder.CreateConstGEP1_32(
4885       RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
4886   // Check whether we've reached the end.
4887   auto Done =
4888       CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
4889   CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
4890   LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
4891   RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());
4892
4893   // Done.
4894   CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
4895 }
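// Conceptually (a simplified sketch), the loop emitted above is equivalent to
//   for (size_t i = 0; i < NumElements; ++i)
//     RedOpGen(lhs[i], rhs[i]); // e.g. lhs[i] = lhs[i] op rhs[i]
// implemented as a pointer walk with PHI nodes, with the initial
// 'omp.arraycpy.isempty' check handling zero-length arrays.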
4896
4897 /// Emit reduction combiner. If the combiner is a simple expression emit it as
4898 /// is, otherwise consider it as combiner of UDR decl and emit it as a call of
4899 /// UDR combiner function.
4900 static void emitReductionCombiner(CodeGenFunction &CGF,
4901                                   const Expr *ReductionOp) {
4902   if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
4903     if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
4904       if (auto *DRE =
4905               dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
4906         if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
4907           std::pair<llvm::Function *, llvm::Function *> Reduction =
4908               CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
4909           RValue Func = RValue::get(Reduction.first);
4910           CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
4911           CGF.EmitIgnoredExpr(ReductionOp);
4912           return;
4913         }
4914   CGF.EmitIgnoredExpr(ReductionOp);
4915 }
4916
4917 llvm::Value *CGOpenMPRuntime::emitReductionFunction(
4918     CodeGenModule &CGM, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates,
4919     ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
4920     ArrayRef<const Expr *> ReductionOps) {
4921   auto &C = CGM.getContext();
4922
4923   // void reduction_func(void *LHSArg, void *RHSArg);
4924   FunctionArgList Args;
4925   ImplicitParamDecl LHSArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
4926   ImplicitParamDecl RHSArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
4927   Args.push_back(&LHSArg);
4928   Args.push_back(&RHSArg);
4929   auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4930   auto *Fn = llvm::Function::Create(
4931       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
4932       ".omp.reduction.reduction_func", &CGM.getModule());
4933   CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
4934   CodeGenFunction CGF(CGM);
4935   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
4936
4937   // Dst = (void*[n])(LHSArg);
4938   // Src = (void*[n])(RHSArg);
4939   Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4940       CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
4941       ArgsType), CGF.getPointerAlign());
4942   Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4943       CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
4944       ArgsType), CGF.getPointerAlign());
4945
4946   //  ...
4947   //  *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
4948   //  ...
4949   CodeGenFunction::OMPPrivateScope Scope(CGF);
4950   auto IPriv = Privates.begin();
4951   unsigned Idx = 0;
4952   for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
4953     auto RHSVar = cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
4954     Scope.addPrivate(RHSVar, [&]() -> Address {
4955       return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
4956     });
4957     auto LHSVar = cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
4958     Scope.addPrivate(LHSVar, [&]() -> Address {
4959       return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
4960     });
4961     QualType PrivTy = (*IPriv)->getType();
4962     if (PrivTy->isVariablyModifiedType()) {
4963       // Get array size and emit VLA type.
4964       ++Idx;
4965       Address Elem =
4966           CGF.Builder.CreateConstArrayGEP(LHS, Idx, CGF.getPointerSize());
4967       llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
4968       auto *VLA = CGF.getContext().getAsVariableArrayType(PrivTy);
4969       auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
4970       CodeGenFunction::OpaqueValueMapping OpaqueMap(
4971           CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
4972       CGF.EmitVariablyModifiedType(PrivTy);
4973     }
4974   }
4975   Scope.Privatize();
4976   IPriv = Privates.begin();
4977   auto ILHS = LHSExprs.begin();
4978   auto IRHS = RHSExprs.begin();
4979   for (auto *E : ReductionOps) {
4980     if ((*IPriv)->getType()->isArrayType()) {
4981       // Emit reduction for array section.
4982       auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
4983       auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
4984       EmitOMPAggregateReduction(
4985           CGF, (*IPriv)->getType(), LHSVar, RHSVar,
4986           [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
4987             emitReductionCombiner(CGF, E);
4988           });
4989     } else
4990       // Emit reduction for array subscript or single variable.
4991       emitReductionCombiner(CGF, E);
4992     ++IPriv;
4993     ++ILHS;
4994     ++IRHS;
4995   }
4996   Scope.ForceCleanup();
4997   CGF.FinishFunction();
4998   return Fn;
4999 }
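// The generated helper therefore has, conceptually, the shape sketched in the
// comments above:
//   void .omp.reduction.reduction_func(void *lhs[n], void *rhs[n]) {
//     *(T0 *)lhs[0] = RedOp0(*(T0 *)lhs[0], *(T0 *)rhs[0]);
//     ...
//   }
// where extra array slots carry the sizes of variably modified (VLA) items, as
// handled by the isVariablyModifiedType() branch.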
5000
5001 void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
5002                                                   const Expr *ReductionOp,
5003                                                   const Expr *PrivateRef,
5004                                                   const DeclRefExpr *LHS,
5005                                                   const DeclRefExpr *RHS) {
5006   if (PrivateRef->getType()->isArrayType()) {
5007     // Emit reduction for array section.
5008     auto *LHSVar = cast<VarDecl>(LHS->getDecl());
5009     auto *RHSVar = cast<VarDecl>(RHS->getDecl());
5010     EmitOMPAggregateReduction(
5011         CGF, PrivateRef->getType(), LHSVar, RHSVar,
5012         [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5013           emitReductionCombiner(CGF, ReductionOp);
5014         });
5015   } else
5016     // Emit reduction for array subscript or single variable.
5017     emitReductionCombiner(CGF, ReductionOp);
5018 }
5019
5020 void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
5021                                     ArrayRef<const Expr *> Privates,
5022                                     ArrayRef<const Expr *> LHSExprs,
5023                                     ArrayRef<const Expr *> RHSExprs,
5024                                     ArrayRef<const Expr *> ReductionOps,
5025                                     ReductionOptionsTy Options) {
5026   if (!CGF.HaveInsertPoint())
5027     return;
5028
5029   bool WithNowait = Options.WithNowait;
5030   bool SimpleReduction = Options.SimpleReduction;
5031
5032   // The following code should be emitted for reduction:
5033   //
5034   // static kmp_critical_name lock = { 0 };
5035   //
5036   // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
5037   //  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
5038   //  ...
5039   //  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
5040   //  *(Type<n>-1*)rhs[<n>-1]);
5041   // }
5042   //
5043   // ...
5044   // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
5045   // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
5046   // RedList, reduce_func, &<lock>)) {
5047   // case 1:
5048   //  ...
5049   //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5050   //  ...
5051   // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5052   // break;
5053   // case 2:
5054   //  ...
5055   //  Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
5056   //  ...
5057   // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
5058   // break;
5059   // default:;
5060   // }
5061   //
5062   // If SimpleReduction is true, only the following code is generated:
5063   //  ...
5064   //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5065   //  ...
5066
5067   auto &C = CGM.getContext();
5068
5069   if (SimpleReduction) {
5070     CodeGenFunction::RunCleanupsScope Scope(CGF);
5071     auto IPriv = Privates.begin();
5072     auto ILHS = LHSExprs.begin();
5073     auto IRHS = RHSExprs.begin();
5074     for (auto *E : ReductionOps) {
5075       emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
5076                                   cast<DeclRefExpr>(*IRHS));
5077       ++IPriv;
5078       ++ILHS;
5079       ++IRHS;
5080     }
5081     return;
5082   }
5083
5084   // 1. Build a list of reduction variables.
5085   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
5086   auto Size = RHSExprs.size();
5087   for (auto *E : Privates) {
5088     if (E->getType()->isVariablyModifiedType())
5089       // Reserve space for the array size.
5090       ++Size;
5091   }
5092   llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
5093   QualType ReductionArrayTy =
5094       C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
5095                              /*IndexTypeQuals=*/0);
5096   Address ReductionList =
5097       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
5098   auto IPriv = Privates.begin();
5099   unsigned Idx = 0;
5100   for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
5101     Address Elem =
5102       CGF.Builder.CreateConstArrayGEP(ReductionList, Idx, CGF.getPointerSize());
5103     CGF.Builder.CreateStore(
5104         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5105             CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
5106         Elem);
5107     if ((*IPriv)->getType()->isVariablyModifiedType()) {
5108       // Store array size.
5109       ++Idx;
5110       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
5111                                              CGF.getPointerSize());
5112       llvm::Value *Size = CGF.Builder.CreateIntCast(
5113           CGF.getVLASize(
5114                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
5115               .first,
5116           CGF.SizeTy, /*isSigned=*/false);
5117       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
5118                               Elem);
5119     }
5120   }
5121
5122   // 2. Emit reduce_func().
5123   auto *ReductionFn = emitReductionFunction(
5124       CGM, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
5125       LHSExprs, RHSExprs, ReductionOps);
5126
5127   // 3. Create static kmp_critical_name lock = { 0 };
5128   auto *Lock = getCriticalRegionLock(".reduction");
5129
5130   // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
5131   // RedList, reduce_func, &<lock>);
5132   auto *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
5133   auto *ThreadId = getThreadID(CGF, Loc);
5134   auto *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
5135   auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5136       ReductionList.getPointer(), CGF.VoidPtrTy);
5137   llvm::Value *Args[] = {
5138       IdentTLoc,                             // ident_t *<loc>
5139       ThreadId,                              // i32 <gtid>
5140       CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
5141       ReductionArrayTySize,                  // size_type sizeof(RedList)
5142       RL,                                    // void *RedList
5143       ReductionFn, // void (*) (void *, void *) <reduce_func>
5144       Lock         // kmp_critical_name *&<lock>
5145   };
5146   auto Res = CGF.EmitRuntimeCall(
5147       createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
5148                                        : OMPRTL__kmpc_reduce),
5149       Args);
5150
5151   // 5. Build switch(res)
5152   auto *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
5153   auto *SwInst = CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);
5154
5155   // 6. Build case 1:
5156   //  ...
5157   //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5158   //  ...
5159   // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5160   // break;
5161   auto *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
5162   SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
5163   CGF.EmitBlock(Case1BB);
5164
5165   // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5166   llvm::Value *EndArgs[] = {
5167       IdentTLoc, // ident_t *<loc>
5168       ThreadId,  // i32 <gtid>
5169       Lock       // kmp_critical_name *&<lock>
5170   };
5171   auto &&CodeGen = [&Privates, &LHSExprs, &RHSExprs, &ReductionOps](
5172       CodeGenFunction &CGF, PrePostActionTy &Action) {
5173     auto &RT = CGF.CGM.getOpenMPRuntime();
5174     auto IPriv = Privates.begin();
5175     auto ILHS = LHSExprs.begin();
5176     auto IRHS = RHSExprs.begin();
5177     for (auto *E : ReductionOps) {
5178       RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
5179                                      cast<DeclRefExpr>(*IRHS));
5180       ++IPriv;
5181       ++ILHS;
5182       ++IRHS;
5183     }
5184   };
5185   RegionCodeGenTy RCG(CodeGen);
5186   CommonActionTy Action(
5187       nullptr, llvm::None,
5188       createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
5189                                        : OMPRTL__kmpc_end_reduce),
5190       EndArgs);
5191   RCG.setAction(Action);
5192   RCG(CGF);
5193
5194   CGF.EmitBranch(DefaultBB);
5195
5196   // 7. Build case 2:
5197   //  ...
5198   //  Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
5199   //  ...
5200   // break;
5201   auto *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
5202   SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
5203   CGF.EmitBlock(Case2BB);
5204
5205   auto &&AtomicCodeGen = [Loc, &Privates, &LHSExprs, &RHSExprs, &ReductionOps](
5206       CodeGenFunction &CGF, PrePostActionTy &Action) {
5207     auto ILHS = LHSExprs.begin();
5208     auto IRHS = RHSExprs.begin();
5209     auto IPriv = Privates.begin();
5210     for (auto *E : ReductionOps) {
5211       const Expr *XExpr = nullptr;
5212       const Expr *EExpr = nullptr;
5213       const Expr *UpExpr = nullptr;
5214       BinaryOperatorKind BO = BO_Comma;
5215       if (auto *BO = dyn_cast<BinaryOperator>(E)) {
5216         if (BO->getOpcode() == BO_Assign) {
5217           XExpr = BO->getLHS();
5218           UpExpr = BO->getRHS();
5219         }
5220       }
5221       // Try to emit update expression as a simple atomic.
5222       auto *RHSExpr = UpExpr;
5223       if (RHSExpr) {
5224         // Analyze RHS part of the whole expression.
5225         if (auto *ACO = dyn_cast<AbstractConditionalOperator>(
5226                 RHSExpr->IgnoreParenImpCasts())) {
5227           // If this is a conditional operator, analyze its condition for
5228           // min/max reduction operator.
5229           RHSExpr = ACO->getCond();
5230         }
5231         if (auto *BORHS =
5232                 dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
5233           EExpr = BORHS->getRHS();
5234           BO = BORHS->getOpcode();
5235         }
5236       }
5237       if (XExpr) {
5238         auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5239         auto &&AtomicRedGen = [BO, VD,
5240                                Loc](CodeGenFunction &CGF, const Expr *XExpr,
5241                                     const Expr *EExpr, const Expr *UpExpr) {
5242           LValue X = CGF.EmitLValue(XExpr);
5243           RValue E;
5244           if (EExpr)
5245             E = CGF.EmitAnyExpr(EExpr);
5246           CGF.EmitOMPAtomicSimpleUpdateExpr(
5247               X, E, BO, /*IsXLHSInRHSPart=*/true,
5248               llvm::AtomicOrdering::Monotonic, Loc,
5249               [&CGF, UpExpr, VD, Loc](RValue XRValue) {
5250                 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
5251                 PrivateScope.addPrivate(
5252                     VD, [&CGF, VD, XRValue, Loc]() -> Address {
5253                       Address LHSTemp = CGF.CreateMemTemp(VD->getType());
5254                       CGF.emitOMPSimpleStore(
5255                           CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
5256                           VD->getType().getNonReferenceType(), Loc);
5257                       return LHSTemp;
5258                     });
5259                 (void)PrivateScope.Privatize();
5260                 return CGF.EmitAnyExpr(UpExpr);
5261               });
5262         };
5263         if ((*IPriv)->getType()->isArrayType()) {
5264           // Emit atomic reduction for array section.
5265           auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5266           EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
5267                                     AtomicRedGen, XExpr, EExpr, UpExpr);
5268         } else
5269           // Emit atomic reduction for array subscript or single variable.
5270           AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
5271       } else {
5272         // Emit as a critical region.
5273         auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
5274                                      const Expr *, const Expr *) {
5275           auto &RT = CGF.CGM.getOpenMPRuntime();
5276           RT.emitCriticalRegion(
5277               CGF, ".atomic_reduction",
5278               [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
5279                 Action.Enter(CGF);
5280                 emitReductionCombiner(CGF, E);
5281               },
5282               Loc);
5283         };
5284         if ((*IPriv)->getType()->isArrayType()) {
5285           auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5286           auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5287           EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
5288                                     CritRedGen);
5289         } else
5290           CritRedGen(CGF, nullptr, nullptr, nullptr);
5291       }
5292       ++ILHS;
5293       ++IRHS;
5294       ++IPriv;
5295     }
5296   };
5297   RegionCodeGenTy AtomicRCG(AtomicCodeGen);
5298   if (!WithNowait) {
5299     // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
5300     llvm::Value *EndArgs[] = {
5301         IdentTLoc, // ident_t *<loc>
5302         ThreadId,  // i32 <gtid>
5303         Lock       // kmp_critical_name *&<lock>
5304     };
5305     CommonActionTy Action(nullptr, llvm::None,
5306                           createRuntimeFunction(OMPRTL__kmpc_end_reduce),
5307                           EndArgs);
5308     AtomicRCG.setAction(Action);
5309     AtomicRCG(CGF);
5310   } else
5311     AtomicRCG(CGF);
5312
5313   CGF.EmitBranch(DefaultBB);
5314   CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
5315 }
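// Illustrative sketch: for '#pragma omp parallel for reduction(+:sum)' the
// emitted code follows the scheme documented at the top of this function: the
// addresses of the private copies are packed into RedList,
// __kmpc_reduce{_nowait}() selects the strategy, case 1 combines with a plain
// 'sum = sum + sum_priv' followed by __kmpc_end_reduce{_nowait}(), and case 2
// performs the combination atomically (or inside a critical region when the
// combiner cannot be expressed as a simple atomic update).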
5316
5317 /// Generates a unique name for artificial threadprivate variables.
5318 /// Format is: <Prefix> "." <Loc_raw_encoding> "_" <N>
5319 static std::string generateUniqueName(StringRef Prefix, SourceLocation Loc,
5320                                       unsigned N) {
5321   SmallString<256> Buffer;
5322   llvm::raw_svector_ostream Out(Buffer);
5323   Out << Prefix << "." << Loc.getRawEncoding() << "_" << N;
5324   return Out.str();
5325 }
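// For example (with a hypothetical raw location encoding of 123456),
// generateUniqueName("reduction_size", Loc, 2) yields "reduction_size.123456_2".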
5326
5327 /// Emits reduction initializer function:
5328 /// \code
5329 /// void @.red_init(void* %arg) {
5330 /// %0 = bitcast void* %arg to <type>*
5331 /// store <type> <init>, <type>* %0
5332 /// ret void
5333 /// }
5334 /// \endcode
5335 static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
5336                                            SourceLocation Loc,
5337                                            ReductionCodeGen &RCG, unsigned N) {
5338   auto &C = CGM.getContext();
5339   FunctionArgList Args;
5340   ImplicitParamDecl Param(C, C.VoidPtrTy, ImplicitParamDecl::Other);
5341   Args.emplace_back(&Param);
5342   auto &FnInfo =
5343       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5344   auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5345   auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5346                                     ".red_init.", &CGM.getModule());
5347   CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
5348   CodeGenFunction CGF(CGM);
5349   CGF.disableDebugInfo();
5350   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args);
5351   Address PrivateAddr = CGF.EmitLoadOfPointer(
5352       CGF.GetAddrOfLocalVar(&Param),
5353       C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5354   llvm::Value *Size = nullptr;
5355   // If the size of the reduction item is non-constant, load it from the global
5356   // threadprivate variable.
5357   if (RCG.getSizes(N).second) {
5358     Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5359         CGF, CGM.getContext().getSizeType(),
5360         generateUniqueName("reduction_size", Loc, N));
5361     Size =
5362         CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5363                              CGM.getContext().getSizeType(), SourceLocation());
5364   }
5365   RCG.emitAggregateType(CGF, N, Size);
5366   LValue SharedLVal;
5367   // If the reduction item's initializer comes from a declare reduction
5368   // construct, emit a pointer to the address of the original reduction item
5369   // (required by the reduction initializer).
5370   if (RCG.usesReductionInitializer(N)) {
5371     Address SharedAddr =
5372         CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5373             CGF, CGM.getContext().VoidPtrTy,
5374             generateUniqueName("reduction", Loc, N));
5375     SharedLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
5376   } else {
5377     SharedLVal = CGF.MakeNaturalAlignAddrLValue(
5378         llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
5379         CGM.getContext().VoidPtrTy);
5380   }
5381   // Emit the initializer:
5382   // %0 = bitcast void* %arg to <type>*
5383   // store <type> <init>, <type>* %0
5384   RCG.emitInitialization(CGF, N, PrivateAddr, SharedLVal,
5385                          [](CodeGenFunction &) { return false; });
5386   CGF.FinishFunction();
5387   return Fn;
5388 }
5389
5390 /// Emits reduction combiner function:
5391 /// \code
5392 /// void @.red_comb(void* %arg0, void* %arg1) {
5393 /// %lhs = bitcast void* %arg0 to <type>*
5394 /// %rhs = bitcast void* %arg1 to <type>*
5395 /// %2 = <ReductionOp>(<type>* %lhs, <type>* %rhs)
5396 /// store <type> %2, <type>* %lhs
5397 /// ret void
5398 /// }
5399 /// \endcode
5400 static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
5401                                            SourceLocation Loc,
5402                                            ReductionCodeGen &RCG, unsigned N,
5403                                            const Expr *ReductionOp,
5404                                            const Expr *LHS, const Expr *RHS,
5405                                            const Expr *PrivateRef) {
5406   auto &C = CGM.getContext();
5407   auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
5408   auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
5409   FunctionArgList Args;
5410   ImplicitParamDecl ParamInOut(C, C.VoidPtrTy, ImplicitParamDecl::Other);
5411   ImplicitParamDecl ParamIn(C, C.VoidPtrTy, ImplicitParamDecl::Other);
5412   Args.emplace_back(&ParamInOut);
5413   Args.emplace_back(&ParamIn);
5414   auto &FnInfo =
5415       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5416   auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5417   auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5418                                     ".red_comb.", &CGM.getModule());
5419   CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
5420   CodeGenFunction CGF(CGM);
5421   CGF.disableDebugInfo();
5422   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args);
5423   llvm::Value *Size = nullptr;
5424   // If the size of the reduction item is non-constant, load it from the global
5425   // threadprivate variable.
5426   if (RCG.getSizes(N).second) {
5427     Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5428         CGF, CGM.getContext().getSizeType(),
5429         generateUniqueName("reduction_size", Loc, N));
5430     Size =
5431         CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5432                              CGM.getContext().getSizeType(), SourceLocation());
5433   }
5434   RCG.emitAggregateType(CGF, N, Size);
5435   // Remap lhs and rhs variables to the addresses of the function arguments.
5436   // %lhs = bitcast void* %arg0 to <type>*
5437   // %rhs = bitcast void* %arg1 to <type>*
5438   CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
5439   PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() -> Address {
5440     // Pull out the pointer to the variable.
5441     Address PtrAddr = CGF.EmitLoadOfPointer(
5442         CGF.GetAddrOfLocalVar(&ParamInOut),
5443         C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5444     return CGF.Builder.CreateElementBitCast(
5445         PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
5446   });
5447   PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() -> Address {
5448     // Pull out the pointer to the variable.
5449     Address PtrAddr = CGF.EmitLoadOfPointer(
5450         CGF.GetAddrOfLocalVar(&ParamIn),
5451         C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5452     return CGF.Builder.CreateElementBitCast(
5453         PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
5454   });
5455   PrivateScope.Privatize();
5456   // Emit the combiner body:
5457   // %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
5458   // store <type> %2, <type>* %lhs
5459   CGM.getOpenMPRuntime().emitSingleReductionCombiner(
5460       CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
5461       cast<DeclRefExpr>(RHS));
5462   CGF.FinishFunction();
5463   return Fn;
5464 }
5465
5466 /// Emits reduction finalizer function:
5467 /// \code
5468 /// void @.red_fini(void* %arg) {
5469 /// %0 = bitcast void* %arg to <type>*
5470 /// <destroy>(<type>* %0)
5471 /// ret void
5472 /// }
5473 /// \endcode
5474 static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
5475                                            SourceLocation Loc,
5476                                            ReductionCodeGen &RCG, unsigned N) {
5477   if (!RCG.needCleanups(N))
5478     return nullptr;
5479   auto &C = CGM.getContext();
5480   FunctionArgList Args;
5481   ImplicitParamDecl Param(C, C.VoidPtrTy, ImplicitParamDecl::Other);
5482   Args.emplace_back(&Param);
5483   auto &FnInfo =
5484       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5485   auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5486   auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5487                                     ".red_fini.", &CGM.getModule());
5488   CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
5489   CodeGenFunction CGF(CGM);
5490   CGF.disableDebugInfo();
5491   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args);
5492   Address PrivateAddr = CGF.EmitLoadOfPointer(
5493       CGF.GetAddrOfLocalVar(&Param),
5494       C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5495   llvm::Value *Size = nullptr;
5496   // If the size of the reduction item is non-constant, load it from the global
5497   // threadprivate variable.
5498   if (RCG.getSizes(N).second) {
5499     Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5500         CGF, CGM.getContext().getSizeType(),
5501         generateUniqueName("reduction_size", Loc, N));
5502     Size =
5503         CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5504                              CGM.getContext().getSizeType(), SourceLocation());
5505   }
5506   RCG.emitAggregateType(CGF, N, Size);
5507   // Emit the finalizer body:
5508   // <destroy>(<type>* %0)
5509   RCG.emitCleanups(CGF, N, PrivateAddr);
5510   CGF.FinishFunction();
5511   return Fn;
5512 }
5513
5514 llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
5515     CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
5516     ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
5517   if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
5518     return nullptr;
5519
5520   // Build typedef struct:
5521   // kmp_task_red_input {
5522   //   void *reduce_shar; // shared reduction item
5523   //   size_t reduce_size; // size of data item
5524   //   void *reduce_init; // data initialization routine
5525   //   void *reduce_fini; // data finalization routine
5526   //   void *reduce_comb; // data combiner routine
5527   //   kmp_task_red_flags_t flags; // flags for additional info from compiler
5528   // } kmp_task_red_input_t;
5529   ASTContext &C = CGM.getContext();
5530   auto *RD = C.buildImplicitRecord("kmp_task_red_input_t");
5531   RD->startDefinition();
5532   const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5533   const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
5534   const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5535   const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5536   const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5537   const FieldDecl *FlagsFD = addFieldToRecordDecl(
5538       C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
5539   RD->completeDefinition();
5540   QualType RDType = C.getRecordType(RD);
5541   unsigned Size = Data.ReductionVars.size();
5542   llvm::APInt ArraySize(/*numBits=*/64, Size);
5543   QualType ArrayRDType = C.getConstantArrayType(
5544       RDType, ArraySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
5545   // kmp_task_red_input_t .rd_input.[Size];
5546   Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
5547   ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionCopies,
5548                        Data.ReductionOps);
5549   for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
5550     // kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
5551     llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
5552                            llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
5553     llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
5554         TaskRedInput.getPointer(), Idxs,
5555         /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
5556         ".rd_input.gep.");
5557     LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
5558     // ElemLVal.reduce_shar = &Shareds[Cnt];
5559     LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
5560     RCG.emitSharedLValue(CGF, Cnt);
5561     llvm::Value *CastedShared =
5562         CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer());
5563     CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
5564     RCG.emitAggregateType(CGF, Cnt);
5565     llvm::Value *SizeValInChars;
5566     llvm::Value *SizeVal;
5567     std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
5568     // We use delayed creation/initialization for VLAs, array sections and custom
5569     // reduction initializations. It is required because the runtime does not
5570     // provide a way to pass the sizes of VLAs/array sections to the
5571     // initializer/combiner/finalizer functions and does not pass the pointer to
5572     // the original reduction item to the initializer. Instead, threadprivate
5573     // global variables are used to store these values for use in those functions.
5574     bool DelayedCreation = !!SizeVal;
5575     SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
5576                                                /*isSigned=*/false);
5577     LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
5578     CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
5579     // ElemLVal.reduce_init = init;
5580     LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
5581     llvm::Value *InitAddr =
5582         CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
5583     CGF.EmitStoreOfScalar(InitAddr, InitLVal);
5584     DelayedCreation = DelayedCreation || RCG.usesReductionInitializer(Cnt);
5585     // ElemLVal.reduce_fini = fini;
5586     LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
5587     llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
5588     llvm::Value *FiniAddr = Fini
5589                                 ? CGF.EmitCastToVoidPtr(Fini)
5590                                 : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
5591     CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
5592     // ElemLVal.reduce_comb = comb;
5593     LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
5594     llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
5595         CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
5596         RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
5597     CGF.EmitStoreOfScalar(CombAddr, CombLVal);
5598     // ElemLVal.flags = 0;
5599     LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
5600     if (DelayedCreation) {
5601       CGF.EmitStoreOfScalar(
5602           llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*IsSigned=*/true),
5603           FlagsLVal);
5604     } else
5605       CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
5606   }
5607   // Build call void *__kmpc_task_reduction_init(int gtid, int num_data, void
5608   // *data);
5609   llvm::Value *Args[] = {
5610       CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
5611                                 /*isSigned=*/true),
5612       llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
5613       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
5614                                                       CGM.VoidPtrTy)};
5615   return CGF.EmitRuntimeCall(
5616       createRuntimeFunction(OMPRTL__kmpc_task_reduction_init), Args);
5617 }
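// Illustrative sketch: for '#pragma omp taskgroup task_reduction(+:x)' this
// builds a one-element kmp_task_red_input_t array describing x (its shared
// address, size, and the .red_init./.red_comb./.red_fini. helpers emitted
// above) and returns the handle produced by __kmpc_task_reduction_init();
// participating tasks later retrieve their private copies through
// __kmpc_task_reduction_get_th_data() (see getTaskReductionItem below).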
5618
5619 void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
5620                                               SourceLocation Loc,
5621                                               ReductionCodeGen &RCG,
5622                                               unsigned N) {
5623   auto Sizes = RCG.getSizes(N);
5624   // Emit a threadprivate global variable if the size is non-constant
5625   // (Sizes.second != nullptr).
5626   if (Sizes.second) {
5627     llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy,
5628                                                      /*isSigned=*/false);
5629     Address SizeAddr = getAddrOfArtificialThreadPrivate(
5630         CGF, CGM.getContext().getSizeType(),
5631         generateUniqueName("reduction_size", Loc, N));
5632     CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
5633   }
5634   // Store the address of the original reduction item if a custom initializer is used.
5635   if (RCG.usesReductionInitializer(N)) {
5636     Address SharedAddr = getAddrOfArtificialThreadPrivate(
5637         CGF, CGM.getContext().VoidPtrTy,
5638         generateUniqueName("reduction", Loc, N));
5639     CGF.Builder.CreateStore(
5640         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5641             RCG.getSharedLValue(N).getPointer(), CGM.VoidPtrTy),
5642         SharedAddr, /*IsVolatile=*/false);
5643   }
5644 }
5645
5646 Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
5647                                               SourceLocation Loc,
5648                                               llvm::Value *ReductionsPtr,
5649                                               LValue SharedLVal) {
5650   // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
5651   // *d);
5652   llvm::Value *Args[] = {
5653       CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
5654                                 /*isSigned=*/true),
5655       ReductionsPtr,
5656       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(SharedLVal.getPointer(),
5657                                                       CGM.VoidPtrTy)};
5658   return Address(
5659       CGF.EmitRuntimeCall(
5660           createRuntimeFunction(OMPRTL__kmpc_task_reduction_get_th_data), Args),
5661       SharedLVal.getAlignment());
5662 }
5663
5664 void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
5665                                        SourceLocation Loc) {
5666   if (!CGF.HaveInsertPoint())
5667     return;
5668   // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
5669   // global_tid);
5670   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
5671   // Ignore return result until untied tasks are supported.
5672   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
5673   if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
5674     Region->emitUntiedSwitch(CGF);
5675 }
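// This corresponds directly to '#pragma omp taskwait': the runtime call waits
// for completion of the child tasks of the current task before continuing.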
5676
5677 void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
5678                                            OpenMPDirectiveKind InnerKind,
5679                                            const RegionCodeGenTy &CodeGen,
5680                                            bool HasCancel) {
5681   if (!CGF.HaveInsertPoint())
5682     return;
5683   InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel);
5684   CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
5685 }
5686
5687 namespace {
5688 enum RTCancelKind {
5689   CancelNoreq = 0,
5690   CancelParallel = 1,
5691   CancelLoop = 2,
5692   CancelSections = 3,
5693   CancelTaskgroup = 4
5694 };
5695 } // anonymous namespace
5696
5697 static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
5698   RTCancelKind CancelKind = CancelNoreq;
5699   if (CancelRegion == OMPD_parallel)
5700     CancelKind = CancelParallel;
5701   else if (CancelRegion == OMPD_for)
5702     CancelKind = CancelLoop;
5703   else if (CancelRegion == OMPD_sections)
5704     CancelKind = CancelSections;
5705   else {
5706     assert(CancelRegion == OMPD_taskgroup);
5707     CancelKind = CancelTaskgroup;
5708   }
5709   return CancelKind;
5710 }
5711
5712 void CGOpenMPRuntime::emitCancellationPointCall(
5713     CodeGenFunction &CGF, SourceLocation Loc,
5714     OpenMPDirectiveKind CancelRegion) {
5715   if (!CGF.HaveInsertPoint())
5716     return;
5717   // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
5718   // global_tid, kmp_int32 cncl_kind);
5719   if (auto *OMPRegionInfo =
5720           dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
5721     // For 'cancellation point taskgroup', the task region info may not have a
5722     // cancel. This may instead happen in another adjacent task.
5723     if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
5724       llvm::Value *Args[] = {
5725           emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
5726           CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
5727       // Ignore return result until untied tasks are supported.
5728       auto *Result = CGF.EmitRuntimeCall(
5729           createRuntimeFunction(OMPRTL__kmpc_cancellationpoint), Args);
5730       // if (__kmpc_cancellationpoint()) {
5731       //   exit from construct;
5732       // }
5733       auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
5734       auto *ContBB = CGF.createBasicBlock(".cancel.continue");
5735       auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
5736       CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
5737       CGF.EmitBlock(ExitBB);
5738       // exit from construct;
5739       auto CancelDest =
5740           CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
5741       CGF.EmitBranchThroughCleanup(CancelDest);
5742       CGF.EmitBlock(ContBB, /*IsFinished=*/true);
5743     }
5744   }
5745 }
5746
5747 void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
5748                                      const Expr *IfCond,
5749                                      OpenMPDirectiveKind CancelRegion) {
5750   if (!CGF.HaveInsertPoint())
5751     return;
5752   // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
5753   // kmp_int32 cncl_kind);
5754   if (auto *OMPRegionInfo =
5755           dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
5756     auto &&ThenGen = [Loc, CancelRegion, OMPRegionInfo](CodeGenFunction &CGF,
5757                                                         PrePostActionTy &) {
5758       auto &RT = CGF.CGM.getOpenMPRuntime();
5759       llvm::Value *Args[] = {
5760           RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
5761           CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
5762       // Ignore return result until untied tasks are supported.
5763       auto *Result = CGF.EmitRuntimeCall(
5764           RT.createRuntimeFunction(OMPRTL__kmpc_cancel), Args);
5765       // if (__kmpc_cancel()) {
5766       //   exit from construct;
5767       // }
5768       auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
5769       auto *ContBB = CGF.createBasicBlock(".cancel.continue");
5770       auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
5771       CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
5772       CGF.EmitBlock(ExitBB);
5773       // exit from construct;
5774       auto CancelDest =
5775           CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
5776       CGF.EmitBranchThroughCleanup(CancelDest);
5777       CGF.EmitBlock(ContBB, /*IsFinished=*/true);
5778     };
5779     if (IfCond)
5780       emitOMPIfClause(CGF, IfCond, ThenGen,
5781                       [](CodeGenFunction &, PrePostActionTy &) {});
5782     else {
5783       RegionCodeGenTy ThenRCG(ThenGen);
5784       ThenRCG(CGF);
5785     }
5786   }
5787 }
5788
5789 /// \brief Obtain information that uniquely identifies a target entry. This
5790 /// consists of the file and device IDs as well as line number associated with
5791 /// the relevant entry source location.
5792 static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
5793                                      unsigned &DeviceID, unsigned &FileID,
5794                                      unsigned &LineNum) {
5795
5796   auto &SM = C.getSourceManager();
5797
5798   // The loc should always be valid and have a file ID (the user cannot use
5799   // #pragma directives in macros).
5800
5801   assert(Loc.isValid() && "Source location is expected to be always valid.");
5802   assert(Loc.isFileID() && "Source location is expected to refer to a file.");
5803
5804   PresumedLoc PLoc = SM.getPresumedLoc(Loc);
5805   assert(PLoc.isValid() && "Source location is expected to be always valid.");
5806
5807   llvm::sys::fs::UniqueID ID;
5808   if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
5809     llvm_unreachable("Source file with target region no longer exists!");
5810
5811   DeviceID = ID.getDevice();
5812   FileID = ID.getFile();
5813   LineNum = PLoc.getLine();
5814 }
5815
5816 void CGOpenMPRuntime::emitTargetOutlinedFunction(
5817     const OMPExecutableDirective &D, StringRef ParentName,
5818     llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
5819     bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
5820   assert(!ParentName.empty() && "Invalid target region parent name!");
5821
5822   emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
5823                                    IsOffloadEntry, CodeGen);
5824 }
5825
5826 void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
5827     const OMPExecutableDirective &D, StringRef ParentName,
5828     llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
5829     bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
5830   // Create a unique name for the entry function using the source location
5831   // information of the current target region. The name will be something like:
5832   //
5833   // __omp_offloading_DD_FFFF_PP_lBB
5834   //
5835   // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
5836   // mangled name of the function that encloses the target region and BB is the
5837   // line number of the target region.
5838
5839   unsigned DeviceID;
5840   unsigned FileID;
5841   unsigned Line;
5842   getTargetEntryUniqueInfo(CGM.getContext(), D.getLocStart(), DeviceID, FileID,
5843                            Line);
5844   SmallString<64> EntryFnName;
5845   {
5846     llvm::raw_svector_ostream OS(EntryFnName);
5847     OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
5848        << llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
5849   }
5850
5851   const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
5852
5853   CodeGenFunction CGF(CGM, true);
5854   CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
5855   CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
5856
5857   OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);
5858
5859   // If this target outlined function is not an offload entry, we don't need to
5860   // register it.
5861   if (!IsOffloadEntry)
5862     return;
5863
5864   // The target region ID is used by the runtime library to identify the current
5865   // target region, so it only has to be unique and not necessarily point to
5866   // anything. It could be the pointer to the outlined function that implements
5867   // the target region, but we aren't using that, so the compiler does not need
5868   // to keep it alive and can therefore inline the host function if that proves
5869   // worthwhile during optimization. On the other hand, if emitting code for the
5870   // device, the ID has to be the function address so that it can be retrieved
5871   // from the offloading entry and launched by the runtime library. We also give
5872   // the outlined function external linkage when emitting code for the device,
5873   // because these functions will be entry points into the device.
5874
5875   if (CGM.getLangOpts().OpenMPIsDevice) {
5876     OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
5877     OutlinedFn->setLinkage(llvm::GlobalValue::ExternalLinkage);
5878   } else
5879     OutlinedFnID = new llvm::GlobalVariable(
5880         CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
5881         llvm::GlobalValue::PrivateLinkage,
5882         llvm::Constant::getNullValue(CGM.Int8Ty), ".omp_offload.region_id");
5883
5884   // Register the information for the entry associated with this target region.
5885   OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
5886       DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
5887       /*Flags=*/0);
5888 }
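// Example (hypothetical IDs): a target region at line 42 enclosed in a function
// whose mangled name is 'foo', in a file with device ID 0x803 and file ID
// 0x1a2b3c, gets the entry function name
//   __omp_offloading_803_1a2b3c_foo_l42
// following the format built into EntryFnName above.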
5889
5890 /// Discard all CompoundStmts intervening between two constructs.
5891 static const Stmt *ignoreCompoundStmts(const Stmt *Body) {
5892   while (auto *CS = dyn_cast_or_null<CompoundStmt>(Body))
5893     Body = CS->body_front();
5894
5895   return Body;
5896 }
5897
5898 /// Emit the number of teams for a target directive.  Inspect the num_teams
5899 /// clause associated with a teams construct combined or closely nested
5900 /// with the target directive.
5901 ///
5902 /// Emit a team of size one for directives such as 'target parallel' that
5903 /// have no associated teams construct.
5904 ///
5905 /// Otherwise, return nullptr.
5906 static llvm::Value *
5907 emitNumTeamsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
5908                                CodeGenFunction &CGF,
5909                                const OMPExecutableDirective &D) {
5910
5911   assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
5912                                               "teams directive expected to be "
5913                                               "emitted only for the host!");
5914
5915   auto &Bld = CGF.Builder;
5916
5917   // If the target directive is combined with a teams directive:
5918   //   Return the value in the num_teams clause, if any.
5919   //   Otherwise, return 0 to denote the runtime default.
5920   if (isOpenMPTeamsDirective(D.getDirectiveKind())) {
5921     if (const auto *NumTeamsClause = D.getSingleClause<OMPNumTeamsClause>()) {
5922       CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
5923       auto NumTeams = CGF.EmitScalarExpr(NumTeamsClause->getNumTeams(),
5924                                          /*IgnoreResultAssign*/ true);
5925       return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
5926                                /*IsSigned=*/true);
5927     }
5928
5929     // The default value is 0.
5930     return Bld.getInt32(0);
5931   }
5932
5933   // If the target directive is combined with a parallel directive but not a
5934   // teams directive, start one team.
5935   if (isOpenMPParallelDirective(D.getDirectiveKind()))
5936     return Bld.getInt32(1);
5937
5938   // If the current target region has a teams region enclosed, we need to get
5939   // the number of teams to pass to the runtime function call. This is done
5940   // by generating the expression in an inlined region. This is required because
5941   // the expression is captured in the enclosing target environment when the
5942   // teams directive is not combined with target.
5943
5944   const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
5945
5946   if (auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
5947           ignoreCompoundStmts(CS.getCapturedStmt()))) {
5948     if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
5949       if (auto *NTE = TeamsDir->getSingleClause<OMPNumTeamsClause>()) {
5950         CGOpenMPInnerExprInfo CGInfo(CGF, CS);
5951         CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
5952         llvm::Value *NumTeams = CGF.EmitScalarExpr(NTE->getNumTeams());
5953         return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
5954                                  /*IsSigned=*/true);
5955       }
5956
5957       // If we have an enclosed teams directive but no num_teams clause we use
5958       // the default value 0.
5959       return Bld.getInt32(0);
5960     }
5961   }
5962
5963   // No teams associated with the directive.
5964   return nullptr;
5965 }
5966
5967 /// Emit the number of threads for a target directive.  Inspect the
5968 /// thread_limit clause associated with a teams construct combined or closely
5969 /// nested with the target directive.
5970 ///
5971 /// Emit the num_threads clause for directives such as 'target parallel' that
5972 /// have no associated teams construct.
5973 ///
5974 /// Otherwise, return nullptr.
5975 static llvm::Value *
5976 emitNumThreadsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
5977                                  CodeGenFunction &CGF,
5978                                  const OMPExecutableDirective &D) {
5979
5980   assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
5981                                               "teams directive expected to be "
5982                                               "emitted only for the host!");
5983
5984   auto &Bld = CGF.Builder;
5985
5986   //
5987   // If the target directive is combined with a teams directive:
5988   //   Return the value in the thread_limit clause, if any.
5989   //
5990   // If the target directive is combined with a parallel directive:
5991   //   Return the value in the num_threads clause, if any.
5992   //
5993   // If both clauses are set, select the minimum of the two.
5994   //
5995   // If neither teams nor parallel combined directives set the number of threads
5996   // in a team, return 0 to denote the runtime default.
5997   //
5998   // If this is not a teams directive return nullptr.
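  //
  // Illustrative example (editorial sketch; the clause values are made up):
  //
  //   #pragma omp target teams distribute parallel for \
  //           thread_limit(8) num_threads(6)
  //
  // emits min(6, 8) == 6 on the host; with neither clause present the emitted
  // value is 0 and the runtime picks its default.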
5999
6000   if (isOpenMPTeamsDirective(D.getDirectiveKind()) ||
6001       isOpenMPParallelDirective(D.getDirectiveKind())) {
6002     llvm::Value *DefaultThreadLimitVal = Bld.getInt32(0);
6003     llvm::Value *NumThreadsVal = nullptr;
6004     llvm::Value *ThreadLimitVal = nullptr;
6005
6006     if (const auto *ThreadLimitClause =
6007             D.getSingleClause<OMPThreadLimitClause>()) {
6008       CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
6009       auto ThreadLimit = CGF.EmitScalarExpr(ThreadLimitClause->getThreadLimit(),
6010                                             /*IgnoreResultAssign*/ true);
6011       ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty,
6012                                          /*IsSigned=*/true);
6013     }
6014
6015     if (const auto *NumThreadsClause =
6016             D.getSingleClause<OMPNumThreadsClause>()) {
6017       CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
6018       llvm::Value *NumThreads =
6019           CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
6020                              /*IgnoreResultAssign*/ true);
6021       NumThreadsVal =
6022           Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*IsSigned=*/true);
6023     }
6024
6025     // Select the lesser of thread_limit and num_threads.
6026     if (NumThreadsVal)
6027       ThreadLimitVal = ThreadLimitVal
6028                            ? Bld.CreateSelect(Bld.CreateICmpSLT(NumThreadsVal,
6029                                                                 ThreadLimitVal),
6030                                               NumThreadsVal, ThreadLimitVal)
6031                            : NumThreadsVal;
6032
6033     // Set default value passed to the runtime if either teams or a target
6034     // parallel type directive is found but no clause is specified.
6035     if (!ThreadLimitVal)
6036       ThreadLimitVal = DefaultThreadLimitVal;
6037
6038     return ThreadLimitVal;
6039   }
6040
6041   // If the current target region has a teams region enclosed, we need to get
6042   // the thread limit to pass to the runtime function call. This is done
6043   // by generating the expression in an inlined region. This is required because
6044   // the expression is captured in the enclosing target environment when the
6045   // teams directive is not combined with target.
6046
6047   const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
6048
6049   if (auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
6050           ignoreCompoundStmts(CS.getCapturedStmt()))) {
6051     if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
6052       if (auto *TLE = TeamsDir->getSingleClause<OMPThreadLimitClause>()) {
6053         CGOpenMPInnerExprInfo CGInfo(CGF, CS);
6054         CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6055         llvm::Value *ThreadLimit = CGF.EmitScalarExpr(TLE->getThreadLimit());
6056         return CGF.Builder.CreateIntCast(ThreadLimit, CGF.Int32Ty,
6057                                          /*IsSigned=*/true);
6058       }
6059
6060       // If we have an enclosed teams directive but no thread_limit clause we
6061       // use the default value 0.
6062       return CGF.Builder.getInt32(0);
6063     }
6064   }
6065
6066   // No teams associated with the directive.
6067   return nullptr;
6068 }
6069
6070 namespace {
6071 // \brief Utility to handle information from clauses associated with a given
6072 // construct that use mappable expressions (e.g. 'map' clause, 'to' clause).
6073 // It provides a convenient interface to obtain the information and generate
6074 // code for that information.
6075 class MappableExprsHandler {
6076 public:
6077   /// \brief Values for bit flags used to specify the mapping type for
6078   /// offloading.
6079   enum OpenMPOffloadMappingFlags {
6080     /// \brief Allocate memory on the device and move data from host to device.
6081     OMP_MAP_TO = 0x01,
6082     /// \brief Allocate memory on the device and move data from device to host.
6083     OMP_MAP_FROM = 0x02,
6084     /// \brief Always perform the requested mapping action on the element, even
6085     /// if it was already mapped before.
6086     OMP_MAP_ALWAYS = 0x04,
6087     /// \brief Delete the element from the device environment, ignoring the
6088     /// current reference count associated with the element.
6089     OMP_MAP_DELETE = 0x08,
6090     /// \brief The element being mapped is a pointer-pointee pair; both the
6091     /// pointer and the pointee should be mapped.
6092     OMP_MAP_PTR_AND_OBJ = 0x10,
6093     /// \brief This flag signals that the base address of an entry should be
6094     /// passed to the target kernel as an argument.
6095     OMP_MAP_TARGET_PARAM = 0x20,
6096     /// \brief Signal that the runtime library has to return the device pointer
6097     /// in the current position for the data being mapped. Used when we have the
6098     /// use_device_ptr clause.
6099     OMP_MAP_RETURN_PARAM = 0x40,
6100     /// \brief This flag signals that the reference being passed is a pointer to
6101     /// private data.
6102     OMP_MAP_PRIVATE = 0x80,
6103     /// \brief Pass the element to the device by value.
6104     OMP_MAP_LITERAL = 0x100,
6105     /// Implicit map
6106     OMP_MAP_IMPLICIT = 0x200,
6107   };
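
  /// Illustrative example (editorial addition, not from the original source):
  /// a capture mapped with 'map(always, tofrom: x)' that is also passed as a
  /// kernel argument would carry
  ///   OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS | OMP_MAP_TARGET_PARAM,
  /// i.e. 0x01 | 0x02 | 0x04 | 0x20 == 0x27.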
6108
6109   /// Class that associates information with a base pointer to be passed to the
6110   /// runtime library.
6111   class BasePointerInfo {
6112     /// The base pointer.
6113     llvm::Value *Ptr = nullptr;
6114     /// The base declaration that refers to this device pointer, or null if
6115     /// there is none.
6116     const ValueDecl *DevPtrDecl = nullptr;
6117
6118   public:
6119     BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
6120         : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
6121     llvm::Value *operator*() const { return Ptr; }
6122     const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
6123     void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
6124   };
6125
6126   typedef SmallVector<BasePointerInfo, 16> MapBaseValuesArrayTy;
6127   typedef SmallVector<llvm::Value *, 16> MapValuesArrayTy;
6128   typedef SmallVector<uint64_t, 16> MapFlagsArrayTy;
6129
6130 private:
6131   /// \brief Directive from where the map clauses were extracted.
6132   const OMPExecutableDirective &CurDir;
6133
6134   /// \brief Function the directive is being generated for.
6135   CodeGenFunction &CGF;
6136
6137   /// \brief Set of all first private variables in the current directive.
6138   llvm::SmallPtrSet<const VarDecl *, 8> FirstPrivateDecls;
6139   /// Set of all reduction variables in the current directive.
6140   llvm::SmallPtrSet<const VarDecl *, 8> ReductionDecls;
6141
6142   /// Map between device pointer declarations and their expression components.
6143   /// The key value for declarations in 'this' is null.
6144   llvm::DenseMap<
6145       const ValueDecl *,
6146       SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
6147       DevPointersMap;
6148
6149   llvm::Value *getExprTypeSize(const Expr *E) const {
6150     auto ExprTy = E->getType().getCanonicalType();
6151
6152     // Reference types are ignored for mapping purposes.
6153     if (auto *RefTy = ExprTy->getAs<ReferenceType>())
6154       ExprTy = RefTy->getPointeeType().getCanonicalType();
6155
6156     // Given that an array section is considered a built-in type, we need to
6157     // do the calculation based on the length of the section instead of relying
6158     // on CGF.getTypeSize(E->getType()).
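    // For example (editorial note; 'a' and 'n' are hypothetical): for
    //   int a[100];  ... map(a[2:n])
    // the emitted size is n * sizeof(int), computed at run time when 'n' is
    // not a compile-time constant.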
6159     if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
6160       QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
6161                             OAE->getBase()->IgnoreParenImpCasts())
6162                             .getCanonicalType();
6163
6164       // If there is no length associated with the expression, that means we
6165       // are using the whole length of the base.
6166       if (!OAE->getLength() && OAE->getColonLoc().isValid())
6167         return CGF.getTypeSize(BaseTy);
6168
6169       llvm::Value *ElemSize;
6170       if (auto *PTy = BaseTy->getAs<PointerType>())
6171         ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
6172       else {
6173         auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
6174         assert(ATy && "Expecting array type if not a pointer type.");
6175         ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
6176       }
6177
6178       // If we don't have a length at this point, that is because we have an
6179       // array section with a single element.
6180       if (!OAE->getLength())
6181         return ElemSize;
6182
6183       auto *LengthVal = CGF.EmitScalarExpr(OAE->getLength());
6184       LengthVal =
6185           CGF.Builder.CreateIntCast(LengthVal, CGF.SizeTy, /*isSigned=*/false);
6186       return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
6187     }
6188     return CGF.getTypeSize(ExprTy);
6189   }
6190
6191   /// \brief Return the corresponding bits for a given map clause modifier. Add
6192   /// a flag marking the map as a pointer if requested. Add a flag marking the
6193   /// map as the first one of a series of maps that relate to the same map
6194   /// expression.
6195   uint64_t getMapTypeBits(OpenMPMapClauseKind MapType,
6196                           OpenMPMapClauseKind MapTypeModifier, bool AddPtrFlag,
6197                           bool AddIsTargetParamFlag) const {
6198     uint64_t Bits = 0u;
6199     switch (MapType) {
6200     case OMPC_MAP_alloc:
6201     case OMPC_MAP_release:
6202       // alloc and release are the default behavior in the runtime library, i.e.
6203       // if we don't pass any bits, alloc/release is what the runtime is going
6204       // to do. Therefore, we don't need to signal anything for these two type
6205       // modifiers.
6206       break;
6207     case OMPC_MAP_to:
6208       Bits = OMP_MAP_TO;
6209       break;
6210     case OMPC_MAP_from:
6211       Bits = OMP_MAP_FROM;
6212       break;
6213     case OMPC_MAP_tofrom:
6214       Bits = OMP_MAP_TO | OMP_MAP_FROM;
6215       break;
6216     case OMPC_MAP_delete:
6217       Bits = OMP_MAP_DELETE;
6218       break;
6219     default:
6220       llvm_unreachable("Unexpected map type!");
6221       break;
6222     }
6223     if (AddPtrFlag)
6224       Bits |= OMP_MAP_PTR_AND_OBJ;
6225     if (AddIsTargetParamFlag)
6226       Bits |= OMP_MAP_TARGET_PARAM;
6227     if (MapTypeModifier == OMPC_MAP_always)
6228       Bits |= OMP_MAP_ALWAYS;
6229     return Bits;
6230   }
6231
6232   /// \brief Return true if the provided expression is a final array section. A
6233   /// final array section is one whose length can't be proved to be one.
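  /// For illustration (editorial addition; 'a', 'p' and 'n' are hypothetical):
  /// given 'int a[10]; int *p;', the section a[0:1] is not final (its length
  /// is provably one), while a[0:n] and p[0:n] are final because their length
  /// cannot be proved to be one at compile time.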
6234   bool isFinalArraySectionExpression(const Expr *E) const {
6235     auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
6236
6237     // It is not an array section and therefore not a unity-size one.
6238     if (!OASE)
6239       return false;
6240
6241     // An array section with no colon always refers to a single element.
6242     if (OASE->getColonLoc().isInvalid())
6243       return false;
6244
6245     auto *Length = OASE->getLength();
6246
6247     // If we don't have a length we have to check if the array has size 1
6248     // for this dimension. Also, we should always expect a length if the
6249     // base type is a pointer.
6250     if (!Length) {
6251       auto BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
6252                          OASE->getBase()->IgnoreParenImpCasts())
6253                          .getCanonicalType();
6254       if (auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
6255         return ATy->getSize().getSExtValue() != 1;
6256       // If we don't have a constant dimension length, we have to consider
6257       // the current section as having any size, so it is not necessarily
6258       // unitary. If it happens to be unity size, that's the user's fault.
6259       return true;
6260     }
6261
6262     // Check if the length evaluates to 1.
6263     llvm::APSInt ConstLength;
6264     if (!Length->EvaluateAsInt(ConstLength, CGF.getContext()))
6265       return true; // Can have more than size 1.
6266
6267     return ConstLength.getSExtValue() != 1;
6268   }
6269
6270   /// \brief Generate the base pointers, section pointers, sizes and map type
6271   /// bits for the provided map type, map modifier, and expression components.
6272   /// \a IsFirstComponent should be set to true if the provided set of
6273   /// components is the first associated with a capture.
6274   void generateInfoForComponentList(
6275       OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
6276       OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
6277       MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
6278       MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
6279       bool IsFirstComponentList, bool IsImplicit) const {
6280
6281     // The following summarizes what has to be generated for each map and the
6282     // types below. The generated information is expressed in this order:
6283     // base pointer, section pointer, size, flags
6284     // (to add to the ones that come from the map type and modifier).
6285     //
6286     // double d;
6287     // int i[100];
6288     // float *p;
6289     //
6290     // struct S1 {
6291     //   int i;
6292     //   float f[50];
6293     // }
6294     // struct S2 {
6295     //   int i;
6296     //   float f[50];
6297     //   S1 s;
6298     //   double *p;
6299     //   struct S2 *ps;
6300     // }
6301     // S2 s;
6302     // S2 *ps;
6303     //
6304     // map(d)
6305     // &d, &d, sizeof(double), noflags
6306     //
6307     // map(i)
6308     // &i, &i, 100*sizeof(int), noflags
6309     //
6310     // map(i[1:23])
6311     // &i(=&i[0]), &i[1], 23*sizeof(int), noflags
6312     //
6313     // map(p)
6314     // &p, &p, sizeof(float*), noflags
6315     //
6316     // map(p[1:24])
6317     // p, &p[1], 24*sizeof(float), noflags
6318     //
6319     // map(s)
6320     // &s, &s, sizeof(S2), noflags
6321     //
6322     // map(s.i)
6323     // &s, &(s.i), sizeof(int), noflags
6324     //
6325     // map(s.s.f)
6326     // &s, &(s.s.f), 50*sizeof(float), noflags
6327     //
6328     // map(s.p)
6329     // &s, &(s.p), sizeof(double*), noflags
6330     //
6331     // map(s.p[:22], s.a, s.b)
6332     // &s, &(s.p), sizeof(double*), noflags
6333     // &(s.p), &(s.p[0]), 22*sizeof(double), ptr_flag
6334     //
6335     // map(s.ps)
6336     // &s, &(s.ps), sizeof(S2*), noflags
6337     //
6338     // map(s.ps->s.i)
6339     // &s, &(s.ps), sizeof(S2*), noflags
6340     // &(s.ps), &(s.ps->s.i), sizeof(int), ptr_flag
6341     //
6342     // map(s.ps->ps)
6343     // &s, &(s.ps), sizeof(S2*), noflags
6344     // &(s.ps), &(s.ps->ps), sizeof(S2*), ptr_flag
6345     //
6346     // map(s.ps->ps->ps)
6347     // &s, &(s.ps), sizeof(S2*), noflags
6348     // &(s.ps), &(s.ps->ps), sizeof(S2*), ptr_flag
6349     // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), ptr_flag
6350     //
6351     // map(s.ps->ps->s.f[:22])
6352     // &s, &(s.ps), sizeof(S2*), noflags
6353     // &(s.ps), &(s.ps->ps), sizeof(S2*), ptr_flag
6354     // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), ptr_flag
6355     //
6356     // map(ps)
6357     // &ps, &ps, sizeof(S2*), noflags
6358     //
6359     // map(ps->i)
6360     // ps, &(ps->i), sizeof(int), noflags
6361     //
6362     // map(ps->s.f)
6363     // ps, &(ps->s.f[0]), 50*sizeof(float), noflags
6364     //
6365     // map(ps->p)
6366     // ps, &(ps->p), sizeof(double*), noflags
6367     //
6368     // map(ps->p[:22])
6369     // ps, &(ps->p), sizeof(double*), noflags
6370     // &(ps->p), &(ps->p[0]), 22*sizeof(double), ptr_flag
6371     //
6372     // map(ps->ps)
6373     // ps, &(ps->ps), sizeof(S2*), noflags
6374     //
6375     // map(ps->ps->s.i)
6376     // ps, &(ps->ps), sizeof(S2*), noflags
6377     // &(ps->ps), &(ps->ps->s.i), sizeof(int), ptr_flag
6378     //
6379     // map(ps->ps->ps)
6380     // ps, &(ps->ps), sizeof(S2*), noflags
6381     // &(ps->ps), &(ps->ps->ps), sizeof(S2*), ptr_flag
6382     //
6383     // map(ps->ps->ps->ps)
6384     // ps, &(ps->ps), sizeof(S2*), noflags
6385     // &(ps->ps), &(ps->ps->ps), sizeof(S2*), ptr_flag
6386     // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), ptr_flag
6387     //
6388     // map(ps->ps->ps->s.f[:22])
6389     // ps, &(ps->ps), sizeof(S2*), noflags
6390     // &(ps->ps), &(ps->ps->ps), sizeof(S2*), ptr_flag
6391     // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), ptr_flag
6392
6393     // Track if the map information being generated is the first for a capture.
6394     bool IsCaptureFirstInfo = IsFirstComponentList;
6395
6396     // Scan the components from the base to the complete expression.
6397     auto CI = Components.rbegin();
6398     auto CE = Components.rend();
6399     auto I = CI;
6400
6401     // Track if the map information being generated is the first for a list of
6402     // components.
6403     bool IsExpressionFirstInfo = true;
6404     llvm::Value *BP = nullptr;
6405
6406     if (auto *ME = dyn_cast<MemberExpr>(I->getAssociatedExpression())) {
6407       // The base is the 'this' pointer. The content of the pointer is going
6408       // to be the base of the field being mapped.
6409       BP = CGF.EmitScalarExpr(ME->getBase());
6410     } else {
6411       // The base is the reference to the variable.
6412       // BP = &Var.
6413       BP = CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getPointer();
6414
6415       // If the variable is a pointer and is being dereferenced (i.e. is not
6416       // the last component), the base has to be the pointer itself, not its
6417       // reference. References are ignored for mapping purposes.
6418       QualType Ty =
6419           I->getAssociatedDeclaration()->getType().getNonReferenceType();
6420       if (Ty->isAnyPointerType() && std::next(I) != CE) {
6421         auto PtrAddr = CGF.MakeNaturalAlignAddrLValue(BP, Ty);
6422         BP = CGF.EmitLoadOfPointerLValue(PtrAddr.getAddress(),
6423                                          Ty->castAs<PointerType>())
6424                  .getPointer();
6425
6426         // We do not need to generate individual map information for the
6427         // pointer; it can be associated with the combined storage.
6428         ++I;
6429       }
6430     }
6431
6432     uint64_t DefaultFlags = IsImplicit ? OMP_MAP_IMPLICIT : 0;
6433     for (; I != CE; ++I) {
6434       auto Next = std::next(I);
6435
6436       // We need to generate the addresses and sizes if this is the last
6437       // component, if the component is a pointer or if it is an array section
6438       // whose length can't be proved to be one. If this is a pointer, it
6439       // becomes the base address for the following components.
6440
6441       // A final array section is one whose length can't be proved to be one.
6442       bool IsFinalArraySection =
6443           isFinalArraySectionExpression(I->getAssociatedExpression());
6444
6445       // Get information on whether the element is a pointer. Array sections
6446       // have to be treated specially given that they are built-in
6447       // types.
6448       const auto *OASE =
6449           dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
6450       bool IsPointer =
6451           (OASE &&
6452            OMPArraySectionExpr::getBaseOriginalType(OASE)
6453                .getCanonicalType()
6454                ->isAnyPointerType()) ||
6455           I->getAssociatedExpression()->getType()->isAnyPointerType();
6456
6457       if (Next == CE || IsPointer || IsFinalArraySection) {
6458
6459         // If this is not the last component, we expect the pointer to be
6460         // associated with an array expression or member expression.
6461         assert((Next == CE ||
6462                 isa<MemberExpr>(Next->getAssociatedExpression()) ||
6463                 isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
6464                 isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) &&
6465                "Unexpected expression");
6466
6467         llvm::Value *LB =
6468             CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getPointer();
6469         auto *Size = getExprTypeSize(I->getAssociatedExpression());
6470
6471         // If we have a member expression and the current component is a
6472         // reference, we have to map the reference too. Whenever we have a
6473         // reference, the section that the reference refers to is going to be a
6474         // load instruction from the storage assigned to the reference.
6475         if (isa<MemberExpr>(I->getAssociatedExpression()) &&
6476             I->getAssociatedDeclaration()->getType()->isReferenceType()) {
6477           auto *LI = cast<llvm::LoadInst>(LB);
6478           auto *RefAddr = LI->getPointerOperand();
6479
6480           BasePointers.push_back(BP);
6481           Pointers.push_back(RefAddr);
6482           Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
6483           Types.push_back(DefaultFlags |
6484                           getMapTypeBits(
6485                               /*MapType*/ OMPC_MAP_alloc,
6486                               /*MapTypeModifier=*/OMPC_MAP_unknown,
6487                               !IsExpressionFirstInfo, IsCaptureFirstInfo));
6488           IsExpressionFirstInfo = false;
6489           IsCaptureFirstInfo = false;
6490           // The reference will be the next base address.
6491           BP = RefAddr;
6492         }
6493
6494         BasePointers.push_back(BP);
6495         Pointers.push_back(LB);
6496         Sizes.push_back(Size);
6497
6498         // We need to add a pointer flag for each map that comes from the
6499         // same expression except for the first one. We also need to signal
6500         // this map is the first one that relates to the current capture
6501         // (there is a set of entries for each capture).
6502         Types.push_back(DefaultFlags | getMapTypeBits(MapType, MapTypeModifier,
6503                                                       !IsExpressionFirstInfo,
6504                                                       IsCaptureFirstInfo));
6505
6506         // If we have a final array section, we are done with this expression.
6507         if (IsFinalArraySection)
6508           break;
6509
6510         // The pointer becomes the base for the next element.
6511         if (Next != CE)
6512           BP = LB;
6513
6514         IsExpressionFirstInfo = false;
6515         IsCaptureFirstInfo = false;
6516       }
6517     }
6518   }
6519
6520   /// \brief Return the adjusted map modifiers if the declaration a capture
6521   /// refers to appears in a first-private clause. This is expected to be used
6522   /// only with directives that start with 'target'.
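  /// For illustration (editorial note; 'x' is hypothetical): a by-reference
  /// capture of 'x' that is firstprivate in the target directive is remapped
  /// to OMP_MAP_PRIVATE | OMP_MAP_TO, and a reduction capture to
  /// OMP_MAP_TO | OMP_MAP_FROM, instead of the default map modifiers.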
6523   unsigned adjustMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap,
6524                                                unsigned CurrentModifiers) {
6525     assert(Cap.capturesVariable() && "Expected capture by reference only!");
6526
6527     // A first private variable captured by reference will use only the
6528     // 'private ptr' and 'map to' flag. Return the right flags if the captured
6529     // declaration is known as first-private in this handler.
6530     if (FirstPrivateDecls.count(Cap.getCapturedVar()))
6531       return MappableExprsHandler::OMP_MAP_PRIVATE |
6532              MappableExprsHandler::OMP_MAP_TO;
6533     // A reduction variable captured by reference will use only the 'map to_from'
6534     // flag.
6535     if (ReductionDecls.count(Cap.getCapturedVar())) {
6536       return MappableExprsHandler::OMP_MAP_TO |
6537              MappableExprsHandler::OMP_MAP_FROM;
6538     }
6539
6540     // We didn't modify anything.
6541     return CurrentModifiers;
6542   }
6543
6544 public:
6545   MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
6546       : CurDir(Dir), CGF(CGF) {
6547     // Extract firstprivate clause information.
6548     for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
6549       for (const auto *D : C->varlists())
6550         FirstPrivateDecls.insert(
6551             cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
6552     for (const auto *C : Dir.getClausesOfKind<OMPReductionClause>()) {
6553       for (const auto *D : C->varlists()) {
6554         ReductionDecls.insert(
6555             cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
6556       }
6557     }
6558     // Extract device pointer clause information.
6559     for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
6560       for (auto L : C->component_lists())
6561         DevPointersMap[L.first].push_back(L.second);
6562   }
6563
6564   /// \brief Generate all the base pointers, section pointers, sizes and map
6565   /// types for the extracted mappable expressions. Also, for each item that
6566   /// relates to a device pointer, a pair of the relevant declaration and
6567   /// index where it occurs is appended to the device pointers info array.
6568   void generateAllInfo(MapBaseValuesArrayTy &BasePointers,
6569                        MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
6570                        MapFlagsArrayTy &Types) const {
6571     BasePointers.clear();
6572     Pointers.clear();
6573     Sizes.clear();
6574     Types.clear();
6575
6576     struct MapInfo {
6577       /// Kind that defines how a device pointer has to be returned.
6578       enum ReturnPointerKind {
6579         // Don't have to return any pointer.
6580         RPK_None,
6581         // Pointer is the base of the declaration.
6582         RPK_Base,
6583         // Pointer is a member of the base declaration - 'this'
6584         RPK_Member,
6585         // Pointer is a reference and a member of the base declaration - 'this'
6586         RPK_MemberReference,
6587       };
6588       OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
6589       OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
6590       OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
6591       ReturnPointerKind ReturnDevicePointer = RPK_None;
6592       bool IsImplicit = false;
6593
6594       MapInfo() = default;
6595       MapInfo(
6596           OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
6597           OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
6598           ReturnPointerKind ReturnDevicePointer, bool IsImplicit)
6599           : Components(Components), MapType(MapType),
6600             MapTypeModifier(MapTypeModifier),
6601             ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit) {}
6602     };
6603
6604     // We have to process the component lists that relate to the same
6605     // declaration in a single chunk so that we can generate the map flags
6606     // correctly. Therefore, we organize all lists in a map.
6607     llvm::MapVector<const ValueDecl *, SmallVector<MapInfo, 8>> Info;
6608
6609     // Helper function to fill the information map for the different supported
6610     // clauses.
6611     auto &&InfoGen = [&Info](
6612         const ValueDecl *D,
6613         OMPClauseMappableExprCommon::MappableExprComponentListRef L,
6614         OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier,
6615         MapInfo::ReturnPointerKind ReturnDevicePointer, bool IsImplicit) {
6616       const ValueDecl *VD =
6617           D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
6618       Info[VD].emplace_back(L, MapType, MapModifier, ReturnDevicePointer,
6619                             IsImplicit);
6620     };
6621
6622     // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
6623     for (auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
6624       for (auto L : C->component_lists()) {
6625         InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier(),
6626                 MapInfo::RPK_None, C->isImplicit());
6627       }
6628     for (auto *C : this->CurDir.getClausesOfKind<OMPToClause>())
6629       for (auto L : C->component_lists()) {
6630         InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown,
6631                 MapInfo::RPK_None, C->isImplicit());
6632       }
6633     for (auto *C : this->CurDir.getClausesOfKind<OMPFromClause>())
6634       for (auto L : C->component_lists()) {
6635         InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown,
6636                 MapInfo::RPK_None, C->isImplicit());
6637       }
6638
6639     // Look at the use_device_ptr clause information and mark the existing map
6640     // entries as such. If there is no map information for an entry in the
6641     // use_device_ptr list, we create one with map type 'alloc' and zero size
6642     // section. It is the user's fault if that was not mapped before.
6643     // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
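    // For example (editorial note; 'p' and 'n' are hypothetical):
    //   #pragma omp target data map(tofrom: p[0:n]) use_device_ptr(p)
    // reuses the map entry generated for 'p' and only flags it to return the
    // device pointer, whereas a use_device_ptr(p) with no matching map gets
    // the zero-size entry generated below.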
6644     for (auto *C : this->CurDir.getClausesOfKind<OMPUseDevicePtrClause>())
6645       for (auto L : C->component_lists()) {
6646         assert(!L.second.empty() && "Not expecting empty list of components!");
6647         const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
6648         VD = cast<ValueDecl>(VD->getCanonicalDecl());
6649         auto *IE = L.second.back().getAssociatedExpression();
6650         // If the first component is a member expression, we have to look into
6651         // 'this', which maps to null in the map of map information. Otherwise
6652         // look directly for the information.
6653         auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
6654
6655         // We potentially have map information for this declaration already.
6656         // Look for the first set of components that refer to it.
6657         if (It != Info.end()) {
6658           auto CI = std::find_if(
6659               It->second.begin(), It->second.end(), [VD](const MapInfo &MI) {
6660                 return MI.Components.back().getAssociatedDeclaration() == VD;
6661               });
6662           // If we found a map entry, signal that the pointer has to be returned
6663           // and move on to the next declaration.
6664           if (CI != It->second.end()) {
6665             CI->ReturnDevicePointer = isa<MemberExpr>(IE)
6666                                           ? (VD->getType()->isReferenceType()
6667                                                  ? MapInfo::RPK_MemberReference
6668                                                  : MapInfo::RPK_Member)
6669                                           : MapInfo::RPK_Base;
6670             continue;
6671           }
6672         }
6673
6674         // We didn't find any match in our map information - generate a zero
6675         // size array section.
6676         // FIXME: MSVC 2013 seems to require this-> to find member CGF.
6677         llvm::Value *Ptr =
6678             this->CGF
6679                 .EmitLoadOfLValue(this->CGF.EmitLValue(IE), SourceLocation())
6680                 .getScalarVal();
6681         BasePointers.push_back({Ptr, VD});
6682         Pointers.push_back(Ptr);
6683         Sizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
6684         Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
6685       }
6686
6687     for (auto &M : Info) {
6688       // We need to know when we generate information for the first component
6689       // associated with a capture, because the mapping flags depend on it.
6690       bool IsFirstComponentList = true;
6691       for (MapInfo &L : M.second) {
6692         assert(!L.Components.empty() &&
6693                "Not expecting declaration with no component lists.");
6694
6695         // Remember the current base pointer index.
6696         unsigned CurrentBasePointersIdx = BasePointers.size();
6697         // FIXME: MSVC 2013 seems to require this-> to find the member method.
6698         this->generateInfoForComponentList(
6699             L.MapType, L.MapTypeModifier, L.Components, BasePointers, Pointers,
6700             Sizes, Types, IsFirstComponentList, L.IsImplicit);
6701
6702         // If this entry relates with a device pointer, set the relevant
6703         // declaration and add the 'return pointer' flag.
6704         if (IsFirstComponentList &&
6705             L.ReturnDevicePointer != MapInfo::RPK_None) {
6706           // If the pointer is not the base of the map, we need to skip the
6707           // base. If it is a reference in a member field, we also need to skip
6708           // the map of the reference.
6709           if (L.ReturnDevicePointer != MapInfo::RPK_Base) {
6710             ++CurrentBasePointersIdx;
6711             if (L.ReturnDevicePointer == MapInfo::RPK_MemberReference)
6712               ++CurrentBasePointersIdx;
6713           }
6714           assert(BasePointers.size() > CurrentBasePointersIdx &&
6715                  "Unexpected number of mapped base pointers.");
6716
6717           auto *RelevantVD = L.Components.back().getAssociatedDeclaration();
6718           assert(RelevantVD &&
6719                  "No relevant declaration related with device pointer??");
6720
6721           BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(RelevantVD);
6722           Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
6723         }
6724         IsFirstComponentList = false;
6725       }
6726     }
6727   }
6728
6729   /// \brief Generate the base pointers, section pointers, sizes and map types
6730   /// associated with a given capture.
6731   void generateInfoForCapture(const CapturedStmt::Capture *Cap,
6732                               llvm::Value *Arg,
6733                               MapBaseValuesArrayTy &BasePointers,
6734                               MapValuesArrayTy &Pointers,
6735                               MapValuesArrayTy &Sizes,
6736                               MapFlagsArrayTy &Types) const {
6737     assert(!Cap->capturesVariableArrayType() &&
6738            "Not expecting to generate map info for a variable array type!");
6739
6740     BasePointers.clear();
6741     Pointers.clear();
6742     Sizes.clear();
6743     Types.clear();
6744
6745     // We need to know when we are generating information for the first component
6746     // associated with a capture, because the mapping flags depend on it.
6747     bool IsFirstComponentList = true;
6748
6749     const ValueDecl *VD =
6750         Cap->capturesThis()
6751             ? nullptr
6752             : cast<ValueDecl>(Cap->getCapturedVar()->getCanonicalDecl());
6753
6754     // If this declaration appears in an is_device_ptr clause, we just have to
6755     // pass the pointer by value. If it is a reference to a declaration, we just
6756     // pass its value, otherwise, if it is a member expression, we need to map
6757     // 'to' the field.
6758     if (!VD) {
6759       auto It = DevPointersMap.find(VD);
6760       if (It != DevPointersMap.end()) {
6761         for (auto L : It->second) {
6762           generateInfoForComponentList(
6763               /*MapType=*/OMPC_MAP_to, /*MapTypeModifier=*/OMPC_MAP_unknown, L,
6764               BasePointers, Pointers, Sizes, Types, IsFirstComponentList,
6765               /*IsImplicit=*/false);
6766           IsFirstComponentList = false;
6767         }
6768         return;
6769       }
6770     } else if (DevPointersMap.count(VD)) {
6771       BasePointers.push_back({Arg, VD});
6772       Pointers.push_back(Arg);
6773       Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
6774       Types.push_back(OMP_MAP_LITERAL | OMP_MAP_TARGET_PARAM);
6775       return;
6776     }
6777
6778     // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
6779     for (auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
6780       for (auto L : C->decl_component_lists(VD)) {
6781         assert(L.first == VD &&
6782                "We got information for the wrong declaration??");
6783         assert(!L.second.empty() &&
6784                "Not expecting declaration with no component lists.");
6785         generateInfoForComponentList(
6786             C->getMapType(), C->getMapTypeModifier(), L.second, BasePointers,
6787             Pointers, Sizes, Types, IsFirstComponentList, C->isImplicit());
6788         IsFirstComponentList = false;
6789       }
6790
6791     return;
6792   }
6793
6794   /// \brief Generate the default map information for a given capture \a CI,
6795   /// record field declaration \a RI and captured value \a CV.
6796   void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
6797                               const FieldDecl &RI, llvm::Value *CV,
6798                               MapBaseValuesArrayTy &CurBasePointers,
6799                               MapValuesArrayTy &CurPointers,
6800                               MapValuesArrayTy &CurSizes,
6801                               MapFlagsArrayTy &CurMapTypes) {
6802
6803     // Do the default mapping.
6804     if (CI.capturesThis()) {
6805       CurBasePointers.push_back(CV);
6806       CurPointers.push_back(CV);
6807       const PointerType *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
6808       CurSizes.push_back(CGF.getTypeSize(PtrTy->getPointeeType()));
6809       // Default map type.
6810       CurMapTypes.push_back(OMP_MAP_TO | OMP_MAP_FROM);
6811     } else if (CI.capturesVariableByCopy()) {
6812       CurBasePointers.push_back(CV);
6813       CurPointers.push_back(CV);
6814       if (!RI.getType()->isAnyPointerType()) {
6815         // We have to signal to the runtime that this capture is passed by value
6816         // and is not a pointer.
6817         CurMapTypes.push_back(OMP_MAP_LITERAL);
6818         CurSizes.push_back(CGF.getTypeSize(RI.getType()));
6819       } else {
6820         // Pointers are implicitly mapped with a zero size and no flags
6821         // (other than first map that is added for all implicit maps).
6822         CurMapTypes.push_back(0u);
6823         CurSizes.push_back(llvm::Constant::getNullValue(CGF.SizeTy));
6824       }
6825     } else {
6826       assert(CI.capturesVariable() && "Expected captured reference.");
6827       CurBasePointers.push_back(CV);
6828       CurPointers.push_back(CV);
6829
6830       const ReferenceType *PtrTy =
6831           cast<ReferenceType>(RI.getType().getTypePtr());
6832       QualType ElementType = PtrTy->getPointeeType();
6833       CurSizes.push_back(CGF.getTypeSize(ElementType));
6834       // The default map type for a scalar/complex type is 'to' because by
6835       // default the value doesn't have to be retrieved. For an aggregate
6836       // type, the default is 'tofrom'.
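      // For example (editorial note; 'd' and 's' are hypothetical): with
      //   double d; struct S s;
      // a by-reference capture of 'd' defaults to 'to' and 's' defaults to
      // 'tofrom', unless adjustMapModifiersForPrivateClauses overrides the
      // flags for firstprivate/reduction captures.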
6837       CurMapTypes.emplace_back(adjustMapModifiersForPrivateClauses(
6838           CI, ElementType->isAggregateType() ? (OMP_MAP_TO | OMP_MAP_FROM)
6839                                              : OMP_MAP_TO));
6840     }
6841     // Every default map produces a single argument which is a target parameter.
6842     CurMapTypes.back() |= OMP_MAP_TARGET_PARAM;
6843   }
6844 };
6845
6846 enum OpenMPOffloadingReservedDeviceIDs {
6847   /// \brief Device ID used when no device was specified; the runtime should
6848   /// obtain it from the environment variables described in the spec.
6849   OMP_DEVICEID_UNDEF = -1,
6850 };
6851 } // anonymous namespace
6852
6853 /// \brief Emit the arrays used to pass the captures and map information to the
6854 /// offloading runtime library. If there is no map or capture information,
6855 /// return nullptr by reference.
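/// Illustrative sketch (editorial addition; the counts are made up): for two
/// captures this typically materializes stack arrays such as
///   .offload_baseptrs and .offload_ptrs (two void* slots each),
/// plus either a stack .offload_sizes array (when a size is only known at run
/// time) or a private constant global, and a constant global
/// .offload_maptypes array holding the map-type flags.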
6856 static void
6857 emitOffloadingArrays(CodeGenFunction &CGF,
6858                      MappableExprsHandler::MapBaseValuesArrayTy &BasePointers,
6859                      MappableExprsHandler::MapValuesArrayTy &Pointers,
6860                      MappableExprsHandler::MapValuesArrayTy &Sizes,
6861                      MappableExprsHandler::MapFlagsArrayTy &MapTypes,
6862                      CGOpenMPRuntime::TargetDataInfo &Info) {
6863   auto &CGM = CGF.CGM;
6864   auto &Ctx = CGF.getContext();
6865
6866   // Reset the array information.
6867   Info.clearArrayInfo();
6868   Info.NumberOfPtrs = BasePointers.size();
6869
6870   if (Info.NumberOfPtrs) {
6871     // Detect if we have any capture size requiring runtime evaluation of the
6872     // size so that a constant array can eventually be used.
6873     bool hasRuntimeEvaluationCaptureSize = false;
6874     for (auto *S : Sizes)
6875       if (!isa<llvm::Constant>(S)) {
6876         hasRuntimeEvaluationCaptureSize = true;
6877         break;
6878       }
6879
6880     llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
6881     QualType PointerArrayType =
6882         Ctx.getConstantArrayType(Ctx.VoidPtrTy, PointerNumAP, ArrayType::Normal,
6883                                  /*IndexTypeQuals=*/0);
6884
6885     Info.BasePointersArray =
6886         CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
6887     Info.PointersArray =
6888         CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
6889
6890     // If we don't have any VLA types or other types that require runtime
6891     // evaluation, we can use a constant array for the map sizes; otherwise we
6892     // need to fill up the arrays as we do for the pointers.
6893     if (hasRuntimeEvaluationCaptureSize) {
6894       QualType SizeArrayType = Ctx.getConstantArrayType(
6895           Ctx.getSizeType(), PointerNumAP, ArrayType::Normal,
6896           /*IndexTypeQuals=*/0);
6897       Info.SizesArray =
6898           CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
6899     } else {
6900       // We expect all the sizes to be constant, so we collect them to create
6901       // a constant array.
6902       SmallVector<llvm::Constant *, 16> ConstSizes;
6903       for (auto S : Sizes)
6904         ConstSizes.push_back(cast<llvm::Constant>(S));
6905
6906       auto *SizesArrayInit = llvm::ConstantArray::get(
6907           llvm::ArrayType::get(CGM.SizeTy, ConstSizes.size()), ConstSizes);
6908       auto *SizesArrayGbl = new llvm::GlobalVariable(
6909           CGM.getModule(), SizesArrayInit->getType(),
6910           /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
6911           SizesArrayInit, ".offload_sizes");
6912       SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
6913       Info.SizesArray = SizesArrayGbl;
6914     }
6915
6916     // The map types are always constant so we don't need to generate code to
6917     // fill arrays. Instead, we create an array constant.
6918     llvm::Constant *MapTypesArrayInit =
6919         llvm::ConstantDataArray::get(CGF.Builder.getContext(), MapTypes);
6920     auto *MapTypesArrayGbl = new llvm::GlobalVariable(
6921         CGM.getModule(), MapTypesArrayInit->getType(),
6922         /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
6923         MapTypesArrayInit, ".offload_maptypes");
6924     MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
6925     Info.MapTypesArray = MapTypesArrayGbl;
6926
6927     for (unsigned i = 0; i < Info.NumberOfPtrs; ++i) {
6928       llvm::Value *BPVal = *BasePointers[i];
6929       llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
6930           llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
6931           Info.BasePointersArray, 0, i);
6932       BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
6933           BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
6934       Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
6935       CGF.Builder.CreateStore(BPVal, BPAddr);
6936
6937       if (Info.requiresDevicePointerInfo())
6938         if (auto *DevVD = BasePointers[i].getDevicePtrDecl())
6939           Info.CaptureDeviceAddrMap.insert(std::make_pair(DevVD, BPAddr));
6940
6941       llvm::Value *PVal = Pointers[i];
6942       llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
6943           llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
6944           Info.PointersArray, 0, i);
6945       P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
6946           P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
6947       Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
6948       CGF.Builder.CreateStore(PVal, PAddr);
6949
6950       if (hasRuntimeEvaluationCaptureSize) {
6951         llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
6952             llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs),
6953             Info.SizesArray,
6954             /*Idx0=*/0,
6955             /*Idx1=*/i);
6956         Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
6957         CGF.Builder.CreateStore(
6958             CGF.Builder.CreateIntCast(Sizes[i], CGM.SizeTy, /*isSigned=*/true),
6959             SAddr);
6960       }
6961     }
6962   }
6963 }
6964 /// \brief Emit the arguments to be passed to the runtime library based on the
6965 /// arrays of pointers, sizes and map types.
6966 static void emitOffloadingArraysArgument(
6967     CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
6968     llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
6969     llvm::Value *&MapTypesArrayArg, CGOpenMPRuntime::TargetDataInfo &Info) {
6970   auto &CGM = CGF.CGM;
6971   if (Info.NumberOfPtrs) {
6972     BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
6973         llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
6974         Info.BasePointersArray,
6975         /*Idx0=*/0, /*Idx1=*/0);
6976     PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
6977         llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
6978         Info.PointersArray,
6979         /*Idx0=*/0,
6980         /*Idx1=*/0);
6981     SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
6982         llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs), Info.SizesArray,
6983         /*Idx0=*/0, /*Idx1=*/0);
6984     MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
6985         llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
6986         Info.MapTypesArray,
6987         /*Idx0=*/0,
6988         /*Idx1=*/0);
6989   } else {
6990     BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
6991     PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
6992     SizesArrayArg = llvm::ConstantPointerNull::get(CGM.SizeTy->getPointerTo());
6993     MapTypesArrayArg =
6994         llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
6995   }
6996 }
6997
6998 void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
6999                                      const OMPExecutableDirective &D,
7000                                      llvm::Value *OutlinedFn,
7001                                      llvm::Value *OutlinedFnID,
7002                                      const Expr *IfCond, const Expr *Device,
7003                                      ArrayRef<llvm::Value *> CapturedVars) {
7004   if (!CGF.HaveInsertPoint())
7005     return;
7006
7007   assert(OutlinedFn && "Invalid outlined function!");
7008
7009   // Fill up the arrays with all the captured variables.
7010   MappableExprsHandler::MapValuesArrayTy KernelArgs;
7011   MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
7012   MappableExprsHandler::MapValuesArrayTy Pointers;
7013   MappableExprsHandler::MapValuesArrayTy Sizes;
7014   MappableExprsHandler::MapFlagsArrayTy MapTypes;
7015
7016   MappableExprsHandler::MapBaseValuesArrayTy CurBasePointers;
7017   MappableExprsHandler::MapValuesArrayTy CurPointers;
7018   MappableExprsHandler::MapValuesArrayTy CurSizes;
7019   MappableExprsHandler::MapFlagsArrayTy CurMapTypes;
7020
7021   // Get mappable expression information.
7022   MappableExprsHandler MEHandler(D, CGF);
7023
7024   const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
7025   auto RI = CS.getCapturedRecordDecl()->field_begin();
7026   auto CV = CapturedVars.begin();
7027   for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
7028                                             CE = CS.capture_end();
7029        CI != CE; ++CI, ++RI, ++CV) {
7030     CurBasePointers.clear();
7031     CurPointers.clear();
7032     CurSizes.clear();
7033     CurMapTypes.clear();
7034
7035     // VLA sizes are passed to the outlined region by copy and do not have map
7036     // information associated.
7037     if (CI->capturesVariableArrayType()) {
7038       CurBasePointers.push_back(*CV);
7039       CurPointers.push_back(*CV);
7040       CurSizes.push_back(CGF.getTypeSize(RI->getType()));
7041       // Copy to the device as an argument. No need to retrieve it.
7042       CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
7043                             MappableExprsHandler::OMP_MAP_TARGET_PARAM);
7044     } else {
7045       // If we have any information in the map clause, we use it; otherwise we
7046       // just do a default mapping.
7047       MEHandler.generateInfoForCapture(CI, *CV, CurBasePointers, CurPointers,
7048                                        CurSizes, CurMapTypes);
7049       if (CurBasePointers.empty())
7050         MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers,
7051                                          CurPointers, CurSizes, CurMapTypes);
7052     }
7053     // We expect to have at least one element of information for this capture.
7054     assert(!CurBasePointers.empty() && "Non-existing map pointer for capture!");
7055     assert(CurBasePointers.size() == CurPointers.size() &&
7056            CurBasePointers.size() == CurSizes.size() &&
7057            CurBasePointers.size() == CurMapTypes.size() &&
7058            "Inconsistent map information sizes!");
7059
7060     // The kernel args are always the first elements of the base pointers
7061     // associated with a capture.
7062     KernelArgs.push_back(*CurBasePointers.front());
7063     // We need to append the results of this capture to what we already have.
7064     BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
7065     Pointers.append(CurPointers.begin(), CurPointers.end());
7066     Sizes.append(CurSizes.begin(), CurSizes.end());
7067     MapTypes.append(CurMapTypes.begin(), CurMapTypes.end());
7068   }
7069
7070   // Fill up the pointer arrays and transfer execution to the device.
7071   auto &&ThenGen = [this, &BasePointers, &Pointers, &Sizes, &MapTypes, Device,
7072                     OutlinedFn, OutlinedFnID, &D,
7073                     &KernelArgs](CodeGenFunction &CGF, PrePostActionTy &) {
7074     auto &RT = CGF.CGM.getOpenMPRuntime();
7075     // Emit the offloading arrays.
7076     TargetDataInfo Info;
7077     emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
7078     emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
7079                                  Info.PointersArray, Info.SizesArray,
7080                                  Info.MapTypesArray, Info);
7081
7082     // On top of the arrays that were filled up, the target offloading call
7083     // takes as arguments the device id as well as the host pointer. The host
7084     // pointer is used by the runtime library to identify the current target
7085     // region, so it only has to be unique and not necessarily point to
7086     // anything. It could be the pointer to the outlined function that
7087     // implements the target region, but we don't use that pointer, so the
7088     // compiler does not need to keep it alive and can inline the host
7089     // function if that proves worthwhile during optimization.
7090
7091     // From this point on, we need to have an ID of the target region defined.
7092     assert(OutlinedFnID && "Invalid outlined function ID!");
7093
7094     // Emit device ID if any.
7095     llvm::Value *DeviceID;
7096     if (Device) {
7097       DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
7098                                            CGF.Int64Ty, /*isSigned=*/true);
7099     } else {
7100       DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
7101     }
7102
7103     // Emit the number of elements in the offloading arrays.
7104     llvm::Value *PointerNum = CGF.Builder.getInt32(BasePointers.size());
7105
7106     // Return value of the runtime offloading call.
7107     llvm::Value *Return;
7108
7109     auto *NumTeams = emitNumTeamsForTargetDirective(RT, CGF, D);
7110     auto *NumThreads = emitNumThreadsForTargetDirective(RT, CGF, D);
7111
7112     bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
7113     // The target region is an outlined function launched by the runtime
7114     // via a call to __tgt_target() or __tgt_target_teams().
7115     //
7116     // __tgt_target() launches a target region with one team and one thread,
7117     // executing a serial region.  This master thread may in turn launch
7118     // more threads within its team upon encountering a parallel region,
7119     // however, no additional teams can be launched on the device.
7120     //
7121     // __tgt_target_teams() launches a target region with one or more teams,
7122     // each with one or more threads.  This call is required for target
7123     // constructs such as:
7124     //  'target teams'
7125     //  'target' / 'teams'
7126     //  'target teams distribute parallel for'
7127     //  'target parallel'
7128     // and so on.
7129     //
7130     // Note that on the host and CPU targets, the runtime implementation of
7131     // these calls simply calls the outlined function without forking threads.
7132     // The outlined functions themselves have runtime calls to
7133     // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
7134     // the compiler in emitTeamsCall() and emitParallelCall().
7135     //
7136     // In contrast, on the NVPTX target, the implementation of
7137     // __tgt_target_teams() launches a GPU kernel with the requested number
7138     // of teams and threads so no additional calls to the runtime are required.
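    // Illustrative sketch (added for exposition, not part of the original
    // source): a construct with an enclosed teams region, such as
    //   #pragma omp target teams num_teams(4) thread_limit(64)
    //   { ... }
    // defines NumTeams/NumThreads and takes the __tgt_target_teams() branch
    // below, while a bare '#pragma omp target' with no enclosed teams region
    // takes the __tgt_target() branch. Either way, a non-zero return value
    // means offloading failed and the host fallback emitted further down runs.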
7139     if (NumTeams) {
7140       // If NumTeams is defined, this means that we have an enclosed teams
7141       // region, so we also expect NumThreads to be defined. These two values
7142       // should be defined in the presence of a teams directive, regardless of
7143       // whether any clauses are associated with it. If the user writes teams
7144       // with no clauses, these two values are the defaults that should be
7145       // passed to the runtime library: a 32-bit integer with the value zero.
7146       assert(NumThreads && "Thread limit expression should be available along "
7147                            "with number of teams.");
7148       llvm::Value *OffloadingArgs[] = {
7149           DeviceID,           OutlinedFnID,
7150           PointerNum,         Info.BasePointersArray,
7151           Info.PointersArray, Info.SizesArray,
7152           Info.MapTypesArray, NumTeams,
7153           NumThreads};
7154       Return = CGF.EmitRuntimeCall(
7155           RT.createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_teams_nowait
7156                                              : OMPRTL__tgt_target_teams),
7157           OffloadingArgs);
7158     } else {
7159       llvm::Value *OffloadingArgs[] = {
7160           DeviceID,           OutlinedFnID,
7161           PointerNum,         Info.BasePointersArray,
7162           Info.PointersArray, Info.SizesArray,
7163           Info.MapTypesArray};
7164       Return = CGF.EmitRuntimeCall(
7165           RT.createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_nowait
7166                                              : OMPRTL__tgt_target),
7167           OffloadingArgs);
7168     }
7169
7170     // Check the error code and execute the host version if required.
7171     llvm::BasicBlock *OffloadFailedBlock =
7172         CGF.createBasicBlock("omp_offload.failed");
7173     llvm::BasicBlock *OffloadContBlock =
7174         CGF.createBasicBlock("omp_offload.cont");
7175     llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
7176     CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
7177
7178     CGF.EmitBlock(OffloadFailedBlock);
7179     emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, KernelArgs);
7180     CGF.EmitBranch(OffloadContBlock);
7181
7182     CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
7183   };
7184
7185   // Notify that the host version must be executed.
7186   auto &&ElseGen = [this, &D, OutlinedFn, &KernelArgs](CodeGenFunction &CGF,
7187                                                       PrePostActionTy &) {
7188     emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn,
7189                              KernelArgs);
7190   };
7191
7192   // If we have a target function ID, it means that we need to support
7193   // offloading; otherwise, just execute on the host. We need to execute on the
7194   // host regardless of the conditional in the if clause if, e.g., the user does
7195   // not specify any target triples.
7196   if (OutlinedFnID) {
7197     if (IfCond)
7198       emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
7199     else {
7200       RegionCodeGenTy ThenRCG(ThenGen);
7201       ThenRCG(CGF);
7202     }
7203   } else {
7204     RegionCodeGenTy ElseRCG(ElseGen);
7205     ElseRCG(CGF);
7206   }
7207 }
7208
7209 void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
7210                                                     StringRef ParentName) {
7211   if (!S)
7212     return;
7213
7214   // Codegen OMP target directives that offload compute to the device.
7215   bool requiresDeviceCodegen =
7216       isa<OMPExecutableDirective>(S) &&
7217       isOpenMPTargetExecutionDirective(
7218           cast<OMPExecutableDirective>(S)->getDirectiveKind());
7219
7220   if (requiresDeviceCodegen) {
7221     auto &E = *cast<OMPExecutableDirective>(S);
7222     unsigned DeviceID;
7223     unsigned FileID;
7224     unsigned Line;
7225     getTargetEntryUniqueInfo(CGM.getContext(), E.getLocStart(), DeviceID,
7226                              FileID, Line);
7227
7228     // Is this a target region that should not be emitted as an entry point? If
7229     // so, just signal that we are done with this target region.
7230     if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID,
7231                                                             ParentName, Line))
7232       return;
7233
7234     switch (S->getStmtClass()) {
7235     case Stmt::OMPTargetDirectiveClass:
7236       CodeGenFunction::EmitOMPTargetDeviceFunction(
7237           CGM, ParentName, cast<OMPTargetDirective>(*S));
7238       break;
7239     case Stmt::OMPTargetParallelDirectiveClass:
7240       CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
7241           CGM, ParentName, cast<OMPTargetParallelDirective>(*S));
7242       break;
7243     case Stmt::OMPTargetTeamsDirectiveClass:
7244       CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
7245           CGM, ParentName, cast<OMPTargetTeamsDirective>(*S));
7246       break;
7247     case Stmt::OMPTargetTeamsDistributeDirectiveClass:
7248       CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
7249           CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(*S));
7250       break;
7251     case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
7252       CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
7253           CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(*S));
7254       break;
7255     case Stmt::OMPTargetParallelForDirectiveClass:
7256       CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
7257           CGM, ParentName, cast<OMPTargetParallelForDirective>(*S));
7258       break;
7259     case Stmt::OMPTargetParallelForSimdDirectiveClass:
7260       CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
7261           CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(*S));
7262       break;
7263     case Stmt::OMPTargetSimdDirectiveClass:
7264       CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
7265           CGM, ParentName, cast<OMPTargetSimdDirective>(*S));
7266       break;
7267     default:
7268       llvm_unreachable("Unknown target directive for OpenMP device codegen.");
7269     }
7270     return;
7271   }
7272
7273   if (const OMPExecutableDirective *E = dyn_cast<OMPExecutableDirective>(S)) {
7274     if (!E->hasAssociatedStmt())
7275       return;
7276
7277     scanForTargetRegionsFunctions(
7278         cast<CapturedStmt>(E->getAssociatedStmt())->getCapturedStmt(),
7279         ParentName);
7280     return;
7281   }
7282
7283   // If this is a lambda function, look into its body.
7284   if (auto *L = dyn_cast<LambdaExpr>(S))
7285     S = L->getBody();
7286
7287   // Keep looking for target regions recursively.
7288   for (auto *II : S->children())
7289     scanForTargetRegionsFunctions(II, ParentName);
7290 }
7291
7292 bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
7293   auto &FD = *cast<FunctionDecl>(GD.getDecl());
7294
7295   // If emitting code for the host, we do not process FD here. Instead we do
7296   // the normal code generation.
7297   if (!CGM.getLangOpts().OpenMPIsDevice)
7298     return false;
7299
7300   // Try to detect target regions in the function.
7301   scanForTargetRegionsFunctions(FD.getBody(), CGM.getMangledName(GD));
7302
7303   // We should not emit any function other than the ones created during the
7304   // scanning. Therefore, we signal that this function is completely dealt
7305   // with.
7306   return true;
7307 }
7308
7309 bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
7310   if (!CGM.getLangOpts().OpenMPIsDevice)
7311     return false;
7312
7313   // Check if there are Ctors/Dtors in this declaration and look for target
7314   // regions in it. We use the complete variant to produce the kernel name
7315   // mangling.
7316   QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
7317   if (auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
7318     for (auto *Ctor : RD->ctors()) {
7319       StringRef ParentName =
7320           CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
7321       scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
7322     }
7323     auto *Dtor = RD->getDestructor();
7324     if (Dtor) {
7325       StringRef ParentName =
7326           CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
7327       scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
7328     }
7329   }
7330
7331   // If we are in target mode, we do not emit any globals (declare target is
7332   // not implemented yet). Therefore, we signal that GD was processed here.
7333   return true;
7334 }
7335
7336 bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
7337   auto *VD = GD.getDecl();
7338   if (isa<FunctionDecl>(VD))
7339     return emitTargetFunctions(GD);
7340
7341   return emitTargetGlobalVariable(GD);
7342 }
7343
7344 llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
7345   // If we have offloading in the current module, we need to emit the entries
7346   // now and register the offloading descriptor.
7347   createOffloadEntriesAndInfoMetadata();
7348
7349   // Create and register the offloading binary descriptors. This is the main
7350   // entity that captures all the information about offloading in the current
7351   // compilation unit.
7352   return createOffloadingBinaryDescriptorRegistration();
7353 }
7354
7355 void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
7356                                     const OMPExecutableDirective &D,
7357                                     SourceLocation Loc,
7358                                     llvm::Value *OutlinedFn,
7359                                     ArrayRef<llvm::Value *> CapturedVars) {
7360   if (!CGF.HaveInsertPoint())
7361     return;
7362
7363   auto *RTLoc = emitUpdateLocation(CGF, Loc);
7364   CodeGenFunction::RunCleanupsScope Scope(CGF);
7365
7366   // Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn);
7367   llvm::Value *Args[] = {
7368       RTLoc,
7369       CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
7370       CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
7371   llvm::SmallVector<llvm::Value *, 16> RealArgs;
7372   RealArgs.append(std::begin(Args), std::end(Args));
7373   RealArgs.append(CapturedVars.begin(), CapturedVars.end());
7374
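  // Illustrative sketch (added for exposition, not part of the original
  // source): for
  //   #pragma omp teams
  //   { body }
  // the call emitted below has the shape
  //   __kmpc_fork_teams(&loc, <num captured vars>, body_microtask,
  //                     captured_var_1, ..., captured_var_n);
  // mirroring how __kmpc_fork_call is emitted for '#pragma omp parallel'.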
7375   auto RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams);
7376   CGF.EmitRuntimeCall(RTLFn, RealArgs);
7377 }
7378
7379 void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
7380                                          const Expr *NumTeams,
7381                                          const Expr *ThreadLimit,
7382                                          SourceLocation Loc) {
7383   if (!CGF.HaveInsertPoint())
7384     return;
7385
7386   auto *RTLoc = emitUpdateLocation(CGF, Loc);
7387
7388   llvm::Value *NumTeamsVal =
7389       (NumTeams)
7390           ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
7391                                       CGF.CGM.Int32Ty, /* isSigned = */ true)
7392           : CGF.Builder.getInt32(0);
7393
7394   llvm::Value *ThreadLimitVal =
7395       (ThreadLimit)
7396           ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
7397                                       CGF.CGM.Int32Ty, /* isSigned = */ true)
7398           : CGF.Builder.getInt32(0);
7399
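  // Illustrative sketch (added for exposition, not part of the original
  // source): for
  //   #pragma omp teams num_teams(4) thread_limit(64)
  // this emits __kmpc_push_num_teams(&loc, gtid, 4, 64) ahead of the
  // subsequent __kmpc_fork_teams call; an omitted clause is passed as 0,
  // which here stands for "use the runtime default".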
7400   // Build call __kmpc_push_num_teams(&loc, global_tid, num_teams, thread_limit)
7401   llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
7402                                      ThreadLimitVal};
7403   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_teams),
7404                       PushNumTeamsArgs);
7405 }
7406
7407 void CGOpenMPRuntime::emitTargetDataCalls(
7408     CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
7409     const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
7410   if (!CGF.HaveInsertPoint())
7411     return;
7412
7413   // Action used to replace the default codegen action and turn privatization
7414   // off.
7415   PrePostActionTy NoPrivAction;
7416
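  // Illustrative sketch (added for exposition, not part of the original
  // source): for
  //   #pragma omp target data map(tofrom: a[0:n])
  //   { use(a); }
  // this function emits, in order,
  //   __tgt_target_data_begin(device_id, n_args, base_ptrs, ptrs, sizes, types);
  //   <body of the region>
  //   __tgt_target_data_end(device_id, n_args, base_ptrs, ptrs, sizes, types);
  // with the begin/end calls guarded by the 'if' clause when one is present.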
7417   // Generate the code for the opening of the data environment. Capture all the
7418   // arguments of the runtime call by reference because they are used in the
7419   // closing of the region.
7420   auto &&BeginThenGen = [&D, Device, &Info, &CodeGen](CodeGenFunction &CGF,
7421                                                       PrePostActionTy &) {
7422     // Fill up the arrays with all the mapped variables.
7423     MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
7424     MappableExprsHandler::MapValuesArrayTy Pointers;
7425     MappableExprsHandler::MapValuesArrayTy Sizes;
7426     MappableExprsHandler::MapFlagsArrayTy MapTypes;
7427
7428     // Get map clause information.
7429     MappableExprsHandler MCHandler(D, CGF);
7430     MCHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);
7431
7432     // Fill up the arrays and create the arguments.
7433     emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
7434
7435     llvm::Value *BasePointersArrayArg = nullptr;
7436     llvm::Value *PointersArrayArg = nullptr;
7437     llvm::Value *SizesArrayArg = nullptr;
7438     llvm::Value *MapTypesArrayArg = nullptr;
7439     emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
7440                                  SizesArrayArg, MapTypesArrayArg, Info);
7441
7442     // Emit device ID if any.
7443     llvm::Value *DeviceID = nullptr;
7444     if (Device) {
7445       DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
7446                                            CGF.Int64Ty, /*isSigned=*/true);
7447     } else {
7448       DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
7449     }
7450
7451     // Emit the number of elements in the offloading arrays.
7452     auto *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
7453
7454     llvm::Value *OffloadingArgs[] = {
7455         DeviceID,         PointerNum,    BasePointersArrayArg,
7456         PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
7457     auto &RT = CGF.CGM.getOpenMPRuntime();
7458     CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__tgt_target_data_begin),
7459                         OffloadingArgs);
7460
7461     // If device pointer privatization is required, emit the body of the region
7462     // here. It will have to be duplicated: with and without privatization.
7463     if (!Info.CaptureDeviceAddrMap.empty())
7464       CodeGen(CGF);
7465   };
7466
7467   // Generate code for the closing of the data region.
7468   auto &&EndThenGen = [Device, &Info](CodeGenFunction &CGF, PrePostActionTy &) {
7469     assert(Info.isValid() && "Invalid data environment closing arguments.");
7470
7471     llvm::Value *BasePointersArrayArg = nullptr;
7472     llvm::Value *PointersArrayArg = nullptr;
7473     llvm::Value *SizesArrayArg = nullptr;
7474     llvm::Value *MapTypesArrayArg = nullptr;
7475     emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
7476                                  SizesArrayArg, MapTypesArrayArg, Info);
7477
7478     // Emit device ID if any.
7479     llvm::Value *DeviceID = nullptr;
7480     if (Device) {
7481       DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
7482                                            CGF.Int64Ty, /*isSigned=*/true);
7483     } else {
7484       DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
7485     }
7486
7487     // Emit the number of elements in the offloading arrays.
7488     auto *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
7489
7490     llvm::Value *OffloadingArgs[] = {
7491         DeviceID,         PointerNum,    BasePointersArrayArg,
7492         PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
7493     auto &RT = CGF.CGM.getOpenMPRuntime();
7494     CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__tgt_target_data_end),
7495                         OffloadingArgs);
7496   };
7497
7498   // If we need device pointer privatization, we need to emit the body of the
7499   // region with no privatization in the 'else' branch of the conditional.
7500   // Otherwise, we don't have to do anything.
7501   auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
7502                                                          PrePostActionTy &) {
7503     if (!Info.CaptureDeviceAddrMap.empty()) {
7504       CodeGen.setAction(NoPrivAction);
7505       CodeGen(CGF);
7506     }
7507   };
7508
7509   // We don't have to do anything to close the region if the if clause evaluates
7510   // to false.
7511   auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
7512
7513   if (IfCond) {
7514     emitOMPIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
7515   } else {
7516     RegionCodeGenTy RCG(BeginThenGen);
7517     RCG(CGF);
7518   }
7519
7520   // If we don't require privatization of device pointers, we emit the body in
7521   // between the runtime calls. This avoids duplicating the body code.
7522   if (Info.CaptureDeviceAddrMap.empty()) {
7523     CodeGen.setAction(NoPrivAction);
7524     CodeGen(CGF);
7525   }
7526
7527   if (IfCond) {
7528     emitOMPIfClause(CGF, IfCond, EndThenGen, EndElseGen);
7529   } else {
7530     RegionCodeGenTy RCG(EndThenGen);
7531     RCG(CGF);
7532   }
7533 }
7534
7535 void CGOpenMPRuntime::emitTargetDataStandAloneCall(
7536     CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
7537     const Expr *Device) {
7538   if (!CGF.HaveInsertPoint())
7539     return;
7540
7541   assert((isa<OMPTargetEnterDataDirective>(D) ||
7542           isa<OMPTargetExitDataDirective>(D) ||
7543           isa<OMPTargetUpdateDirective>(D)) &&
7544          "Expecting either target enter, exit data, or update directives.");
7545
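  // Illustrative sketch (added for exposition, not part of the original
  // source): the standalone directives map to the runtime as follows,
  //   #pragma omp target enter data map(to: a[0:n])   -> __tgt_target_data_begin
  //   #pragma omp target exit data map(from: a[0:n])  -> __tgt_target_data_end
  //   #pragma omp target update from(a[0:n])          -> __tgt_target_data_update
  // with the '_nowait' variants selected when a 'nowait' clause is present.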
7546   // Generate the code for the opening of the data environment.
7547   auto &&ThenGen = [&D, Device](CodeGenFunction &CGF, PrePostActionTy &) {
7548     // Fill up the arrays with all the mapped variables.
7549     MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
7550     MappableExprsHandler::MapValuesArrayTy Pointers;
7551     MappableExprsHandler::MapValuesArrayTy Sizes;
7552     MappableExprsHandler::MapFlagsArrayTy MapTypes;
7553
7554     // Get map clause information.
7555     MappableExprsHandler MEHandler(D, CGF);
7556     MEHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);
7557
7558     // Fill up the arrays and create the arguments.
7559     TargetDataInfo Info;
7560     emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
7561     emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
7562                                  Info.PointersArray, Info.SizesArray,
7563                                  Info.MapTypesArray, Info);
7564
7565     // Emit device ID if any.
7566     llvm::Value *DeviceID = nullptr;
7567     if (Device) {
7568       DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
7569                                            CGF.Int64Ty, /*isSigned=*/true);
7570     } else {
7571       DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
7572     }
7573
7574     // Emit the number of elements in the offloading arrays.
7575     auto *PointerNum = CGF.Builder.getInt32(BasePointers.size());
7576
7577     llvm::Value *OffloadingArgs[] = {
7578         DeviceID,           PointerNum,      Info.BasePointersArray,
7579         Info.PointersArray, Info.SizesArray, Info.MapTypesArray};
7580
7581     auto &RT = CGF.CGM.getOpenMPRuntime();
7582     // Select the right runtime function call for each expected standalone
7583     // directive.
7584     const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
7585     OpenMPRTLFunction RTLFn;
7586     switch (D.getDirectiveKind()) {
7587     default:
7588       llvm_unreachable("Unexpected standalone target data directive.");
7589       break;
7590     case OMPD_target_enter_data:
7591       RTLFn = HasNowait ? OMPRTL__tgt_target_data_begin_nowait
7592                         : OMPRTL__tgt_target_data_begin;
7593       break;
7594     case OMPD_target_exit_data:
7595       RTLFn = HasNowait ? OMPRTL__tgt_target_data_end_nowait
7596                         : OMPRTL__tgt_target_data_end;
7597       break;
7598     case OMPD_target_update:
7599       RTLFn = HasNowait ? OMPRTL__tgt_target_data_update_nowait
7600                         : OMPRTL__tgt_target_data_update;
7601       break;
7602     }
7603     CGF.EmitRuntimeCall(RT.createRuntimeFunction(RTLFn), OffloadingArgs);
7604   };
7605
7606   // In the event we get an if clause, we don't have to take any action on the
7607   // else side.
7608   auto &&ElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
7609
7610   if (IfCond) {
7611     emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
7612   } else {
7613     RegionCodeGenTy ThenGenRCG(ThenGen);
7614     ThenGenRCG(CGF);
7615   }
7616 }
7617
7618 namespace {
7619   /// Kind of parameter in a function with 'declare simd' directive.
7620   enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector };
7621   /// Attribute set of the parameter.
7622   struct ParamAttrTy {
7623     ParamKindTy Kind = Vector;
7624     llvm::APSInt StrideOrArg;
7625     llvm::APSInt Alignment;
7626   };
7627 } // namespace
7628
7629 static unsigned evaluateCDTSize(const FunctionDecl *FD,
7630                                 ArrayRef<ParamAttrTy> ParamAttrs) {
7631   // Every vector variant of a SIMD-enabled function has a vector length (VLEN).
7632   // If the OpenMP clause "simdlen" is used, the VLEN is the value of the
7633   // argument of that clause. The VLEN value must be a power of 2.
7634   // Otherwise, the notion of the function's "characteristic data type" (CDT)
7635   // is used to compute the vector length.
7636   // The CDT is determined in the following order:
7637   //   a) For a non-void function, the CDT is the return type.
7638   //   b) If the function has any non-uniform, non-linear parameters, the CDT
7639   //   is the type of the first such parameter.
7640   //   c) If the CDT determined by a) or b) above is a struct, union, or class
7641   //   type that is passed by value (except for a type that maps to the
7642   //   built-in complex data type), the characteristic data type is int.
7643   //   d) If none of the above three cases applies, the CDT is int.
7644   // The VLEN is then determined from the CDT and the size of the vector
7645   // register of the ISA for which the current vector version is generated. The
7646   // VLEN is computed using the formula below:
7647   //   VLEN = sizeof(vector_register) / sizeof(CDT),
7648   // where the vector register size is specified in section 3.2.1 "Registers
7649   // and the Stack Frame" of the original AMD64 ABI document.
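  // Worked example (added for exposition, not part of the original source):
  // for
  //   #pragma omp declare simd
  //   double foo(double x);
  // rule a) gives CDT = double, so with a 256-bit (AVX) vector register
  //   VLEN = 256 / 64 = 4.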
7650   QualType RetType = FD->getReturnType();
7651   if (RetType.isNull())
7652     return 0;
7653   ASTContext &C = FD->getASTContext();
7654   QualType CDT;
7655   if (!RetType.isNull() && !RetType->isVoidType())
7656     CDT = RetType;
7657   else {
7658     unsigned Offset = 0;
7659     if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
7660       if (ParamAttrs[Offset].Kind == Vector)
7661         CDT = C.getPointerType(C.getRecordType(MD->getParent()));
7662       ++Offset;
7663     }
7664     if (CDT.isNull()) {
7665       for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
7666         if (ParamAttrs[I + Offset].Kind == Vector) {
7667           CDT = FD->getParamDecl(I)->getType();
7668           break;
7669         }
7670       }
7671     }
7672   }
7673   if (CDT.isNull())
7674     CDT = C.IntTy;
7675   CDT = CDT->getCanonicalTypeUnqualified();
7676   if (CDT->isRecordType() || CDT->isUnionType())
7677     CDT = C.IntTy;
7678   return C.getTypeSize(CDT);
7679 }
7680
7681 static void
7682 emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
7683                            const llvm::APSInt &VLENVal,
7684                            ArrayRef<ParamAttrTy> ParamAttrs,
7685                            OMPDeclareSimdDeclAttr::BranchStateTy State) {
7686   struct ISADataTy {
7687     char ISA;
7688     unsigned VecRegSize;
7689   };
7690   ISADataTy ISAData[] = {
7691       {'b', 128}, // SSE
7692       {'c', 256}, // AVX
7693       {'d', 256}, // AVX2
7694       {'e', 512}, // AVX512
7695   };
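  // Illustrative sketch (added for exposition, not part of the original
  // source): the loops below attach one mangled variant name per (mask, ISA)
  // pair. For example, for
  //   #pragma omp declare simd simdlen(4)
  //   double foo(double x);
  // the not-in-branch AVX2 variant gets the attribute "_ZGVdN4v_foo", i.e.
  // "_ZGV" + ISA 'd' + mask 'N' + VLEN 4 + 'v' (vector parameter) + "_foo".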
7704   llvm::SmallVector<char, 2> Masked;
7705   switch (State) {
7706   case OMPDeclareSimdDeclAttr::BS_Undefined:
7707     Masked.push_back('N');
7708     Masked.push_back('M');
7709     break;
7710   case OMPDeclareSimdDeclAttr::BS_Notinbranch:
7711     Masked.push_back('N');
7712     break;
7713   case OMPDeclareSimdDeclAttr::BS_Inbranch:
7714     Masked.push_back('M');
7715     break;
7716   }
7717   for (auto Mask : Masked) {
7718     for (auto &Data : ISAData) {
7719       SmallString<256> Buffer;
7720       llvm::raw_svector_ostream Out(Buffer);
7721       Out << "_ZGV" << Data.ISA << Mask;
7722       if (!VLENVal) {
7723         Out << llvm::APSInt::getUnsigned(Data.VecRegSize /
7724                                          evaluateCDTSize(FD, ParamAttrs));
7725       } else
7726         Out << VLENVal;
7727       for (auto &ParamAttr : ParamAttrs) {
7728         switch (ParamAttr.Kind){
7729         case LinearWithVarStride:
7730           Out << 's' << ParamAttr.StrideOrArg;
7731           break;
7732         case Linear:
7733           Out << 'l';
7734           if (!!ParamAttr.StrideOrArg)
7735             Out << ParamAttr.StrideOrArg;
7736           break;
7737         case Uniform:
7738           Out << 'u';
7739           break;
7740         case Vector:
7741           Out << 'v';
7742           break;
7743         }
7744         if (!!ParamAttr.Alignment)
7745           Out << 'a' << ParamAttr.Alignment;
7746       }
7747       Out << '_' << Fn->getName();
7748       Fn->addFnAttr(Out.str());
7749     }
7750   }
7751 }
7752
7753 void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
7754                                               llvm::Function *Fn) {
7755   ASTContext &C = CGM.getContext();
7756   FD = FD->getCanonicalDecl();
7757   // Map params to their positions in function decl.
7758   llvm::DenseMap<const Decl *, unsigned> ParamPositions;
7759   if (isa<CXXMethodDecl>(FD))
7760     ParamPositions.insert({FD, 0});
7761   unsigned ParamPos = ParamPositions.size();
7762   for (auto *P : FD->parameters()) {
7763     ParamPositions.insert({P->getCanonicalDecl(), ParamPos});
7764     ++ParamPos;
7765   }
7766   for (auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
7767     llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
7768     // Mark uniform parameters.
7769     for (auto *E : Attr->uniforms()) {
7770       E = E->IgnoreParenImpCasts();
7771       unsigned Pos;
7772       if (isa<CXXThisExpr>(E))
7773         Pos = ParamPositions[FD];
7774       else {
7775         auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
7776                         ->getCanonicalDecl();
7777         Pos = ParamPositions[PVD];
7778       }
7779       ParamAttrs[Pos].Kind = Uniform;
7780     }
7781     // Get alignment info.
7782     auto NI = Attr->alignments_begin();
7783     for (auto *E : Attr->aligneds()) {
7784       E = E->IgnoreParenImpCasts();
7785       unsigned Pos;
7786       QualType ParmTy;
7787       if (isa<CXXThisExpr>(E)) {
7788         Pos = ParamPositions[FD];
7789         ParmTy = E->getType();
7790       } else {
7791         auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
7792                         ->getCanonicalDecl();
7793         Pos = ParamPositions[PVD];
7794         ParmTy = PVD->getType();
7795       }
7796       ParamAttrs[Pos].Alignment =
7797           (*NI) ? (*NI)->EvaluateKnownConstInt(C)
7798                 : llvm::APSInt::getUnsigned(
7799                       C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy))
7800                           .getQuantity());
7801       ++NI;
7802     }
7803     // Mark linear parameters.
7804     auto SI = Attr->steps_begin();
7805     auto MI = Attr->modifiers_begin();
7806     for (auto *E : Attr->linears()) {
7807       E = E->IgnoreParenImpCasts();
7808       unsigned Pos;
7809       if (isa<CXXThisExpr>(E))
7810         Pos = ParamPositions[FD];
7811       else {
7812         auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
7813                         ->getCanonicalDecl();
7814         Pos = ParamPositions[PVD];
7815       }
7816       auto &ParamAttr = ParamAttrs[Pos];
7817       ParamAttr.Kind = Linear;
7818       if (*SI) {
7819         if (!(*SI)->EvaluateAsInt(ParamAttr.StrideOrArg, C,
7820                                   Expr::SE_AllowSideEffects)) {
7821           if (auto *DRE = dyn_cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
7822             if (auto *StridePVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
7823               ParamAttr.Kind = LinearWithVarStride;
7824               ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
7825                   ParamPositions[StridePVD->getCanonicalDecl()]);
7826             }
7827           }
7828         }
7829       }
7830       ++SI;
7831       ++MI;
7832     }
7833     llvm::APSInt VLENVal;
7834     if (const Expr *VLEN = Attr->getSimdlen())
7835       VLENVal = VLEN->EvaluateKnownConstInt(C);
7836     OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
7837     if (CGM.getTriple().getArch() == llvm::Triple::x86 ||
7838         CGM.getTriple().getArch() == llvm::Triple::x86_64)
7839       emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
7840   }
7841 }
7842
7843 namespace {
7844 /// Cleanup action for doacross support.
7845 class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
7846 public:
7847   static const int DoacrossFinArgs = 2;
7848
7849 private:
7850   llvm::Value *RTLFn;
7851   llvm::Value *Args[DoacrossFinArgs];
7852
7853 public:
7854   DoacrossCleanupTy(llvm::Value *RTLFn, ArrayRef<llvm::Value *> CallArgs)
7855       : RTLFn(RTLFn) {
7856     assert(CallArgs.size() == DoacrossFinArgs);
7857     std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
7858   }
7859   void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
7860     if (!CGF.HaveInsertPoint())
7861       return;
7862     CGF.EmitRuntimeCall(RTLFn, Args);
7863   }
7864 };
7865 } // namespace
7866
7867 void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
7868                                        const OMPLoopDirective &D) {
7869   if (!CGF.HaveInsertPoint())
7870     return;
7871
7872   ASTContext &C = CGM.getContext();
7873   QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
7874   RecordDecl *RD;
7875   if (KmpDimTy.isNull()) {
7876     // Build struct kmp_dim {  // loop bounds info cast to kmp_int64
7877     //  kmp_int64 lo; // lower
7878     //  kmp_int64 up; // upper
7879     //  kmp_int64 st; // stride
7880     // };
7881     RD = C.buildImplicitRecord("kmp_dim");
7882     RD->startDefinition();
7883     addFieldToRecordDecl(C, RD, Int64Ty);
7884     addFieldToRecordDecl(C, RD, Int64Ty);
7885     addFieldToRecordDecl(C, RD, Int64Ty);
7886     RD->completeDefinition();
7887     KmpDimTy = C.getRecordType(RD);
7888   } else
7889     RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
7890
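  // Illustrative sketch (added for exposition, not part of the original
  // source): for
  //   #pragma omp for ordered(1)
  //   for (int i = 0; i < n; ++i) { ... }
  // a single kmp_dim is filled below with lo = 0 (from the null
  // initialization), up = <number of iterations>, st = 1, and passed to
  // __kmpc_doacross_init; the matching __kmpc_doacross_fini call is
  // registered as a cleanup at the end of this function.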
7891   Address DimsAddr = CGF.CreateMemTemp(KmpDimTy, "dims");
7892   CGF.EmitNullInitialization(DimsAddr, KmpDimTy);
7893   enum { LowerFD = 0, UpperFD, StrideFD };
7894   // Fill dims with data.
7895   LValue DimsLVal = CGF.MakeAddrLValue(DimsAddr, KmpDimTy);
7896   // dims.upper = num_iterations;
7897   LValue UpperLVal =
7898       CGF.EmitLValueForField(DimsLVal, *std::next(RD->field_begin(), UpperFD));
7899   llvm::Value *NumIterVal = CGF.EmitScalarConversion(
7900       CGF.EmitScalarExpr(D.getNumIterations()), D.getNumIterations()->getType(),
7901       Int64Ty, D.getNumIterations()->getExprLoc());
7902   CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
7903   // dims.stride = 1;
7904   LValue StrideLVal =
7905       CGF.EmitLValueForField(DimsLVal, *std::next(RD->field_begin(), StrideFD));
7906   CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
7907                         StrideLVal);
7908
7909   // Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
7910   // kmp_int32 num_dims, struct kmp_dim * dims);
7911   llvm::Value *Args[] = {emitUpdateLocation(CGF, D.getLocStart()),
7912                          getThreadID(CGF, D.getLocStart()),
7913                          llvm::ConstantInt::getSigned(CGM.Int32Ty, 1),
7914                          CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
7915                              DimsAddr.getPointer(), CGM.VoidPtrTy)};
7916
7917   llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_init);
7918   CGF.EmitRuntimeCall(RTLFn, Args);
7919   llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
7920       emitUpdateLocation(CGF, D.getLocEnd()), getThreadID(CGF, D.getLocEnd())};
7921   llvm::Value *FiniRTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_fini);
7922   CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
7923                                              llvm::makeArrayRef(FiniArgs));
7924 }
7925
7926 void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
7927                                           const OMPDependClause *C) {
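  // Illustrative sketch (added for exposition, not part of the original
  // source):
  //   #pragma omp ordered depend(source)       -> __kmpc_doacross_post
  //   #pragma omp ordered depend(sink: i - 1)  -> __kmpc_doacross_wait
  // with the dependence counter converted to kmp_int64 and passed through
  // the temporary '.cnt.addr' built below.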
7928   QualType Int64Ty =
7929       CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
7930   const Expr *CounterVal = C->getCounterValue();
7931   assert(CounterVal);
7932   llvm::Value *CntVal = CGF.EmitScalarConversion(CGF.EmitScalarExpr(CounterVal),
7933                                                  CounterVal->getType(), Int64Ty,
7934                                                  CounterVal->getExprLoc());
7935   Address CntAddr = CGF.CreateMemTemp(Int64Ty, ".cnt.addr");
7936   CGF.EmitStoreOfScalar(CntVal, CntAddr, /*Volatile=*/false, Int64Ty);
7937   llvm::Value *Args[] = {emitUpdateLocation(CGF, C->getLocStart()),
7938                          getThreadID(CGF, C->getLocStart()),
7939                          CntAddr.getPointer()};
7940   llvm::Value *RTLFn;
7941   if (C->getDependencyKind() == OMPC_DEPEND_source)
7942     RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
7943   else {
7944     assert(C->getDependencyKind() == OMPC_DEPEND_sink);
7945     RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_wait);
7946   }
7947   CGF.EmitRuntimeCall(RTLFn, Args);
7948 }
7949
7950 void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, llvm::Value *Callee,
7951                                ArrayRef<llvm::Value *> Args,
7952                                SourceLocation Loc) const {
7953   auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
7954
7955   if (auto *Fn = dyn_cast<llvm::Function>(Callee)) {
7956     if (Fn->doesNotThrow()) {
7957       CGF.EmitNounwindRuntimeCall(Fn, Args);
7958       return;
7959     }
7960   }
7961   CGF.EmitRuntimeCall(Callee, Args);
7962 }
7963
7964 void CGOpenMPRuntime::emitOutlinedFunctionCall(
7965     CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
7966     ArrayRef<llvm::Value *> Args) const {
7967   assert(Loc.isValid() && "Outlined function call location must be valid.");
7968   emitCall(CGF, OutlinedFn, Args, Loc);
7969 }
7970
7971 Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
7972                                              const VarDecl *NativeParam,
7973                                              const VarDecl *TargetParam) const {
7974   return CGF.GetAddrOfLocalVar(NativeParam);
7975 }