]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp
Merge lld trunk r338150 (just before the 7.0.0 branch point), and
[FreeBSD/FreeBSD.git] / contrib / llvm / tools / clang / lib / CodeGen / CGOpenMPRuntime.cpp
1 //===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This provides a class for OpenMP runtime code generation.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "CGCXXABI.h"
15 #include "CGCleanup.h"
16 #include "CGOpenMPRuntime.h"
17 #include "CGRecordLayout.h"
18 #include "CodeGenFunction.h"
19 #include "clang/CodeGen/ConstantInitBuilder.h"
20 #include "clang/AST/Decl.h"
21 #include "clang/AST/StmtOpenMP.h"
22 #include "clang/Basic/BitmaskEnum.h"
23 #include "llvm/ADT/ArrayRef.h"
24 #include "llvm/Bitcode/BitcodeReader.h"
25 #include "llvm/IR/CallSite.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/GlobalValue.h"
28 #include "llvm/IR/Value.h"
29 #include "llvm/Support/Format.h"
30 #include "llvm/Support/raw_ostream.h"
31 #include <cassert>
32
33 using namespace clang;
34 using namespace CodeGen;
35
36 namespace {
/// Base class for handling code generation inside OpenMP regions.
class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
public:
  /// Kinds of OpenMP regions used in codegen.
  enum CGOpenMPRegionKind {
    /// Region with outlined function for standalone 'parallel'
    /// directive.
    ParallelOutlinedRegion,
    /// Region with outlined function for standalone 'task' directive.
    TaskOutlinedRegion,
    /// Region for constructs that do not require function outlining,
    /// like 'for', 'sections', 'atomic' etc. directives.
    InlinedRegion,
    /// Region with outlined function for standalone 'target' directive.
    TargetRegion,
  };

  /// Constructor for regions attached to a captured statement (outlined
  /// regions).
  CGOpenMPRegionInfo(const CapturedStmt &CS,
                     const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
        CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}

  /// Constructor for regions without a captured statement (inlined regions).
  CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
        Kind(Kind), HasCancel(HasCancel) {}

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  virtual const VarDecl *getThreadIDVariable() const = 0;

  /// Emit the captured statement body.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;

  /// Get an LValue for the current ThreadID variable.
  /// \return LValue for thread id variable. This LValue always has type int32*.
  virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);

  /// Emit a task-switching point for untied tasks; a no-op by default,
  /// overridden by regions that support untied tasks.
  virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}

  /// Kind of region (outlined/task/inlined/target) this info describes.
  CGOpenMPRegionKind getRegionKind() const { return RegionKind; }

  /// OpenMP directive this region was created for.
  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  /// Whether the region may be exited via a 'cancel' construct.
  bool hasCancel() const { return HasCancel; }

  /// LLVM-style RTTI: every CGOpenMPRegionInfo reports capture-region kind
  /// CR_OpenMP.
  static bool classof(const CGCapturedStmtInfo *Info) {
    return Info->getKind() == CR_OpenMP;
  }

  ~CGOpenMPRegionInfo() override = default;

protected:
  CGOpenMPRegionKind RegionKind;
  RegionCodeGenTy CodeGen;
  OpenMPDirectiveKind Kind;
  bool HasCancel;
};
98
99 /// API for captured statement code generation in OpenMP constructs.
100 class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
101 public:
102   CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
103                              const RegionCodeGenTy &CodeGen,
104                              OpenMPDirectiveKind Kind, bool HasCancel,
105                              StringRef HelperName)
106       : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
107                            HasCancel),
108         ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
109     assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
110   }
111
112   /// Get a variable or parameter for storing global thread id
113   /// inside OpenMP construct.
114   const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
115
116   /// Get the name of the capture helper.
117   StringRef getHelperName() const override { return HelperName; }
118
119   static bool classof(const CGCapturedStmtInfo *Info) {
120     return CGOpenMPRegionInfo::classof(Info) &&
121            cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
122                ParallelOutlinedRegion;
123   }
124
125 private:
126   /// A variable or parameter storing global thread id for OpenMP
127   /// constructs.
128   const VarDecl *ThreadIDVar;
129   StringRef HelperName;
130 };
131
/// API for captured statement code generation in OpenMP constructs.
class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
  /// Pre/post action that emits the dispatch machinery an untied task needs
  /// so it can resume at the correct part after being rescheduled. All of it
  /// is a no-op when the task is tied.
  class UntiedTaskActionTy final : public PrePostActionTy {
    bool Untied;
    /// Parameter holding a pointer to the current part id of the task.
    const VarDecl *PartIDVar;
    /// Code sequence emitted at each switching point, before yielding.
    const RegionCodeGenTy UntiedCodeGen;
    /// Switch on the part id; cases are added as switching points are emitted.
    llvm::SwitchInst *UntiedSwitch = nullptr;

  public:
    UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
                       const RegionCodeGenTy &UntiedCodeGen)
        : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
    /// Emits the entry switch on *PartIDVar plus the first (part 0)
    /// switching point.
    void Enter(CodeGenFunction &CGF) override {
      if (Untied) {
        // Emit task switching point.
        LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        llvm::Value *Res =
            CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
        // Unknown part ids fall through to the default (done) block, which
        // simply returns from the task entry.
        llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
        UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
        CGF.EmitBlock(DoneBB);
        CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
        // Part 0 resumes right here.
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        UntiedSwitch->addCase(CGF.Builder.getInt32(0),
                              CGF.Builder.GetInsertBlock());
        emitUntiedSwitch(CGF);
      }
    }
    /// Emits one switching point: stores the next part id, runs
    /// UntiedCodeGen, returns from the task, and registers the resume block
    /// as a new case of the entry switch.
    void emitUntiedSwitch(CodeGenFunction &CGF) const {
      if (Untied) {
        LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        // The next part id is the number of cases emitted so far.
        CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              PartIdLVal);
        UntiedCodeGen(CGF);
        CodeGenFunction::JumpDest CurPoint =
            CGF.getJumpDestInCurrentScope(".untied.next.");
        CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              CGF.Builder.GetInsertBlock());
        CGF.EmitBranchThroughCleanup(CurPoint);
        CGF.EmitBlock(CurPoint.getBlock());
      }
    }
    /// Number of parts emitted so far. NOTE(review): reads UntiedSwitch,
    /// which is only set once Enter() has run for an untied task.
    unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
  };
  CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
                                 const VarDecl *ThreadIDVar,
                                 const RegionCodeGenTy &CodeGen,
                                 OpenMPDirectiveKind Kind, bool HasCancel,
                                 const UntiedTaskActionTy &Action)
      : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
        ThreadIDVar(ThreadIDVar), Action(Action) {
    assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }

  /// Get an LValue for the current ThreadID variable.
  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return ".omp_outlined."; }

  /// Delegates switching-point emission to the untied-task action.
  void emitUntiedSwitch(CodeGenFunction &CGF) override {
    Action.emitUntiedSwitch(CGF);
  }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
               TaskOutlinedRegion;
  }

private:
  /// A variable or parameter storing global thread id for OpenMP
  /// constructs.
  const VarDecl *ThreadIDVar;
  /// Action for emitting code for untied tasks.
  const UntiedTaskActionTy &Action;
};
220
221 /// API for inlined captured statement code generation in OpenMP
222 /// constructs.
223 class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
224 public:
225   CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
226                             const RegionCodeGenTy &CodeGen,
227                             OpenMPDirectiveKind Kind, bool HasCancel)
228       : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
229         OldCSI(OldCSI),
230         OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
231
232   // Retrieve the value of the context parameter.
233   llvm::Value *getContextValue() const override {
234     if (OuterRegionInfo)
235       return OuterRegionInfo->getContextValue();
236     llvm_unreachable("No context value for inlined OpenMP region");
237   }
238
239   void setContextValue(llvm::Value *V) override {
240     if (OuterRegionInfo) {
241       OuterRegionInfo->setContextValue(V);
242       return;
243     }
244     llvm_unreachable("No context value for inlined OpenMP region");
245   }
246
247   /// Lookup the captured field decl for a variable.
248   const FieldDecl *lookup(const VarDecl *VD) const override {
249     if (OuterRegionInfo)
250       return OuterRegionInfo->lookup(VD);
251     // If there is no outer outlined region,no need to lookup in a list of
252     // captured variables, we can use the original one.
253     return nullptr;
254   }
255
256   FieldDecl *getThisFieldDecl() const override {
257     if (OuterRegionInfo)
258       return OuterRegionInfo->getThisFieldDecl();
259     return nullptr;
260   }
261
262   /// Get a variable or parameter for storing global thread id
263   /// inside OpenMP construct.
264   const VarDecl *getThreadIDVariable() const override {
265     if (OuterRegionInfo)
266       return OuterRegionInfo->getThreadIDVariable();
267     return nullptr;
268   }
269
270   /// Get an LValue for the current ThreadID variable.
271   LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
272     if (OuterRegionInfo)
273       return OuterRegionInfo->getThreadIDVariableLValue(CGF);
274     llvm_unreachable("No LValue for inlined OpenMP construct");
275   }
276
277   /// Get the name of the capture helper.
278   StringRef getHelperName() const override {
279     if (auto *OuterRegionInfo = getOldCSI())
280       return OuterRegionInfo->getHelperName();
281     llvm_unreachable("No helper name for inlined OpenMP construct");
282   }
283
284   void emitUntiedSwitch(CodeGenFunction &CGF) override {
285     if (OuterRegionInfo)
286       OuterRegionInfo->emitUntiedSwitch(CGF);
287   }
288
289   CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
290
291   static bool classof(const CGCapturedStmtInfo *Info) {
292     return CGOpenMPRegionInfo::classof(Info) &&
293            cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
294   }
295
296   ~CGOpenMPInlinedRegionInfo() override = default;
297
298 private:
299   /// CodeGen info about outer OpenMP region.
300   CodeGenFunction::CGCapturedStmtInfo *OldCSI;
301   CGOpenMPRegionInfo *OuterRegionInfo;
302 };
303
304 /// API for captured statement code generation in OpenMP target
305 /// constructs. For this captures, implicit parameters are used instead of the
306 /// captured fields. The name of the target region has to be unique in a given
307 /// application so it is provided by the client, because only the client has
308 /// the information to generate that.
309 class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
310 public:
311   CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
312                            const RegionCodeGenTy &CodeGen, StringRef HelperName)
313       : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
314                            /*HasCancel=*/false),
315         HelperName(HelperName) {}
316
317   /// This is unused for target regions because each starts executing
318   /// with a single thread.
319   const VarDecl *getThreadIDVariable() const override { return nullptr; }
320
321   /// Get the name of the capture helper.
322   StringRef getHelperName() const override { return HelperName; }
323
324   static bool classof(const CGCapturedStmtInfo *Info) {
325     return CGOpenMPRegionInfo::classof(Info) &&
326            cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
327   }
328
329 private:
330   StringRef HelperName;
331 };
332
/// Codegen callback that must never actually run: passed where a
/// RegionCodeGenTy is required but only capture information is needed
/// (see CGOpenMPInnerExprInfo).
static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
  llvm_unreachable("No codegen for expressions");
}
336 /// API for generation of expressions captured in a innermost OpenMP
337 /// region.
338 class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
339 public:
340   CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
341       : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
342                                   OMPD_unknown,
343                                   /*HasCancel=*/false),
344         PrivScope(CGF) {
345     // Make sure the globals captured in the provided statement are local by
346     // using the privatization logic. We assume the same variable is not
347     // captured more than once.
348     for (const auto &C : CS.captures()) {
349       if (!C.capturesVariable() && !C.capturesVariableByCopy())
350         continue;
351
352       const VarDecl *VD = C.getCapturedVar();
353       if (VD->isLocalVarDeclOrParm())
354         continue;
355
356       DeclRefExpr DRE(const_cast<VarDecl *>(VD),
357                       /*RefersToEnclosingVariableOrCapture=*/false,
358                       VD->getType().getNonReferenceType(), VK_LValue,
359                       C.getLocation());
360       PrivScope.addPrivate(
361           VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(); });
362     }
363     (void)PrivScope.Privatize();
364   }
365
366   /// Lookup the captured field decl for a variable.
367   const FieldDecl *lookup(const VarDecl *VD) const override {
368     if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
369       return FD;
370     return nullptr;
371   }
372
373   /// Emit the captured statement body.
374   void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
375     llvm_unreachable("No body for expressions");
376   }
377
378   /// Get a variable or parameter for storing global thread id
379   /// inside OpenMP construct.
380   const VarDecl *getThreadIDVariable() const override {
381     llvm_unreachable("No thread id for expressions");
382   }
383
384   /// Get the name of the capture helper.
385   StringRef getHelperName() const override {
386     llvm_unreachable("No helper name for expressions");
387   }
388
389   static bool classof(const CGCapturedStmtInfo *Info) { return false; }
390
391 private:
392   /// Private scope to capture global variables.
393   CodeGenFunction::OMPPrivateScope PrivScope;
394 };
395
/// RAII for emitting code of OpenMP constructs.
class InlinedOpenMPRegionRAII {
  CodeGenFunction &CGF;
  // Saved lambda/block capture state of CGF, restored on destruction.
  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
  FieldDecl *LambdaThisCaptureField = nullptr;
  const CodeGen::CGBlockInfo *BlockInfo = nullptr;

public:
  /// Constructs region for combined constructs.
  /// \param CodeGen Code generation sequence for combined directives. Includes
  /// a list of functions used for code generation of implicitly inlined
  /// regions.
  InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
                          OpenMPDirectiveKind Kind, bool HasCancel)
      : CGF(CGF) {
    // Start emission for the construct. The new info chains to the previous
    // CapturedStmtInfo, which is restored (and the new one deleted) in the
    // destructor.
    CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
        CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
    // Stash and clear the lambda/block capture state so captures inside the
    // inlined region are not resolved through the enclosing lambda/block.
    std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
    LambdaThisCaptureField = CGF.LambdaThisCaptureField;
    CGF.LambdaThisCaptureField = nullptr;
    BlockInfo = CGF.BlockInfo;
    CGF.BlockInfo = nullptr;
  }

  ~InlinedOpenMPRegionRAII() {
    // Restore original CapturedStmtInfo only if we're done with code emission.
    auto *OldCSI =
        cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
    delete CGF.CapturedStmtInfo;
    CGF.CapturedStmtInfo = OldCSI;
    // Restore the stashed lambda/block capture state.
    std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
    CGF.LambdaThisCaptureField = LambdaThisCaptureField;
    CGF.BlockInfo = BlockInfo;
  }
};
432
/// Values for bit flags used in the ident_t to describe the fields.
/// All enumerated elements are named and described in accordance with the code
/// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
enum OpenMPLocationFlags : unsigned {
  /// Use trampoline for internal microtask.
  OMP_IDENT_IMD = 0x01,
  /// Use c-style ident structure.
  OMP_IDENT_KMPC = 0x02,
  /// Atomic reduction option for kmpc_reduce.
  OMP_ATOMIC_REDUCE = 0x10,
  /// Explicit 'barrier' directive.
  OMP_IDENT_BARRIER_EXPL = 0x20,
  /// Implicit barrier in code.
  OMP_IDENT_BARRIER_IMPL = 0x40,
  /// Implicit barrier in 'for' directive (deliberately the same value as
  /// OMP_IDENT_BARRIER_IMPL, matching kmp.h).
  OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
  /// Implicit barrier in 'sections' directive.
  OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
  /// Implicit barrier in 'single' directive.
  OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
  /// Call of __kmp_for_static_init for static loop.
  OMP_IDENT_WORK_LOOP = 0x200,
  /// Call of __kmp_for_static_init for sections.
  OMP_IDENT_WORK_SECTIONS = 0x400,
  /// Call of __kmp_for_static_init for distribute.
  OMP_IDENT_WORK_DISTRIBUTE = 0x800,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
};
461
/// Describes ident structure that describes a source location.
/// All descriptions are taken from
/// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
/// Original structure:
/// typedef struct ident {
///    kmp_int32 reserved_1;   /**<  might be used in Fortran;
///                                  see above  */
///    kmp_int32 flags;        /**<  also f.flags; KMP_IDENT_xxx flags;
///                                  KMP_IDENT_KMPC identifies this union
///                                  member  */
///    kmp_int32 reserved_2;   /**<  not really used in Fortran any more;
///                                  see above */
///#if USE_ITT_BUILD
///                            /*  but currently used for storing
///                                region-specific ITT */
///                            /*  contextual information. */
///#endif /* USE_ITT_BUILD */
///    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for
///                                 C++  */
///    char const *psource;    /**< String describing the source location.
///                            The string is composed of semi-colon separated
///                            fields which describe the source file,
///                            the function and a pair of line numbers that
///                            delimit the construct.
///                             */
/// } ident_t;
enum IdentFieldIndex {
  /// might be used in Fortran
  IdentField_Reserved_1,
  /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
  IdentField_Flags,
  /// Not really used in Fortran any more
  IdentField_Reserved_2,
  /// Source[4] in Fortran, do not use for C++
  IdentField_Reserved_3,
  /// String describing the source location. The string is composed of
  /// semi-colon separated fields which describe the source file, the function
  /// and a pair of line numbers that delimit the construct.
  IdentField_PSource
};
502
/// Schedule types for 'omp for' loops (these enumerators are taken from
/// the enum sched_type in kmp.h).
/// NOTE: the numeric values form part of the runtime ABI and must stay in
/// sync with the OpenMP runtime's kmp.h.
enum OpenMPSchedType {
  /// Lower bound for default (unordered) versions.
  OMP_sch_lower = 32,
  OMP_sch_static_chunked = 33,
  OMP_sch_static = 34,
  OMP_sch_dynamic_chunked = 35,
  OMP_sch_guided_chunked = 36,
  OMP_sch_runtime = 37,
  OMP_sch_auto = 38,
  /// static with chunk adjustment (e.g., simd)
  OMP_sch_static_balanced_chunked = 45,
  /// Lower bound for 'ordered' versions.
  OMP_ord_lower = 64,
  OMP_ord_static_chunked = 65,
  OMP_ord_static = 66,
  OMP_ord_dynamic_chunked = 67,
  OMP_ord_guided_chunked = 68,
  OMP_ord_runtime = 69,
  OMP_ord_auto = 70,
  /// Schedule used when no schedule clause is specified.
  OMP_sch_default = OMP_sch_static,
  /// dist_schedule types
  OMP_dist_sch_static_chunked = 91,
  OMP_dist_sch_static = 92,
  /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
  /// Set if the monotonic schedule modifier was present.
  OMP_sch_modifier_monotonic = (1 << 29),
  /// Set if the nonmonotonic schedule modifier was present.
  OMP_sch_modifier_nonmonotonic = (1 << 30),
};
534
/// IDs of the OpenMP runtime library routines that CodeGen may emit calls
/// to. Each enumerator is documented with the C prototype of the routine it
/// names.
enum OpenMPRTLFunction {
  /// Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
  /// kmpc_micro microtask, ...);
  OMPRTL__kmpc_fork_call,
  /// Call to void *__kmpc_threadprivate_cached(ident_t *loc,
  /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
  OMPRTL__kmpc_threadprivate_cached,
  /// Call to void __kmpc_threadprivate_register( ident_t *,
  /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
  OMPRTL__kmpc_threadprivate_register,
  /// Call to kmp_int32 __kmpc_global_thread_num(ident_t *loc);
  OMPRTL__kmpc_global_thread_num,
  /// Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
  /// kmp_critical_name *crit);
  OMPRTL__kmpc_critical,
  /// Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
  /// global_tid, kmp_critical_name *crit, uintptr_t hint);
  OMPRTL__kmpc_critical_with_hint,
  /// Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
  /// kmp_critical_name *crit);
  OMPRTL__kmpc_end_critical,
  /// Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL__kmpc_cancel_barrier,
  /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_barrier,
  /// Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_for_static_fini,
  /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL__kmpc_serialized_parallel,
  /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL__kmpc_end_serialized_parallel,
  /// Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
  /// kmp_int32 num_threads);
  OMPRTL__kmpc_push_num_threads,
  /// Call to void __kmpc_flush(ident_t *loc);
  OMPRTL__kmpc_flush,
  /// Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_master,
  /// Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_end_master,
  /// Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
  /// int end_part);
  OMPRTL__kmpc_omp_taskyield,
  /// Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_single,
  /// Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_end_single,
  /// Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
  /// kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  /// kmp_routine_entry_t *task_entry);
  OMPRTL__kmpc_omp_task_alloc,
  /// Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
  /// new_task);
  OMPRTL__kmpc_omp_task,
  /// Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
  /// size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
  /// kmp_int32 didit);
  OMPRTL__kmpc_copyprivate,
  /// Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
  /// kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
  /// (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
  OMPRTL__kmpc_reduce,
  /// Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
  /// void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
  /// *lck);
  OMPRTL__kmpc_reduce_nowait,
  /// Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
  /// kmp_critical_name *lck);
  OMPRTL__kmpc_end_reduce,
  /// Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
  /// kmp_critical_name *lck);
  OMPRTL__kmpc_end_reduce_nowait,
  /// Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
  /// kmp_task_t * new_task);
  OMPRTL__kmpc_omp_task_begin_if0,
  /// Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
  /// kmp_task_t * new_task);
  OMPRTL__kmpc_omp_task_complete_if0,
  /// Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_ordered,
  /// Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_end_ordered,
  /// Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL__kmpc_omp_taskwait,
  /// Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_taskgroup,
  /// Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_end_taskgroup,
  /// Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
  /// int proc_bind);
  OMPRTL__kmpc_push_proc_bind,
  /// Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
  /// gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
  /// *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
  OMPRTL__kmpc_omp_task_with_deps,
  /// Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
  /// gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
  /// ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
  OMPRTL__kmpc_omp_wait_deps,
  /// Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 cncl_kind);
  OMPRTL__kmpc_cancellationpoint,
  /// Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
  /// kmp_int32 cncl_kind);
  OMPRTL__kmpc_cancel,
  /// Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
  /// kmp_int32 num_teams, kmp_int32 thread_limit);
  OMPRTL__kmpc_push_num_teams,
  /// Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
  /// microtask, ...);
  OMPRTL__kmpc_fork_teams,
  /// Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
  /// if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
  /// sched, kmp_uint64 grainsize, void *task_dup);
  OMPRTL__kmpc_taskloop,
  /// Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
  /// num_dims, struct kmp_dim *dims);
  OMPRTL__kmpc_doacross_init,
  /// Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
  OMPRTL__kmpc_doacross_fini,
  /// Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
  /// *vec);
  OMPRTL__kmpc_doacross_post,
  /// Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
  /// *vec);
  OMPRTL__kmpc_doacross_wait,
  /// Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
  /// *data);
  OMPRTL__kmpc_task_reduction_init,
  /// Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
  /// *d);
  OMPRTL__kmpc_task_reduction_get_th_data,

  //
  // Offloading related calls
  //
  /// Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
  /// arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  /// *arg_types);
  OMPRTL__tgt_target,
  /// Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
  /// int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  /// *arg_types);
  OMPRTL__tgt_target_nowait,
  /// Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
  /// int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  /// *arg_types, int32_t num_teams, int32_t thread_limit);
  OMPRTL__tgt_target_teams,
  /// Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
  /// *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
  /// *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
  OMPRTL__tgt_target_teams_nowait,
  /// Call to void __tgt_register_lib(__tgt_bin_desc *desc);
  OMPRTL__tgt_register_lib,
  /// Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
  OMPRTL__tgt_unregister_lib,
  /// Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
  /// void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_begin,
  /// Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
  /// arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  /// *arg_types);
  OMPRTL__tgt_target_data_begin_nowait,
  /// Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
  /// void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_end,
  /// Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
  /// arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  /// *arg_types);
  OMPRTL__tgt_target_data_end_nowait,
  /// Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
  /// void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_update,
  /// Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
  /// arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  /// *arg_types);
  OMPRTL__tgt_target_data_update_nowait,
};
718
719 /// A basic class for pre|post-action for advanced codegen sequence for OpenMP
720 /// region.
721 class CleanupTy final : public EHScopeStack::Cleanup {
722   PrePostActionTy *Action;
723
724 public:
725   explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
726   void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
727     if (!CGF.HaveInsertPoint())
728       return;
729     Action->Exit(CGF);
730   }
731 };
732
733 } // anonymous namespace
734
735 void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
736   CodeGenFunction::RunCleanupsScope Scope(CGF);
737   if (PrePostAction) {
738     CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
739     Callback(CodeGen, CGF, *PrePostAction);
740   } else {
741     PrePostActionTy Action;
742     Callback(CodeGen, CGF, Action);
743   }
744 }
745
746 /// Check if the combiner is a call to UDR combiner and if it is so return the
747 /// UDR decl used for reduction.
748 static const OMPDeclareReductionDecl *
749 getReductionInit(const Expr *ReductionOp) {
750   if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
751     if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
752       if (const auto *DRE =
753               dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
754         if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
755           return DRD;
756   return nullptr;
757 }
758
/// Initialize one reduction item, using the user-defined reduction
/// initializer when \p DRD provides one and zero-initialization otherwise.
/// \param DRD The declare-reduction declaration for this item.
/// \param InitOp Initializer expression: a call through an OpaqueValueExpr
/// callee whose two arguments reference the private and original variables.
/// \param Private Address of the private copy being initialized.
/// \param Original Address of the original (shared) item.
/// \param Ty Type of the reduction item.
static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
                                             const OMPDeclareReductionDecl *DRD,
                                             const Expr *InitOp,
                                             Address Private, Address Original,
                                             QualType Ty) {
  if (DRD->getInitializer()) {
    // Emit a call to the UDR's initializer function. Reduction.second is the
    // initializer emitted by getUserDefinedReduction.
    std::pair<llvm::Function *, llvm::Function *> Reduction =
        CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
    const auto *CE = cast<CallExpr>(InitOp);
    const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
    const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
    const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
    const auto *LHSDRE =
        cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
    const auto *RHSDRE =
        cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
    // Redirect the variable referenced by the first argument to the private
    // copy and the second to the original item for the duration of the call.
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
                            [=]() { return Private; });
    PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
                            [=]() { return Original; });
    (void)PrivateScope.Privatize();
    // Substitute the emitted initializer function for the opaque callee.
    RValue Func = RValue::get(Reduction.second);
    CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
    CGF.EmitIgnoredExpr(InitOp);
  } else {
    // No initializer clause: materialize a private constant global holding
    // the null value of Ty and copy it into the private storage.
    llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
    std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
    auto *GV = new llvm::GlobalVariable(
        CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage, Init, Name);
    LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
    RValue InitRVal;
    // Load the null value in the form matching Ty's evaluation kind.
    switch (CGF.getEvaluationKind(Ty)) {
    case TEK_Scalar:
      InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
      break;
    case TEK_Complex:
      InitRVal =
          RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
      break;
    case TEK_Aggregate:
      InitRVal = RValue::getAggregate(LV.getAddress());
      break;
    }
    OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
    CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
    CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
                         /*IsInitializer=*/false);
  }
}
810
811 /// Emit initialization of arrays of complex types.
812 /// \param DestAddr Address of the array.
813 /// \param Type Type of array.
814 /// \param Init Initial expression of array.
815 /// \param SrcAddr Address of the original array.
816 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
817                                  QualType Type, bool EmitDeclareReductionInit,
818                                  const Expr *Init,
819                                  const OMPDeclareReductionDecl *DRD,
820                                  Address SrcAddr = Address::invalid()) {
821   // Perform element-by-element initialization.
822   QualType ElementTy;
823
824   // Drill down to the base element type on both arrays.
825   const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
826   llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
827   DestAddr =
828       CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
829   if (DRD)
830     SrcAddr =
831         CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
832
833   llvm::Value *SrcBegin = nullptr;
834   if (DRD)
835     SrcBegin = SrcAddr.getPointer();
836   llvm::Value *DestBegin = DestAddr.getPointer();
837   // Cast from pointer to array type to pointer to single element.
838   llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
839   // The basic structure here is a while-do loop.
840   llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
841   llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
842   llvm::Value *IsEmpty =
843       CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
844   CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
845
846   // Enter the loop body, making that address the current address.
847   llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
848   CGF.EmitBlock(BodyBB);
849
850   CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
851
852   llvm::PHINode *SrcElementPHI = nullptr;
853   Address SrcElementCurrent = Address::invalid();
854   if (DRD) {
855     SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
856                                           "omp.arraycpy.srcElementPast");
857     SrcElementPHI->addIncoming(SrcBegin, EntryBB);
858     SrcElementCurrent =
859         Address(SrcElementPHI,
860                 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
861   }
862   llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
863       DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
864   DestElementPHI->addIncoming(DestBegin, EntryBB);
865   Address DestElementCurrent =
866       Address(DestElementPHI,
867               DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
868
869   // Emit copy.
870   {
871     CodeGenFunction::RunCleanupsScope InitScope(CGF);
872     if (EmitDeclareReductionInit) {
873       emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
874                                        SrcElementCurrent, ElementTy);
875     } else
876       CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
877                            /*IsInitializer=*/false);
878   }
879
880   if (DRD) {
881     // Shift the address forward by one element.
882     llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
883         SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
884     SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
885   }
886
887   // Shift the address forward by one element.
888   llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
889       DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
890   // Check whether we've reached the end.
891   llvm::Value *Done =
892       CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
893   CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
894   DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
895
896   // Done.
897   CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
898 }
899
900 static llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
901 isDeclareTargetDeclaration(const ValueDecl *VD) {
902   for (const Decl *D : VD->redecls()) {
903     if (!D->hasAttrs())
904       continue;
905     if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
906       return Attr->getMapType();
907   }
908   if (const auto *V = dyn_cast<VarDecl>(VD)) {
909     if (const VarDecl *TD = V->getTemplateInstantiationPattern())
910       return isDeclareTargetDeclaration(TD);
911   } else if (const auto *FD = dyn_cast<FunctionDecl>(VD)) {
912     if (const auto *TD = FD->getTemplateInstantiationPattern())
913       return isDeclareTargetDeclaration(TD);
914   }
915
916   return llvm::None;
917 }
918
/// Emit the lvalue of the shared (original) expression \p E of a reduction
/// item by delegating to the generic OpenMP shared-lvalue emission.
LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
  return CGF.EmitOMPSharedLValue(E);
}
922
923 LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
924                                             const Expr *E) {
925   if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
926     return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
927   return LValue();
928 }
929
930 void ReductionCodeGen::emitAggregateInitialization(
931     CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
932     const OMPDeclareReductionDecl *DRD) {
933   // Emit VarDecl with copy init for arrays.
934   // Get the address of the original variable captured in current
935   // captured region.
936   const auto *PrivateVD =
937       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
938   bool EmitDeclareReductionInit =
939       DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
940   EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
941                        EmitDeclareReductionInit,
942                        EmitDeclareReductionInit ? ClausesData[N].ReductionOp
943                                                 : PrivateVD->getInit(),
944                        DRD, SharedLVal.getAddress());
945 }
946
947 ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
948                                    ArrayRef<const Expr *> Privates,
949                                    ArrayRef<const Expr *> ReductionOps) {
950   ClausesData.reserve(Shareds.size());
951   SharedAddresses.reserve(Shareds.size());
952   Sizes.reserve(Shareds.size());
953   BaseDecls.reserve(Shareds.size());
954   auto IPriv = Privates.begin();
955   auto IRed = ReductionOps.begin();
956   for (const Expr *Ref : Shareds) {
957     ClausesData.emplace_back(Ref, *IPriv, *IRed);
958     std::advance(IPriv, 1);
959     std::advance(IRed, 1);
960   }
961 }
962
963 void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
964   assert(SharedAddresses.size() == N &&
965          "Number of generated lvalues must be exactly N.");
966   LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
967   LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
968   SharedAddresses.emplace_back(First, Second);
969 }
970
/// Compute the size of reduction item \p N and record it in Sizes. For
/// variably modified private types this also binds the VLA size expression
/// so the private type can be emitted.
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
  if (!PrivateType->isVariablyModifiedType()) {
    // Constant-sized item: only the byte size is needed; no element count is
    // recorded (second member stays null).
    Sizes.emplace_back(
        CGF.getTypeSize(
            SharedAddresses[N].first.getType().getNonReferenceType()),
        nullptr);
    return;
  }
  llvm::Value *Size;
  llvm::Value *SizeInChars;
  auto *ElemType =
      cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
          ->getElementType();
  auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
  if (AsArraySection) {
    // Array section: element count is (UB - LB + 1); SharedAddresses holds
    // the lower bound in .first and the upper bound in .second.
    Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(),
                                     SharedAddresses[N].first.getPointer());
    Size = CGF.Builder.CreateNUWAdd(
        Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
    SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
  } else {
    // Whole variably modified item: derive the element count from the byte
    // size divided by the element size.
    SizeInChars = CGF.getTypeSize(
        SharedAddresses[N].first.getType().getNonReferenceType());
    Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
  }
  Sizes.emplace_back(SizeInChars, Size);
  // Bind the VLA's size expression to the computed element count so that
  // EmitVariablyModifiedType can evaluate the private type.
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}
1008
/// Emit the variably modified type of reduction item \p N using an
/// externally provided element count \p Size (must be null for items whose
/// private type is not variably modified).
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
                                         llvm::Value *Size) {
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  if (!PrivateType->isVariablyModifiedType()) {
    // Nothing to emit for constant-sized items.
    assert(!Size && !Sizes[N].second &&
           "Size should be nullptr for non-variably modified reduction "
           "items.");
    return;
  }
  // Bind the VLA size expression to \p Size so the private type can be
  // evaluated.
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}
1027
/// Emit initialization of the private copy of reduction item \p N.
/// Dispatches to: array initialization, UDR initializer, the caller-supplied
/// \p DefaultInit, or the private variable's own initializer, in that order.
/// \param DefaultInit Callback that may handle the initialization itself;
/// returning false means it did not, and the fallback paths apply.
void ReductionCodeGen::emitInitialization(
    CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
    llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
  assert(SharedAddresses.size() > N && "No variable was generated");
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  const OMPDeclareReductionDecl *DRD =
      getReductionInit(ClausesData[N].ReductionOp);
  QualType PrivateType = PrivateVD->getType();
  // Cast both addresses to the memory types expected by the initializers.
  PrivateAddr = CGF.Builder.CreateElementBitCast(
      PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
  QualType SharedType = SharedAddresses[N].first.getType();
  SharedLVal = CGF.MakeAddrLValue(
      CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(),
                                       CGF.ConvertTypeForMem(SharedType)),
      SharedType, SharedAddresses[N].first.getBaseInfo(),
      CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
  if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
    // Arrays are initialized element by element.
    emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
  } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
    // Scalar with a usable declare-reduction initializer.
    emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
                                     PrivateAddr, SharedLVal.getAddress(),
                                     SharedLVal.getType());
  } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
             !CGF.isTrivialInitializer(PrivateVD->getInit())) {
    // The callback declined; fall back to the variable's own non-trivial
    // initializer expression.
    CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
                         PrivateVD->getType().getQualifiers(),
                         /*IsInitializer=*/false);
  }
}
1058
1059 bool ReductionCodeGen::needCleanups(unsigned N) {
1060   const auto *PrivateVD =
1061       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1062   QualType PrivateType = PrivateVD->getType();
1063   QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1064   return DTorKind != QualType::DK_none;
1065 }
1066
1067 void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
1068                                     Address PrivateAddr) {
1069   const auto *PrivateVD =
1070       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1071   QualType PrivateType = PrivateVD->getType();
1072   QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1073   if (needCleanups(N)) {
1074     PrivateAddr = CGF.Builder.CreateElementBitCast(
1075         PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1076     CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
1077   }
1078 }
1079
/// Strip pointer/reference levels from \p BaseTy, loading through each one
/// starting at \p BaseLV, until the type matches \p ElTy, then return an
/// lvalue at the final address cast to \p ElTy's memory representation.
static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          LValue BaseLV) {
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    // Load one level of indirection, via the pointer or reference path.
    if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
      BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
    } else {
      LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
      BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
    }
    BaseTy = BaseTy->getPointeeType();
  }
  // Re-wrap the final address with ElTy's memory type, preserving the base
  // info and TBAA metadata of the lvalue we ended up at.
  return CGF.MakeAddrLValue(
      CGF.Builder.CreateElementBitCast(BaseLV.getAddress(),
                                       CGF.ConvertTypeForMem(ElTy)),
      BaseLV.getType(), BaseLV.getBaseInfo(),
      CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}
1099
/// Rebuild the indirection structure of \p BaseTy around \p Addr: for each
/// pointer/reference level above \p ElTy, allocate a temporary and chain the
/// temporaries together by stores, finally storing the (cast) \p Addr at the
/// innermost level. Returns the outermost temporary, or \p Addr itself (with
/// \p BaseLVAlignment) when no indirection levels exist.
static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
                          llvm::Value *Addr) {
  Address Tmp = Address::invalid();
  Address TopTmp = Address::invalid();
  Address MostTopTmp = Address::invalid();
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    // One temporary per indirection level; each upper level stores the
    // address of the one below it.
    Tmp = CGF.CreateMemTemp(BaseTy);
    if (TopTmp.isValid())
      CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
    else
      MostTopTmp = Tmp;  // remember the outermost temporary
    TopTmp = Tmp;
    BaseTy = BaseTy->getPointeeType();
  }
  // Cast Addr to the innermost temporary's element type (or the caller's
  // lvalue type when there were no indirections).
  llvm::Type *Ty = BaseLVType;
  if (Tmp.isValid())
    Ty = Tmp.getElementType();
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
  if (Tmp.isValid()) {
    CGF.Builder.CreateStore(Addr, Tmp);
    return MostTopTmp;
  }
  return Address(Addr, BaseLVAlignment);
}
1127
1128 static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
1129   const VarDecl *OrigVD = nullptr;
1130   if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
1131     const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
1132     while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
1133       Base = TempOASE->getBase()->IgnoreParenImpCasts();
1134     while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1135       Base = TempASE->getBase()->IgnoreParenImpCasts();
1136     DE = cast<DeclRefExpr>(Base);
1137     OrigVD = cast<VarDecl>(DE->getDecl());
1138   } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
1139     const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
1140     while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1141       Base = TempASE->getBase()->IgnoreParenImpCasts();
1142     DE = cast<DeclRefExpr>(Base);
1143     OrigVD = cast<VarDecl>(DE->getDecl());
1144   }
1145   return OrigVD;
1146 }
1147
/// Adjust \p PrivateAddr of reduction item \p N when the reduction target is
/// an array section/subscript: apply the same offset from the base variable
/// to the private copy and rebuild the base's indirection structure. For
/// plain variables the address is returned unchanged. Also records the base
/// declaration in BaseDecls.
Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                                               Address PrivateAddr) {
  const DeclRefExpr *DE;
  if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
    BaseDecls.emplace_back(OrigVD);
    // Offset of the reduced section from the start of the original base.
    LValue OriginalBaseLValue = CGF.EmitLValue(DE);
    LValue BaseLValue =
        loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
                    OriginalBaseLValue);
    llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
        BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
    // Apply the same offset inside the private copy.
    llvm::Value *PrivatePointer =
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            PrivateAddr.getPointer(),
            SharedAddresses[N].first.getAddress().getType());
    llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
    return castToBase(CGF, OrigVD->getType(),
                      SharedAddresses[N].first.getType(),
                      OriginalBaseLValue.getAddress().getType(),
                      OriginalBaseLValue.getAlignment(), Ptr);
  }
  // Not a section/subscript: the clause expression itself names the base.
  BaseDecls.emplace_back(
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
  return PrivateAddr;
}
1173
1174 bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
1175   const OMPDeclareReductionDecl *DRD =
1176       getReductionInit(ClausesData[N].ReductionOp);
1177   return DRD && DRD->getInitializer();
1178 }
1179
1180 LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
1181   return CGF.EmitLoadOfPointerLValue(
1182       CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1183       getThreadIDVariable()->getType()->castAs<PointerType>());
1184 }
1185
/// Emit the statements of an OpenMP region inside a terminate scope so that
/// no exception can escape the structured block.
void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
  if (!CGF.HaveInsertPoint())
    return;
  // 1.2.2 OpenMP Language Terminology
  // Structured block - An executable statement with a single entry at the
  // top and a single exit at the bottom.
  // The point of exit cannot be a branch out of the structured block.
  // longjmp() and throw() must not violate the entry/exit criteria.
  CGF.EHStack.pushTerminate();
  CodeGen(CGF);
  CGF.EHStack.popTerminate();
}
1198
1199 LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1200     CodeGenFunction &CGF) {
1201   return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1202                             getThreadIDVariable()->getType(),
1203                             AlignmentSource::Decl);
1204 }
1205
1206 static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
1207                                        QualType FieldTy) {
1208   auto *Field = FieldDecl::Create(
1209       C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
1210       C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
1211       /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
1212   Field->setAccess(AS_public);
1213   DC->addDecl(Field);
1214   return Field;
1215 }
1216
/// Construct the OpenMP runtime helper: builds the implicit 'ident_t' record
/// type used for source-location arguments of runtime calls, the critical
/// name type, and loads any offload metadata.
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
                                 StringRef Separator)
    : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
      OffloadEntriesInfoManager(CGM) {
  ASTContext &C = CGM.getContext();
  // ident_t layout: four i32 fields followed by a void* source string.
  RecordDecl *RD = C.buildImplicitRecord("ident_t");
  QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
  RD->startDefinition();
  // reserved_1
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  // flags
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  // reserved_2
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  // reserved_3
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  // psource
  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  RD->completeDefinition();
  IdentQTy = C.getRecordType(RD);
  IdentTy = CGM.getTypes().ConvertRecordDeclType(RD);
  // kmp_critical_name is an array of 8 i32s.
  KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);

  loadOffloadInfoMetadata();
}
1242
/// Reset cached per-module state (the table of internal runtime variables).
void CGOpenMPRuntime::clear() {
  InternalVars.clear();
}
1246
1247 std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
1248   SmallString<128> Buffer;
1249   llvm::raw_svector_ostream OS(Buffer);
1250   StringRef Sep = FirstSeparator;
1251   for (StringRef Part : Parts) {
1252     OS << Sep << Part;
1253     Sep = Separator;
1254   }
1255   return OS.str();
1256 }
1257
/// Emit the helper function for a declare-reduction combiner or initializer:
/// 'void .omp_combiner.(Ty *in, Ty *out)' (or .omp_initializer.).
/// \param CombinerInitializer The combiner expression, or the initializer
/// expression for call-style initializers (may be null for initializers).
/// \param In Variable to map onto the 'in' parameter (omp_in / omp_orig).
/// \param Out Variable to map onto the 'out' parameter (omp_out / omp_priv).
/// \param IsCombiner Selects the function name and suppresses the
/// default-initialization of \p Out done for initializers.
static llvm::Function *
emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
                          const Expr *CombinerInitializer, const VarDecl *In,
                          const VarDecl *Out, bool IsCombiner) {
  // void .omp_combiner.(Ty *in, Ty *out);
  ASTContext &C = CGM.getContext();
  QualType PtrTy = C.getPointerType(Ty).withRestrict();
  FunctionArgList Args;
  ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
                               /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
                              /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  Args.push_back(&OmpOutParm);
  Args.push_back(&OmpInParm);
  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName(
      {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  // Force inlining of these tiny helpers into their call sites.
  Fn->removeFnAttr(llvm::Attribute::NoInline);
  Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
  Fn->addFnAttr(llvm::Attribute::AlwaysInline);
  CodeGenFunction CGF(CGM);
  // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
  // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
                    Out->getLocation());
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
  Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
    return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
        .getAddress();
  });
  Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
  Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
    return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
        .getAddress();
  });
  (void)Scope.Privatize();
  // For initializers, emit omp_priv's own non-trivial initializer first.
  if (!IsCombiner && Out->hasInit() &&
      !CGF.isTrivialInitializer(Out->getInit())) {
    CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
                         Out->getType().getQualifiers(),
                         /*IsInitializer=*/true);
  }
  if (CombinerInitializer)
    CGF.EmitIgnoredExpr(CombinerInitializer);
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}
1312
/// Emit (once) the combiner and, if present, the initializer functions for
/// the user-defined reduction \p D and cache them in UDRMap. When \p CGF is
/// non-null the UDR is additionally associated with the current function in
/// FunctionUDRMap.
void CGOpenMPRuntime::emitUserDefinedReduction(
    CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
  if (UDRMap.count(D) > 0)
    return;  // already emitted
  ASTContext &C = CGM.getContext();
  // Lazily cache the identifiers used to look up the UDR's variables.
  if (!In || !Out) {
    In = &C.Idents.get("omp_in");
    Out = &C.Idents.get("omp_out");
  }
  llvm::Function *Combiner = emitCombinerOrInitializer(
      CGM, D->getType(), D->getCombiner(), cast<VarDecl>(D->lookup(In).front()),
      cast<VarDecl>(D->lookup(Out).front()),
      /*IsCombiner=*/true);
  llvm::Function *Initializer = nullptr;
  if (const Expr *Init = D->getInitializer()) {
    if (!Priv || !Orig) {
      Priv = &C.Idents.get("omp_priv");
      Orig = &C.Idents.get("omp_orig");
    }
    // Only call-style initializers pass the init expression through; direct
    // initializers are handled via omp_priv's own initializer.
    Initializer = emitCombinerOrInitializer(
        CGM, D->getType(),
        D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
                                                                     : nullptr,
        cast<VarDecl>(D->lookup(Orig).front()),
        cast<VarDecl>(D->lookup(Priv).front()),
        /*IsCombiner=*/false);
  }
  UDRMap.try_emplace(D, Combiner, Initializer);
  if (CGF) {
    auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
    Decls.second.push_back(D);
  }
}
1346
1347 std::pair<llvm::Function *, llvm::Function *>
1348 CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
1349   auto I = UDRMap.find(D);
1350   if (I != UDRMap.end())
1351     return I->second;
1352   emitUserDefinedReduction(/*CGF=*/nullptr, D);
1353   return UDRMap.lookup(D);
1354 }
1355
/// Outline the captured statement \p CS of a 'parallel' or 'teams' region
/// into a helper function named \p OutlinedHelperName, propagating whether
/// the directive (or its combined variants) has a 'cancel' clause.
static llvm::Value *emitParallelOrTeamsOutlinedFunction(
    CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
    const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
    const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
  assert(ThreadIDVar->getType()->isPointerType() &&
         "thread id variable must be of type kmp_int32 *");
  CodeGenFunction CGF(CGM, true);
  // Each parallel-containing directive kind stores its cancel flag on its
  // own class, so probe each in turn.
  bool HasCancel = false;
  if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
    HasCancel = OPD->hasCancel();
  else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
    HasCancel = OPSD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD =
               dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD =
               dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
                                    HasCancel, OutlinedHelperName);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
}
1385
1386 llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
1387     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1388     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1389   const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1390   return emitParallelOrTeamsOutlinedFunction(
1391       CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1392 }
1393
1394 llvm::Value *CGOpenMPRuntime::emitTeamsOutlinedFunction(
1395     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1396     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1397   const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1398   return emitParallelOrTeamsOutlinedFunction(
1399       CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1400 }
1401
/// Outline the captured statement of a 'task'/'taskloop' region. For untied
/// tasks, a pre/post action is installed that re-enqueues the task via
/// __kmpc_omp_task at part boundaries, and \p NumberOfParts is set to the
/// number of generated task parts.
llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    const VarDecl *PartIDVar, const VarDecl *TaskTVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    bool Tied, unsigned &NumberOfParts) {
  // Codegen run for untied tasks: call __kmpc_omp_task with the task
  // descriptor loaded from TaskTVar.
  auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
                                              PrePostActionTy &) {
    llvm::Value *ThreadID = getThreadID(CGF, D.getLocStart());
    llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getLocStart());
    llvm::Value *TaskArgs[] = {
        UpLoc, ThreadID,
        CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
                                    TaskTVar->getType()->castAs<PointerType>())
            .getPointer()};
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
  };
  CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
                                                            UntiedCodeGen);
  CodeGen.setAction(Action);
  assert(!ThreadIDVar->getType()->isPointerType() &&
         "thread id variable must be of type kmp_int32 for tasks");
  // Taskloop directives capture with OMPD_taskloop; plain tasks with
  // OMPD_task.
  const OpenMPDirectiveKind Region =
      isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
                                                      : OMPD_task;
  const CapturedStmt *CS = D.getCapturedStmt(Region);
  // Only plain 'task' directives can carry a 'cancel' clause here.
  const auto *TD = dyn_cast<OMPTaskDirective>(&D);
  CodeGenFunction CGF(CGM, true);
  CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
                                        InnermostKind,
                                        TD ? TD->hasCancel() : false, Action);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  llvm::Value *Res = CGF.GenerateCapturedStmtFunction(*CS);
  if (!Tied)
    NumberOfParts = Action.getNumberOfParts();
  return Res;
}
1438
1439 static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
1440                              const RecordDecl *RD, const CGRecordLayout &RL,
1441                              ArrayRef<llvm::Constant *> Data) {
1442   llvm::StructType *StructTy = RL.getLLVMType();
1443   unsigned PrevIdx = 0;
1444   ConstantInitBuilder CIBuilder(CGM);
1445   auto DI = Data.begin();
1446   for (const FieldDecl *FD : RD->fields()) {
1447     unsigned Idx = RL.getLLVMFieldNo(FD);
1448     // Fill the alignment.
1449     for (unsigned I = PrevIdx; I < Idx; ++I)
1450       Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
1451     PrevIdx = Idx + 1;
1452     Fields.add(*DI);
1453     ++DI;
1454   }
1455 }
1456
1457 template <class... As>
1458 static llvm::GlobalVariable *
1459 createConstantGlobalStruct(CodeGenModule &CGM, QualType Ty,
1460                            ArrayRef<llvm::Constant *> Data, const Twine &Name,
1461                            As &&... Args) {
1462   const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1463   const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1464   ConstantInitBuilder CIBuilder(CGM);
1465   ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
1466   buildStructValue(Fields, CGM, RD, RL, Data);
1467   return Fields.finishAndCreateGlobal(
1468       Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty),
1469       /*isConstant=*/true, std::forward<As>(Args)...);
1470 }
1471
1472 template <typename T>
1473 static void
1474 createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
1475                                          ArrayRef<llvm::Constant *> Data,
1476                                          T &Parent) {
1477   const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1478   const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1479   ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
1480   buildStructValue(Fields, CGM, RD, RL, Data);
1481   Fields.finishAndAddTo(Parent);
1482 }
1483
Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
  // Returns a default (source-location-free) ident_t global for the given
  // flags, creating and caching it on first use. One global per flag set.
  CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
  llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
  if (!Entry) {
    if (!DefaultOpenMPPSource) {
      // Initialize default location for psource field of ident_t structure of
      // all ident_t objects. Format is ";file;function;line;column;;".
      // Taken from
      // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
      DefaultOpenMPPSource =
          CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
      DefaultOpenMPPSource =
          llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
    }

    // ident_t initializer: four i32 fields followed by the psource string.
    // Only the second field (Flags) and psource carry meaningful data here.
    llvm::Constant *Data[] = {llvm::ConstantInt::getNullValue(CGM.Int32Ty),
                              llvm::ConstantInt::get(CGM.Int32Ty, Flags),
                              llvm::ConstantInt::getNullValue(CGM.Int32Ty),
                              llvm::ConstantInt::getNullValue(CGM.Int32Ty),
                              DefaultOpenMPPSource};
    llvm::GlobalValue *DefaultOpenMPLocation = createConstantGlobalStruct(
        CGM, IdentQTy, Data, "", llvm::GlobalValue::PrivateLinkage);
    // The global's address identity is irrelevant, so allow the linker to
    // merge identical copies.
    DefaultOpenMPLocation->setUnnamedAddr(
        llvm::GlobalValue::UnnamedAddr::Global);

    OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
  }
  return Address(Entry, Align);
}
1513
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
                                                 SourceLocation Loc,
                                                 unsigned Flags) {
  // Returns an ident_t* describing \p Loc, for passing to OpenMP runtime
  // entry points. With debug info disabled (or an invalid location), a
  // shared default global is used; otherwise a per-function local ident_t
  // is kept up to date with the current source position.
  Flags |= OMP_IDENT_KMPC;
  // If no debug info is generated - return global default location.
  if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
      Loc.isInvalid())
    return getOrCreateDefaultLocation(Flags).getPointer();

  assert(CGF.CurFn && "No function in current CodeGenFunction.");

  CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
  Address LocValue = Address::invalid();
  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
  if (I != OpenMPLocThreadIDMap.end())
    LocValue = Address(I->second.DebugLoc, Align);

  // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
  // GetOpenMPThreadID was called before this routine.
  if (!LocValue.isValid()) {
    // Generate "ident_t .kmpc_loc.addr;"
    Address AI = CGF.CreateMemTemp(IdentQTy, ".kmpc_loc.addr");
    auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
    Elem.second.DebugLoc = AI.getPointer();
    LocValue = AI;

    // Seed the local ident_t from the default-location global. Emit the
    // memcpy at the alloca insertion point so it dominates all uses; the
    // guard restores the builder's insert point afterwards.
    CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
    CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
    CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
                             CGF.getTypeSize(IdentQTy));
  }

  // char **psource = &.kmpc_loc_<flags>.addr.psource;
  LValue Base = CGF.MakeAddrLValue(LocValue, IdentQTy);
  auto Fields = cast<RecordDecl>(IdentQTy->getAsTagDecl())->field_begin();
  LValue PSource =
      CGF.EmitLValueForField(Base, *std::next(Fields, IdentField_PSource));

  // Debug-location strings are cached per raw source location.
  llvm::Value *OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
  if (OMPDebugLoc == nullptr) {
    SmallString<128> Buffer2;
    llvm::raw_svector_ostream OS2(Buffer2);
    // Build debug location
    PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
    OS2 << ";" << PLoc.getFilename() << ";";
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
      OS2 << FD->getQualifiedNameAsString();
    OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
    OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
    OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
  }
  // *psource = ";<File>;<Function>;<Line>;<Column>;;";
  CGF.EmitStoreOfScalar(OMPDebugLoc, PSource);

  // Our callers always pass this to a runtime function, so for
  // convenience, go ahead and return a naked pointer.
  return LocValue.getPointer();
}
1572
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
                                          SourceLocation Loc) {
  // Returns the OpenMP global thread id for the current function, loading it
  // from the outlined region's thread-id argument when available, otherwise
  // calling __kmpc_global_thread_num. The result is cached per function.
  assert(CGF.CurFn && "No function in current CodeGenFunction.");

  llvm::Value *ThreadID = nullptr;
  // Check whether we've already cached a load of the thread id in this
  // function.
  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
  if (I != OpenMPLocThreadIDMap.end()) {
    ThreadID = I->second.ThreadID;
    if (ThreadID != nullptr)
      return ThreadID;
  }
  // If exceptions are enabled, do not use parameter to avoid possible crash.
  if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
      !CGF.getLangOpts().CXXExceptions ||
      CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
    if (auto *OMPRegionInfo =
            dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
      if (OMPRegionInfo->getThreadIDVariable()) {
        // Check if this an outlined function with thread id passed as argument.
        LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
        ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
        // If value loaded in entry block, cache it and use it everywhere in
        // function.
        if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
          auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
          Elem.second.ThreadID = ThreadID;
        }
        return ThreadID;
      }
    }
  }

  // This is not an outlined function region - need to call __kmpc_int32
  // kmpc_global_thread_num(ident_t *loc).
  // Generate thread id value and cache this value for use across the
  // function.
  // Emit the call at the alloca insertion point so it dominates all uses;
  // the guard restores the previous insert point afterwards.
  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
  CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
  llvm::CallInst *Call = CGF.Builder.CreateCall(
      createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
      emitUpdateLocation(CGF, Loc));
  Call->setCallingConv(CGF.getRuntimeCC());
  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
  Elem.second.ThreadID = Call;
  return Call;
}
1621
1622 void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
1623   assert(CGF.CurFn && "No function in current CodeGenFunction.");
1624   if (OpenMPLocThreadIDMap.count(CGF.CurFn))
1625     OpenMPLocThreadIDMap.erase(CGF.CurFn);
1626   if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1627     for(auto *D : FunctionUDRMap[CGF.CurFn])
1628       UDRMap.erase(D);
1629     FunctionUDRMap.erase(CGF.CurFn);
1630   }
1631 }
1632
1633 llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
1634   return IdentTy->getPointerTo();
1635 }
1636
1637 llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
1638   if (!Kmpc_MicroTy) {
1639     // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1640     llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1641                                  llvm::PointerType::getUnqual(CGM.Int32Ty)};
1642     Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1643   }
1644   return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1645 }
1646
1647 llvm::Constant *
1648 CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
1649   llvm::Constant *RTLFn = nullptr;
1650   switch (static_cast<OpenMPRTLFunction>(Function)) {
1651   case OMPRTL__kmpc_fork_call: {
1652     // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
1653     // microtask, ...);
1654     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1655                                 getKmpc_MicroPointerTy()};
1656     auto *FnTy =
1657         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1658     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
1659     break;
1660   }
1661   case OMPRTL__kmpc_global_thread_num: {
1662     // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
1663     llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1664     auto *FnTy =
1665         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1666     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
1667     break;
1668   }
1669   case OMPRTL__kmpc_threadprivate_cached: {
1670     // Build void *__kmpc_threadprivate_cached(ident_t *loc,
1671     // kmp_int32 global_tid, void *data, size_t size, void ***cache);
1672     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1673                                 CGM.VoidPtrTy, CGM.SizeTy,
1674                                 CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
1675     auto *FnTy =
1676         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
1677     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
1678     break;
1679   }
1680   case OMPRTL__kmpc_critical: {
1681     // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
1682     // kmp_critical_name *crit);
1683     llvm::Type *TypeParams[] = {
1684         getIdentTyPointerTy(), CGM.Int32Ty,
1685         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1686     auto *FnTy =
1687         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1688     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
1689     break;
1690   }
1691   case OMPRTL__kmpc_critical_with_hint: {
1692     // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
1693     // kmp_critical_name *crit, uintptr_t hint);
1694     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1695                                 llvm::PointerType::getUnqual(KmpCriticalNameTy),
1696                                 CGM.IntPtrTy};
1697     auto *FnTy =
1698         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1699     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
1700     break;
1701   }
1702   case OMPRTL__kmpc_threadprivate_register: {
1703     // Build void __kmpc_threadprivate_register(ident_t *, void *data,
1704     // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
1705     // typedef void *(*kmpc_ctor)(void *);
1706     auto *KmpcCtorTy =
1707         llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
1708                                 /*isVarArg*/ false)->getPointerTo();
1709     // typedef void *(*kmpc_cctor)(void *, void *);
1710     llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1711     auto *KmpcCopyCtorTy =
1712         llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
1713                                 /*isVarArg*/ false)
1714             ->getPointerTo();
1715     // typedef void (*kmpc_dtor)(void *);
1716     auto *KmpcDtorTy =
1717         llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
1718             ->getPointerTo();
1719     llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
1720                               KmpcCopyCtorTy, KmpcDtorTy};
1721     auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
1722                                         /*isVarArg*/ false);
1723     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
1724     break;
1725   }
1726   case OMPRTL__kmpc_end_critical: {
1727     // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
1728     // kmp_critical_name *crit);
1729     llvm::Type *TypeParams[] = {
1730         getIdentTyPointerTy(), CGM.Int32Ty,
1731         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1732     auto *FnTy =
1733         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1734     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
1735     break;
1736   }
1737   case OMPRTL__kmpc_cancel_barrier: {
1738     // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
1739     // global_tid);
1740     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1741     auto *FnTy =
1742         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1743     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
1744     break;
1745   }
1746   case OMPRTL__kmpc_barrier: {
1747     // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1748     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1749     auto *FnTy =
1750         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1751     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1752     break;
1753   }
1754   case OMPRTL__kmpc_for_static_fini: {
1755     // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
1756     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1757     auto *FnTy =
1758         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1759     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
1760     break;
1761   }
1762   case OMPRTL__kmpc_push_num_threads: {
1763     // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
1764     // kmp_int32 num_threads)
1765     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1766                                 CGM.Int32Ty};
1767     auto *FnTy =
1768         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1769     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
1770     break;
1771   }
1772   case OMPRTL__kmpc_serialized_parallel: {
1773     // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1774     // global_tid);
1775     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1776     auto *FnTy =
1777         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1778     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1779     break;
1780   }
1781   case OMPRTL__kmpc_end_serialized_parallel: {
1782     // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1783     // global_tid);
1784     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1785     auto *FnTy =
1786         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1787     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1788     break;
1789   }
1790   case OMPRTL__kmpc_flush: {
1791     // Build void __kmpc_flush(ident_t *loc);
1792     llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1793     auto *FnTy =
1794         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1795     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
1796     break;
1797   }
1798   case OMPRTL__kmpc_master: {
1799     // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
1800     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1801     auto *FnTy =
1802         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1803     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
1804     break;
1805   }
1806   case OMPRTL__kmpc_end_master: {
1807     // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
1808     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1809     auto *FnTy =
1810         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1811     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
1812     break;
1813   }
1814   case OMPRTL__kmpc_omp_taskyield: {
1815     // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
1816     // int end_part);
1817     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1818     auto *FnTy =
1819         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1820     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
1821     break;
1822   }
1823   case OMPRTL__kmpc_single: {
1824     // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
1825     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1826     auto *FnTy =
1827         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1828     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
1829     break;
1830   }
1831   case OMPRTL__kmpc_end_single: {
1832     // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
1833     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1834     auto *FnTy =
1835         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1836     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
1837     break;
1838   }
1839   case OMPRTL__kmpc_omp_task_alloc: {
1840     // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
1841     // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1842     // kmp_routine_entry_t *task_entry);
1843     assert(KmpRoutineEntryPtrTy != nullptr &&
1844            "Type kmp_routine_entry_t must be created.");
1845     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1846                                 CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
1847     // Return void * and then cast to particular kmp_task_t type.
1848     auto *FnTy =
1849         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1850     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
1851     break;
1852   }
1853   case OMPRTL__kmpc_omp_task: {
1854     // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1855     // *new_task);
1856     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1857                                 CGM.VoidPtrTy};
1858     auto *FnTy =
1859         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1860     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
1861     break;
1862   }
1863   case OMPRTL__kmpc_copyprivate: {
1864     // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
1865     // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
1866     // kmp_int32 didit);
1867     llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1868     auto *CpyFnTy =
1869         llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
1870     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
1871                                 CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
1872                                 CGM.Int32Ty};
1873     auto *FnTy =
1874         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1875     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
1876     break;
1877   }
1878   case OMPRTL__kmpc_reduce: {
1879     // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
1880     // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
1881     // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
1882     llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1883     auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1884                                                /*isVarArg=*/false);
1885     llvm::Type *TypeParams[] = {
1886         getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1887         CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1888         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1889     auto *FnTy =
1890         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1891     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
1892     break;
1893   }
1894   case OMPRTL__kmpc_reduce_nowait: {
1895     // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
1896     // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
1897     // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
1898     // *lck);
1899     llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1900     auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1901                                                /*isVarArg=*/false);
1902     llvm::Type *TypeParams[] = {
1903         getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1904         CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1905         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1906     auto *FnTy =
1907         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1908     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
1909     break;
1910   }
1911   case OMPRTL__kmpc_end_reduce: {
1912     // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
1913     // kmp_critical_name *lck);
1914     llvm::Type *TypeParams[] = {
1915         getIdentTyPointerTy(), CGM.Int32Ty,
1916         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1917     auto *FnTy =
1918         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1919     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
1920     break;
1921   }
1922   case OMPRTL__kmpc_end_reduce_nowait: {
1923     // Build __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
1924     // kmp_critical_name *lck);
1925     llvm::Type *TypeParams[] = {
1926         getIdentTyPointerTy(), CGM.Int32Ty,
1927         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1928     auto *FnTy =
1929         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1930     RTLFn =
1931         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
1932     break;
1933   }
1934   case OMPRTL__kmpc_omp_task_begin_if0: {
1935     // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1936     // *new_task);
1937     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1938                                 CGM.VoidPtrTy};
1939     auto *FnTy =
1940         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1941     RTLFn =
1942         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
1943     break;
1944   }
1945   case OMPRTL__kmpc_omp_task_complete_if0: {
1946     // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1947     // *new_task);
1948     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1949                                 CGM.VoidPtrTy};
1950     auto *FnTy =
1951         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1952     RTLFn = CGM.CreateRuntimeFunction(FnTy,
1953                                       /*Name=*/"__kmpc_omp_task_complete_if0");
1954     break;
1955   }
1956   case OMPRTL__kmpc_ordered: {
1957     // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
1958     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1959     auto *FnTy =
1960         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1961     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
1962     break;
1963   }
1964   case OMPRTL__kmpc_end_ordered: {
1965     // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
1966     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1967     auto *FnTy =
1968         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1969     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
1970     break;
1971   }
1972   case OMPRTL__kmpc_omp_taskwait: {
1973     // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
1974     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1975     auto *FnTy =
1976         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1977     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
1978     break;
1979   }
1980   case OMPRTL__kmpc_taskgroup: {
1981     // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
1982     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1983     auto *FnTy =
1984         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1985     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
1986     break;
1987   }
1988   case OMPRTL__kmpc_end_taskgroup: {
1989     // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
1990     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1991     auto *FnTy =
1992         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1993     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
1994     break;
1995   }
1996   case OMPRTL__kmpc_push_proc_bind: {
1997     // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
1998     // int proc_bind)
1999     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2000     auto *FnTy =
2001         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2002     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
2003     break;
2004   }
2005   case OMPRTL__kmpc_omp_task_with_deps: {
2006     // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
2007     // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
2008     // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
2009     llvm::Type *TypeParams[] = {
2010         getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
2011         CGM.VoidPtrTy,         CGM.Int32Ty, CGM.VoidPtrTy};
2012     auto *FnTy =
2013         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2014     RTLFn =
2015         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
2016     break;
2017   }
2018   case OMPRTL__kmpc_omp_wait_deps: {
2019     // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
2020     // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
2021     // kmp_depend_info_t *noalias_dep_list);
2022     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2023                                 CGM.Int32Ty,           CGM.VoidPtrTy,
2024                                 CGM.Int32Ty,           CGM.VoidPtrTy};
2025     auto *FnTy =
2026         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2027     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
2028     break;
2029   }
2030   case OMPRTL__kmpc_cancellationpoint: {
2031     // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
2032     // global_tid, kmp_int32 cncl_kind)
2033     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2034     auto *FnTy =
2035         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2036     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
2037     break;
2038   }
2039   case OMPRTL__kmpc_cancel: {
2040     // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
2041     // kmp_int32 cncl_kind)
2042     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2043     auto *FnTy =
2044         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2045     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
2046     break;
2047   }
2048   case OMPRTL__kmpc_push_num_teams: {
2049     // Build void kmpc_push_num_teams (ident_t loc, kmp_int32 global_tid,
2050     // kmp_int32 num_teams, kmp_int32 num_threads)
2051     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
2052         CGM.Int32Ty};
2053     auto *FnTy =
2054         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2055     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
2056     break;
2057   }
2058   case OMPRTL__kmpc_fork_teams: {
2059     // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
2060     // microtask, ...);
2061     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2062                                 getKmpc_MicroPointerTy()};
2063     auto *FnTy =
2064         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
2065     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
2066     break;
2067   }
2068   case OMPRTL__kmpc_taskloop: {
2069     // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
2070     // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
2071     // sched, kmp_uint64 grainsize, void *task_dup);
2072     llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2073                                 CGM.IntTy,
2074                                 CGM.VoidPtrTy,
2075                                 CGM.IntTy,
2076                                 CGM.Int64Ty->getPointerTo(),
2077                                 CGM.Int64Ty->getPointerTo(),
2078                                 CGM.Int64Ty,
2079                                 CGM.IntTy,
2080                                 CGM.IntTy,
2081                                 CGM.Int64Ty,
2082                                 CGM.VoidPtrTy};
2083     auto *FnTy =
2084         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2085     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
2086     break;
2087   }
2088   case OMPRTL__kmpc_doacross_init: {
2089     // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
2090     // num_dims, struct kmp_dim *dims);
2091     llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2092                                 CGM.Int32Ty,
2093                                 CGM.Int32Ty,
2094                                 CGM.VoidPtrTy};
2095     auto *FnTy =
2096         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2097     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
2098     break;
2099   }
2100   case OMPRTL__kmpc_doacross_fini: {
2101     // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
2102     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2103     auto *FnTy =
2104         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2105     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
2106     break;
2107   }
2108   case OMPRTL__kmpc_doacross_post: {
2109     // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
2110     // *vec);
2111     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2112                                 CGM.Int64Ty->getPointerTo()};
2113     auto *FnTy =
2114         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2115     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
2116     break;
2117   }
2118   case OMPRTL__kmpc_doacross_wait: {
2119     // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
2120     // *vec);
2121     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2122                                 CGM.Int64Ty->getPointerTo()};
2123     auto *FnTy =
2124         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2125     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
2126     break;
2127   }
2128   case OMPRTL__kmpc_task_reduction_init: {
2129     // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
2130     // *data);
2131     llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
2132     auto *FnTy =
2133         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2134     RTLFn =
2135         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
2136     break;
2137   }
2138   case OMPRTL__kmpc_task_reduction_get_th_data: {
2139     // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
2140     // *d);
2141     llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2142     auto *FnTy =
2143         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2144     RTLFn = CGM.CreateRuntimeFunction(
2145         FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
2146     break;
2147   }
2148   case OMPRTL__tgt_target: {
2149     // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
2150     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2151     // *arg_types);
2152     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2153                                 CGM.VoidPtrTy,
2154                                 CGM.Int32Ty,
2155                                 CGM.VoidPtrPtrTy,
2156                                 CGM.VoidPtrPtrTy,
2157                                 CGM.SizeTy->getPointerTo(),
2158                                 CGM.Int64Ty->getPointerTo()};
2159     auto *FnTy =
2160         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2161     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
2162     break;
2163   }
2164   case OMPRTL__tgt_target_nowait: {
2165     // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
2166     // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2167     // int64_t *arg_types);
2168     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2169                                 CGM.VoidPtrTy,
2170                                 CGM.Int32Ty,
2171                                 CGM.VoidPtrPtrTy,
2172                                 CGM.VoidPtrPtrTy,
2173                                 CGM.SizeTy->getPointerTo(),
2174                                 CGM.Int64Ty->getPointerTo()};
2175     auto *FnTy =
2176         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2177     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
2178     break;
2179   }
2180   case OMPRTL__tgt_target_teams: {
2181     // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
2182     // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2183     // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2184     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2185                                 CGM.VoidPtrTy,
2186                                 CGM.Int32Ty,
2187                                 CGM.VoidPtrPtrTy,
2188                                 CGM.VoidPtrPtrTy,
2189                                 CGM.SizeTy->getPointerTo(),
2190                                 CGM.Int64Ty->getPointerTo(),
2191                                 CGM.Int32Ty,
2192                                 CGM.Int32Ty};
2193     auto *FnTy =
2194         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2195     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
2196     break;
2197   }
2198   case OMPRTL__tgt_target_teams_nowait: {
2199     // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
2200     // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
2201     // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2202     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2203                                 CGM.VoidPtrTy,
2204                                 CGM.Int32Ty,
2205                                 CGM.VoidPtrPtrTy,
2206                                 CGM.VoidPtrPtrTy,
2207                                 CGM.SizeTy->getPointerTo(),
2208                                 CGM.Int64Ty->getPointerTo(),
2209                                 CGM.Int32Ty,
2210                                 CGM.Int32Ty};
2211     auto *FnTy =
2212         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2213     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
2214     break;
2215   }
2216   case OMPRTL__tgt_register_lib: {
2217     // Build void __tgt_register_lib(__tgt_bin_desc *desc);
2218     QualType ParamTy =
2219         CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2220     llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2221     auto *FnTy =
2222         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2223     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
2224     break;
2225   }
2226   case OMPRTL__tgt_unregister_lib: {
2227     // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
2228     QualType ParamTy =
2229         CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2230     llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2231     auto *FnTy =
2232         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2233     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
2234     break;
2235   }
2236   case OMPRTL__tgt_target_data_begin: {
2237     // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
2238     // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2239     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2240                                 CGM.Int32Ty,
2241                                 CGM.VoidPtrPtrTy,
2242                                 CGM.VoidPtrPtrTy,
2243                                 CGM.SizeTy->getPointerTo(),
2244                                 CGM.Int64Ty->getPointerTo()};
2245     auto *FnTy =
2246         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2247     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
2248     break;
2249   }
2250   case OMPRTL__tgt_target_data_begin_nowait: {
2251     // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
2252     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2253     // *arg_types);
2254     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2255                                 CGM.Int32Ty,
2256                                 CGM.VoidPtrPtrTy,
2257                                 CGM.VoidPtrPtrTy,
2258                                 CGM.SizeTy->getPointerTo(),
2259                                 CGM.Int64Ty->getPointerTo()};
2260     auto *FnTy =
2261         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2262     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
2263     break;
2264   }
2265   case OMPRTL__tgt_target_data_end: {
2266     // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
2267     // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2268     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2269                                 CGM.Int32Ty,
2270                                 CGM.VoidPtrPtrTy,
2271                                 CGM.VoidPtrPtrTy,
2272                                 CGM.SizeTy->getPointerTo(),
2273                                 CGM.Int64Ty->getPointerTo()};
2274     auto *FnTy =
2275         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2276     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
2277     break;
2278   }
2279   case OMPRTL__tgt_target_data_end_nowait: {
2280     // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
2281     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2282     // *arg_types);
2283     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2284                                 CGM.Int32Ty,
2285                                 CGM.VoidPtrPtrTy,
2286                                 CGM.VoidPtrPtrTy,
2287                                 CGM.SizeTy->getPointerTo(),
2288                                 CGM.Int64Ty->getPointerTo()};
2289     auto *FnTy =
2290         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2291     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
2292     break;
2293   }
2294   case OMPRTL__tgt_target_data_update: {
2295     // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
2296     // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2297     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2298                                 CGM.Int32Ty,
2299                                 CGM.VoidPtrPtrTy,
2300                                 CGM.VoidPtrPtrTy,
2301                                 CGM.SizeTy->getPointerTo(),
2302                                 CGM.Int64Ty->getPointerTo()};
2303     auto *FnTy =
2304         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2305     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
2306     break;
2307   }
2308   case OMPRTL__tgt_target_data_update_nowait: {
2309     // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
2310     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2311     // *arg_types);
2312     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2313                                 CGM.Int32Ty,
2314                                 CGM.VoidPtrPtrTy,
2315                                 CGM.VoidPtrPtrTy,
2316                                 CGM.SizeTy->getPointerTo(),
2317                                 CGM.Int64Ty->getPointerTo()};
2318     auto *FnTy =
2319         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2320     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
2321     break;
2322   }
2323   }
2324   assert(RTLFn && "Unable to find OpenMP runtime function");
2325   return RTLFn;
2326 }
2327
2328 llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
2329                                                              bool IVSigned) {
2330   assert((IVSize == 32 || IVSize == 64) &&
2331          "IV size is not compatible with the omp runtime");
2332   StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
2333                                             : "__kmpc_for_static_init_4u")
2334                                 : (IVSigned ? "__kmpc_for_static_init_8"
2335                                             : "__kmpc_for_static_init_8u");
2336   llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2337   auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2338   llvm::Type *TypeParams[] = {
2339     getIdentTyPointerTy(),                     // loc
2340     CGM.Int32Ty,                               // tid
2341     CGM.Int32Ty,                               // schedtype
2342     llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2343     PtrTy,                                     // p_lower
2344     PtrTy,                                     // p_upper
2345     PtrTy,                                     // p_stride
2346     ITy,                                       // incr
2347     ITy                                        // chunk
2348   };
2349   auto *FnTy =
2350       llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2351   return CGM.CreateRuntimeFunction(FnTy, Name);
2352 }
2353
2354 llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
2355                                                             bool IVSigned) {
2356   assert((IVSize == 32 || IVSize == 64) &&
2357          "IV size is not compatible with the omp runtime");
2358   StringRef Name =
2359       IVSize == 32
2360           ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
2361           : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
2362   llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2363   llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
2364                                CGM.Int32Ty,           // tid
2365                                CGM.Int32Ty,           // schedtype
2366                                ITy,                   // lower
2367                                ITy,                   // upper
2368                                ITy,                   // stride
2369                                ITy                    // chunk
2370   };
2371   auto *FnTy =
2372       llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2373   return CGM.CreateRuntimeFunction(FnTy, Name);
2374 }
2375
2376 llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
2377                                                             bool IVSigned) {
2378   assert((IVSize == 32 || IVSize == 64) &&
2379          "IV size is not compatible with the omp runtime");
2380   StringRef Name =
2381       IVSize == 32
2382           ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
2383           : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
2384   llvm::Type *TypeParams[] = {
2385       getIdentTyPointerTy(), // loc
2386       CGM.Int32Ty,           // tid
2387   };
2388   auto *FnTy =
2389       llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2390   return CGM.CreateRuntimeFunction(FnTy, Name);
2391 }
2392
2393 llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
2394                                                             bool IVSigned) {
2395   assert((IVSize == 32 || IVSize == 64) &&
2396          "IV size is not compatible with the omp runtime");
2397   StringRef Name =
2398       IVSize == 32
2399           ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
2400           : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
2401   llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2402   auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2403   llvm::Type *TypeParams[] = {
2404     getIdentTyPointerTy(),                     // loc
2405     CGM.Int32Ty,                               // tid
2406     llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2407     PtrTy,                                     // p_lower
2408     PtrTy,                                     // p_upper
2409     PtrTy                                      // p_stride
2410   };
2411   auto *FnTy =
2412       llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2413   return CGM.CreateRuntimeFunction(FnTy, Name);
2414 }
2415
2416 Address CGOpenMPRuntime::getAddrOfDeclareTargetLink(const VarDecl *VD) {
2417   if (CGM.getLangOpts().OpenMPSimd)
2418     return Address::invalid();
2419   llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2420       isDeclareTargetDeclaration(VD);
2421   if (Res && *Res == OMPDeclareTargetDeclAttr::MT_Link) {
2422     SmallString<64> PtrName;
2423     {
2424       llvm::raw_svector_ostream OS(PtrName);
2425       OS << CGM.getMangledName(GlobalDecl(VD)) << "_decl_tgt_link_ptr";
2426     }
2427     llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
2428     if (!Ptr) {
2429       QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
2430       Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
2431                                         PtrName);
2432       if (!CGM.getLangOpts().OpenMPIsDevice) {
2433         auto *GV = cast<llvm::GlobalVariable>(Ptr);
2434         GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
2435         GV->setInitializer(CGM.GetAddrOfGlobal(VD));
2436       }
2437       CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ptr));
2438       registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
2439     }
2440     return Address(Ptr, CGM.getContext().getDeclAlign(VD));
2441   }
2442   return Address::invalid();
2443 }
2444
2445 llvm::Constant *
2446 CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
2447   assert(!CGM.getLangOpts().OpenMPUseTLS ||
2448          !CGM.getContext().getTargetInfo().isTLSSupported());
2449   // Lookup the entry, lazily creating it if necessary.
2450   std::string Suffix = getName({"cache", ""});
2451   return getOrCreateInternalVariable(
2452       CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
2453 }
2454
2455 Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
2456                                                 const VarDecl *VD,
2457                                                 Address VDAddr,
2458                                                 SourceLocation Loc) {
2459   if (CGM.getLangOpts().OpenMPUseTLS &&
2460       CGM.getContext().getTargetInfo().isTLSSupported())
2461     return VDAddr;
2462
2463   llvm::Type *VarTy = VDAddr.getElementType();
2464   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2465                          CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2466                                                        CGM.Int8PtrTy),
2467                          CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
2468                          getOrCreateThreadPrivateCache(VD)};
2469   return Address(CGF.EmitRuntimeCall(
2470       createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2471                  VDAddr.getAlignment());
2472 }
2473
2474 void CGOpenMPRuntime::emitThreadPrivateVarInit(
2475     CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
2476     llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
2477   // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
2478   // library.
2479   llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
2480   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
2481                       OMPLoc);
2482   // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
2483   // to register constructor/destructor for variable.
2484   llvm::Value *Args[] = {
2485       OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
2486       Ctor, CopyCtor, Dtor};
2487   CGF.EmitRuntimeCall(
2488       createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
2489 }
2490
/// Emit (at most once per variable) the ctor/dtor registration required by the
/// runtime-managed 'threadprivate' scheme for \p VD. Returns the synthesized
/// module-level init function when no CodeGenFunction is supplied (so the
/// caller can schedule it), and nullptr otherwise — including when native TLS
/// is used or when no ctor/dtor is needed at all.
llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
    const VarDecl *VD, Address VDAddr, SourceLocation Loc,
    bool PerformInit, CodeGenFunction *CGF) {
  // With native TLS support the variable is thread-local itself; no runtime
  // registration is required.
  if (CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getContext().getTargetInfo().isTLSSupported())
    return nullptr;

  VD = VD->getDefinition(CGM.getContext());
  // Emit the registration only once per variable definition.
  if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
    ThreadPrivateWithDefinition.insert(VD);
    QualType ASTTy = VD->getType();

    llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
    const Expr *Init = VD->getAnyInitializer();
    if (CGM.getLangOpts().CPlusPlus && PerformInit) {
      // Generate function that re-emits the declaration's initializer into the
      // threadprivate copy of the variable VD. Signature: void *ctor(void *).
      CodeGenFunction CtorCGF(CGM);
      FunctionArgList Args;
      // Single implicit parameter: the address of the threadprivate copy.
      ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
                            /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
      Args.push_back(&Dst);

      const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
          CGM.getContext().VoidPtrTy, Args);
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      std::string Name = getName({"__kmpc_global_ctor_", ""});
      llvm::Function *Fn =
          CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
      CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
                            Args, Loc, Loc);
      // Load the destination pointer and run the initializer expression into
      // it, reinterpreting the storage as the variable's actual type.
      llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
          CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      Address Arg = Address(ArgVal, VDAddr.getAlignment());
      Arg = CtorCGF.Builder.CreateElementBitCast(
          Arg, CtorCGF.ConvertTypeForMem(ASTTy));
      CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
                               /*IsInitializer=*/true);
      // The ctor returns the pointer it was given.
      ArgVal = CtorCGF.EmitLoadOfScalar(
          CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
      CtorCGF.FinishFunction();
      Ctor = Fn;
    }
    if (VD->getType().isDestructedType() != QualType::DK_none) {
      // Generate function that emits destructor call for the threadprivate copy
      // of the variable VD. Signature: void dtor(void *).
      CodeGenFunction DtorCGF(CGM);
      FunctionArgList Args;
      // Single implicit parameter: the address of the threadprivate copy.
      ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
                            /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
      Args.push_back(&Dst);

      const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
          CGM.getContext().VoidTy, Args);
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      std::string Name = getName({"__kmpc_global_dtor_", ""});
      llvm::Function *Fn =
          CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
      // Suppress debug locations for the synthesized prologue.
      auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
      DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
                            Loc, Loc);
      // Create a scope with an artificial location for the body of this function.
      auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
      llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
          DtorCGF.GetAddrOfLocalVar(&Dst),
          /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
      DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
                          DtorCGF.getDestroyer(ASTTy.isDestructedType()),
                          DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
      DtorCGF.FinishFunction();
      Dtor = Fn;
    }
    // Do not emit init function if it is not required.
    if (!Ctor && !Dtor)
      return nullptr;

    llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
                                               /*isVarArg=*/false)
                           ->getPointerTo();
    // Copying constructor for the threadprivate variable.
    // Must be NULL - reserved by runtime, but currently it requires that this
    // parameter is always NULL. Otherwise it fires assertion.
    CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
    // The runtime registration expects typed function pointers; substitute
    // typed null for whichever of ctor/dtor was not generated.
    if (Ctor == nullptr) {
      auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
                                             /*isVarArg=*/false)
                         ->getPointerTo();
      Ctor = llvm::Constant::getNullValue(CtorTy);
    }
    if (Dtor == nullptr) {
      auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
                                             /*isVarArg=*/false)
                         ->getPointerTo();
      Dtor = llvm::Constant::getNullValue(DtorTy);
    }
    if (!CGF) {
      // No enclosing function: synthesize a standalone module-level init
      // function that performs the registration, and return it so the caller
      // can arrange for it to run.
      auto *InitFunctionTy =
          llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
      std::string Name = getName({"__omp_threadprivate_init_", ""});
      llvm::Function *InitFunction = CGM.CreateGlobalInitOrDestructFunction(
          InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
      CodeGenFunction InitCGF(CGM);
      FunctionArgList ArgList;
      InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
                            CGM.getTypes().arrangeNullaryFunction(), ArgList,
                            Loc, Loc);
      emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
      InitCGF.FinishFunction();
      return InitFunction;
    }
    // Otherwise emit the registration inline into the current function.
    emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
  }
  return nullptr;
}
2611
2612 /// Obtain information that uniquely identifies a target entry. This
2613 /// consists of the file and device IDs as well as line number associated with
2614 /// the relevant entry source location.
2615 static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
2616                                      unsigned &DeviceID, unsigned &FileID,
2617                                      unsigned &LineNum) {
2618   SourceManager &SM = C.getSourceManager();
2619
2620   // The loc should be always valid and have a file ID (the user cannot use
2621   // #pragma directives in macros)
2622
2623   assert(Loc.isValid() && "Source location is expected to be always valid.");
2624
2625   PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2626   assert(PLoc.isValid() && "Source location is expected to be always valid.");
2627
2628   llvm::sys::fs::UniqueID ID;
2629   if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
2630     SM.getDiagnostics().Report(diag::err_cannot_open_file)
2631         << PLoc.getFilename() << EC.message();
2632
2633   DeviceID = ID.getDevice();
2634   FileID = ID.getFile();
2635   LineNum = PLoc.getLine();
2636 }
2637
2638 bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
2639                                                      llvm::GlobalVariable *Addr,
2640                                                      bool PerformInit) {
2641   Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2642       isDeclareTargetDeclaration(VD);
2643   if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link)
2644     return false;
2645   VD = VD->getDefinition(CGM.getContext());
2646   if (VD && !DeclareTargetWithDefinition.insert(VD).second)
2647     return CGM.getLangOpts().OpenMPIsDevice;
2648
2649   QualType ASTTy = VD->getType();
2650
2651   SourceLocation Loc = VD->getCanonicalDecl()->getLocStart();
2652   // Produce the unique prefix to identify the new target regions. We use
2653   // the source location of the variable declaration which we know to not
2654   // conflict with any target region.
2655   unsigned DeviceID;
2656   unsigned FileID;
2657   unsigned Line;
2658   getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
2659   SmallString<128> Buffer, Out;
2660   {
2661     llvm::raw_svector_ostream OS(Buffer);
2662     OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
2663        << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
2664   }
2665
2666   const Expr *Init = VD->getAnyInitializer();
2667   if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2668     llvm::Constant *Ctor;
2669     llvm::Constant *ID;
2670     if (CGM.getLangOpts().OpenMPIsDevice) {
2671       // Generate function that re-emits the declaration's initializer into
2672       // the threadprivate copy of the variable VD
2673       CodeGenFunction CtorCGF(CGM);
2674
2675       const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2676       llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2677       llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2678           FTy, Twine(Buffer, "_ctor"), FI, Loc);
2679       auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
2680       CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2681                             FunctionArgList(), Loc, Loc);
2682       auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
2683       CtorCGF.EmitAnyExprToMem(Init,
2684                                Address(Addr, CGM.getContext().getDeclAlign(VD)),
2685                                Init->getType().getQualifiers(),
2686                                /*IsInitializer=*/true);
2687       CtorCGF.FinishFunction();
2688       Ctor = Fn;
2689       ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2690       CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
2691     } else {
2692       Ctor = new llvm::GlobalVariable(
2693           CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2694           llvm::GlobalValue::PrivateLinkage,
2695           llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
2696       ID = Ctor;
2697     }
2698
2699     // Register the information for the entry associated with the constructor.
2700     Out.clear();
2701     OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
2702         DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
2703         ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
2704   }
2705   if (VD->getType().isDestructedType() != QualType::DK_none) {
2706     llvm::Constant *Dtor;
2707     llvm::Constant *ID;
2708     if (CGM.getLangOpts().OpenMPIsDevice) {
2709       // Generate function that emits destructor call for the threadprivate
2710       // copy of the variable VD
2711       CodeGenFunction DtorCGF(CGM);
2712
2713       const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2714       llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2715       llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2716           FTy, Twine(Buffer, "_dtor"), FI, Loc);
2717       auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2718       DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2719                             FunctionArgList(), Loc, Loc);
2720       // Create a scope with an artificial location for the body of this
2721       // function.
2722       auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2723       DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
2724                           ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2725                           DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2726       DtorCGF.FinishFunction();
2727       Dtor = Fn;
2728       ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2729       CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
2730     } else {
2731       Dtor = new llvm::GlobalVariable(
2732           CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2733           llvm::GlobalValue::PrivateLinkage,
2734           llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
2735       ID = Dtor;
2736     }
2737     // Register the information for the entry associated with the destructor.
2738     Out.clear();
2739     OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
2740         DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
2741         ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
2742   }
2743   return CGM.getLangOpts().OpenMPIsDevice;
2744 }
2745
Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
                                                          QualType VarType,
                                                          StringRef Name) {
  // Create (or reuse) an internal global for an artificial threadprivate
  // variable named Name, plus a companion cache global, and return the
  // calling thread's copy via __kmpc_threadprivate_cached, cast to a pointer
  // to VarType's memory representation.
  std::string Suffix = getName({"artificial", ""});
  std::string CacheSuffix = getName({"cache", ""});
  llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
  // Backing storage global shared by all threads; the runtime clones it.
  llvm::Value *GAddr =
      getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, SourceLocation()),
      getThreadID(CGF, SourceLocation()),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
      CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
                                /*IsSigned=*/false),
      // Per-variable cache slot consulted by the runtime on fast lookups.
      getOrCreateInternalVariable(
          CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
  return Address(
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          CGF.EmitRuntimeCall(
              createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
          VarLVType->getPointerTo(/*AddrSpace=*/0)),
      CGM.getPointerAlign());
}
2769
void CGOpenMPRuntime::emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
                                      const RegionCodeGenTy &ThenGen,
                                      const RegionCodeGenTy &ElseGen) {
  // Emit 'if (Cond) ThenGen(); else ElseGen();' for an OpenMP if-clause,
  // eliding the branch (and the dead arm) when Cond constant-folds.
  CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
    if (CondConstant)
      ThenGen(CGF);
    else
      ElseGen(CGF);
    return;
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just
  // emit the conditional branch.
  llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
  llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
  CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);

  // Emit the 'then' code.
  CGF.EmitBlock(ThenBlock);
  ThenGen(CGF);
  CGF.EmitBranch(ContBlock);
  // Emit the 'else' code if present.
  // There is no need to emit line number for unconditional branch.
  (void)ApplyDebugLocation::CreateEmpty(CGF);
  CGF.EmitBlock(ElseBlock);
  ElseGen(CGF);
  // There is no need to emit line number for unconditional branch.
  (void)ApplyDebugLocation::CreateEmpty(CGF);
  CGF.EmitBranch(ContBlock);
  // Emit the continuation block for code after the if.
  CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
}
2808
void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       llvm::Value *OutlinedFn,
                                       ArrayRef<llvm::Value *> CapturedVars,
                                       const Expr *IfCond) {
  // Emit the runtime calls for a 'parallel' region. When the if-clause is
  // absent or true the outlined function is launched through
  // __kmpc_fork_call; when it is false the region runs serially on the
  // encountering thread between (end_)serialized_parallel calls.
  if (!CGF.HaveInsertPoint())
    return;
  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
                                                     PrePostActionTy &) {
    // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    llvm::Value *Args[] = {
        RTLoc,
        CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
        CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
    llvm::SmallVector<llvm::Value *, 16> RealArgs;
    RealArgs.append(std::begin(Args), std::end(Args));
    // The captured variables become trailing variadic arguments.
    RealArgs.append(CapturedVars.begin(), CapturedVars.end());

    llvm::Value *RTLFn = RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
    CGF.EmitRuntimeCall(RTLFn, RealArgs);
  };
  auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
                                                          PrePostActionTy &) {
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
    // Build calls:
    // __kmpc_serialized_parallel(&Loc, GTid);
    llvm::Value *Args[] = {RTLoc, ThreadID};
    CGF.EmitRuntimeCall(
        RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);

    // OutlinedFn(&GTid, &zero, CapturedStruct);
    Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                        /*Name*/ ".zero.addr");
    CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
    // ThreadId for serialized parallels is 0.
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
    RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);

    // __kmpc_end_serialized_parallel(&Loc, GTid);
    llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
    CGF.EmitRuntimeCall(
        RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
        EndArgs);
  };
  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
  } else {
    // No if-clause: unconditionally take the fork path.
    RegionCodeGenTy ThenRCG(ThenGen);
    ThenRCG(CGF);
  }
}
2865
// If we're inside an (outlined) parallel region, use the region info's
// thread-ID variable (it is passed in a first argument of the outlined function
// as "kmp_int32 *gtid"). Otherwise, if we're not inside parallel region, but in
// regular serial code region, get thread ID by calling kmp_int32
// kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and
// return the address of that temp.
Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
                                             SourceLocation Loc) {
  // Fast path: reuse the gtid argument of the enclosing outlined region.
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
    if (OMPRegionInfo->getThreadIDVariable())
      return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();

  // Slow path: query the runtime and spill the id into a stack temporary so
  // callers that need an address (not a value) can use it.
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  QualType Int32Ty =
      CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
  Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
  CGF.EmitStoreOfScalar(ThreadID,
                        CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));

  return ThreadIDTemp;
}
2888
llvm::Constant *
CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
                                             const llvm::Twine &Name) {
  // Return the module-internal global with the given name, creating a
  // zero-initialized common-linkage global of type Ty on first request.
  // InternalVars caches one global per name.
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << Name;
  StringRef RuntimeName = Out.str();
  auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
  if (Elem.second) {
    // Cache hit: all requests for a given name must agree on the type.
    assert(Elem.second->getType()->getPointerElementType() == Ty &&
           "OMP internal variable has different type than requested");
    return &*Elem.second;
  }

  // Cache miss: create the global, keyed by the map's owned copy of the name.
  return Elem.second = new llvm::GlobalVariable(
             CGM.getModule(), Ty, /*IsConstant*/ false,
             llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
             Elem.first());
}
2908
2909 llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
2910   std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
2911   std::string Name = getName({Prefix, "var"});
2912   return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
2913 }
2914
namespace {
/// Common pre(post)-action for different OpenMP constructs.
/// Wraps a region with a runtime "enter" call on entry and an "exit" call on
/// exit. When \p Conditional is set, the enter call's return value guards the
/// region: the body only runs if the call returned non-zero.
class CommonActionTy final : public PrePostActionTy {
  llvm::Value *EnterCallee;             // e.g. __kmpc_master / __kmpc_single
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::Value *ExitCallee;              // matching __kmpc_end_* entry point
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional;                     // region guarded by enter's result?
  llvm::BasicBlock *ContBlock = nullptr; // set only on the conditional path

public:
  CommonActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
                 llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
                 bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      // if (EnterRes != 0) { <region> } — fall through to ContBlock otherwise.
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  // NOTE(review): callers in this file only invoke Done() for actions built
  // with Conditional=true, where ContBlock was created in Enter(); confirm
  // before calling it on a non-conditional action (ContBlock is null then).
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};
} // anonymous namespace
2952
void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
                                         StringRef CriticalName,
                                         const RegionCodeGenTy &CriticalOpGen,
                                         SourceLocation Loc, const Expr *Hint) {
  // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
  // CriticalOpGen();
  // __kmpc_end_critical(ident_t *, gtid, Lock);
  // Prepare arguments and build a call to __kmpc_critical
  if (!CGF.HaveInsertPoint())
    return;
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
                         getCriticalRegionLock(CriticalName)};
  llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
                                                std::end(Args));
  if (Hint) {
    // The hint value is only passed to the enter call; end_critical takes
    // the plain three-argument form.
    EnterArgs.push_back(CGF.Builder.CreateIntCast(
        CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
  }
  CommonActionTy Action(
      createRuntimeFunction(Hint ? OMPRTL__kmpc_critical_with_hint
                                 : OMPRTL__kmpc_critical),
      EnterArgs, createRuntimeFunction(OMPRTL__kmpc_end_critical), Args);
  CriticalOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
}
2978
void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
                                       const RegionCodeGenTy &MasterOpGen,
                                       SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // if(__kmpc_master(ident_t *, gtid)) {
  //   MasterOpGen();
  //   __kmpc_end_master(ident_t *, gtid);
  // }
  // Prepare arguments and build a call to __kmpc_master
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  // Conditional: only the thread for which __kmpc_master returns non-zero
  // executes the region body.
  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
                        createRuntimeFunction(OMPRTL__kmpc_end_master), Args,
                        /*Conditional=*/true);
  MasterOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
  // Close the conditional region opened by the action's Enter().
  Action.Done(CGF);
}
2997
2998 void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
2999                                         SourceLocation Loc) {
3000   if (!CGF.HaveInsertPoint())
3001     return;
3002   // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
3003   llvm::Value *Args[] = {
3004       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3005       llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
3006   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args);
3007   if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
3008     Region->emitUntiedSwitch(CGF);
3009 }
3010
void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
                                          const RegionCodeGenTy &TaskgroupOpGen,
                                          SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // __kmpc_taskgroup(ident_t *, gtid);
  // TaskgroupOpGen();
  // __kmpc_end_taskgroup(ident_t *, gtid);
  // Prepare arguments and build a call to __kmpc_taskgroup
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  // Unconditional bracketing: every thread enters and exits the taskgroup.
  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
                        createRuntimeFunction(OMPRTL__kmpc_end_taskgroup),
                        Args);
  TaskgroupOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
}
3027
/// Given an array of pointers to variables, project the address of a
/// given variable. \p Array holds void* slots; the slot at \p Index is
/// loaded and re-typed to \p Var's memory type with \p Var's alignment.
static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
                                      unsigned Index, const VarDecl *Var) {
  // Pull out the pointer to the variable.
  Address PtrAddr =
      CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize());
  llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);

  // Re-type the loaded void* to the variable's converted memory type.
  Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
  Addr = CGF.Builder.CreateElementBitCast(
      Addr, CGF.ConvertTypeForMem(Var->getType()));
  return Addr;
}
3042
/// Emits a helper 'void copy_func(void *LHSArg, void *RHSArg)' used by
/// __kmpc_copyprivate. Both arguments are arrays of void* (one slot per
/// copyprivate variable, type \p ArgsType); each destination slot is assigned
/// from the matching source slot using the AST assignment expression in
/// \p AssignmentOps.
static llvm::Value *emitCopyprivateCopyFunction(
    CodeGenModule &CGM, llvm::Type *ArgsType,
    ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
    ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
    SourceLocation Loc) {
  ASTContext &C = CGM.getContext();
  // void copy_func(void *LHSArg, void *RHSArg);
  FunctionArgList Args;
  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  Args.push_back(&LHSArg);
  Args.push_back(&RHSArg);
  const auto &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  std::string Name =
      CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
                                    llvm::GlobalValue::InternalLinkage, Name,
                                    &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
  // Dest = (void*[n])(LHSArg);
  // Src = (void*[n])(RHSArg);
  Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
      ArgsType), CGF.getPointerAlign());
  Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
      ArgsType), CGF.getPointerAlign());
  // *(Type0*)Dst[0] = *(Type0*)Src[0];
  // *(Type1*)Dst[1] = *(Type1*)Src[1];
  // ...
  // *(Typen*)Dst[n] = *(Typen*)Src[n];
  for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
    const auto *DestVar =
        cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
    Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);

    const auto *SrcVar =
        cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
    Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);

    // The assignment expression carries the copy semantics (e.g. operator=
    // for class types) for this variable's type.
    const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
    QualType Type = VD->getType();
    CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
  }
  CGF.FinishFunction();
  return Fn;
}
3096
void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
                                       const RegionCodeGenTy &SingleOpGen,
                                       SourceLocation Loc,
                                       ArrayRef<const Expr *> CopyprivateVars,
                                       ArrayRef<const Expr *> SrcExprs,
                                       ArrayRef<const Expr *> DstExprs,
                                       ArrayRef<const Expr *> AssignmentOps) {
  if (!CGF.HaveInsertPoint())
    return;
  assert(CopyprivateVars.size() == SrcExprs.size() &&
         CopyprivateVars.size() == DstExprs.size() &&
         CopyprivateVars.size() == AssignmentOps.size());
  ASTContext &C = CGM.getContext();
  // int32 did_it = 0;
  // if(__kmpc_single(ident_t *, gtid)) {
  //   SingleOpGen();
  //   __kmpc_end_single(ident_t *, gtid);
  //   did_it = 1;
  // }
  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
  // <copy_func>, did_it);

  Address DidIt = Address::invalid();
  if (!CopyprivateVars.empty()) {
    // int32 did_it = 0;
    // Flag recording whether this thread executed the single region; the
    // runtime uses it to identify the source thread for the broadcast.
    QualType KmpInt32Ty =
        C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
    DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
    CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
  }
  // Prepare arguments and build a call to __kmpc_single
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
                        createRuntimeFunction(OMPRTL__kmpc_end_single), Args,
                        /*Conditional=*/true);
  SingleOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
  if (DidIt.isValid()) {
    // did_it = 1;
    // Still inside the conditional region, so only the executing thread
    // stores 1 here.
    CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
  }
  Action.Done(CGF);
  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
  // <copy_func>, did_it);
  if (DidIt.isValid()) {
    llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
    QualType CopyprivateArrayTy =
        C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
                               /*IndexTypeQuals=*/0);
    // Create a list of all private variables for copyprivate.
    Address CopyprivateList =
        CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
    for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
      Address Elem = CGF.Builder.CreateConstArrayGEP(
          CopyprivateList, I, CGF.getPointerSize());
      CGF.Builder.CreateStore(
          CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
              CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
          Elem);
    }
    // Build function that copies private values from single region to all other
    // threads in the corresponding parallel region.
    // NOTE(review): SrcExprs/DstExprs are passed into the (DestExprs,
    // SrcExprs) parameters of emitCopyprivateCopyFunction in that order —
    // looks swapped relative to the parameter names; confirm against the
    // intended copyprivate copy direction before "fixing".
    llvm::Value *CpyFn = emitCopyprivateCopyFunction(
        CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
        CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
    llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
    Address CL =
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
                                                      CGF.VoidPtrTy);
    llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
    llvm::Value *Args[] = {
        emitUpdateLocation(CGF, Loc), // ident_t *<loc>
        getThreadID(CGF, Loc),        // i32 <gtid>
        BufSize,                      // size_t <buf_size>
        CL.getPointer(),              // void *<copyprivate list>
        CpyFn,                        // void (*) (void *, void *) <copy_func>
        DidItVal                      // i32 did_it
    };
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args);
  }
}
3178
3179 void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
3180                                         const RegionCodeGenTy &OrderedOpGen,
3181                                         SourceLocation Loc, bool IsThreads) {
3182   if (!CGF.HaveInsertPoint())
3183     return;
3184   // __kmpc_ordered(ident_t *, gtid);
3185   // OrderedOpGen();
3186   // __kmpc_end_ordered(ident_t *, gtid);
3187   // Prepare arguments and build a call to __kmpc_ordered
3188   if (IsThreads) {
3189     llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3190     CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
3191                           createRuntimeFunction(OMPRTL__kmpc_end_ordered),
3192                           Args);
3193     OrderedOpGen.setAction(Action);
3194     emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
3195     return;
3196   }
3197   emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
3198 }
3199
void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
                                      OpenMPDirectiveKind Kind, bool EmitChecks,
                                      bool ForceSimpleCall) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_cancel_barrier(loc, thread_id);
  // Build call __kmpc_barrier(loc, thread_id);
  // The ident_t flags encode which construct implies this barrier.
  unsigned Flags;
  if (Kind == OMPD_for)
    Flags = OMP_IDENT_BARRIER_IMPL_FOR;
  else if (Kind == OMPD_sections)
    Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
  else if (Kind == OMPD_single)
    Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
  else if (Kind == OMPD_barrier)
    Flags = OMP_IDENT_BARRIER_EXPL;
  else
    Flags = OMP_IDENT_BARRIER_IMPL;
  // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
  // thread_id);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
                         getThreadID(CGF, Loc)};
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    // Inside a cancellable region, use the cancellation-aware barrier so a
    // cancelled team can leave the construct.
    if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
      llvm::Value *Result = CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
      if (EmitChecks) {
        // if (__kmpc_cancel_barrier()) {
        //   exit from construct;
        // }
        llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
        llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
        llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
        CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
        CGF.EmitBlock(ExitBB);
        //   exit from construct;
        CodeGenFunction::JumpDest CancelDestination =
            CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
        CGF.EmitBranchThroughCleanup(CancelDestination);
        CGF.EmitBlock(ContBB, /*IsFinished=*/true);
      }
      return;
    }
  }
  // Default: plain barrier with no cancellation checking.
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
}
3247
3248 /// Map the OpenMP loop schedule to the runtime enumeration.
3249 static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
3250                                           bool Chunked, bool Ordered) {
3251   switch (ScheduleKind) {
3252   case OMPC_SCHEDULE_static:
3253     return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
3254                    : (Ordered ? OMP_ord_static : OMP_sch_static);
3255   case OMPC_SCHEDULE_dynamic:
3256     return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
3257   case OMPC_SCHEDULE_guided:
3258     return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
3259   case OMPC_SCHEDULE_runtime:
3260     return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
3261   case OMPC_SCHEDULE_auto:
3262     return Ordered ? OMP_ord_auto : OMP_sch_auto;
3263   case OMPC_SCHEDULE_unknown:
3264     assert(!Chunked && "chunk was specified but schedule kind not known");
3265     return Ordered ? OMP_ord_static : OMP_sch_static;
3266   }
3267   llvm_unreachable("Unexpected runtime schedule");
3268 }
3269
3270 /// Map the OpenMP distribute schedule to the runtime enumeration.
3271 static OpenMPSchedType
3272 getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
3273   // only static is allowed for dist_schedule
3274   return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
3275 }
3276
3277 bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
3278                                          bool Chunked) const {
3279   OpenMPSchedType Schedule =
3280       getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
3281   return Schedule == OMP_sch_static;
3282 }
3283
3284 bool CGOpenMPRuntime::isStaticNonchunked(
3285     OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
3286   OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
3287   return Schedule == OMP_dist_sch_static;
3288 }
3289
3290
3291 bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
3292   OpenMPSchedType Schedule =
3293       getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
3294   assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
3295   return Schedule != OMP_sch_static;
3296 }
3297
3298 static int addMonoNonMonoModifier(OpenMPSchedType Schedule,
3299                                   OpenMPScheduleClauseModifier M1,
3300                                   OpenMPScheduleClauseModifier M2) {
3301   int Modifier = 0;
3302   switch (M1) {
3303   case OMPC_SCHEDULE_MODIFIER_monotonic:
3304     Modifier = OMP_sch_modifier_monotonic;
3305     break;
3306   case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3307     Modifier = OMP_sch_modifier_nonmonotonic;
3308     break;
3309   case OMPC_SCHEDULE_MODIFIER_simd:
3310     if (Schedule == OMP_sch_static_chunked)
3311       Schedule = OMP_sch_static_balanced_chunked;
3312     break;
3313   case OMPC_SCHEDULE_MODIFIER_last:
3314   case OMPC_SCHEDULE_MODIFIER_unknown:
3315     break;
3316   }
3317   switch (M2) {
3318   case OMPC_SCHEDULE_MODIFIER_monotonic:
3319     Modifier = OMP_sch_modifier_monotonic;
3320     break;
3321   case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3322     Modifier = OMP_sch_modifier_nonmonotonic;
3323     break;
3324   case OMPC_SCHEDULE_MODIFIER_simd:
3325     if (Schedule == OMP_sch_static_chunked)
3326       Schedule = OMP_sch_static_balanced_chunked;
3327     break;
3328   case OMPC_SCHEDULE_MODIFIER_last:
3329   case OMPC_SCHEDULE_MODIFIER_unknown:
3330     break;
3331   }
3332   return Schedule | Modifier;
3333 }
3334
void CGOpenMPRuntime::emitForDispatchInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
    bool Ordered, const DispatchRTInput &DispatchValues) {
  // Emit the __kmpc_dispatch_init_* call that sets up a dynamically
  // scheduled worksharing loop (IVSize selects the 4/8-byte variant).
  if (!CGF.HaveInsertPoint())
    return;
  OpenMPSchedType Schedule = getRuntimeSchedule(
      ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
  // Static schedules must use the static-init path instead, except when
  // 'ordered' forces dynamic dispatch.
  assert(Ordered ||
         (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
          Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
          Schedule != OMP_sch_static_balanced_chunked));
  // Call __kmpc_dispatch_init(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
  //          kmp_int[32|64] lower, kmp_int[32|64] upper,
  //          kmp_int[32|64] stride, kmp_int[32|64] chunk);

  // If the Chunk was not specified in the clause - use default value 1.
  llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
                                            : CGF.Builder.getIntN(IVSize, 1);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      CGF.Builder.getInt32(addMonoNonMonoModifier(
          Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
      DispatchValues.LB,                                // Lower
      DispatchValues.UB,                                // Upper
      CGF.Builder.getIntN(IVSize, 1),                   // Stride
      Chunk                                             // Chunk
  };
  CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
}
3366
/// Shared helper that emits the __kmpc_for_static_init_{4|8}[u] call for both
/// worksharing ('for'/'sections') and 'distribute' loops. The asserts encode
/// the precondition that only static-family schedules reach this path.
static void emitForStaticInitCall(
    CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
    llvm::Constant *ForStaticInitFunction, OpenMPSchedType Schedule,
    OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
    const CGOpenMPRuntime::StaticRTInput &Values) {
  if (!CGF.HaveInsertPoint())
    return;

  // Ordered static loops use the dispatch interface instead.
  assert(!Values.Ordered);
  assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
         Schedule == OMP_sch_static_balanced_chunked ||
         Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
         Schedule == OMP_dist_sch_static ||
         Schedule == OMP_dist_sch_static_chunked);

  // Call __kmpc_for_static_init(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
  //          kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
  //          kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
  //          kmp_int[32|64] incr, kmp_int[32|64] chunk);
  llvm::Value *Chunk = Values.Chunk;
  if (Chunk == nullptr) {
    // A missing chunk is only valid for the non-chunked schedules.
    assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
            Schedule == OMP_dist_sch_static) &&
           "expected static non-chunked schedule");
    // If the Chunk was not specified in the clause - use default value 1.
    Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
  } else {
    assert((Schedule == OMP_sch_static_chunked ||
            Schedule == OMP_sch_static_balanced_chunked ||
            Schedule == OMP_ord_static_chunked ||
            Schedule == OMP_dist_sch_static_chunked) &&
           "expected static chunked schedule");
  }
  llvm::Value *Args[] = {
      UpdateLocation,
      ThreadId,
      CGF.Builder.getInt32(addMonoNonMonoModifier(Schedule, M1,
                                                  M2)), // Schedule type
      Values.IL.getPointer(),                           // &isLastIter
      Values.LB.getPointer(),                           // &LB
      Values.UB.getPointer(),                           // &UB
      Values.ST.getPointer(),                           // &Stride
      CGF.Builder.getIntN(Values.IVSize, 1),            // Incr
      Chunk                                             // Chunk
  };
  CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
}
3415
/// Emit the static-init runtime call for a worksharing directive, tagging the
/// source location with the matching ident_t work flag (loop vs. sections).
void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
                                        SourceLocation Loc,
                                        OpenMPDirectiveKind DKind,
                                        const OpenMPScheduleTy &ScheduleKind,
                                        const StaticRTInput &Values) {
  OpenMPSchedType ScheduleNum = getRuntimeSchedule(
      ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
  assert(isOpenMPWorksharingDirective(DKind) &&
         "Expected loop-based or sections-based directive.");
  llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
                                             isOpenMPLoopDirective(DKind)
                                                 ? OMP_IDENT_WORK_LOOP
                                                 : OMP_IDENT_WORK_SECTIONS);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::Constant *StaticInitFunction =
      createForStaticInitFunction(Values.IVSize, Values.IVSigned);
  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
                        ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
}
3435
/// Emit the static-init runtime call for a 'distribute' directive. Unlike
/// emitForStaticInit, 'dist_schedule' has no monotonicity modifiers, so
/// 'unknown' is passed for both modifier slots.
void CGOpenMPRuntime::emitDistributeStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDistScheduleClauseKind SchedKind,
    const CGOpenMPRuntime::StaticRTInput &Values) {
  OpenMPSchedType ScheduleNum =
      getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
  llvm::Value *UpdatedLocation =
      emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::Constant *StaticInitFunction =
      createForStaticInitFunction(Values.IVSize, Values.IVSigned);
  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
                        ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
                        OMPC_SCHEDULE_MODIFIER_unknown, Values);
}
3451
3452 void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
3453                                           SourceLocation Loc,
3454                                           OpenMPDirectiveKind DKind) {
3455   if (!CGF.HaveInsertPoint())
3456     return;
3457   // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
3458   llvm::Value *Args[] = {
3459       emitUpdateLocation(CGF, Loc,
3460                          isOpenMPDistributeDirective(DKind)
3461                              ? OMP_IDENT_WORK_DISTRIBUTE
3462                              : isOpenMPLoopDirective(DKind)
3463                                    ? OMP_IDENT_WORK_LOOP
3464                                    : OMP_IDENT_WORK_SECTIONS),
3465       getThreadID(CGF, Loc)};
3466   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
3467                       Args);
3468 }
3469
3470 void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
3471                                                  SourceLocation Loc,
3472                                                  unsigned IVSize,
3473                                                  bool IVSigned) {
3474   if (!CGF.HaveInsertPoint())
3475     return;
3476   // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
3477   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3478   CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
3479 }
3480
/// Emit the __kmpc_dispatch_next_{4|8}[u] call that fetches the next chunk of
/// a dynamically scheduled loop.
///
/// \return An i1 value: the runtime's kmp_int32 result converted to bool,
///         i.e. whether another chunk is available.
llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
                                          SourceLocation Loc, unsigned IVSize,
                                          bool IVSigned, Address IL,
                                          Address LB, Address UB,
                                          Address ST) {
  // Call __kmpc_dispatch_next(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
  //          kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
  //          kmp_int[32|64] *p_stride);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc),
      getThreadID(CGF, Loc),
      IL.getPointer(), // &isLastIter
      LB.getPointer(), // &Lower
      UB.getPointer(), // &Upper
      ST.getPointer()  // &Stride
  };
  llvm::Value *Call =
      CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
  // The runtime returns a signed 32-bit int; narrow it to bool for the caller.
  return CGF.EmitScalarConversion(
      Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
      CGF.getContext().BoolTy, Loc);
}
3504
3505 void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
3506                                            llvm::Value *NumThreads,
3507                                            SourceLocation Loc) {
3508   if (!CGF.HaveInsertPoint())
3509     return;
3510   // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
3511   llvm::Value *Args[] = {
3512       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3513       CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
3514   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
3515                       Args);
3516 }
3517
/// Emit __kmpc_push_proc_bind for a 'proc_bind' clause, translating the
/// clause kind into the integer encoding the runtime accepts.
void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
                                         OpenMPProcBindClauseKind ProcBind,
                                         SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Constants for proc bind value accepted by the runtime.
  // NOTE(review): these values are assumed to mirror the runtime's proc-bind
  // encoding — verify against the OpenMP runtime headers if they change.
  enum ProcBindTy {
    ProcBindFalse = 0,
    ProcBindTrue,
    ProcBindMaster,
    ProcBindClose,
    ProcBindSpread,
    ProcBindIntel,
    ProcBindDefault
  } RuntimeProcBind;
  // Only master/close/spread can appear on a clause; 'unknown' is rejected by
  // Sema and therefore unreachable here.
  switch (ProcBind) {
  case OMPC_PROC_BIND_master:
    RuntimeProcBind = ProcBindMaster;
    break;
  case OMPC_PROC_BIND_close:
    RuntimeProcBind = ProcBindClose;
    break;
  case OMPC_PROC_BIND_spread:
    RuntimeProcBind = ProcBindSpread;
    break;
  case OMPC_PROC_BIND_unknown:
    llvm_unreachable("Unsupported proc_bind value.");
  }
  // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
}
3552
3553 void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
3554                                 SourceLocation Loc) {
3555   if (!CGF.HaveInsertPoint())
3556     return;
3557   // Build call void __kmpc_flush(ident_t *loc)
3558   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
3559                       emitUpdateLocation(CGF, Loc));
3560 }
3561
namespace {
/// Indexes of fields for type kmp_task_t.
/// The order must match the layout of the runtime's kmp_task_t structure;
/// these constants are used as GEP field indexes.
enum KmpTaskTFields {
  /// List of shared variables.
  KmpTaskTShareds,
  /// Task routine.
  KmpTaskTRoutine,
  /// Partition id for the untied tasks.
  KmpTaskTPartId,
  /// Function with call of destructors for private variables.
  Data1,
  /// Task priority.
  Data2,
  /// (Taskloops only) Lower bound.
  KmpTaskTLowerBound,
  /// (Taskloops only) Upper bound.
  KmpTaskTUpperBound,
  /// (Taskloops only) Stride.
  KmpTaskTStride,
  /// (Taskloops only) Is last iteration flag.
  KmpTaskTLastIter,
  /// (Taskloops only) Reduction data.
  KmpTaskTReductions,
};
} // anonymous namespace
3587
3588 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
3589   return OffloadEntriesTargetRegion.empty() &&
3590          OffloadEntriesDeviceGlobalVar.empty();
3591 }
3592
/// Initialize target region entry.
/// Creates a placeholder entry (address and ID are still null) keyed by
/// DeviceID/FileID/ParentName/LineNum, recording only its creation Order.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                    StringRef ParentName, unsigned LineNum,
                                    unsigned Order) {
  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
                                             "only required for the device "
                                             "code generation.");
  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
      OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
                                   OMPTargetRegionEntryTargetRegion);
  ++OffloadingEntriesNum;
}
3606
/// Register a target region entry with its address, ID and flags.
/// Device path: fills in a previously initialized placeholder (diagnosing a
/// missing one). Host path: creates a brand-new entry.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                  StringRef ParentName, unsigned LineNum,
                                  llvm::Constant *Addr, llvm::Constant *ID,
                                  OMPTargetRegionEntryKind Flags) {
  // If we are emitting code for a target, the entry is already initialized,
  // only has to be registered.
  if (CGM.getLangOpts().OpenMPIsDevice) {
    if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum)) {
      unsigned DiagID = CGM.getDiags().getCustomDiagID(
          DiagnosticsEngine::Error,
          "Unable to find target region on line '%0' in the device code.");
      CGM.getDiags().Report(DiagID) << LineNum;
      return;
    }
    auto &Entry =
        OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
    assert(Entry.isValid() && "Entry not initialized!");
    Entry.setAddress(Addr);
    Entry.setID(ID);
    Entry.setFlags(Flags);
  } else {
    // Host side: the entry is created (and ordered) at registration time.
    OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
    OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
    ++OffloadingEntriesNum;
  }
}
3634
/// Check whether an *unregistered* target region entry exists for the given
/// key. Note the asymmetry: an entry that exists but already has an address
/// or ID makes this return false, so it doubles as a "may still register"
/// check for registerTargetRegionEntryInfo.
bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
    unsigned DeviceID, unsigned FileID, StringRef ParentName,
    unsigned LineNum) const {
  // Walk the DeviceID -> FileID -> ParentName -> LineNum nesting level by
  // level; a miss at any level means the entry does not exist.
  auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
  if (PerDevice == OffloadEntriesTargetRegion.end())
    return false;
  auto PerFile = PerDevice->second.find(FileID);
  if (PerFile == PerDevice->second.end())
    return false;
  auto PerParentName = PerFile->second.find(ParentName);
  if (PerParentName == PerFile->second.end())
    return false;
  auto PerLine = PerParentName->second.find(LineNum);
  if (PerLine == PerParentName->second.end())
    return false;
  // Fail if this entry is already registered.
  if (PerLine->second.getAddress() || PerLine->second.getID())
    return false;
  return true;
}
3655
3656 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
3657     const OffloadTargetRegionEntryInfoActTy &Action) {
3658   // Scan all target region entries and perform the provided action.
3659   for (const auto &D : OffloadEntriesTargetRegion)
3660     for (const auto &F : D.second)
3661       for (const auto &P : F.second)
3662         for (const auto &L : P.second)
3663           Action(D.first, F.first, P.first(), L.first, L.second);
3664 }
3665
/// Initialize a device global variable entry with its flags and creation
/// order; the address and size are filled in later by
/// registerDeviceGlobalVarEntryInfo.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    initializeDeviceGlobalVarEntryInfo(StringRef Name,
                                       OMPTargetGlobalVarEntryKind Flags,
                                       unsigned Order) {
  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
                                             "only required for the device "
                                             "code generation.");
  OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
  ++OffloadingEntriesNum;
}
3676
/// Register a device global variable entry with its address, size, flags and
/// linkage. Device path: completes an initialized entry (idempotent if the
/// address is already set). Host path: creates a new entry unless one exists.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
                                     CharUnits VarSize,
                                     OMPTargetGlobalVarEntryKind Flags,
                                     llvm::GlobalValue::LinkageTypes Linkage) {
  if (CGM.getLangOpts().OpenMPIsDevice) {
    auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
    assert(Entry.isValid() && Entry.getFlags() == Flags &&
           "Entry not initialized!");
    // Re-registering with a different address would be a codegen bug.
    assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
           "Resetting with the new address.");
    if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName))
      return;
    Entry.setAddress(Addr);
    Entry.setVarSize(VarSize);
    Entry.setLinkage(Linkage);
  } else {
    // Host side: first registration wins; subsequent calls are no-ops.
    if (hasDeviceGlobalVarEntryInfo(VarName))
      return;
    OffloadEntriesDeviceGlobalVar.try_emplace(
        VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
    ++OffloadingEntriesNum;
  }
}
3701
3702 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3703     actOnDeviceGlobalVarEntriesInfo(
3704         const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
3705   // Scan all target region entries and perform the provided action.
3706   for (const auto &E : OffloadEntriesDeviceGlobalVar)
3707     Action(E.getKey(), E.getValue());
3708 }
3709
/// Build the host-side offloading binary descriptor plus the functions that
/// register/unregister it with the offload runtime at program startup and
/// shutdown. Returns the registration function (used as a module ctor), or
/// nullptr when nothing needs to be registered.
llvm::Function *
CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
  // If we don't have entries or if we are emitting code for the device, we
  // don't need to do anything.
  if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
    return nullptr;

  llvm::Module &M = CGM.getModule();
  ASTContext &C = CGM.getContext();

  // Get list of devices we care about
  const std::vector<llvm::Triple> &Devices = CGM.getLangOpts().OMPTargetTriples;

  // We should be creating an offloading descriptor only if there are devices
  // specified.
  assert(!Devices.empty() && "No OpenMP offloading devices??");

  // Create the external variables that will point to the begin and end of the
  // host entries section. These will be defined by the linker.
  llvm::Type *OffloadEntryTy =
      CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
  std::string EntriesBeginName = getName({"omp_offloading", "entries_begin"});
  auto *HostEntriesBegin = new llvm::GlobalVariable(
      M, OffloadEntryTy, /*isConstant=*/true,
      llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
      EntriesBeginName);
  std::string EntriesEndName = getName({"omp_offloading", "entries_end"});
  auto *HostEntriesEnd =
      new llvm::GlobalVariable(M, OffloadEntryTy, /*isConstant=*/true,
                               llvm::GlobalValue::ExternalLinkage,
                               /*Initializer=*/nullptr, EntriesEndName);

  // Create all device images
  auto *DeviceImageTy = cast<llvm::StructType>(
      CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
  ConstantInitBuilder DeviceImagesBuilder(CGM);
  ConstantArrayBuilder DeviceImagesEntries =
      DeviceImagesBuilder.beginArray(DeviceImageTy);

  for (const llvm::Triple &Device : Devices) {
    StringRef T = Device.getTriple();
    // The per-device image begin/end symbols are weak externals named with
    // the device triple suffix; the offload linker wrapper defines them.
    std::string BeginName = getName({"omp_offloading", "img_start", ""});
    auto *ImgBegin = new llvm::GlobalVariable(
        M, CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::ExternalWeakLinkage,
        /*Initializer=*/nullptr, Twine(BeginName).concat(T));
    std::string EndName = getName({"omp_offloading", "img_end", ""});
    auto *ImgEnd = new llvm::GlobalVariable(
        M, CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::ExternalWeakLinkage,
        /*Initializer=*/nullptr, Twine(EndName).concat(T));

    llvm::Constant *Data[] = {ImgBegin, ImgEnd, HostEntriesBegin,
                              HostEntriesEnd};
    createConstantGlobalStructAndAddToParent(CGM, getTgtDeviceImageQTy(), Data,
                                             DeviceImagesEntries);
  }

  // Create device images global array.
  std::string ImagesName = getName({"omp_offloading", "device_images"});
  llvm::GlobalVariable *DeviceImages =
      DeviceImagesEntries.finishAndCreateGlobal(ImagesName,
                                                CGM.getPointerAlign(),
                                                /*isConstant=*/true);
  DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // This is a Zero array to be used in the creation of the constant expressions
  // (GEP indices addressing the first element of the images array).
  llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
                             llvm::Constant::getNullValue(CGM.Int32Ty)};

  // Create the target region descriptor.
  llvm::Constant *Data[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
      llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
                                           DeviceImages, Index),
      HostEntriesBegin, HostEntriesEnd};
  std::string Descriptor = getName({"omp_offloading", "descriptor"});
  llvm::GlobalVariable *Desc = createConstantGlobalStruct(
      CGM, getTgtBinaryDescriptorQTy(), Data, Descriptor);

  // Emit code to register or unregister the descriptor at execution
  // startup or closing, respectively.

  llvm::Function *UnRegFn;
  {
    FunctionArgList Args;
    ImplicitParamDecl DummyPtr(C, C.VoidPtrTy, ImplicitParamDecl::Other);
    Args.push_back(&DummyPtr);

    CodeGenFunction CGF(CGM);
    // Disable debug info for global (de-)initializer because they are not part
    // of some particular construct.
    CGF.disableDebugInfo();
    const auto &FI =
        CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
    std::string UnregName = getName({"omp_offloading", "descriptor_unreg"});
    UnRegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, UnregName, FI);
    CGF.StartFunction(GlobalDecl(), C.VoidTy, UnRegFn, FI, Args);
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
                        Desc);
    CGF.FinishFunction();
  }
  llvm::Function *RegFn;
  {
    CodeGenFunction CGF(CGM);
    // Disable debug info for global (de-)initializer because they are not part
    // of some particular construct.
    CGF.disableDebugInfo();
    const auto &FI = CGM.getTypes().arrangeNullaryFunction();
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
    std::string Descriptor = getName({"omp_offloading", "descriptor_reg"});
    RegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, Descriptor, FI);
    CGF.StartFunction(GlobalDecl(), C.VoidTy, RegFn, FI, FunctionArgList());
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_lib), Desc);
    // Create a variable to drive the registration and unregistration of the
    // descriptor, so we can reuse the logic that emits Ctors and Dtors.
    ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(),
                                  SourceLocation(), nullptr, C.CharTy,
                                  ImplicitParamDecl::Other);
    CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
    CGF.FinishFunction();
  }
  if (CGM.supportsCOMDAT()) {
    // It is sufficient to call registration function only once, so create a
    // COMDAT group for registration/unregistration functions and associated
    // data. That would reduce startup time and code size. Registration
    // function serves as a COMDAT group key.
    llvm::Comdat *ComdatKey = M.getOrInsertComdat(RegFn->getName());
    RegFn->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
    RegFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
    RegFn->setComdat(ComdatKey);
    UnRegFn->setComdat(ComdatKey);
    DeviceImages->setComdat(ComdatKey);
    Desc->setComdat(ComdatKey);
  }
  return RegFn;
}
3848
/// Emit one __tgt_offload_entry record {addr, name, size, flags, reserved}
/// into the ".omp_offloading.entries"-style section so the linker-collected
/// entries_begin/entries_end range covers it.
void CGOpenMPRuntime::createOffloadEntry(
    llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
    llvm::GlobalValue::LinkageTypes Linkage) {
  StringRef Name = Addr->getName();
  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &C = M.getContext();

  // Create constant string with the name.
  llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);

  std::string StringName = getName({"omp_offloading", "entry_name"});
  auto *Str = new llvm::GlobalVariable(
      M, StrPtrInit->getType(), /*isConstant=*/true,
      llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
  Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Entry fields: {void *addr, char *name, size_t size, int32 flags,
  // int32 reserved (always 0)}.
  llvm::Constant *Data[] = {llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy),
                            llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy),
                            llvm::ConstantInt::get(CGM.SizeTy, Size),
                            llvm::ConstantInt::get(CGM.Int32Ty, Flags),
                            llvm::ConstantInt::get(CGM.Int32Ty, 0)};
  std::string EntryName = getName({"omp_offloading", "entry", ""});
  llvm::GlobalVariable *Entry = createConstantGlobalStruct(
      CGM, getTgtOffloadEntryQTy(), Data, Twine(EntryName).concat(Name),
      llvm::GlobalValue::WeakAnyLinkage);

  // The entry has to be created in the section the linker expects it to be.
  std::string Section = getName({"omp_offloading", "entries"});
  Entry->setSection(Section);
}
3879
3880 void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
3881   // Emit the offloading entries and metadata so that the device codegen side
3882   // can easily figure out what to emit. The produced metadata looks like
3883   // this:
3884   //
3885   // !omp_offload.info = !{!1, ...}
3886   //
3887   // Right now we only generate metadata for function that contain target
3888   // regions.
3889
3890   // If we do not have entries, we don't need to do anything.
3891   if (OffloadEntriesInfoManager.empty())
3892     return;
3893
3894   llvm::Module &M = CGM.getModule();
3895   llvm::LLVMContext &C = M.getContext();
3896   SmallVector<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
3897       OrderedEntries(OffloadEntriesInfoManager.size());
3898
3899   // Auxiliary methods to create metadata values and strings.
3900   auto &&GetMDInt = [this](unsigned V) {
3901     return llvm::ConstantAsMetadata::get(
3902         llvm::ConstantInt::get(CGM.Int32Ty, V));
3903   };
3904
3905   auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
3906
3907   // Create the offloading info metadata node.
3908   llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
3909
3910   // Create function that emits metadata for each target region entry;
3911   auto &&TargetRegionMetadataEmitter =
3912       [&C, MD, &OrderedEntries, &GetMDInt, &GetMDString](
3913           unsigned DeviceID, unsigned FileID, StringRef ParentName,
3914           unsigned Line,
3915           const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
3916         // Generate metadata for target regions. Each entry of this metadata
3917         // contains:
3918         // - Entry 0 -> Kind of this type of metadata (0).
3919         // - Entry 1 -> Device ID of the file where the entry was identified.
3920         // - Entry 2 -> File ID of the file where the entry was identified.
3921         // - Entry 3 -> Mangled name of the function where the entry was
3922         // identified.
3923         // - Entry 4 -> Line in the file where the entry was identified.
3924         // - Entry 5 -> Order the entry was created.
3925         // The first element of the metadata node is the kind.
3926         llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
3927                                  GetMDInt(FileID),      GetMDString(ParentName),
3928                                  GetMDInt(Line),        GetMDInt(E.getOrder())};
3929
3930         // Save this entry in the right position of the ordered entries array.
3931         OrderedEntries[E.getOrder()] = &E;
3932
3933         // Add metadata to the named metadata node.
3934         MD->addOperand(llvm::MDNode::get(C, Ops));
3935       };
3936
3937   OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
3938       TargetRegionMetadataEmitter);
3939
3940   // Create function that emits metadata for each device global variable entry;
3941   auto &&DeviceGlobalVarMetadataEmitter =
3942       [&C, &OrderedEntries, &GetMDInt, &GetMDString,
3943        MD](StringRef MangledName,
3944            const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar
3945                &E) {
3946         // Generate metadata for global variables. Each entry of this metadata
3947         // contains:
3948         // - Entry 0 -> Kind of this type of metadata (1).
3949         // - Entry 1 -> Mangled name of the variable.
3950         // - Entry 2 -> Declare target kind.
3951         // - Entry 3 -> Order the entry was created.
3952         // The first element of the metadata node is the kind.
3953         llvm::Metadata *Ops[] = {
3954             GetMDInt(E.getKind()), GetMDString(MangledName),
3955             GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
3956
3957         // Save this entry in the right position of the ordered entries array.
3958         OrderedEntries[E.getOrder()] = &E;
3959
3960         // Add metadata to the named metadata node.
3961         MD->addOperand(llvm::MDNode::get(C, Ops));
3962       };
3963
3964   OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
3965       DeviceGlobalVarMetadataEmitter);
3966
3967   for (const auto *E : OrderedEntries) {
3968     assert(E && "All ordered entries must exist!");
3969     if (const auto *CE =
3970             dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
3971                 E)) {
3972       if (!CE->getID() || !CE->getAddress()) {
3973         unsigned DiagID = CGM.getDiags().getCustomDiagID(
3974             DiagnosticsEngine::Error,
3975             "Offloading entry for target region is incorrect: either the "
3976             "address or the ID is invalid.");
3977         CGM.getDiags().Report(DiagID);
3978         continue;
3979       }
3980       createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
3981                          CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
3982     } else if (const auto *CE =
3983                    dyn_cast<OffloadEntriesInfoManagerTy::
3984                                 OffloadEntryInfoDeviceGlobalVar>(E)) {
3985       OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags =
3986           static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
3987               CE->getFlags());
3988       switch (Flags) {
3989       case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: {
3990         if (!CE->getAddress()) {
3991           unsigned DiagID = CGM.getDiags().getCustomDiagID(
3992               DiagnosticsEngine::Error,
3993               "Offloading entry for declare target variable is incorrect: the "
3994               "address is invalid.");
3995           CGM.getDiags().Report(DiagID);
3996           continue;
3997         }
3998         break;
3999       }
4000       case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink:
4001         assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
4002                 (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
4003                "Declaret target link address is set.");
4004         if (CGM.getLangOpts().OpenMPIsDevice)
4005           continue;
4006         if (!CE->getAddress()) {
4007           unsigned DiagID = CGM.getDiags().getCustomDiagID(
4008               DiagnosticsEngine::Error,
4009               "Offloading entry for declare target variable is incorrect: the "
4010               "address is invalid.");
4011           CGM.getDiags().Report(DiagID);
4012           continue;
4013         }
4014         break;
4015       }
4016       createOffloadEntry(CE->getAddress(), CE->getAddress(),
4017                          CE->getVarSize().getQuantity(), Flags,
4018                          CE->getLinkage());
4019     } else {
4020       llvm_unreachable("Unsupported entry kind.");
4021     }
4022   }
4023 }
4024
4025 /// Loads all the offload entries information from the host IR
4026 /// metadata.
4027 void CGOpenMPRuntime::loadOffloadInfoMetadata() {
4028   // If we are in target mode, load the metadata from the host IR. This code has
4029   // to match the metadaata creation in createOffloadEntriesAndInfoMetadata().
4030
4031   if (!CGM.getLangOpts().OpenMPIsDevice)
4032     return;
4033
4034   if (CGM.getLangOpts().OMPHostIRFile.empty())
4035     return;
4036
4037   auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
4038   if (auto EC = Buf.getError()) {
4039     CGM.getDiags().Report(diag::err_cannot_open_file)
4040         << CGM.getLangOpts().OMPHostIRFile << EC.message();
4041     return;
4042   }
4043
4044   llvm::LLVMContext C;
4045   auto ME = expectedToErrorOrAndEmitErrors(
4046       C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
4047
4048   if (auto EC = ME.getError()) {
4049     unsigned DiagID = CGM.getDiags().getCustomDiagID(
4050         DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
4051     CGM.getDiags().Report(DiagID)
4052         << CGM.getLangOpts().OMPHostIRFile << EC.message();
4053     return;
4054   }
4055
4056   llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
4057   if (!MD)
4058     return;
4059
4060   for (llvm::MDNode *MN : MD->operands()) {
4061     auto &&GetMDInt = [MN](unsigned Idx) {
4062       auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
4063       return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
4064     };
4065
4066     auto &&GetMDString = [MN](unsigned Idx) {
4067       auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
4068       return V->getString();
4069     };
4070
4071     switch (GetMDInt(0)) {
4072     default:
4073       llvm_unreachable("Unexpected metadata!");
4074       break;
4075     case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
4076         OffloadingEntryInfoTargetRegion:
4077       OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
4078           /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
4079           /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
4080           /*Order=*/GetMDInt(5));
4081       break;
4082     case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
4083         OffloadingEntryInfoDeviceGlobalVar:
4084       OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo(
4085           /*MangledName=*/GetMDString(1),
4086           static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
4087               /*Flags=*/GetMDInt(2)),
4088           /*Order=*/GetMDInt(3));
4089       break;
4090     }
4091   }
4092 }
4093
4094 void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
4095   if (!KmpRoutineEntryPtrTy) {
4096     // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
4097     ASTContext &C = CGM.getContext();
4098     QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
4099     FunctionProtoType::ExtProtoInfo EPI;
4100     KmpRoutineEntryPtrQTy = C.getPointerType(
4101         C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
4102     KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
4103   }
4104 }
4105
4106 QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
4107   // Make sure the type of the entry is already created. This is the type we
4108   // have to create:
4109   // struct __tgt_offload_entry{
4110   //   void      *addr;       // Pointer to the offload entry info.
4111   //                          // (function or global)
4112   //   char      *name;       // Name of the function or global.
4113   //   size_t     size;       // Size of the entry info (0 if it a function).
4114   //   int32_t    flags;      // Flags associated with the entry, e.g. 'link'.
4115   //   int32_t    reserved;   // Reserved, to use by the runtime library.
4116   // };
4117   if (TgtOffloadEntryQTy.isNull()) {
4118     ASTContext &C = CGM.getContext();
4119     RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
4120     RD->startDefinition();
4121     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4122     addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
4123     addFieldToRecordDecl(C, RD, C.getSizeType());
4124     addFieldToRecordDecl(
4125         C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4126     addFieldToRecordDecl(
4127         C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4128     RD->completeDefinition();
4129     RD->addAttr(PackedAttr::CreateImplicit(C));
4130     TgtOffloadEntryQTy = C.getRecordType(RD);
4131   }
4132   return TgtOffloadEntryQTy;
4133 }
4134
4135 QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
4136   // These are the types we need to build:
4137   // struct __tgt_device_image{
4138   // void   *ImageStart;       // Pointer to the target code start.
4139   // void   *ImageEnd;         // Pointer to the target code end.
4140   // // We also add the host entries to the device image, as it may be useful
4141   // // for the target runtime to have access to that information.
4142   // __tgt_offload_entry  *EntriesBegin;   // Begin of the table with all
4143   //                                       // the entries.
4144   // __tgt_offload_entry  *EntriesEnd;     // End of the table with all the
4145   //                                       // entries (non inclusive).
4146   // };
4147   if (TgtDeviceImageQTy.isNull()) {
4148     ASTContext &C = CGM.getContext();
4149     RecordDecl *RD = C.buildImplicitRecord("__tgt_device_image");
4150     RD->startDefinition();
4151     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4152     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4153     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4154     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4155     RD->completeDefinition();
4156     TgtDeviceImageQTy = C.getRecordType(RD);
4157   }
4158   return TgtDeviceImageQTy;
4159 }
4160
4161 QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
4162   // struct __tgt_bin_desc{
4163   //   int32_t              NumDevices;      // Number of devices supported.
4164   //   __tgt_device_image   *DeviceImages;   // Arrays of device images
4165   //                                         // (one per device).
4166   //   __tgt_offload_entry  *EntriesBegin;   // Begin of the table with all the
4167   //                                         // entries.
4168   //   __tgt_offload_entry  *EntriesEnd;     // End of the table with all the
4169   //                                         // entries (non inclusive).
4170   // };
4171   if (TgtBinaryDescriptorQTy.isNull()) {
4172     ASTContext &C = CGM.getContext();
4173     RecordDecl *RD = C.buildImplicitRecord("__tgt_bin_desc");
4174     RD->startDefinition();
4175     addFieldToRecordDecl(
4176         C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4177     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtDeviceImageQTy()));
4178     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4179     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4180     RD->completeDefinition();
4181     TgtBinaryDescriptorQTy = C.getRecordType(RD);
4182   }
4183   return TgtBinaryDescriptorQTy;
4184 }
4185
4186 namespace {
4187 struct PrivateHelpersTy {
4188   PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
4189                    const VarDecl *PrivateElemInit)
4190       : Original(Original), PrivateCopy(PrivateCopy),
4191         PrivateElemInit(PrivateElemInit) {}
4192   const VarDecl *Original;
4193   const VarDecl *PrivateCopy;
4194   const VarDecl *PrivateElemInit;
4195 };
4196 typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
4197 } // anonymous namespace
4198
4199 static RecordDecl *
4200 createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
4201   if (!Privates.empty()) {
4202     ASTContext &C = CGM.getContext();
4203     // Build struct .kmp_privates_t. {
4204     //         /*  private vars  */
4205     //       };
4206     RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
4207     RD->startDefinition();
4208     for (const auto &Pair : Privates) {
4209       const VarDecl *VD = Pair.second.Original;
4210       QualType Type = VD->getType().getNonReferenceType();
4211       FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
4212       if (VD->hasAttrs()) {
4213         for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
4214              E(VD->getAttrs().end());
4215              I != E; ++I)
4216           FD->addAttr(*I);
4217       }
4218     }
4219     RD->completeDefinition();
4220     return RD;
4221   }
4222   return nullptr;
4223 }
4224
4225 static RecordDecl *
4226 createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
4227                          QualType KmpInt32Ty,
4228                          QualType KmpRoutineEntryPointerQTy) {
4229   ASTContext &C = CGM.getContext();
4230   // Build struct kmp_task_t {
4231   //         void *              shareds;
4232   //         kmp_routine_entry_t routine;
4233   //         kmp_int32           part_id;
4234   //         kmp_cmplrdata_t data1;
4235   //         kmp_cmplrdata_t data2;
4236   // For taskloops additional fields:
4237   //         kmp_uint64          lb;
4238   //         kmp_uint64          ub;
4239   //         kmp_int64           st;
4240   //         kmp_int32           liter;
4241   //         void *              reductions;
4242   //       };
4243   RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
4244   UD->startDefinition();
4245   addFieldToRecordDecl(C, UD, KmpInt32Ty);
4246   addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
4247   UD->completeDefinition();
4248   QualType KmpCmplrdataTy = C.getRecordType(UD);
4249   RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
4250   RD->startDefinition();
4251   addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4252   addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
4253   addFieldToRecordDecl(C, RD, KmpInt32Ty);
4254   addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
4255   addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
4256   if (isOpenMPTaskLoopDirective(Kind)) {
4257     QualType KmpUInt64Ty =
4258         CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
4259     QualType KmpInt64Ty =
4260         CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
4261     addFieldToRecordDecl(C, RD, KmpUInt64Ty);
4262     addFieldToRecordDecl(C, RD, KmpUInt64Ty);
4263     addFieldToRecordDecl(C, RD, KmpInt64Ty);
4264     addFieldToRecordDecl(C, RD, KmpInt32Ty);
4265     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4266   }
4267   RD->completeDefinition();
4268   return RD;
4269 }
4270
4271 static RecordDecl *
4272 createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
4273                                      ArrayRef<PrivateDataTy> Privates) {
4274   ASTContext &C = CGM.getContext();
4275   // Build struct kmp_task_t_with_privates {
4276   //         kmp_task_t task_data;
4277   //         .kmp_privates_t. privates;
4278   //       };
4279   RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
4280   RD->startDefinition();
4281   addFieldToRecordDecl(C, RD, KmpTaskTQTy);
4282   if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
4283     addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
4284   RD->completeDefinition();
4285   return RD;
4286 }
4287
/// Emit a proxy function which accepts kmp_task_t as the second
/// argument.
/// \code
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
///   For taskloops:
///   tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
///   tt->reductions, tt->shareds);
///   return 0;
/// }
/// \endcode
static llvm::Value *
emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
                      OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
                      QualType KmpTaskTWithPrivatesPtrQTy,
                      QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
                      QualType SharedsPtrTy, llvm::Value *TaskFunction,
                      llvm::Value *TaskPrivatesMap) {
  ASTContext &C = CGM.getContext();
  // Declare the two implicit parameters of the proxy:
  // (kmp_int32 gtid, kmp_task_t_with_privates *restrict tt).
  FunctionArgList Args;
  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                KmpTaskTWithPrivatesPtrQTy.withRestrict(),
                                ImplicitParamDecl::Other);
  Args.push_back(&GtidArg);
  Args.push_back(&TaskTypeArg);
  const auto &TaskEntryFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  llvm::FunctionType *TaskEntryTy =
      CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
  auto *TaskEntry = llvm::Function::Create(
      TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
  TaskEntry->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
                    Loc, Loc);

  // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
  // tt,
  // For taskloops:
  // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
  // tt->task_data.shareds);
  llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
      CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
  // Dereference the kmp_task_t_with_privates* argument. Its first field is
  // the runtime-visible kmp_task_t ('Base' below).
  LValue TDBase = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskTypeArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  const auto *KmpTaskTWithPrivatesQTyRD =
      cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  LValue Base =
      CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
  const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  // part_id is passed by address (not loaded).
  auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
  LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
  llvm::Value *PartidParam = PartIdLVal.getPointer();

  // Load the shareds pointer and cast it to the pointer type the outlined
  // task function expects.
  auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
  LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
  llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.EmitLoadOfScalar(SharedsLVal, Loc),
      CGF.ConvertTypeForMem(SharedsPtrTy));

  // The privates record, when present, is the second field of
  // kmp_task_t_with_privates; otherwise pass a null pointer.
  auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
  llvm::Value *PrivatesParam;
  if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
    LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
    PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        PrivatesLVal.getPointer(), CGF.VoidPtrTy);
  } else {
    PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }

  // Arguments common to 'task' and 'taskloop' entry points.
  llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
                               TaskPrivatesMap,
                               CGF.Builder
                                   .CreatePointerBitCastOrAddrSpaceCast(
                                       TDBase.getAddress(), CGF.VoidPtrTy)
                                   .getPointer()};
  SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
                                          std::end(CommonArgs));
  if (isOpenMPTaskLoopDirective(Kind)) {
    // Taskloops additionally pass lower bound, upper bound, stride,
    // last-iteration flag and the reductions pointer, all loaded from the
    // corresponding kmp_task_t fields.
    auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
    LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
    llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
    auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
    LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
    llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
    auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
    LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
    llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
    auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
    LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
    llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
    auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
    LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
    llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
    CallArgs.push_back(LBParam);
    CallArgs.push_back(UBParam);
    CallArgs.push_back(StParam);
    CallArgs.push_back(LIParam);
    CallArgs.push_back(RParam);
  }
  CallArgs.push_back(SharedsParam);

  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
                                                  CallArgs);
  // The proxy always returns 0.
  CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
                             CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
  CGF.FinishFunction();
  return TaskEntry;
}
4402
/// Emit the '.omp_task_destructor.' helper with the same
/// (kmp_int32 gtid, kmp_task_t_with_privates *tt) signature as the task
/// entry: it walks the fields of the task's privates record and emits a
/// destructor call for every field whose type needs destruction.
static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
                                            SourceLocation Loc,
                                            QualType KmpInt32Ty,
                                            QualType KmpTaskTWithPrivatesPtrQTy,
                                            QualType KmpTaskTWithPrivatesQTy) {
  ASTContext &C = CGM.getContext();
  // Implicit parameters: (kmp_int32 gtid, kmp_task_t_with_privates *restrict).
  FunctionArgList Args;
  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                KmpTaskTWithPrivatesPtrQTy.withRestrict(),
                                ImplicitParamDecl::Other);
  Args.push_back(&GtidArg);
  Args.push_back(&TaskTypeArg);
  const auto &DestructorFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  llvm::FunctionType *DestructorFnTy =
      CGM.getTypes().GetFunctionType(DestructorFnInfo);
  std::string Name =
      CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
  auto *DestructorFn =
      llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
                             Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
                                    DestructorFnInfo);
  DestructorFn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
                    Args, Loc, Loc);

  // Dereference the task argument and step to the second field of
  // kmp_task_t_with_privates, i.e. the privates record.
  LValue Base = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskTypeArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  const auto *KmpTaskTWithPrivatesQTyRD =
      cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  Base = CGF.EmitLValueForField(Base, *FI);
  // Push a destroy cleanup for every field that actually requires
  // destruction; trivially-destructible fields are skipped.
  for (const auto *Field :
       cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
    if (QualType::DestructionKind DtorKind =
            Field->getType().isDestructedType()) {
      LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
      CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
    }
  }
  CGF.FinishFunction();
  return DestructorFn;
}
4451
4452 /// Emit a privates mapping function for correct handling of private and
4453 /// firstprivate variables.
4454 /// \code
4455 /// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
4456 /// **noalias priv1,...,  <tyn> **noalias privn) {
4457 ///   *priv1 = &.privates.priv1;
4458 ///   ...;
4459 ///   *privn = &.privates.privn;
4460 /// }
4461 /// \endcode
4462 static llvm::Value *
4463 emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
4464                                ArrayRef<const Expr *> PrivateVars,
4465                                ArrayRef<const Expr *> FirstprivateVars,
4466                                ArrayRef<const Expr *> LastprivateVars,
4467                                QualType PrivatesQTy,
4468                                ArrayRef<PrivateDataTy> Privates) {
4469   ASTContext &C = CGM.getContext();
4470   FunctionArgList Args;
4471   ImplicitParamDecl TaskPrivatesArg(
4472       C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4473       C.getPointerType(PrivatesQTy).withConst().withRestrict(),
4474       ImplicitParamDecl::Other);
4475   Args.push_back(&TaskPrivatesArg);
4476   llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos;
4477   unsigned Counter = 1;
4478   for (const Expr *E : PrivateVars) {
4479     Args.push_back(ImplicitParamDecl::Create(
4480         C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4481         C.getPointerType(C.getPointerType(E->getType()))
4482             .withConst()
4483             .withRestrict(),
4484         ImplicitParamDecl::Other));
4485     const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4486     PrivateVarsPos[VD] = Counter;
4487     ++Counter;
4488   }
4489   for (const Expr *E : FirstprivateVars) {
4490     Args.push_back(ImplicitParamDecl::Create(
4491         C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4492         C.getPointerType(C.getPointerType(E->getType()))
4493             .withConst()
4494             .withRestrict(),
4495         ImplicitParamDecl::Other));
4496     const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4497     PrivateVarsPos[VD] = Counter;
4498     ++Counter;
4499   }
4500   for (const Expr *E : LastprivateVars) {
4501     Args.push_back(ImplicitParamDecl::Create(
4502         C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4503         C.getPointerType(C.getPointerType(E->getType()))
4504             .withConst()
4505             .withRestrict(),
4506         ImplicitParamDecl::Other));
4507     const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4508     PrivateVarsPos[VD] = Counter;
4509     ++Counter;
4510   }
4511   const auto &TaskPrivatesMapFnInfo =
4512       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4513   llvm::FunctionType *TaskPrivatesMapTy =
4514       CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
4515   std::string Name =
4516       CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
4517   auto *TaskPrivatesMap = llvm::Function::Create(
4518       TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
4519       &CGM.getModule());
4520   CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
4521                                     TaskPrivatesMapFnInfo);
4522   TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
4523   TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
4524   TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
4525   CodeGenFunction CGF(CGM);
4526   CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
4527                     TaskPrivatesMapFnInfo, Args, Loc, Loc);
4528
4529   // *privi = &.privates.privi;
4530   LValue Base = CGF.EmitLoadOfPointerLValue(
4531       CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
4532       TaskPrivatesArg.getType()->castAs<PointerType>());
4533   const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
4534   Counter = 0;
4535   for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
4536     LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
4537     const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
4538     LValue RefLVal =
4539         CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
4540     LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
4541         RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
4542     CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal);
4543     ++Counter;
4544   }
4545   CGF.FinishFunction();
4546   return TaskPrivatesMap;
4547 }
4548
4549 static bool stable_sort_comparator(const PrivateDataTy P1,
4550                                    const PrivateDataTy P2) {
4551   return P1.first > P2.first;
4552 }
4553
/// Emit initialization for private variables in task-based directives.
/// \param TDBase LValue of the kmp_task_t_with_privates object.
/// \param KmpTaskSharedsPtr Address of the captured shareds block; checked
///        for validity for target tasks.
/// \param ForDup True when emitting for the taskloop duplication helper; in
///        that mode only non-trivial constructor initializers are emitted.
static void emitPrivatesInit(CodeGenFunction &CGF,
                             const OMPExecutableDirective &D,
                             Address KmpTaskSharedsPtr, LValue TDBase,
                             const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                             QualType SharedsTy, QualType SharedsPtrTy,
                             const OMPTaskDataTy &Data,
                             ArrayRef<PrivateDataTy> Privates, bool ForDup) {
  ASTContext &C = CGF.getContext();
  // The privates record is the second field of kmp_task_t_with_privates.
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
  OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
                                 ? OMPD_taskloop
                                 : OMPD_task;
  const CapturedStmt &CS = *D.getCapturedStmt(Kind);
  CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
  LValue SrcBase;
  bool IsTargetTask =
      isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  // For target-based directives skip 3 firstprivate arrays BasePointersArray,
  // PointersArray and SizesArray. The original variables for these arrays are
  // not captured and we get their addresses explicitly.
  if ((!IsTargetTask && !Data.FirstprivateVars.empty()) ||
      (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
    // Reinterpret the shareds block with its record type so individual
    // captured fields can be addressed below.
    SrcBase = CGF.MakeAddrLValue(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
        SharedsTy);
  }
  // Walk the fields of the privates record in lock-step with Privates; the
  // record was built from the same list in the same order.
  FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
  for (const PrivateDataTy &Pair : Privates) {
    const VarDecl *VD = Pair.second.PrivateCopy;
    const Expr *Init = VD->getAnyInitializer();
    // In ForDup mode only non-trivial constructor initializers are emitted.
    if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
                             !CGF.isTrivialInitializer(Init)))) {
      LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
      if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
        // Copy-initialization from the captured original variable.
        const VarDecl *OriginalVD = Pair.second.Original;
        // Check if the variable is the target-based BasePointersArray,
        // PointersArray or SizesArray.
        LValue SharedRefLValue;
        QualType Type = OriginalVD->getType();
        const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
        if (IsTargetTask && !SharedField) {
          assert(isa<ImplicitParamDecl>(OriginalVD) &&
                 isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
                 cast<CapturedDecl>(OriginalVD->getDeclContext())
                         ->getNumParams() == 0 &&
                 isa<TranslationUnitDecl>(
                     cast<CapturedDecl>(OriginalVD->getDeclContext())
                         ->getDeclContext()) &&
                 "Expected artificial target data variable.");
          SharedRefLValue =
              CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
        } else {
          SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
          // Rebuild the LValue with the alignment of the original declaration.
          SharedRefLValue = CGF.MakeAddrLValue(
              Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
              SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
              SharedRefLValue.getTBAAInfo());
        }
        if (Type->isArrayType()) {
          // Initialize firstprivate array.
          if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
            // Perform simple memcpy.
            CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
          } else {
            // Initialize firstprivate array using element-by-element
            // initialization.
            CGF.EmitOMPAggregateAssign(
                PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
                [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
                                                  Address SrcElement) {
                  // Clean up any temporaries needed by the initialization.
                  CodeGenFunction::OMPPrivateScope InitScope(CGF);
                  InitScope.addPrivate(
                      Elem, [SrcElement]() -> Address { return SrcElement; });
                  (void)InitScope.Privatize();
                  // Emit initialization for single element.
                  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
                      CGF, &CapturesInfo);
                  CGF.EmitAnyExprToMem(Init, DestElement,
                                       Init->getType().getQualifiers(),
                                       /*IsInitializer=*/false);
                });
          }
        } else {
          // Non-array copy: privatize Elem to the shared address, then emit
          // the initializer into the private field.
          CodeGenFunction::OMPPrivateScope InitScope(CGF);
          InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
            return SharedRefLValue.getAddress();
          });
          (void)InitScope.Privatize();
          CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
          CGF.EmitExprAsInit(Init, VD, PrivateLValue,
                             /*capturedByInit=*/false);
        }
      } else {
        // No copy-init declaration: emit the private copy's own initializer.
        CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
      }
    }
    ++FI;
  }
}
4658
4659 /// Check if duplication function is required for taskloops.
4660 static bool checkInitIsRequired(CodeGenFunction &CGF,
4661                                 ArrayRef<PrivateDataTy> Privates) {
4662   bool InitRequired = false;
4663   for (const PrivateDataTy &Pair : Privates) {
4664     const VarDecl *VD = Pair.second.PrivateCopy;
4665     const Expr *Init = VD->getAnyInitializer();
4666     InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
4667                                     !CGF.isTrivialInitializer(Init));
4668     if (InitRequired)
4669       break;
4670   }
4671   return InitRequired;
4672 }
4673
4674
/// Emit task_dup function (for initialization of
/// private/firstprivate/lastprivate vars and last_iter flag)
/// \code
/// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
/// lastpriv) {
/// // setup lastprivate flag
///    task_dst->last = lastpriv;
/// // could be constructor calls here...
/// }
/// \endcode
static llvm::Value *
emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
                    const OMPExecutableDirective &D,
                    QualType KmpTaskTWithPrivatesPtrQTy,
                    const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                    const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
                    QualType SharedsPtrTy, const OMPTaskDataTy &Data,
                    ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
  ASTContext &C = CGM.getContext();
  // Build the implicit parameter list for
  // void __task_dup_entry(kmp_task_t *dst, kmp_task_t *src, int lastpriv).
  FunctionArgList Args;
  ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                           KmpTaskTWithPrivatesPtrQTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                           KmpTaskTWithPrivatesPtrQTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
                                ImplicitParamDecl::Other);
  Args.push_back(&DstArg);
  Args.push_back(&SrcArg);
  Args.push_back(&LastprivArg);
  // Create the internal-linkage function ".omp_task_dup." and start emitting
  // its body with a fresh CodeGenFunction.
  const auto &TaskDupFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
  auto *TaskDup = llvm::Function::Create(
      TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
  TaskDup->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
                    Loc);

  // Dereference the destination task pointer: all stores below go through
  // TDBase.
  LValue TDBase = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&DstArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  // task_dst->liter = lastpriv;
  if (WithLastIter) {
    // Locate the last-iteration flag field inside the embedded kmp_task_t
    // (first field of kmp_task_t_with_privates) and copy the 'lastpriv'
    // argument into it.
    auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
    LValue Base = CGF.EmitLValueForField(
        TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
    LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
    llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
        CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
    CGF.EmitStoreOfScalar(Lastpriv, LILVal);
  }

  // Emit initial values for private copies (if any).
  assert(!Privates.empty());
  Address KmpTaskSharedsPtr = Address::invalid();
  if (!Data.FirstprivateVars.empty()) {
    // Firstprivate copies are initialized from the *source* task's shareds,
    // so load the shareds pointer out of task_src.
    LValue TDBase = CGF.EmitLoadOfPointerLValue(
        CGF.GetAddrOfLocalVar(&SrcArg),
        KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
    LValue Base = CGF.EmitLValueForField(
        TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
    KmpTaskSharedsPtr = Address(
        CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
                                 Base, *std::next(KmpTaskTQTyRD->field_begin(),
                                                  KmpTaskTShareds)),
                             Loc),
        CGF.getNaturalTypeAlignment(SharedsTy));
  }
  // Run the shared private-initialization logic in "dup" mode.
  emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
                   SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
  CGF.FinishFunction();
  return TaskDup;
}
4753
4754 /// Checks if destructor function is required to be generated.
4755 /// \return true if cleanups are required, false otherwise.
4756 static bool
4757 checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
4758   bool NeedsCleanup = false;
4759   auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
4760   const auto *PrivateRD = cast<RecordDecl>(FI->getType()->getAsTagDecl());
4761   for (const FieldDecl *FD : PrivateRD->fields()) {
4762     NeedsCleanup = NeedsCleanup || FD->getType().isDestructedType();
4763     if (NeedsCleanup)
4764       break;
4765   }
4766   return NeedsCleanup;
4767 }
4768
/// Build all data structures for a task region: the task-specific
/// kmp_task_t_with_privates record, the proxy task-entry thunk, the private
/// copy mapping/duplication/destructor helpers, and the call to
/// __kmpc_omp_task_alloc. Returns the allocated task plus handles needed by
/// the task/taskloop emission paths.
CGOpenMPRuntime::TaskResultTy
CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
                              const OMPExecutableDirective &D,
                              llvm::Value *TaskFunction, QualType SharedsTy,
                              Address Shareds, const OMPTaskDataTy &Data) {
  ASTContext &C = CGM.getContext();
  llvm::SmallVector<PrivateDataTy, 4> Privates;
  // Aggregate privates and sort them by the alignment.
  // Collect plain privates (no initializer element).
  auto I = Data.PrivateCopies.begin();
  for (const Expr *E : Data.PrivateVars) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
                         /*PrivateElemInit=*/nullptr));
    ++I;
  }
  // Collect firstprivates, which additionally carry the per-element
  // initializer variable.
  I = Data.FirstprivateCopies.begin();
  auto IElemInitRef = Data.FirstprivateInits.begin();
  for (const Expr *E : Data.FirstprivateVars) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(
            VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
            cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
    ++I;
    ++IElemInitRef;
  }
  // Collect lastprivates (no initializer element either).
  I = Data.LastprivateCopies.begin();
  for (const Expr *E : Data.LastprivateVars) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
                         /*PrivateElemInit=*/nullptr));
    ++I;
  }
  // Stable sort keeps source order among equally-aligned privates so field
  // indices stay deterministic.
  std::stable_sort(Privates.begin(), Privates.end(), stable_sort_comparator);
  QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
  // Build type kmp_routine_entry_t (if not built yet).
  emitKmpRoutineEntryT(KmpInt32Ty);
  // Build type kmp_task_t (if not built yet). Taskloop directives use a
  // larger record (with bounds/stride/reductions fields), so it is cached
  // separately from the plain task record.
  if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
    if (SavedKmpTaskloopTQTy.isNull()) {
      SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
          CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
    }
    KmpTaskTQTy = SavedKmpTaskloopTQTy;
  } else {
    assert((D.getDirectiveKind() == OMPD_task ||
            isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
            isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
           "Expected taskloop, task or target directive");
    if (SavedKmpTaskTQTy.isNull()) {
      SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
          CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
    }
    KmpTaskTQTy = SavedKmpTaskTQTy;
  }
  const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  // Build particular struct kmp_task_t for the given task.
  const RecordDecl *KmpTaskTWithPrivatesQTyRD =
      createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
  QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
  QualType KmpTaskTWithPrivatesPtrQTy =
      C.getPointerType(KmpTaskTWithPrivatesQTy);
  llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
  llvm::Type *KmpTaskTWithPrivatesPtrTy =
      KmpTaskTWithPrivatesTy->getPointerTo();
  llvm::Value *KmpTaskTWithPrivatesTySize =
      CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
  QualType SharedsPtrTy = C.getPointerType(SharedsTy);

  // Emit initial values for private copies (if any).
  // The privates-map helper translates a pointer to the privates record into
  // pointers to the individual private copies; its expected pointer type is
  // taken from the 4th parameter of the outlined task function.
  llvm::Value *TaskPrivatesMap = nullptr;
  llvm::Type *TaskPrivatesMapTy =
      std::next(cast<llvm::Function>(TaskFunction)->arg_begin(), 3)->getType();
  if (!Privates.empty()) {
    auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
    TaskPrivatesMap = emitTaskPrivateMappingFunction(
        CGM, Loc, Data.PrivateVars, Data.FirstprivateVars, Data.LastprivateVars,
        FI->getType(), Privates);
    TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TaskPrivatesMap, TaskPrivatesMapTy);
  } else {
    // No privates: pass a null map pointer to the proxy.
    TaskPrivatesMap = llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(TaskPrivatesMapTy));
  }
  // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
  // kmp_task_t *tt);
  llvm::Value *TaskEntry = emitProxyTaskFunction(
      CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
      KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
      TaskPrivatesMap);

  // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  // kmp_routine_entry_t *task_entry);
  // Task flags. Format is taken from
  // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h,
  // description of kmp_tasking_flags struct.
  enum {
    TiedFlag = 0x1,
    FinalFlag = 0x2,
    DestructorsFlag = 0x8,
    PriorityFlag = 0x20
  };
  unsigned Flags = Data.Tied ? TiedFlag : 0;
  bool NeedsCleanup = false;
  if (!Privates.empty()) {
    // Tell the runtime to call the destructor thunk when privates need
    // cleanup.
    NeedsCleanup = checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD);
    if (NeedsCleanup)
      Flags = Flags | DestructorsFlag;
  }
  if (Data.Priority.getInt())
    Flags = Flags | PriorityFlag;
  // The 'final' clause may be a runtime expression (select at runtime) or a
  // compile-time constant (fold into the flags directly).
  llvm::Value *TaskFlags =
      Data.Final.getPointer()
          ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
                                     CGF.Builder.getInt32(FinalFlag),
                                     CGF.Builder.getInt32(/*C=*/0))
          : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
  TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
  llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
  llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
                              getThreadID(CGF, Loc), TaskFlags,
                              KmpTaskTWithPrivatesTySize, SharedsSize,
                              CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                                  TaskEntry, KmpRoutineEntryPtrTy)};
  llvm::Value *NewTask = CGF.EmitRuntimeCall(
      createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
  // Reinterpret the returned generic task pointer as the task-specific
  // kmp_task_t_with_privates type.
  llvm::Value *NewTaskNewTaskTTy =
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          NewTask, KmpTaskTWithPrivatesPtrTy);
  LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
                                               KmpTaskTWithPrivatesQTy);
  LValue TDBase =
      CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
  // Fill the data in the resulting kmp_task_t record.
  // Copy shareds if there are any.
  Address KmpTaskSharedsPtr = Address::invalid();
  if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
    // Copy the captured shareds block into the runtime-allocated shareds
    // area pointed to by the new task.
    KmpTaskSharedsPtr =
        Address(CGF.EmitLoadOfScalar(
                    CGF.EmitLValueForField(
                        TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
                                           KmpTaskTShareds)),
                    Loc),
                CGF.getNaturalTypeAlignment(SharedsTy));
    LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
    LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
    CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
  }
  // Emit initial values for private copies (if any).
  TaskResultTy Result;
  if (!Privates.empty()) {
    emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
                     SharedsTy, SharedsPtrTy, Data, Privates,
                     /*ForDup=*/false);
    // Taskloops additionally need the task-dup helper when lastprivates must
    // be propagated or privates require constructor initialization.
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
      Result.TaskDupFn = emitTaskDupFunction(
          CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
          KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
          /*WithLastIter=*/!Data.LastprivateVars.empty());
    }
  }
  // Fields of union "kmp_cmplrdata_t" for destructors and priority.
  enum { Priority = 0, Destructors = 1 };
  // Provide pointer to function with destructors for privates.
  auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
  const RecordDecl *KmpCmplrdataUD =
      (*FI)->getType()->getAsUnionType()->getDecl();
  if (NeedsCleanup) {
    llvm::Value *DestructorFn = emitDestructorsFunction(
        CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
        KmpTaskTWithPrivatesQTy);
    LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
    LValue DestructorsLV = CGF.EmitLValueForField(
        Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
    CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                              DestructorFn, KmpRoutineEntryPtrTy),
                          DestructorsLV);
  }
  // Set priority.
  if (Data.Priority.getInt()) {
    LValue Data2LV = CGF.EmitLValueForField(
        TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
    LValue PriorityLV = CGF.EmitLValueForField(
        Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
    CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
  }
  // Package the handles callers (emitTaskCall/emitTaskLoopCall) need.
  Result.NewTask = NewTask;
  Result.TaskEntry = TaskEntry;
  Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
  Result.TDBase = TDBase;
  Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
  return Result;
}
4969
/// Emit code for an OpenMP 'task' (or task-like) construct: allocate the
/// task via emitTaskInit, materialize the 'depend' clause list if any, and
/// dispatch either the real enqueue (__kmpc_omp_task[_with_deps]) or, when
/// the 'if' clause is false, the serialized if0 path that runs the task
/// immediately.
void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                                   const OMPExecutableDirective &D,
                                   llvm::Value *TaskFunction,
                                   QualType SharedsTy, Address Shareds,
                                   const Expr *IfCond,
                                   const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;

  TaskResultTy Result =
      emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  llvm::Value *NewTask = Result.NewTask;
  llvm::Value *TaskEntry = Result.TaskEntry;
  llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
  LValue TDBase = Result.TDBase;
  const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
  ASTContext &C = CGM.getContext();
  // Process list of dependences.
  Address DependenciesArray = Address::invalid();
  unsigned NumDependencies = Data.Dependences.size();
  if (NumDependencies) {
    // Dependence kind for RTL.
    enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3 };
    enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
    RecordDecl *KmpDependInfoRD;
    // The runtime's flags field is a bool-sized unsigned integer.
    QualType FlagsTy =
        C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
    llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
    // Lazily build and cache the kmp_depend_info record type:
    // { intptr_t base_addr; size_t len; flags_ty flags; }.
    if (KmpDependInfoTy.isNull()) {
      KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
      KmpDependInfoRD->startDefinition();
      addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
      addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
      addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
      KmpDependInfoRD->completeDefinition();
      KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
    } else {
      KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
    }
    CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
    // Define type kmp_depend_info[<Dependences.size()>];
    QualType KmpDependInfoArrayTy = C.getConstantArrayType(
        KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
        ArrayType::Normal, /*IndexTypeQuals=*/0);
    // kmp_depend_info[<Dependences.size()>] deps;
    DependenciesArray =
        CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
    // Populate one kmp_depend_info entry per 'depend' clause item.
    for (unsigned I = 0; I < NumDependencies; ++I) {
      const Expr *E = Data.Dependences[I].second;
      LValue Addr = CGF.EmitLValue(E);
      llvm::Value *Size;
      QualType Ty = E->getType();
      if (const auto *ASE =
              dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
        // For array sections compute the byte length as
        // (&section_end + 1) - &section_begin.
        LValue UpAddrLVal =
            CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
        llvm::Value *UpAddr =
            CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
        llvm::Value *LowIntPtr =
            CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
        llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
        Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
      } else {
        Size = CGF.getTypeSize(Ty);
      }
      LValue Base = CGF.MakeAddrLValue(
          CGF.Builder.CreateConstArrayGEP(DependenciesArray, I, DependencySize),
          KmpDependInfoTy);
      // deps[i].base_addr = &<Dependences[i].second>;
      LValue BaseAddrLVal = CGF.EmitLValueForField(
          Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
      CGF.EmitStoreOfScalar(
          CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
          BaseAddrLVal);
      // deps[i].len = sizeof(<Dependences[i].second>);
      LValue LenLVal = CGF.EmitLValueForField(
          Base, *std::next(KmpDependInfoRD->field_begin(), Len));
      CGF.EmitStoreOfScalar(Size, LenLVal);
      // deps[i].flags = <Dependences[i].first>;
      RTLDependenceKindTy DepKind;
      switch (Data.Dependences[I].first) {
      case OMPC_DEPEND_in:
        DepKind = DepIn;
        break;
      // Out and InOut dependencies must use the same code.
      case OMPC_DEPEND_out:
      case OMPC_DEPEND_inout:
        DepKind = DepInOut;
        break;
      case OMPC_DEPEND_source:
      case OMPC_DEPEND_sink:
      case OMPC_DEPEND_unknown:
        llvm_unreachable("Unknown task dependence type");
      }
      LValue FlagsLVal = CGF.EmitLValueForField(
          Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
      CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
                            FlagsLVal);
    }
    // Decay the array to a void* pointing at its first element for the RTL
    // calls below.
    DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
        CGF.VoidPtrTy);
  }

  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  // libcall.
  // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
  // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
  // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence
  // list is not empty
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
  llvm::Value *DepTaskArgs[7];
  if (NumDependencies) {
    DepTaskArgs[0] = UpLoc;
    DepTaskArgs[1] = ThreadID;
    DepTaskArgs[2] = NewTask;
    DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
    DepTaskArgs[4] = DependenciesArray.getPointer();
    DepTaskArgs[5] = CGF.Builder.getInt32(0);
    DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  // Then-branch: 'if' clause true (or absent) — enqueue the task through the
  // runtime.
  auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, NumDependencies,
                        &TaskArgs,
                        &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
    if (!Data.Tied) {
      // Untied tasks: reset part_id before submission.
      auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
      LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
      CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
    }
    if (NumDependencies) {
      CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps), DepTaskArgs);
    } else {
      CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
                          TaskArgs);
    }
    // Check if parent region is untied and build return for untied task;
    if (auto *Region =
            dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
      Region->emitUntiedSwitch(CGF);
  };

  llvm::Value *DepWaitTaskArgs[6];
  if (NumDependencies) {
    DepWaitTaskArgs[0] = UpLoc;
    DepWaitTaskArgs[1] = ThreadID;
    DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
    DepWaitTaskArgs[3] = DependenciesArray.getPointer();
    DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
    DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  // Else-branch: 'if' clause false — wait for dependences, then execute the
  // task body inline between __kmpc_omp_task_begin_if0/_complete_if0.
  auto &&ElseCodeGen = [&TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
                        NumDependencies, &DepWaitTaskArgs,
                        Loc](CodeGenFunction &CGF, PrePostActionTy &) {
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    CodeGenFunction::RunCleanupsScope LocalScope(CGF);
    // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
    // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
    // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
    // is specified.
    if (NumDependencies)
      CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
                          DepWaitTaskArgs);
    // Call proxy_task_entry(gtid, new_task);
    auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
                      Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
      Action.Enter(CGF);
      llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
                                                          OutlinedFnArgs);
    };

    // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    RegionCodeGenTy RCG(CodeGen);
    CommonActionTy Action(
        RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0), TaskArgs,
        RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0), TaskArgs);
    RCG.setAction(Action);
    RCG(CGF);
  };

  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
  } else {
    // No 'if' clause: always take the enqueue path.
    RegionCodeGenTy ThenRCG(ThenCodeGen);
    ThenRCG(CGF);
  }
}
5163
/// Emit code for an OpenMP 'taskloop' construct: allocate the task via
/// emitTaskInit, store the loop bounds/stride and reduction data into the
/// taskloop-specific kmp_task_t fields, and call __kmpc_taskloop.
void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       const OMPLoopDirective &D,
                                       llvm::Value *TaskFunction,
                                       QualType SharedsTy, Address Shareds,
                                       const Expr *IfCond,
                                       const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;
  TaskResultTy Result =
      emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  // libcall.
  // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
  // sched, kmp_uint64 grainsize, void *task_dup);
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  // Unlike 'task', the 'if' clause here is passed to the runtime as an
  // integer argument rather than selecting different call sequences.
  llvm::Value *IfVal;
  if (IfCond) {
    IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
                                      /*isSigned=*/true);
  } else {
    IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
  }

  // Initialize the lower bound, upper bound and stride fields of the task
  // record from the directive's helper variables.
  LValue LBLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
  const auto *LBVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
  CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
                       /*IsInitializer=*/true);
  LValue UBLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
  const auto *UBVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
  CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
                       /*IsInitializer=*/true);
  LValue StLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
  const auto *StVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
  CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
                       /*IsInitializer=*/true);
  // Store reductions address.
  LValue RedLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
  if (Data.Reductions) {
    CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
  } else {
    // No task reductions: null out the field.
    CGF.EmitNullInitialization(RedLVal.getAddress(),
                               CGF.getContext().VoidPtrTy);
  }
  // 'sched' argument encoding expected by __kmpc_taskloop.
  enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
  llvm::Value *TaskArgs[] = {
      UpLoc,
      ThreadID,
      Result.NewTask,
      IfVal,
      LBLVal.getPointer(),
      UBLVal.getPointer(),
      CGF.EmitLoadOfScalar(StLVal, Loc),
      llvm::ConstantInt::getNullValue(
          CGF.IntTy), // Always 0 because taskgroup emitted by the compiler
      // Schedule: getInt() distinguishes num_tasks vs grainsize; a null
      // pointer means neither clause was given.
      llvm::ConstantInt::getSigned(
          CGF.IntTy, Data.Schedule.getPointer()
                         ? Data.Schedule.getInt() ? NumTasks : Grainsize
                         : NoSchedule),
      Data.Schedule.getPointer()
          ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
                                      /*isSigned=*/false)
          : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
      // task_dup thunk (may be null when no duplication work is needed).
      Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                             Result.TaskDupFn, CGF.VoidPtrTy)
                       : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskloop), TaskArgs);
}
5244
/// Emit reduction operation for each element of array (required for
/// array sections) LHS op = RHS.
/// \param Type Type of array.
/// \param LHSVar Variable on the left side of the reduction operation
/// (references element of array in original variable).
/// \param RHSVar Variable on the right side of the reduction operation
/// (references element of array in original variable).
/// \param RedOpGen Generator of reduction operation with use of LHSVar and
/// RHSVar.
/// \param XExpr Optional 'x' part of an atomic update expression; forwarded
/// unchanged to \p RedOpGen for every element.
/// \param EExpr Optional 'expr' part of an atomic update expression;
/// forwarded unchanged to \p RedOpGen for every element.
/// \param UpExpr Optional update expression; forwarded unchanged to
/// \p RedOpGen for every element.
static void EmitOMPAggregateReduction(
    CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
    const VarDecl *RHSVar,
    const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
                                  const Expr *, const Expr *)> &RedOpGen,
    const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
    const Expr *UpExpr = nullptr) {
  // Perform element-by-element initialization.
  QualType ElementTy;
  Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
  Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);

  llvm::Value *RHSBegin = RHSAddr.getPointer();
  llvm::Value *LHSBegin = LHSAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
  // Skip the loop entirely when the array has no elements.
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);

  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);

  // Two PHIs track the current source/destination element pointers; their
  // second incoming value (the advanced pointer) is added after the body.
  llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
      RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  RHSElementPHI->addIncoming(RHSBegin, EntryBB);
  Address RHSElementCurrent =
      Address(RHSElementPHI,
              RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
      LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
  LHSElementPHI->addIncoming(LHSBegin, EntryBB);
  Address LHSElementCurrent =
      Address(LHSElementPHI,
              LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  // Temporarily remap LHSVar/RHSVar to the current element addresses so the
  // generated reduction operation applies to a single element.
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; });
  Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; });
  Scope.Privatize();
  RedOpGen(CGF, XExpr, EExpr, UpExpr);
  Scope.ForceCleanup();

  // Shift the address forward by one element.
  llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
      LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
      RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  // Close the PHIs with the pointers coming from the end of the loop body.
  LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
  RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());

  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
5324
5325 /// Emit reduction combiner. If the combiner is a simple expression emit it as
5326 /// is, otherwise consider it as combiner of UDR decl and emit it as a call of
5327 /// UDR combiner function.
5328 static void emitReductionCombiner(CodeGenFunction &CGF,
5329                                   const Expr *ReductionOp) {
5330   if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
5331     if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
5332       if (const auto *DRE =
5333               dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
5334         if (const auto *DRD =
5335                 dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
5336           std::pair<llvm::Function *, llvm::Function *> Reduction =
5337               CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
5338           RValue Func = RValue::get(Reduction.first);
5339           CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
5340           CGF.EmitIgnoredExpr(ReductionOp);
5341           return;
5342         }
5343   CGF.EmitIgnoredExpr(ReductionOp);
5344 }
5345
/// Emits the outlined reduce_func(void *LHSArg, void *RHSArg) used by
/// __kmpc_reduce{_nowait}: both arguments point to arrays of void* with one
/// slot per reduction item (plus an extra slot holding the element count of
/// each variably-modified item), and the body applies every reduction
/// operation, storing results into the LHS items.
llvm::Value *CGOpenMPRuntime::emitReductionFunction(
    CodeGenModule &CGM, SourceLocation Loc, llvm::Type *ArgsType,
    ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
  ASTContext &C = CGM.getContext();

  // void reduction_func(void *LHSArg, void *RHSArg);
  FunctionArgList Args;
  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  Args.push_back(&LHSArg);
  Args.push_back(&RHSArg);
  const auto &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  std::string Name = getName({"omp", "reduction", "reduction_func"});
  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
                                    llvm::GlobalValue::InternalLinkage, Name,
                                    &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);

  // Dst = (void*[n])(LHSArg);
  // Src = (void*[n])(RHSArg);
  Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
      ArgsType), CGF.getPointerAlign());
  Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
      ArgsType), CGF.getPointerAlign());

  //  ...
  //  *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
  //  ...
  // Remap each LHS/RHS variable to the address stored in the corresponding
  // slot of the argument arrays before emitting the combiners.
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
    const auto *RHSVar =
        cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
    Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() {
      return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
    });
    const auto *LHSVar =
        cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
    Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() {
      return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
    });
    QualType PrivTy = (*IPriv)->getType();
    if (PrivTy->isVariablyModifiedType()) {
      // Get array size and emit VLA type.
      // VLA items occupy an extra slot that carries the element count.
      ++Idx;
      Address Elem =
          CGF.Builder.CreateConstArrayGEP(LHS, Idx, CGF.getPointerSize());
      llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
      const VariableArrayType *VLA =
          CGF.getContext().getAsVariableArrayType(PrivTy);
      const auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
      CodeGenFunction::OpaqueValueMapping OpaqueMap(
          CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
      CGF.EmitVariablyModifiedType(PrivTy);
    }
  }
  Scope.Privatize();
  // Now emit each combiner; array-typed items are reduced element-wise.
  IPriv = Privates.begin();
  auto ILHS = LHSExprs.begin();
  auto IRHS = RHSExprs.begin();
  for (const Expr *E : ReductionOps) {
    if ((*IPriv)->getType()->isArrayType()) {
      // Emit reduction for array section.
      const auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      const auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      EmitOMPAggregateReduction(
          CGF, (*IPriv)->getType(), LHSVar, RHSVar,
          [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
            emitReductionCombiner(CGF, E);
          });
    } else {
      // Emit reduction for array subscript or single variable.
      emitReductionCombiner(CGF, E);
    }
    ++IPriv;
    ++ILHS;
    ++IRHS;
  }
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}
5438
5439 void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
5440                                                   const Expr *ReductionOp,
5441                                                   const Expr *PrivateRef,
5442                                                   const DeclRefExpr *LHS,
5443                                                   const DeclRefExpr *RHS) {
5444   if (PrivateRef->getType()->isArrayType()) {
5445     // Emit reduction for array section.
5446     const auto *LHSVar = cast<VarDecl>(LHS->getDecl());
5447     const auto *RHSVar = cast<VarDecl>(RHS->getDecl());
5448     EmitOMPAggregateReduction(
5449         CGF, PrivateRef->getType(), LHSVar, RHSVar,
5450         [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5451           emitReductionCombiner(CGF, ReductionOp);
5452         });
5453   } else {
5454     // Emit reduction for array subscript or single variable.
5455     emitReductionCombiner(CGF, ReductionOp);
5456   }
5457 }
5458
/// Emits the reduction epilogue for a directive with 'reduction' clauses:
/// either plain combiners (SimpleReduction), or the full
/// __kmpc_reduce{_nowait} protocol with a case-1 (combiner) path and a
/// case-2 (atomic/critical) path.
void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
                                    ArrayRef<const Expr *> Privates,
                                    ArrayRef<const Expr *> LHSExprs,
                                    ArrayRef<const Expr *> RHSExprs,
                                    ArrayRef<const Expr *> ReductionOps,
                                    ReductionOptionsTy Options) {
  if (!CGF.HaveInsertPoint())
    return;

  bool WithNowait = Options.WithNowait;
  bool SimpleReduction = Options.SimpleReduction;

  // Next code should be emitted for reduction:
  //
  // static kmp_critical_name lock = { 0 };
  //
  // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
  //  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
  //  ...
  //  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
  //  *(Type<n>-1*)rhs[<n>-1]);
  // }
  //
  // ...
  // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
  // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  // RedList, reduce_func, &<lock>)) {
  // case 1:
  //  ...
  //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  //  ...
  // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  // break;
  // case 2:
  //  ...
  //  Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  //  ...
  // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
  // break;
  // default:;
  // }
  //
  // if SimpleReduction is true, only the next code is generated:
  //  ...
  //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  //  ...

  ASTContext &C = CGM.getContext();

  if (SimpleReduction) {
    // No runtime interaction needed - just emit each combiner in place.
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (const Expr *E : ReductionOps) {
      emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                  cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
    return;
  }

  // 1. Build a list of reduction variables.
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
  auto Size = RHSExprs.size();
  for (const Expr *E : Privates) {
    if (E->getType()->isVariablyModifiedType())
      // Reserve place for array size.
      ++Size;
  }
  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
  QualType ReductionArrayTy =
      C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
                             /*IndexTypeQuals=*/0);
  Address ReductionList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
    Address Elem =
      CGF.Builder.CreateConstArrayGEP(ReductionList, Idx, CGF.getPointerSize());
    CGF.Builder.CreateStore(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
        Elem);
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
      // Store array size.
      // The element count is smuggled through the void* slot via inttoptr.
      ++Idx;
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
                                             CGF.getPointerSize());
      llvm::Value *Size = CGF.Builder.CreateIntCast(
          CGF.getVLASize(
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
              .NumElts,
          CGF.SizeTy, /*isSigned=*/false);
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
                              Elem);
    }
  }

  // 2. Emit reduce_func().
  llvm::Value *ReductionFn = emitReductionFunction(
      CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
      Privates, LHSExprs, RHSExprs, ReductionOps);

  // 3. Create static kmp_critical_name lock = { 0 };
  std::string Name = getName({"reduction"});
  llvm::Value *Lock = getCriticalRegionLock(Name);

  // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  // RedList, reduce_func, &<lock>);
  llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
  llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      ReductionList.getPointer(), CGF.VoidPtrTy);
  llvm::Value *Args[] = {
      IdentTLoc,                             // ident_t *<loc>
      ThreadId,                              // i32 <gtid>
      CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
      ReductionArrayTySize,                  // size_type sizeof(RedList)
      RL,                                    // void *RedList
      ReductionFn, // void (*) (void *, void *) <reduce_func>
      Lock         // kmp_critical_name *&<lock>
  };
  llvm::Value *Res = CGF.EmitRuntimeCall(
      createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
                                       : OMPRTL__kmpc_reduce),
      Args);

  // 5. Build switch(res)
  llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
  llvm::SwitchInst *SwInst =
      CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);

  // 6. Build case 1:
  //  ...
  //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  //  ...
  // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  // break;
  llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
  SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
  CGF.EmitBlock(Case1BB);

  // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  llvm::Value *EndArgs[] = {
      IdentTLoc, // ident_t *<loc>
      ThreadId,  // i32 <gtid>
      Lock       // kmp_critical_name *&<lock>
  };
  auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (const Expr *E : ReductionOps) {
      RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                     cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
  };
  RegionCodeGenTy RCG(CodeGen);
  CommonActionTy Action(
      nullptr, llvm::None,
      createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
                                       : OMPRTL__kmpc_end_reduce),
      EndArgs);
  RCG.setAction(Action);
  RCG(CGF);

  CGF.EmitBranch(DefaultBB);

  // 7. Build case 2:
  //  ...
  //  Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  //  ...
  // break;
  llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
  SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
  CGF.EmitBlock(Case2BB);

  // Case-2 body: try to emit each item as a simple atomic update; fall back
  // to a named critical region when no atomic form can be derived.
  auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
                             CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    auto IPriv = Privates.begin();
    for (const Expr *E : ReductionOps) {
      const Expr *XExpr = nullptr;
      const Expr *EExpr = nullptr;
      const Expr *UpExpr = nullptr;
      BinaryOperatorKind BO = BO_Comma;
      if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
        if (BO->getOpcode() == BO_Assign) {
          XExpr = BO->getLHS();
          UpExpr = BO->getRHS();
        }
      }
      // Try to emit update expression as a simple atomic.
      const Expr *RHSExpr = UpExpr;
      if (RHSExpr) {
        // Analyze RHS part of the whole expression.
        if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(
                RHSExpr->IgnoreParenImpCasts())) {
          // If this is a conditional operator, analyze its condition for
          // min/max reduction operator.
          RHSExpr = ACO->getCond();
        }
        if (const auto *BORHS =
                dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
          EExpr = BORHS->getRHS();
          BO = BORHS->getOpcode();
        }
      }
      if (XExpr) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
        auto &&AtomicRedGen = [BO, VD,
                               Loc](CodeGenFunction &CGF, const Expr *XExpr,
                                    const Expr *EExpr, const Expr *UpExpr) {
          LValue X = CGF.EmitLValue(XExpr);
          RValue E;
          if (EExpr)
            E = CGF.EmitAnyExpr(EExpr);
          CGF.EmitOMPAtomicSimpleUpdateExpr(
              X, E, BO, /*IsXLHSInRHSPart=*/true,
              llvm::AtomicOrdering::Monotonic, Loc,
              [&CGF, UpExpr, VD, Loc](RValue XRValue) {
                // Remap the LHS variable to a temporary holding the loaded
                // atomic value while evaluating the update expression.
                CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
                PrivateScope.addPrivate(
                    VD, [&CGF, VD, XRValue, Loc]() {
                      Address LHSTemp = CGF.CreateMemTemp(VD->getType());
                      CGF.emitOMPSimpleStore(
                          CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
                          VD->getType().getNonReferenceType(), Loc);
                      return LHSTemp;
                    });
                (void)PrivateScope.Privatize();
                return CGF.EmitAnyExpr(UpExpr);
              });
        };
        if ((*IPriv)->getType()->isArrayType()) {
          // Emit atomic reduction for array section.
          const auto *RHSVar =
              cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
          EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
                                    AtomicRedGen, XExpr, EExpr, UpExpr);
        } else {
          // Emit atomic reduction for array subscript or single variable.
          AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
        }
      } else {
        // Emit as a critical region.
        auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
                                           const Expr *, const Expr *) {
          CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
          std::string Name = RT.getName({"atomic_reduction"});
          RT.emitCriticalRegion(
              CGF, Name,
              [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
                Action.Enter(CGF);
                emitReductionCombiner(CGF, E);
              },
              Loc);
        };
        if ((*IPriv)->getType()->isArrayType()) {
          const auto *LHSVar =
              cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
          const auto *RHSVar =
              cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
          EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
                                    CritRedGen);
        } else {
          CritRedGen(CGF, nullptr, nullptr, nullptr);
        }
      }
      ++ILHS;
      ++IRHS;
      ++IPriv;
    }
  };
  RegionCodeGenTy AtomicRCG(AtomicCodeGen);
  if (!WithNowait) {
    // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
    llvm::Value *EndArgs[] = {
        IdentTLoc, // ident_t *<loc>
        ThreadId,  // i32 <gtid>
        Lock       // kmp_critical_name *&<lock>
    };
    CommonActionTy Action(nullptr, llvm::None,
                          createRuntimeFunction(OMPRTL__kmpc_end_reduce),
                          EndArgs);
    AtomicRCG.setAction(Action);
    AtomicRCG(CGF);
  } else {
    AtomicRCG(CGF);
  }

  CGF.EmitBranch(DefaultBB);
  CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
}
5764
5765 /// Generates unique name for artificial threadprivate variables.
5766 /// Format is: <Prefix> "." <Decl_mangled_name> "_" "<Decl_start_loc_raw_enc>"
5767 static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
5768                                       const Expr *Ref) {
5769   SmallString<256> Buffer;
5770   llvm::raw_svector_ostream Out(Buffer);
5771   const clang::DeclRefExpr *DE;
5772   const VarDecl *D = ::getBaseDecl(Ref, DE);
5773   if (!D)
5774     D = cast<VarDecl>(cast<DeclRefExpr>(Ref)->getDecl());
5775   D = D->getCanonicalDecl();
5776   std::string Name = CGM.getOpenMPRuntime().getName(
5777       {D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
5778   Out << Prefix << Name << "_"
5779       << D->getCanonicalDecl()->getLocStart().getRawEncoding();
5780   return Out.str();
5781 }
5782
/// Emits reduction initializer function:
/// \code
/// void @.red_init(void* %arg) {
/// %0 = bitcast void* %arg to <type>*
/// store <type> <init>, <type>* %0
/// ret void
/// }
/// \endcode
static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N) {
  ASTContext &C = CGM.getContext();
  // Single void* argument: pointer to the private copy to be initialized.
  FunctionArgList Args;
  ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                          ImplicitParamDecl::Other);
  Args.emplace_back(&Param);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_init", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  Address PrivateAddr = CGF.EmitLoadOfPointer(
      CGF.GetAddrOfLocalVar(&Param),
      C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  LValue SharedLVal;
  // If initializer uses initializer from declare reduction construct, emit a
  // pointer to the address of the original reduction item (required by
  // reduction initializer)
  if (RCG.usesReductionInitializer(N)) {
    Address SharedAddr =
        CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
            CGF, CGM.getContext().VoidPtrTy,
            generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
    SharedAddr = CGF.EmitLoadOfPointer(
        SharedAddr,
        CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
    SharedLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
  } else {
    // Initializer does not need the original item - pass a null address.
    SharedLVal = CGF.MakeNaturalAlignAddrLValue(
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        CGM.getContext().VoidPtrTy);
  }
  // Emit the initializer:
  // %0 = bitcast void* %arg to <type>*
  // store <type> <init>, <type>* %0
  RCG.emitInitialization(CGF, N, PrivateAddr, SharedLVal,
                         [](CodeGenFunction &) { return false; });
  CGF.FinishFunction();
  return Fn;
}
5849
/// Emits reduction combiner function:
/// \code
/// void @.red_comb(void* %arg0, void* %arg1) {
/// %lhs = bitcast void* %arg0 to <type>*
/// %rhs = bitcast void* %arg1 to <type>*
/// %2 = <ReductionOp>(<type>* %lhs, <type>* %rhs)
/// store <type> %2, <type>* %lhs
/// ret void
/// }
/// \endcode
static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N,
                                           const Expr *ReductionOp,
                                           const Expr *LHS, const Expr *RHS,
                                           const Expr *PrivateRef) {
  ASTContext &C = CGM.getContext();
  const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
  const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
  // Two void* arguments: in/out item and in item.
  FunctionArgList Args;
  ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                               C.VoidPtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                            ImplicitParamDecl::Other);
  Args.emplace_back(&ParamInOut);
  Args.emplace_back(&ParamIn);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_comb", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  // Remap lhs and rhs variables to the addresses of the function arguments.
  // %lhs = bitcast void* %arg0 to <type>*
  // %rhs = bitcast void* %arg1 to <type>*
  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() {
    // Pull out the pointer to the variable.
    Address PtrAddr = CGF.EmitLoadOfPointer(
        CGF.GetAddrOfLocalVar(&ParamInOut),
        C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
    return CGF.Builder.CreateElementBitCast(
        PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
  });
  PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() {
    // Pull out the pointer to the variable.
    Address PtrAddr = CGF.EmitLoadOfPointer(
        CGF.GetAddrOfLocalVar(&ParamIn),
        C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
    return CGF.Builder.CreateElementBitCast(
        PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
  });
  PrivateScope.Privatize();
  // Emit the combiner body:
  // %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
  // store <type> %2, <type>* %lhs
  CGM.getOpenMPRuntime().emitSingleReductionCombiner(
      CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
      cast<DeclRefExpr>(RHS));
  CGF.FinishFunction();
  return Fn;
}
5927
5928 /// Emits reduction finalizer function:
5929 /// \code
5930 /// void @.red_fini(void* %arg) {
5931 /// %0 = bitcast void* %arg to <type>*
5932 /// <destroy>(<type>* %0)
5933 /// ret void
5934 /// }
5935 /// \endcode
static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N) {
  // If the reduction item needs no destruction, return null so the caller can
  // pass a null finalizer pointer to the runtime instead.
  if (!RCG.needCleanups(N))
    return nullptr;
  ASTContext &C = CGM.getContext();
  // Build the signature 'void @red_fini(i8*)': a single void* argument
  // pointing to the private copy to be destroyed.
  FunctionArgList Args;
  ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                          ImplicitParamDecl::Other);
  Args.emplace_back(&Param);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_fini", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  // Load the incoming void* argument (the private reduction copy).
  Address PrivateAddr = CGF.EmitLoadOfPointer(
      CGF.GetAddrOfLocalVar(&Param),
      C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  // Emit the finalizer body:
  // <destroy>(<type>* %0)
  RCG.emitCleanups(CGF, N, PrivateAddr);
  CGF.FinishFunction();
  return Fn;
}
5976
// Emit initialization of task reductions: build an array of
// kmp_task_red_input_t descriptors (one per reduction item), fill each with
// the shared item's address, its size, and the generated init/fini/comb
// helper functions, then call __kmpc_task_reduction_init and return its
// result (the opaque taskgroup reduction data pointer).
llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  // Nothing to emit without an insertion point or without reduction items.
  if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
    return nullptr;

  // Build typedef struct:
  // kmp_task_red_input {
  //   void *reduce_shar; // shared reduction item
  //   size_t reduce_size; // size of data item
  //   void *reduce_init; // data initialization routine
  //   void *reduce_fini; // data finalization routine
  //   void *reduce_comb; // data combiner routine
  //   kmp_task_red_flags_t flags; // flags for additional info from compiler
  // } kmp_task_red_input_t;
  ASTContext &C = CGM.getContext();
  RecordDecl *RD = C.buildImplicitRecord("kmp_task_red_input_t");
  RD->startDefinition();
  const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
  const FieldDecl *InitFD  = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *FlagsFD = addFieldToRecordDecl(
      C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
  RD->completeDefinition();
  QualType RDType = C.getRecordType(RD);
  unsigned Size = Data.ReductionVars.size();
  llvm::APInt ArraySize(/*numBits=*/64, Size);
  QualType ArrayRDType = C.getConstantArrayType(
      RDType, ArraySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
  // kmp_task_red_input_t .rd_input.[Size];
  Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
  ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionCopies,
                       Data.ReductionOps);
  // Fill in one descriptor per reduction item.
  for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
    // kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
    llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
                           llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
    llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
        TaskRedInput.getPointer(), Idxs,
        /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
        ".rd_input.gep.");
    LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
    // ElemLVal.reduce_shar = &Shareds[Cnt];
    LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
    RCG.emitSharedLValue(CGF, Cnt);
    llvm::Value *CastedShared =
        CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer());
    CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
    RCG.emitAggregateType(CGF, Cnt);
    llvm::Value *SizeValInChars;
    llvm::Value *SizeVal;
    std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
    // We use delayed creation/initialization for VLAs, array sections and
    // custom reduction initializations. It is required because runtime does not
    // provide the way to pass the sizes of VLAs/array sections to
    // initializer/combiner/finalizer functions and does not pass the pointer to
    // original reduction item to the initializer. Instead threadprivate global
    // variables are used to store these values and use them in the functions.
    bool DelayedCreation = !!SizeVal;
    SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
                                               /*isSigned=*/false);
    // ElemLVal.reduce_size = size;
    LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
    CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
    // ElemLVal.reduce_init = init;
    LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
    llvm::Value *InitAddr =
        CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
    CGF.EmitStoreOfScalar(InitAddr, InitLVal);
    DelayedCreation = DelayedCreation || RCG.usesReductionInitializer(Cnt);
    // ElemLVal.reduce_fini = fini;
    LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
    llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
    // The finalizer is null when the item needs no cleanups.
    llvm::Value *FiniAddr = Fini
                                ? CGF.EmitCastToVoidPtr(Fini)
                                : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
    CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
    // ElemLVal.reduce_comb = comb;
    LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
    llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
        CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
        RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
    CGF.EmitStoreOfScalar(CombAddr, CombLVal);
    // ElemLVal.flags = 0; (or 1 if creation/initialization must be delayed).
    LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
    if (DelayedCreation) {
      CGF.EmitStoreOfScalar(
          llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*IsSigned=*/true),
          FlagsLVal);
    } else
      CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
  }
  // Build call void *__kmpc_task_reduction_init(int gtid, int num_data, void
  // *data);
  llvm::Value *Args[] = {
      CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
                                /*isSigned=*/true),
      llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
                                                      CGM.VoidPtrTy)};
  return CGF.EmitRuntimeCall(
      createRuntimeFunction(OMPRTL__kmpc_task_reduction_init), Args);
}
6081
// Store the values that the generated reduction init/fini/comb helpers read
// back from artificial threadprivate globals: the dynamic size of the item
// (for VLAs/array sections) and the address of the original shared item
// (when a custom reduction initializer is used).
void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              ReductionCodeGen &RCG,
                                              unsigned N) {
  auto Sizes = RCG.getSizes(N);
  // Emit threadprivate global variable if the type is non-constant
  // (Sizes.second != nullptr).
  if (Sizes.second) {
    llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy,
                                                     /*isSigned=*/false);
    Address SizeAddr = getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
  }
  // Store address of the original reduction item if custom initializer is used.
  if (RCG.usesReductionInitializer(N)) {
    Address SharedAddr = getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().VoidPtrTy,
        generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
    CGF.Builder.CreateStore(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            RCG.getSharedLValue(N).getPointer(), CGM.VoidPtrTy),
        SharedAddr, /*IsVolatile=*/false);
  }
}
6108
// Query the runtime for the address of the current thread's private copy of a
// task reduction item, given the taskgroup reduction data pointer
// (\p ReductionsPtr) and the shared item (\p SharedLVal). The returned
// address reuses the shared item's alignment.
Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              llvm::Value *ReductionsPtr,
                                              LValue SharedLVal) {
  // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
  // *d);
  llvm::Value *Args[] = {
      CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
                                /*isSigned=*/true),
      ReductionsPtr,
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(SharedLVal.getPointer(),
                                                      CGM.VoidPtrTy)};
  return Address(
      CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_task_reduction_get_th_data), Args),
      SharedLVal.getAlignment());
}
6126
// Emit the runtime call implementing the OpenMP 'taskwait' directive, plus
// the untied-task switch point for the enclosing region, if any.
void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
                                       SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
  // global_tid);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  // Ignore return result until untied tasks are supported.
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
  // Taskwait is a scheduling point for untied tasks within the region.
  if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
    Region->emitUntiedSwitch(CGF);
}
6139
// Emit the body of an OpenMP construct that needs no outlined function: the
// code is generated inline in the current function, inside an
// InlinedOpenMPRegionRAII that records the inner directive kind and its
// cancellation flag while the body is emitted.
void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
                                           OpenMPDirectiveKind InnerKind,
                                           const RegionCodeGenTy &CodeGen,
                                           bool HasCancel) {
  if (!CGF.HaveInsertPoint())
    return;
  InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel);
  CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
}
6149
namespace {
/// Cancellation kind constants passed as the 'cncl_kind' argument to the
/// __kmpc_cancel / __kmpc_cancellationpoint runtime entry points.
/// NOTE(review): values presumably must match the runtime library's
/// cancellation kinds — verify against the OpenMP runtime headers.
enum RTCancelKind {
  CancelNoreq = 0,     // No cancellation requested.
  CancelParallel = 1,  // 'parallel' construct.
  CancelLoop = 2,      // worksharing 'for' loop.
  CancelSections = 3,  // 'sections' construct.
  CancelTaskgroup = 4  // 'taskgroup' construct.
};
} // anonymous namespace
6159
6160 static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
6161   RTCancelKind CancelKind = CancelNoreq;
6162   if (CancelRegion == OMPD_parallel)
6163     CancelKind = CancelParallel;
6164   else if (CancelRegion == OMPD_for)
6165     CancelKind = CancelLoop;
6166   else if (CancelRegion == OMPD_sections)
6167     CancelKind = CancelSections;
6168   else {
6169     assert(CancelRegion == OMPD_taskgroup);
6170     CancelKind = CancelTaskgroup;
6171   }
6172   return CancelKind;
6173 }
6174
// Emit code for the 'cancellation point' directive: call
// __kmpc_cancellationpoint and, if it reports an active cancellation, branch
// (through any pending cleanups) to the cancel destination of the enclosing
// region.
void CGOpenMPRuntime::emitCancellationPointCall(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDirectiveKind CancelRegion) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
  // global_tid, kmp_int32 cncl_kind);
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    // For 'cancellation point taskgroup', the task region info may not have a
    // cancel. This may instead happen in another adjacent task.
    if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
      llvm::Value *Args[] = {
          emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
          CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // Ignore return result until untied tasks are supported.
      llvm::Value *Result = CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_cancellationpoint), Args);
      // if (__kmpc_cancellationpoint()) {
      //   exit from construct;
      // }
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
      llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
      llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
      CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
      CGF.EmitBlock(ExitBB);
      // exit from construct;
      CodeGenFunction::JumpDest CancelDest =
          CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
      CGF.EmitBranchThroughCleanup(CancelDest);
      CGF.EmitBlock(ContBB, /*IsFinished=*/true);
    }
  }
}
6209
// Emit code for the 'cancel' directive: call __kmpc_cancel (optionally
// guarded by the 'if' clause condition) and, if the runtime reports that
// cancellation was activated, branch through any pending cleanups to the
// cancel destination of the enclosing region.
void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                                     const Expr *IfCond,
                                     OpenMPDirectiveKind CancelRegion) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 cncl_kind);
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    // Code shared by the guarded and unguarded cases below.
    auto &&ThenGen = [Loc, CancelRegion, OMPRegionInfo](CodeGenFunction &CGF,
                                                        PrePostActionTy &) {
      CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
      llvm::Value *Args[] = {
          RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
          CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // Ignore return result until untied tasks are supported.
      llvm::Value *Result = CGF.EmitRuntimeCall(
          RT.createRuntimeFunction(OMPRTL__kmpc_cancel), Args);
      // if (__kmpc_cancel()) {
      //   exit from construct;
      // }
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
      llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
      llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
      CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
      CGF.EmitBlock(ExitBB);
      // exit from construct;
      CodeGenFunction::JumpDest CancelDest =
          CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
      CGF.EmitBranchThroughCleanup(CancelDest);
      CGF.EmitBlock(ContBB, /*IsFinished=*/true);
    };
    if (IfCond) {
      // 'if' clause present: cancel only when the condition evaluates true.
      emitOMPIfClause(CGF, IfCond, ThenGen,
                      [](CodeGenFunction &, PrePostActionTy &) {});
    } else {
      RegionCodeGenTy ThenRCG(ThenGen);
      ThenRCG(CGF);
    }
  }
}
6251
// Emit the outlined host/device function for a target region. This base
// implementation validates the parent function name and delegates all the
// work to emitTargetOutlinedFunctionHelper.
void CGOpenMPRuntime::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  assert(!ParentName.empty() && "Invalid target region parent name!");
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
}
6260
// Outline the captured statement of a target region into a uniquely-named
// function, create the region ID used by the offloading runtime, and register
// the pair as an offload entry (when \p IsOffloadEntry is true).
void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  // Create a unique name for the entry function using the source location
  // information of the current target region. The name will be something like:
  //
  // __omp_offloading_DD_FFFF_PP_lBB
  //
  // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
  // mangled name of the function that encloses the target region and BB is the
  // line number of the target region.

  unsigned DeviceID;
  unsigned FileID;
  unsigned Line;
  getTargetEntryUniqueInfo(CGM.getContext(), D.getLocStart(), DeviceID, FileID,
                           Line);
  SmallString<64> EntryFnName;
  {
    llvm::raw_svector_ostream OS(EntryFnName);
    OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
       << llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
  }

  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);

  // Outline the captured statement body under the generated entry name.
  CodeGenFunction CGF(CGM, true);
  CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);

  OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);

  // If this target outline function is not an offload entry, we don't need to
  // register it.
  if (!IsOffloadEntry)
    return;

  // The target region ID is used by the runtime library to identify the current
  // target region, so it only has to be unique and not necessarily point to
  // anything. It could be the pointer to the outlined function that implements
  // the target region, but we aren't using that so that the compiler doesn't
  // need to keep that, and could therefore inline the host function if proven
  // worthwhile during optimization. On the other hand, if emitting code for the
  // device, the ID has to be the function address so that it can be retrieved
  // from the offloading entry and launched by the runtime library. We also mark
  // the outlined function to have external linkage in case we are emitting code
  // for the device, because these functions will be entry points to the device.

  if (CGM.getLangOpts().OpenMPIsDevice) {
    OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
    OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
    OutlinedFn->setDSOLocal(false);
  } else {
    // Host side: a unique constant byte serves as the region ID.
    std::string Name = getName({EntryFnName, "region_id"});
    OutlinedFnID = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::WeakAnyLinkage,
        llvm::Constant::getNullValue(CGM.Int8Ty), Name);
  }

  // Register the information for the entry associated with this target region.
  OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
      DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
      OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
}
6327
6328 /// discard all CompoundStmts intervening between two constructs
6329 static const Stmt *ignoreCompoundStmts(const Stmt *Body) {
6330   while (const auto *CS = dyn_cast_or_null<CompoundStmt>(Body))
6331     Body = CS->body_front();
6332
6333   return Body;
6334 }
6335
/// Emit the number of teams for a target directive.  Inspect the num_teams
/// clause associated with a teams construct combined or closely nested
/// with the target directive.
///
/// Emit a team of size one for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
static llvm::Value *
emitNumTeamsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
                               CodeGenFunction &CGF,
                               const OMPExecutableDirective &D) {
  assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
                                              "teams directive expected to be "
                                              "emitted only for the host!");

  CGBuilderTy &Bld = CGF.Builder;

  // If the target directive is combined with a teams directive:
  //   Return the value in the num_teams clause, if any.
  //   Otherwise, return 0 to denote the runtime default.
  if (isOpenMPTeamsDirective(D.getDirectiveKind())) {
    if (const auto *NumTeamsClause = D.getSingleClause<OMPNumTeamsClause>()) {
      CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
      llvm::Value *NumTeams = CGF.EmitScalarExpr(NumTeamsClause->getNumTeams(),
                                                 /*IgnoreResultAssign*/ true);
      return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
                               /*IsSigned=*/true);
    }

    // The default value is 0.
    return Bld.getInt32(0);
  }

  // If the target directive is combined with a parallel directive but not a
  // teams directive, start one team.
  if (isOpenMPParallelDirective(D.getDirectiveKind()))
    return Bld.getInt32(1);

  // If the current target region has a teams region enclosed, we need to get
  // the number of teams to pass to the runtime function call. This is done
  // by generating the expression in a inlined region. This is required because
  // the expression is captured in the enclosing target environment when the
  // teams directive is not combined with target.

  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);

  if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
          ignoreCompoundStmts(CS.getCapturedStmt()))) {
    if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
      if (const auto *NTE = TeamsDir->getSingleClause<OMPNumTeamsClause>()) {
        // Emit the clause expression in an inlined region so references to
        // captured variables resolve against the target's captured statement.
        CGOpenMPInnerExprInfo CGInfo(CGF, CS);
        CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
        llvm::Value *NumTeams = CGF.EmitScalarExpr(NTE->getNumTeams());
        return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
                                 /*IsSigned=*/true);
      }

      // If we have an enclosed teams directive but no num_teams clause we use
      // the default value 0.
      return Bld.getInt32(0);
    }
  }

  // No teams associated with the directive.
  return nullptr;
}
6403
/// Emit the number of threads for a target directive.  Inspect the
/// thread_limit clause associated with a teams construct combined or closely
/// nested with the target directive.
///
/// Emit the num_threads clause for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
static llvm::Value *
emitNumThreadsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
                                 CodeGenFunction &CGF,
                                 const OMPExecutableDirective &D) {
  assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
                                              "teams directive expected to be "
                                              "emitted only for the host!");

  CGBuilderTy &Bld = CGF.Builder;

  //
  // If the target directive is combined with a teams directive:
  //   Return the value in the thread_limit clause, if any.
  //
  // If the target directive is combined with a parallel directive:
  //   Return the value in the num_threads clause, if any.
  //
  // If both clauses are set, select the minimum of the two.
  //
  // If neither teams or parallel combined directives set the number of threads
  // in a team, return 0 to denote the runtime default.
  //
  // If this is not a teams directive return nullptr.

  if (isOpenMPTeamsDirective(D.getDirectiveKind()) ||
      isOpenMPParallelDirective(D.getDirectiveKind())) {
    llvm::Value *DefaultThreadLimitVal = Bld.getInt32(0);
    llvm::Value *NumThreadsVal = nullptr;
    llvm::Value *ThreadLimitVal = nullptr;

    if (const auto *ThreadLimitClause =
            D.getSingleClause<OMPThreadLimitClause>()) {
      CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
      llvm::Value *ThreadLimit =
          CGF.EmitScalarExpr(ThreadLimitClause->getThreadLimit(),
                             /*IgnoreResultAssign*/ true);
      ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty,
                                         /*IsSigned=*/true);
    }

    if (const auto *NumThreadsClause =
            D.getSingleClause<OMPNumThreadsClause>()) {
      CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
      llvm::Value *NumThreads =
          CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                             /*IgnoreResultAssign*/ true);
      NumThreadsVal =
          Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*IsSigned=*/true);
    }

    // Select the lesser of thread_limit and num_threads.
    if (NumThreadsVal)
      ThreadLimitVal = ThreadLimitVal
                           ? Bld.CreateSelect(Bld.CreateICmpSLT(NumThreadsVal,
                                                                ThreadLimitVal),
                                              NumThreadsVal, ThreadLimitVal)
                           : NumThreadsVal;

    // Set default value passed to the runtime if either teams or a target
    // parallel type directive is found but no clause is specified.
    if (!ThreadLimitVal)
      ThreadLimitVal = DefaultThreadLimitVal;

    return ThreadLimitVal;
  }

  // If the current target region has a teams region enclosed, we need to get
  // the thread limit to pass to the runtime function call. This is done
  // by generating the expression in a inlined region. This is required because
  // the expression is captured in the enclosing target environment when the
  // teams directive is not combined with target.

  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);

  if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
          ignoreCompoundStmts(CS.getCapturedStmt()))) {
    if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
      if (const auto *TLE = TeamsDir->getSingleClause<OMPThreadLimitClause>()) {
        // Emit the clause expression in an inlined region so references to
        // captured variables resolve against the target's captured statement.
        CGOpenMPInnerExprInfo CGInfo(CGF, CS);
        CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
        llvm::Value *ThreadLimit = CGF.EmitScalarExpr(TLE->getThreadLimit());
        return CGF.Builder.CreateIntCast(ThreadLimit, CGF.Int32Ty,
                                         /*IsSigned=*/true);
      }

      // If we have an enclosed teams directive but no thread_limit clause we
      // use the default value 0.
      return CGF.Builder.getInt32(0);
    }
  }

  // No teams associated with the directive.
  return nullptr;
}
6506
6507 namespace {
6508 LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
6509
6510 // Utility to handle information from clauses associated with a given
6511 // construct that use mappable expressions (e.g. 'map' clause, 'to' clause).
6512 // It provides a convenient interface to obtain the information and generate
6513 // code for that information.
6514 class MappableExprsHandler {
6515 public:
6516   /// Values for bit flags used to specify the mapping type for
6517   /// offloading.
6518   enum OpenMPOffloadMappingFlags : uint64_t {
6519     /// No flags
6520     OMP_MAP_NONE = 0x0,
6521     /// Allocate memory on the device and move data from host to device.
6522     OMP_MAP_TO = 0x01,
6523     /// Allocate memory on the device and move data from device to host.
6524     OMP_MAP_FROM = 0x02,
6525     /// Always perform the requested mapping action on the element, even
6526     /// if it was already mapped before.
6527     OMP_MAP_ALWAYS = 0x04,
6528     /// Delete the element from the device environment, ignoring the
6529     /// current reference count associated with the element.
6530     OMP_MAP_DELETE = 0x08,
6531     /// The element being mapped is a pointer-pointee pair; both the
6532     /// pointer and the pointee should be mapped.
6533     OMP_MAP_PTR_AND_OBJ = 0x10,
6534     /// This flags signals that the base address of an entry should be
6535     /// passed to the target kernel as an argument.
6536     OMP_MAP_TARGET_PARAM = 0x20,
6537     /// Signal that the runtime library has to return the device pointer
6538     /// in the current position for the data being mapped. Used when we have the
6539     /// use_device_ptr clause.
6540     OMP_MAP_RETURN_PARAM = 0x40,
6541     /// This flag signals that the reference being passed is a pointer to
6542     /// private data.
6543     OMP_MAP_PRIVATE = 0x80,
6544     /// Pass the element to the device by value.
6545     OMP_MAP_LITERAL = 0x100,
6546     /// Implicit map
6547     OMP_MAP_IMPLICIT = 0x200,
6548     /// The 16 MSBs of the flags indicate whether the entry is member of some
6549     /// struct/class.
6550     OMP_MAP_MEMBER_OF = 0xffff000000000000,
6551     LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ OMP_MAP_MEMBER_OF),
6552   };
6553
  /// Class that associates information with a base pointer to be passed to the
  /// runtime library.
  class BasePointerInfo {
    /// The base pointer.
    llvm::Value *Ptr = nullptr;
    /// The base declaration that refers to this device pointer, or null if
    /// there is none.
    const ValueDecl *DevPtrDecl = nullptr;

  public:
    BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
        : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
    /// Return the stored base pointer.
    llvm::Value *operator*() const { return Ptr; }
    /// Return the device pointer declaration, or null if there is none.
    const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
    /// Associate a device pointer declaration with this base pointer.
    void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
  };
6570
6571   using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
6572   using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
6573   using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;
6574
6575   /// Map between a struct and the its lowest & highest elements which have been
6576   /// mapped.
6577   /// [ValueDecl *] --> {LE(FieldIndex, Pointer),
6578   ///                    HE(FieldIndex, Pointer)}
6579   struct StructRangeInfoTy {
6580     std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> LowestElem = {
6581         0, Address::invalid()};
6582     std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> HighestElem = {
6583         0, Address::invalid()};
6584     Address Base = Address::invalid();
6585   };
6586
6587 private:
  /// Information gathered from one map-like clause entry: the mappable
  /// expression component list together with its map type and modifier,
  /// whether the runtime must return a device pointer for this entry, and
  /// whether the mapping is implicit.
  struct MapInfo {
    OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
    OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
    OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
    bool ReturnDevicePointer = false;
    bool IsImplicit = false;

    MapInfo() = default;
    MapInfo(
        OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
        OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
        bool ReturnDevicePointer, bool IsImplicit)
        : Components(Components), MapType(MapType),
          MapTypeModifier(MapTypeModifier),
          ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit) {}
  };
6605
6606   /// If use_device_ptr is used on a pointer which is a struct member and there
6607   /// is no map information about it, then emission of that entry is deferred
6608   /// until the whole struct has been processed.
6609   struct DeferredDevicePtrEntryTy {
6610     const Expr *IE = nullptr;
6611     const ValueDecl *VD = nullptr;
6612
6613     DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD)
6614         : IE(IE), VD(VD) {}
6615   };
6616
  /// Directive from where the map clauses were extracted.
  const OMPExecutableDirective &CurDir;

  /// Function the directive is being generated for.
  CodeGenFunction &CGF;

  /// Set of all first private variables in the current directive.
  /// Invariant: populated with *canonical* declarations (see the
  /// constructor), so lookups have to canonicalize their key as well.
  llvm::SmallPtrSet<const VarDecl *, 8> FirstPrivateDecls;

  /// Map between device pointer declarations and their expression components.
  /// The key value for declarations in 'this' is null.
  llvm::DenseMap<
      const ValueDecl *,
      SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
      DevPointersMap;
6632
6633   llvm::Value *getExprTypeSize(const Expr *E) const {
6634     QualType ExprTy = E->getType().getCanonicalType();
6635
6636     // Reference types are ignored for mapping purposes.
6637     if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
6638       ExprTy = RefTy->getPointeeType().getCanonicalType();
6639
6640     // Given that an array section is considered a built-in type, we need to
6641     // do the calculation based on the length of the section instead of relying
6642     // on CGF.getTypeSize(E->getType()).
6643     if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
6644       QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
6645                             OAE->getBase()->IgnoreParenImpCasts())
6646                             .getCanonicalType();
6647
6648       // If there is no length associated with the expression, that means we
6649       // are using the whole length of the base.
6650       if (!OAE->getLength() && OAE->getColonLoc().isValid())
6651         return CGF.getTypeSize(BaseTy);
6652
6653       llvm::Value *ElemSize;
6654       if (const auto *PTy = BaseTy->getAs<PointerType>()) {
6655         ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
6656       } else {
6657         const auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
6658         assert(ATy && "Expecting array type if not a pointer type.");
6659         ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
6660       }
6661
6662       // If we don't have a length at this point, that is because we have an
6663       // array section with a single element.
6664       if (!OAE->getLength())
6665         return ElemSize;
6666
6667       llvm::Value *LengthVal = CGF.EmitScalarExpr(OAE->getLength());
6668       LengthVal =
6669           CGF.Builder.CreateIntCast(LengthVal, CGF.SizeTy, /*isSigned=*/false);
6670       return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
6671     }
6672     return CGF.getTypeSize(ExprTy);
6673   }
6674
6675   /// Return the corresponding bits for a given map clause modifier. Add
6676   /// a flag marking the map as a pointer if requested. Add a flag marking the
6677   /// map as the first one of a series of maps that relate to the same map
6678   /// expression.
6679   OpenMPOffloadMappingFlags getMapTypeBits(OpenMPMapClauseKind MapType,
6680                                            OpenMPMapClauseKind MapTypeModifier,
6681                                            bool IsImplicit, bool AddPtrFlag,
6682                                            bool AddIsTargetParamFlag) const {
6683     OpenMPOffloadMappingFlags Bits =
6684         IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE;
6685     switch (MapType) {
6686     case OMPC_MAP_alloc:
6687     case OMPC_MAP_release:
6688       // alloc and release is the default behavior in the runtime library,  i.e.
6689       // if we don't pass any bits alloc/release that is what the runtime is
6690       // going to do. Therefore, we don't need to signal anything for these two
6691       // type modifiers.
6692       break;
6693     case OMPC_MAP_to:
6694       Bits |= OMP_MAP_TO;
6695       break;
6696     case OMPC_MAP_from:
6697       Bits |= OMP_MAP_FROM;
6698       break;
6699     case OMPC_MAP_tofrom:
6700       Bits |= OMP_MAP_TO | OMP_MAP_FROM;
6701       break;
6702     case OMPC_MAP_delete:
6703       Bits |= OMP_MAP_DELETE;
6704       break;
6705     case OMPC_MAP_always:
6706     case OMPC_MAP_unknown:
6707       llvm_unreachable("Unexpected map type!");
6708     }
6709     if (AddPtrFlag)
6710       Bits |= OMP_MAP_PTR_AND_OBJ;
6711     if (AddIsTargetParamFlag)
6712       Bits |= OMP_MAP_TARGET_PARAM;
6713     if (MapTypeModifier == OMPC_MAP_always)
6714       Bits |= OMP_MAP_ALWAYS;
6715     return Bits;
6716   }
6717
6718   /// Return true if the provided expression is a final array section. A
6719   /// final array section, is one whose length can't be proved to be one.
6720   bool isFinalArraySectionExpression(const Expr *E) const {
6721     const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
6722
6723     // It is not an array section and therefore not a unity-size one.
6724     if (!OASE)
6725       return false;
6726
6727     // An array section with no colon always refer to a single element.
6728     if (OASE->getColonLoc().isInvalid())
6729       return false;
6730
6731     const Expr *Length = OASE->getLength();
6732
6733     // If we don't have a length we have to check if the array has size 1
6734     // for this dimension. Also, we should always expect a length if the
6735     // base type is pointer.
6736     if (!Length) {
6737       QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
6738                              OASE->getBase()->IgnoreParenImpCasts())
6739                              .getCanonicalType();
6740       if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
6741         return ATy->getSize().getSExtValue() != 1;
6742       // If we don't have a constant dimension length, we have to consider
6743       // the current section as having any size, so it is not necessarily
6744       // unitary. If it happen to be unity size, that's user fault.
6745       return true;
6746     }
6747
6748     // Check if the length evaluates to 1.
6749     llvm::APSInt ConstLength;
6750     if (!Length->EvaluateAsInt(ConstLength, CGF.getContext()))
6751       return true; // Can have more that size 1.
6752
6753     return ConstLength.getSExtValue() != 1;
6754   }
6755
  /// Generate the base pointers, section pointers, sizes and map type
  /// bits for the provided map type, map modifier, and expression components.
  /// \a IsFirstComponentList should be set to true if the provided set of
  /// components is the first associated with a capture.
  /// Results are appended to \a BasePointers, \a Pointers, \a Sizes and
  /// \a Types (one entry each per generated map); \a PartialStruct is updated
  /// with the base and the lowest/highest mapped members whenever a struct
  /// member is mapped, so the caller can emit a combined entry afterwards.
  void generateInfoForComponentList(
      OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
      OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
      MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
      MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
      StructRangeInfoTy &PartialStruct, bool IsFirstComponentList,
      bool IsImplicit) const {
    // The following summarizes what has to be generated for each map and the
    // types below. The generated information is expressed in this order:
    // base pointer, section pointer, size, flags
    // (to add to the ones that come from the map type and modifier).
    //
    // double d;
    // int i[100];
    // float *p;
    //
    // struct S1 {
    //   int i;
    //   float f[50];
    // }
    // struct S2 {
    //   int i;
    //   float f[50];
    //   S1 s;
    //   double *p;
    //   struct S2 *ps;
    // }
    // S2 s;
    // S2 *ps;
    //
    // map(d)
    // &d, &d, sizeof(double), TARGET_PARAM | TO | FROM
    //
    // map(i)
    // &i, &i, 100*sizeof(int), TARGET_PARAM | TO | FROM
    //
    // map(i[1:23])
    // &i(=&i[0]), &i[1], 23*sizeof(int), TARGET_PARAM | TO | FROM
    //
    // map(p)
    // &p, &p, sizeof(float*), TARGET_PARAM | TO | FROM
    //
    // map(p[1:24])
    // p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM
    //
    // map(s)
    // &s, &s, sizeof(S2), TARGET_PARAM | TO | FROM
    //
    // map(s.i)
    // &s, &(s.i), sizeof(int), TARGET_PARAM | TO | FROM
    //
    // map(s.s.f)
    // &s, &(s.s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
    //
    // map(s.p)
    // &s, &(s.p), sizeof(double*), TARGET_PARAM | TO | FROM
    //
    // map(to: s.p[:22])
    // &s, &(s.p), sizeof(double*), TARGET_PARAM (*)
    // &s, &(s.p), sizeof(double*), MEMBER_OF(1) (**)
    // &(s.p), &(s.p[0]), 22*sizeof(double),
    //   MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
    // (*) alloc space for struct members, only this is a target parameter
    // (**) map the pointer (nothing to be mapped in this example) (the compiler
    //      optimizes this entry out, same in the examples below)
    // (***) map the pointee (map: to)
    //
    // map(s.ps)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM | TO | FROM
    //
    // map(from: s.ps->s.i)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ  | FROM
    //
    // map(to: s.ps->ps)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ  | TO
    //
    // map(s.ps->ps->ps)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
    //
    // map(to: s.ps->ps->s.f[:22])
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
    //
    // map(ps)
    // &ps, &ps, sizeof(S2*), TARGET_PARAM | TO | FROM
    //
    // map(ps->i)
    // ps, &(ps->i), sizeof(int), TARGET_PARAM | TO | FROM
    //
    // map(ps->s.f)
    // ps, &(ps->s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
    //
    // map(from: ps->p)
    // ps, &(ps->p), sizeof(double*), TARGET_PARAM | FROM
    //
    // map(to: ps->p[:22])
    // ps, &(ps->p), sizeof(double*), TARGET_PARAM
    // ps, &(ps->p), sizeof(double*), MEMBER_OF(1)
    // &(ps->p), &(ps->p[0]), 22*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | TO
    //
    // map(ps->ps)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM | TO | FROM
    //
    // map(from: ps->ps->s.i)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    //
    // map(from: ps->ps->ps)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    //
    // map(ps->ps->ps->ps)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
    //
    // map(to: ps->ps->ps->s.f[:22])
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
    //
    // map(to: s.f[:22]) map(from: s.p[:33])
    // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1) +
    //     sizeof(double*) (**), TARGET_PARAM
    // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | TO
    // &s, &(s.p), sizeof(double*), MEMBER_OF(1)
    // &(s.p), &(s.p[0]), 33*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    // (*) allocate contiguous space needed to fit all mapped members even if
    //     we allocate space for members not mapped (in this example,
    //     s.f[22..49] and s.s are not mapped, yet we must allocate space for
    //     them as well because they fall between &s.f[0] and &s.p)
    //
    // map(from: s.f[:22]) map(to: ps->p[:33])
    // &s, &(s.f[0]), 22*sizeof(float), TARGET_PARAM | FROM
    // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->p), sizeof(double*), MEMBER_OF(2) (*)
    // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(2) | PTR_AND_OBJ | TO
    // (*) the struct this entry pertains to is the 2nd element in the list of
    //     arguments, hence MEMBER_OF(2)
    //
    // map(from: s.f[:22], s.s) map(to: ps->p[:33])
    // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1), TARGET_PARAM
    // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | FROM
    // &s, &(s.s), sizeof(struct S1), MEMBER_OF(1) | FROM
    // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->p), sizeof(double*), MEMBER_OF(4) (*)
    // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(4) | PTR_AND_OBJ | TO
    // (*) the struct this entry pertains to is the 4th element in the list
    //     of arguments, hence MEMBER_OF(4)

    // Track if the map information being generated is the first for a capture.
    bool IsCaptureFirstInfo = IsFirstComponentList;
    bool IsLink = false; // Is this variable a "declare target link"?

    // Scan the components from the base to the complete expression.
    auto CI = Components.rbegin();
    auto CE = Components.rend();
    auto I = CI;

    // Track if the map information being generated is the first for a list of
    // components.
    bool IsExpressionFirstInfo = true;
    Address BP = Address::invalid();

    if (isa<MemberExpr>(I->getAssociatedExpression())) {
      // The base is the 'this' pointer. The content of the pointer is going
      // to be the base of the field being mapped.
      BP = CGF.LoadCXXThisAddress();
    } else {
      // The base is the reference to the variable.
      // BP = &Var.
      BP = CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();
      if (const auto *VD =
              dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
        // A "declare target link" variable is accessed through its link
        // pointer rather than directly.
        if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
                isDeclareTargetDeclaration(VD))
          if (*Res == OMPDeclareTargetDeclAttr::MT_Link) {
            IsLink = true;
            BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
          }
      }

      // If the variable is a pointer and is being dereferenced (i.e. is not
      // the last component), the base has to be the pointer itself, not its
      // reference. References are ignored for mapping purposes.
      QualType Ty =
          I->getAssociatedDeclaration()->getType().getNonReferenceType();
      if (Ty->isAnyPointerType() && std::next(I) != CE) {
        BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());

        // We do not need to generate individual map information for the
        // pointer, it can be associated with the combined storage.
        ++I;
      }
    }

    // Track whether a component of the list should be marked as MEMBER_OF some
    // combined entry (for partial structs). Only the first PTR_AND_OBJ entry
    // in a component list should be marked as MEMBER_OF, all subsequent entries
    // do not belong to the base struct. E.g.
    // struct S2 s;
    // s.ps->ps->ps->f[:]
    //   (1) (2) (3) (4)
    // ps(1) is a member pointer, ps(2) is a pointee of ps(1), so it is a
    // PTR_AND_OBJ entry; the PTR is ps(1), so MEMBER_OF the base struct. ps(3)
    // is the pointee of ps(2) which is not member of struct s, so it should not
    // be marked as such (it is still PTR_AND_OBJ).
    // The variable is initialized to false so that PTR_AND_OBJ entries which
    // are not struct members are not considered (e.g. array of pointers to
    // data).
    bool ShouldBeMemberOf = false;

    // Variable keeping track of whether or not we have encountered a component
    // in the component list which is a member expression. Useful when we have a
    // pointer or a final array section, in which case it is the previous
    // component in the list which tells us whether we have a member expression.
    // E.g. X.f[:]
    // While processing the final array section "[:]" it is "f" which tells us
    // whether we are dealing with a member of a declared struct.
    const MemberExpr *EncounteredME = nullptr;

    for (; I != CE; ++I) {
      // If the current component is member of a struct (parent struct) mark it.
      if (!EncounteredME) {
        EncounteredME = dyn_cast<MemberExpr>(I->getAssociatedExpression());
        // If we encounter a PTR_AND_OBJ entry from now on it should be marked
        // as MEMBER_OF the parent struct.
        if (EncounteredME)
          ShouldBeMemberOf = true;
      }

      auto Next = std::next(I);

      // We need to generate the addresses and sizes if this is the last
      // component, if the component is a pointer or if it is an array section
      // whose length can't be proved to be one. If this is a pointer, it
      // becomes the base address for the following components.

      // A final array section, is one whose length can't be proved to be one.
      bool IsFinalArraySection =
          isFinalArraySectionExpression(I->getAssociatedExpression());

      // Get information on whether the element is a pointer. Have to do a
      // special treatment for array sections given that they are built-in
      // types.
      const auto *OASE =
          dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
      bool IsPointer =
          (OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
                       .getCanonicalType()
                       ->isAnyPointerType()) ||
          I->getAssociatedExpression()->getType()->isAnyPointerType();

      if (Next == CE || IsPointer || IsFinalArraySection) {
        // If this is not the last component, we expect the pointer to be
        // associated with an array expression or member expression.
        assert((Next == CE ||
                isa<MemberExpr>(Next->getAssociatedExpression()) ||
                isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
                isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) &&
               "Unexpected expression");

        Address LB =
            CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();
        llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());

        // If this component is a pointer inside the base struct then we don't
        // need to create any entry for it - it will be combined with the object
        // it is pointing to into a single PTR_AND_OBJ entry.
        bool IsMemberPointer =
            IsPointer && EncounteredME &&
            (dyn_cast<MemberExpr>(I->getAssociatedExpression()) ==
             EncounteredME);
        if (!IsMemberPointer) {
          BasePointers.push_back(BP.getPointer());
          Pointers.push_back(LB.getPointer());
          Sizes.push_back(Size);

          // We need to add a pointer flag for each map that comes from the
          // same expression except for the first one. We also need to signal
          // this map is the first one that relates with the current capture
          // (there is a set of entries for each capture).
          OpenMPOffloadMappingFlags Flags = getMapTypeBits(
              MapType, MapTypeModifier, IsImplicit,
              !IsExpressionFirstInfo || IsLink, IsCaptureFirstInfo && !IsLink);

          if (!IsExpressionFirstInfo) {
            // If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
            // then we reset the TO/FROM/ALWAYS/DELETE flags.
            if (IsPointer)
              Flags &= ~(OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS |
                         OMP_MAP_DELETE);

            if (ShouldBeMemberOf) {
              // Set placeholder value MEMBER_OF=FFFF to indicate that the flag
              // should be later updated with the correct value of MEMBER_OF.
              Flags |= OMP_MAP_MEMBER_OF;
              // From now on, all subsequent PTR_AND_OBJ entries should not be
              // marked as MEMBER_OF.
              ShouldBeMemberOf = false;
            }
          }

          Types.push_back(Flags);
        }

        // If we have encountered a member expression so far, keep track of the
        // mapped member. If the parent is "*this", then the value declaration
        // is nullptr.
        if (EncounteredME) {
          // NOTE(review): getMemberDecl() is assumed to always be a FieldDecl
          // here; if it were not, dyn_cast would return null and the next line
          // would dereference it — confirm whether cast<> (which asserts)
          // would express the invariant better.
          const auto *FD = dyn_cast<FieldDecl>(EncounteredME->getMemberDecl());
          unsigned FieldIndex = FD->getFieldIndex();

          // Update info about the lowest and highest elements for this struct
          if (!PartialStruct.Base.isValid()) {
            PartialStruct.LowestElem = {FieldIndex, LB};
            PartialStruct.HighestElem = {FieldIndex, LB};
            PartialStruct.Base = BP;
          } else if (FieldIndex < PartialStruct.LowestElem.first) {
            PartialStruct.LowestElem = {FieldIndex, LB};
          } else if (FieldIndex > PartialStruct.HighestElem.first) {
            PartialStruct.HighestElem = {FieldIndex, LB};
          }
        }

        // If we have a final array section, we are done with this expression.
        if (IsFinalArraySection)
          break;

        // The pointer becomes the base for the next element.
        if (Next != CE)
          BP = LB;

        IsExpressionFirstInfo = false;
        IsCaptureFirstInfo = false;
      }
    }
  }
7111
7112   /// Return the adjusted map modifiers if the declaration a capture refers to
7113   /// appears in a first-private clause. This is expected to be used only with
7114   /// directives that start with 'target'.
7115   MappableExprsHandler::OpenMPOffloadMappingFlags
7116   getMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap) const {
7117     assert(Cap.capturesVariable() && "Expected capture by reference only!");
7118
7119     // A first private variable captured by reference will use only the
7120     // 'private ptr' and 'map to' flag. Return the right flags if the captured
7121     // declaration is known as first-private in this handler.
7122     if (FirstPrivateDecls.count(Cap.getCapturedVar()))
7123       return MappableExprsHandler::OMP_MAP_PRIVATE |
7124              MappableExprsHandler::OMP_MAP_TO;
7125     return MappableExprsHandler::OMP_MAP_TO |
7126            MappableExprsHandler::OMP_MAP_FROM;
7127   }
7128
7129   static OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position) {
7130     // Member of is given by the 16 MSB of the flag, so rotate by 48 bits.
7131     return static_cast<OpenMPOffloadMappingFlags>(((uint64_t)Position + 1)
7132                                                   << 48);
7133   }
7134
7135   static void setCorrectMemberOfFlag(OpenMPOffloadMappingFlags &Flags,
7136                                      OpenMPOffloadMappingFlags MemberOfFlag) {
7137     // If the entry is PTR_AND_OBJ but has not been marked with the special
7138     // placeholder value 0xFFFF in the MEMBER_OF field, then it should not be
7139     // marked as MEMBER_OF.
7140     if ((Flags & OMP_MAP_PTR_AND_OBJ) &&
7141         ((Flags & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF))
7142       return;
7143
7144     // Reset the placeholder value to prepare the flag for the assignment of the
7145     // proper MEMBER_OF value.
7146     Flags &= ~OMP_MAP_MEMBER_OF;
7147     Flags |= MemberOfFlag;
7148   }
7149
7150 public:
7151   MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
7152       : CurDir(Dir), CGF(CGF) {
7153     // Extract firstprivate clause information.
7154     for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
7155       for (const auto *D : C->varlists())
7156         FirstPrivateDecls.insert(
7157             cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
7158     // Extract device pointer clause information.
7159     for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
7160       for (auto L : C->component_lists())
7161         DevPointersMap[L.first].push_back(L.second);
7162   }
7163
  /// Generate code for the combined entry if we have a partially mapped struct
  /// and take care of the mapping flags of the arguments corresponding to
  /// individual struct members.
  void emitCombinedEntry(MapBaseValuesArrayTy &BasePointers,
                         MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
                         MapFlagsArrayTy &Types, MapFlagsArrayTy &CurTypes,
                         const StructRangeInfoTy &PartialStruct) const {
    // Base is the base of the struct
    BasePointers.push_back(PartialStruct.Base.getPointer());
    // Pointer is the address of the lowest element
    llvm::Value *LB = PartialStruct.LowestElem.second.getPointer();
    Pointers.push_back(LB);
    // Size is (addr of {highest+1} element) - (addr of lowest element).
    // Both ends are cast to i8* so the pointer difference is in bytes.
    llvm::Value *HB = PartialStruct.HighestElem.second.getPointer();
    llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(HB, /*Idx0=*/1);
    llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
    llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
    llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
    llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.SizeTy,
                                                  /*isSigned=*/false);
    Sizes.push_back(Size);
    // Map type is always TARGET_PARAM: the combined entry is the one kernel
    // argument for the whole struct.
    Types.push_back(OMP_MAP_TARGET_PARAM);
    // Remove TARGET_PARAM flag from the first element, which now belongs to
    // the combined entry instead.
    (*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM;

    // All other current entries will be MEMBER_OF the combined entry
    // (except for PTR_AND_OBJ entries which do not have a placeholder value
    // 0xFFFF in the MEMBER_OF field).
    OpenMPOffloadMappingFlags MemberOfFlag =
        getMemberOfFlag(BasePointers.size() - 1);
    for (auto &M : CurTypes)
      setCorrectMemberOfFlag(M, MemberOfFlag);
  }
7198
7199   /// Generate all the base pointers, section pointers, sizes and map
7200   /// types for the extracted mappable expressions. Also, for each item that
7201   /// relates with a device pointer, a pair of the relevant declaration and
7202   /// index where it occurs is appended to the device pointers info array.
7203   void generateAllInfo(MapBaseValuesArrayTy &BasePointers,
7204                        MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
7205                        MapFlagsArrayTy &Types) const {
7206     // We have to process the component lists that relate with the same
7207     // declaration in a single chunk so that we can generate the map flags
7208     // correctly. Therefore, we organize all lists in a map.
7209     llvm::MapVector<const ValueDecl *, SmallVector<MapInfo, 8>> Info;
7210
7211     // Helper function to fill the information map for the different supported
7212     // clauses.
7213     auto &&InfoGen = [&Info](
7214         const ValueDecl *D,
7215         OMPClauseMappableExprCommon::MappableExprComponentListRef L,
7216         OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier,
7217         bool ReturnDevicePointer, bool IsImplicit) {
7218       const ValueDecl *VD =
7219           D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
7220       Info[VD].emplace_back(L, MapType, MapModifier, ReturnDevicePointer,
7221                             IsImplicit);
7222     };
7223
7224     // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
7225     for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
7226       for (const auto &L : C->component_lists()) {
7227         InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier(),
7228             /*ReturnDevicePointer=*/false, C->isImplicit());
7229       }
7230     for (const auto *C : this->CurDir.getClausesOfKind<OMPToClause>())
7231       for (const auto &L : C->component_lists()) {
7232         InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown,
7233             /*ReturnDevicePointer=*/false, C->isImplicit());
7234       }
7235     for (const auto *C : this->CurDir.getClausesOfKind<OMPFromClause>())
7236       for (const auto &L : C->component_lists()) {
7237         InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown,
7238             /*ReturnDevicePointer=*/false, C->isImplicit());
7239       }
7240
7241     // Look at the use_device_ptr clause information and mark the existing map
7242     // entries as such. If there is no map information for an entry in the
7243     // use_device_ptr list, we create one with map type 'alloc' and zero size
7244     // section. It is the user fault if that was not mapped before. If there is
7245     // no map information and the pointer is a struct member, then we defer the
7246     // emission of that entry until the whole struct has been processed.
7247     llvm::MapVector<const ValueDecl *, SmallVector<DeferredDevicePtrEntryTy, 4>>
7248         DeferredInfo;
7249
7250     // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
7251     for (const auto *C :
7252         this->CurDir.getClausesOfKind<OMPUseDevicePtrClause>()) {
7253       for (const auto &L : C->component_lists()) {
7254         assert(!L.second.empty() && "Not expecting empty list of components!");
7255         const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
7256         VD = cast<ValueDecl>(VD->getCanonicalDecl());
7257         const Expr *IE = L.second.back().getAssociatedExpression();
7258         // If the first component is a member expression, we have to look into
7259         // 'this', which maps to null in the map of map information. Otherwise
7260         // look directly for the information.
7261         auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
7262
7263         // We potentially have map information for this declaration already.
7264         // Look for the first set of components that refer to it.
7265         if (It != Info.end()) {
7266           auto CI = std::find_if(
7267               It->second.begin(), It->second.end(), [VD](const MapInfo &MI) {
7268                 return MI.Components.back().getAssociatedDeclaration() == VD;
7269               });
7270           // If we found a map entry, signal that the pointer has to be returned
7271           // and move on to the next declaration.
7272           if (CI != It->second.end()) {
7273             CI->ReturnDevicePointer = true;
7274             continue;
7275           }
7276         }
7277
7278         // We didn't find any match in our map information - generate a zero
7279         // size array section - if the pointer is a struct member we defer this
7280         // action until the whole struct has been processed.
7281         // FIXME: MSVC 2013 seems to require this-> to find member CGF.
7282         if (isa<MemberExpr>(IE)) {
7283           // Insert the pointer into Info to be processed by
7284           // generateInfoForComponentList. Because it is a member pointer
7285           // without a pointee, no entry will be generated for it, therefore
7286           // we need to generate one after the whole struct has been processed.
7287           // Nonetheless, generateInfoForComponentList must be called to take
7288           // the pointer into account for the calculation of the range of the
7289           // partial struct.
7290           InfoGen(nullptr, L.second, OMPC_MAP_unknown, OMPC_MAP_unknown,
7291                   /*ReturnDevicePointer=*/false, C->isImplicit());
7292           DeferredInfo[nullptr].emplace_back(IE, VD);
7293         } else {
7294           llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
7295               this->CGF.EmitLValue(IE), IE->getExprLoc());
7296           BasePointers.emplace_back(Ptr, VD);
7297           Pointers.push_back(Ptr);
7298           Sizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
7299           Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
7300         }
7301       }
7302     }
7303
7304     for (const auto &M : Info) {
7305       // We need to know when we generate information for the first component
7306       // associated with a capture, because the mapping flags depend on it.
7307       bool IsFirstComponentList = true;
7308
7309       // Temporary versions of arrays
7310       MapBaseValuesArrayTy CurBasePointers;
7311       MapValuesArrayTy CurPointers;
7312       MapValuesArrayTy CurSizes;
7313       MapFlagsArrayTy CurTypes;
7314       StructRangeInfoTy PartialStruct;
7315
7316       for (const MapInfo &L : M.second) {
7317         assert(!L.Components.empty() &&
7318                "Not expecting declaration with no component lists.");
7319
7320         // Remember the current base pointer index.
7321         unsigned CurrentBasePointersIdx = CurBasePointers.size();
7322         // FIXME: MSVC 2013 seems to require this-> to find the member method.
7323         this->generateInfoForComponentList(
7324             L.MapType, L.MapTypeModifier, L.Components, CurBasePointers,
7325             CurPointers, CurSizes, CurTypes, PartialStruct,
7326             IsFirstComponentList, L.IsImplicit);
7327
7328         // If this entry relates with a device pointer, set the relevant
7329         // declaration and add the 'return pointer' flag.
7330         if (L.ReturnDevicePointer) {
7331           assert(CurBasePointers.size() > CurrentBasePointersIdx &&
7332                  "Unexpected number of mapped base pointers.");
7333
7334           const ValueDecl *RelevantVD =
7335               L.Components.back().getAssociatedDeclaration();
7336           assert(RelevantVD &&
7337                  "No relevant declaration related with device pointer??");
7338
7339           CurBasePointers[CurrentBasePointersIdx].setDevicePtrDecl(RelevantVD);
7340           CurTypes[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
7341         }
7342         IsFirstComponentList = false;
7343       }
7344
7345       // Append any pending zero-length pointers which are struct members and
7346       // used with use_device_ptr.
7347       auto CI = DeferredInfo.find(M.first);
7348       if (CI != DeferredInfo.end()) {
7349         for (const DeferredDevicePtrEntryTy &L : CI->second) {
7350           llvm::Value *BasePtr = this->CGF.EmitLValue(L.IE).getPointer();
7351           llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
7352               this->CGF.EmitLValue(L.IE), L.IE->getExprLoc());
7353           CurBasePointers.emplace_back(BasePtr, L.VD);
7354           CurPointers.push_back(Ptr);
7355           CurSizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
7356           // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
7357           // value MEMBER_OF=FFFF so that the entry is later updated with the
7358           // correct value of MEMBER_OF.
7359           CurTypes.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
7360                              OMP_MAP_MEMBER_OF);
7361         }
7362       }
7363
7364       // If there is an entry in PartialStruct it means we have a struct with
7365       // individual members mapped. Emit an extra combined entry.
7366       if (PartialStruct.Base.isValid())
7367         emitCombinedEntry(BasePointers, Pointers, Sizes, Types, CurTypes,
7368                           PartialStruct);
7369
7370       // We need to append the results of this capture to what we already have.
7371       BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
7372       Pointers.append(CurPointers.begin(), CurPointers.end());
7373       Sizes.append(CurSizes.begin(), CurSizes.end());
7374       Types.append(CurTypes.begin(), CurTypes.end());
7375     }
7376   }
7377
7378   /// Generate the base pointers, section pointers, sizes and map types
7379   /// associated to a given capture.
7380   void generateInfoForCapture(const CapturedStmt::Capture *Cap,
7381                               llvm::Value *Arg,
7382                               MapBaseValuesArrayTy &BasePointers,
7383                               MapValuesArrayTy &Pointers,
7384                               MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
7385                               StructRangeInfoTy &PartialStruct) const {
7386     assert(!Cap->capturesVariableArrayType() &&
7387            "Not expecting to generate map info for a variable array type!");
7388
7389     // We need to know when we generating information for the first component
7390     // associated with a capture, because the mapping flags depend on it.
7391     bool IsFirstComponentList = true;
7392
7393     const ValueDecl *VD = Cap->capturesThis()
7394                               ? nullptr
7395                               : Cap->getCapturedVar()->getCanonicalDecl();
7396
7397     // If this declaration appears in a is_device_ptr clause we just have to
7398     // pass the pointer by value. If it is a reference to a declaration, we just
7399     // pass its value.
7400     if (DevPointersMap.count(VD)) {
7401       BasePointers.emplace_back(Arg, VD);
7402       Pointers.push_back(Arg);
7403       Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
7404       Types.push_back(OMP_MAP_LITERAL | OMP_MAP_TARGET_PARAM);
7405       return;
7406     }
7407
7408     // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
7409     for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
7410       for (const auto &L : C->decl_component_lists(VD)) {
7411         assert(L.first == VD &&
7412                "We got information for the wrong declaration??");
7413         assert(!L.second.empty() &&
7414                "Not expecting declaration with no component lists.");
7415         generateInfoForComponentList(C->getMapType(), C->getMapTypeModifier(),
7416                                      L.second, BasePointers, Pointers, Sizes,
7417                                      Types, PartialStruct, IsFirstComponentList,
7418                                      C->isImplicit());
7419         IsFirstComponentList = false;
7420       }
7421   }
7422
7423   /// Generate the base pointers, section pointers, sizes and map types
7424   /// associated with the declare target link variables.
7425   void generateInfoForDeclareTargetLink(MapBaseValuesArrayTy &BasePointers,
7426                                         MapValuesArrayTy &Pointers,
7427                                         MapValuesArrayTy &Sizes,
7428                                         MapFlagsArrayTy &Types) const {
7429     // Map other list items in the map clause which are not captured variables
7430     // but "declare target link" global variables.,
7431     for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>()) {
7432       for (const auto &L : C->component_lists()) {
7433         if (!L.first)
7434           continue;
7435         const auto *VD = dyn_cast<VarDecl>(L.first);
7436         if (!VD)
7437           continue;
7438         llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
7439             isDeclareTargetDeclaration(VD);
7440         if (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link)
7441           continue;
7442         StructRangeInfoTy PartialStruct;
7443         generateInfoForComponentList(
7444             C->getMapType(), C->getMapTypeModifier(), L.second, BasePointers,
7445             Pointers, Sizes, Types, PartialStruct,
7446             /*IsFirstComponentList=*/true, C->isImplicit());
7447         assert(!PartialStruct.Base.isValid() &&
7448                "No partial structs for declare target link expected.");
7449       }
7450     }
7451   }
7452
7453   /// Generate the default map information for a given capture \a CI,
7454   /// record field declaration \a RI and captured value \a CV.
7455   void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
7456                               const FieldDecl &RI, llvm::Value *CV,
7457                               MapBaseValuesArrayTy &CurBasePointers,
7458                               MapValuesArrayTy &CurPointers,
7459                               MapValuesArrayTy &CurSizes,
7460                               MapFlagsArrayTy &CurMapTypes) const {
7461     // Do the default mapping.
7462     if (CI.capturesThis()) {
7463       CurBasePointers.push_back(CV);
7464       CurPointers.push_back(CV);
7465       const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
7466       CurSizes.push_back(CGF.getTypeSize(PtrTy->getPointeeType()));
7467       // Default map type.
7468       CurMapTypes.push_back(OMP_MAP_TO | OMP_MAP_FROM);
7469     } else if (CI.capturesVariableByCopy()) {
7470       CurBasePointers.push_back(CV);
7471       CurPointers.push_back(CV);
7472       if (!RI.getType()->isAnyPointerType()) {
7473         // We have to signal to the runtime captures passed by value that are
7474         // not pointers.
7475         CurMapTypes.push_back(OMP_MAP_LITERAL);
7476         CurSizes.push_back(CGF.getTypeSize(RI.getType()));
7477       } else {
7478         // Pointers are implicitly mapped with a zero size and no flags
7479         // (other than first map that is added for all implicit maps).
7480         CurMapTypes.push_back(OMP_MAP_NONE);
7481         CurSizes.push_back(llvm::Constant::getNullValue(CGF.SizeTy));
7482       }
7483     } else {
7484       assert(CI.capturesVariable() && "Expected captured reference.");
7485       CurBasePointers.push_back(CV);
7486       CurPointers.push_back(CV);
7487
7488       const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
7489       QualType ElementType = PtrTy->getPointeeType();
7490       CurSizes.push_back(CGF.getTypeSize(ElementType));
7491       // The default map type for a scalar/complex type is 'to' because by
7492       // default the value doesn't have to be retrieved. For an aggregate
7493       // type, the default is 'tofrom'.
7494       CurMapTypes.push_back(getMapModifiersForPrivateClauses(CI));
7495     }
7496     // Every default map produces a single argument which is a target parameter.
7497     CurMapTypes.back() |= OMP_MAP_TARGET_PARAM;
7498
7499     // Add flag stating this is an implicit map.
7500     CurMapTypes.back() |= OMP_MAP_IMPLICIT;
7501   }
7502 };
7503
/// Reserved device IDs understood by the offloading runtime library.
enum OpenMPOffloadingReservedDeviceIDs {
  /// Device ID if the device was not defined, runtime should get it
  /// from environment variables in the spec.
  OMP_DEVICEID_UNDEF = -1,
};
7509 } // anonymous namespace
7510
/// Emit the arrays used to pass the captures and map information to the
/// offloading runtime library. If there is no map or capture information,
/// return nullptr by reference.
///
/// The generated arrays are recorded in \a Info: two stack arrays of void*
/// for the base pointers and pointers (filled element by element below), a
/// sizes array (a constant global when all sizes are compile-time constants,
/// otherwise a stack array filled at runtime), and a constant global holding
/// the map-type flags.
static void
emitOffloadingArrays(CodeGenFunction &CGF,
                     MappableExprsHandler::MapBaseValuesArrayTy &BasePointers,
                     MappableExprsHandler::MapValuesArrayTy &Pointers,
                     MappableExprsHandler::MapValuesArrayTy &Sizes,
                     MappableExprsHandler::MapFlagsArrayTy &MapTypes,
                     CGOpenMPRuntime::TargetDataInfo &Info) {
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGF.getContext();

  // Reset the array information.
  Info.clearArrayInfo();
  Info.NumberOfPtrs = BasePointers.size();

  if (Info.NumberOfPtrs) {
    // Detect if we have any capture size requiring runtime evaluation of the
    // size so that a constant array could be eventually used.
    bool hasRuntimeEvaluationCaptureSize = false;
    for (llvm::Value *S : Sizes)
      if (!isa<llvm::Constant>(S)) {
        hasRuntimeEvaluationCaptureSize = true;
        break;
      }

    llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
    QualType PointerArrayType =
        Ctx.getConstantArrayType(Ctx.VoidPtrTy, PointerNumAP, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0);

    // Base pointers and pointers may be runtime values, so they always live
    // in stack temporaries that are filled in the loop below.
    Info.BasePointersArray =
        CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
    Info.PointersArray =
        CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();

    // If we don't have any VLA types or other types that require runtime
    // evaluation, we can use a constant array for the map sizes, otherwise we
    // need to fill up the arrays as we do for the pointers.
    if (hasRuntimeEvaluationCaptureSize) {
      QualType SizeArrayType = Ctx.getConstantArrayType(
          Ctx.getSizeType(), PointerNumAP, ArrayType::Normal,
          /*IndexTypeQuals=*/0);
      Info.SizesArray =
          CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
    } else {
      // We expect all the sizes to be constant, so we collect them to create
      // a constant array.
      SmallVector<llvm::Constant *, 16> ConstSizes;
      for (llvm::Value *S : Sizes)
        ConstSizes.push_back(cast<llvm::Constant>(S));

      auto *SizesArrayInit = llvm::ConstantArray::get(
          llvm::ArrayType::get(CGM.SizeTy, ConstSizes.size()), ConstSizes);
      std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
      auto *SizesArrayGbl = new llvm::GlobalVariable(
          CGM.getModule(), SizesArrayInit->getType(),
          /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
          SizesArrayInit, Name);
      SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      Info.SizesArray = SizesArrayGbl;
    }

    // The map types are always constant so we don't need to generate code to
    // fill arrays. Instead, we create an array constant.
    SmallVector<uint64_t, 4> Mapping(MapTypes.size(), 0);
    llvm::copy(MapTypes, Mapping.begin());
    llvm::Constant *MapTypesArrayInit =
        llvm::ConstantDataArray::get(CGF.Builder.getContext(), Mapping);
    std::string MaptypesName =
        CGM.getOpenMPRuntime().getName({"offload_maptypes"});
    auto *MapTypesArrayGbl = new llvm::GlobalVariable(
        CGM.getModule(), MapTypesArrayInit->getType(),
        /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
        MapTypesArrayInit, MaptypesName);
    MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    Info.MapTypesArray = MapTypesArrayGbl;

    // Store each base pointer, pointer and (if runtime-evaluated) size into
    // its slot of the corresponding stack array.
    for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
      llvm::Value *BPVal = *BasePointers[I];
      llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
          llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
          Info.BasePointersArray, 0, I);
      // Cast the slot to the actual pointer type before storing.
      BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
      Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
      CGF.Builder.CreateStore(BPVal, BPAddr);

      // Remember where use_device_ptr/is_device_ptr declarations can find
      // their device address, if the caller asked for that information.
      if (Info.requiresDevicePointerInfo())
        if (const ValueDecl *DevVD = BasePointers[I].getDevicePtrDecl())
          Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);

      llvm::Value *PVal = Pointers[I];
      llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
          llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
          Info.PointersArray, 0, I);
      P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
      Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
      CGF.Builder.CreateStore(PVal, PAddr);

      if (hasRuntimeEvaluationCaptureSize) {
        llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
            llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs),
            Info.SizesArray,
            /*Idx0=*/0,
            /*Idx1=*/I);
        Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
        CGF.Builder.CreateStore(
            CGF.Builder.CreateIntCast(Sizes[I], CGM.SizeTy, /*isSigned=*/true),
            SAddr);
      }
    }
  }
}
7627 /// Emit the arguments to be passed to the runtime library based on the
7628 /// arrays of pointers, sizes and map types.
7629 static void emitOffloadingArraysArgument(
7630     CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
7631     llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
7632     llvm::Value *&MapTypesArrayArg, CGOpenMPRuntime::TargetDataInfo &Info) {
7633   CodeGenModule &CGM = CGF.CGM;
7634   if (Info.NumberOfPtrs) {
7635     BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7636         llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
7637         Info.BasePointersArray,
7638         /*Idx0=*/0, /*Idx1=*/0);
7639     PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7640         llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
7641         Info.PointersArray,
7642         /*Idx0=*/0,
7643         /*Idx1=*/0);
7644     SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7645         llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs), Info.SizesArray,
7646         /*Idx0=*/0, /*Idx1=*/0);
7647     MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7648         llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
7649         Info.MapTypesArray,
7650         /*Idx0=*/0,
7651         /*Idx1=*/0);
7652   } else {
7653     BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
7654     PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
7655     SizesArrayArg = llvm::ConstantPointerNull::get(CGM.SizeTy->getPointerTo());
7656     MapTypesArrayArg =
7657         llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
7658   }
7659 }
7660
/// Emit code for the 'target' directive \a D: set up the offloading arrays,
/// launch the device entry point \a OutlinedFnID via __tgt_target /
/// __tgt_target_teams (honoring the 'if', 'device' and 'nowait' clauses), and
/// fall back to the host version \a OutlinedFn when offloading is unavailable
/// or the runtime call reports failure.
void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     llvm::Value *OutlinedFn,
                                     llvm::Value *OutlinedFnID,
                                     const Expr *IfCond, const Expr *Device) {
  if (!CGF.HaveInsertPoint())
    return;

  assert(OutlinedFn && "Invalid outlined function!");

  // 'depend' clauses require the target region to be wrapped in an outer
  // task; in that case the captured variables must be regenerated inside the
  // task region before each use (see the clear()+regenerate pattern below).
  const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>();
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
  auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
                                            PrePostActionTy &) {
    CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
  };
  emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen);

  // InputInfo and MapTypesArray are filled by TargetThenGen before ThenGen
  // runs; both lambdas capture them by reference.
  CodeGenFunction::OMPTargetDataInfo InputInfo;
  llvm::Value *MapTypesArray = nullptr;
  // Fill up the pointer arrays and transfer execution to the device.
  auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
                    &MapTypesArray, &CS, RequiresOuterTask,
                    &CapturedVars](CodeGenFunction &CGF, PrePostActionTy &) {
    // On top of the arrays that were filled up, the target offloading call
    // takes as arguments the device id as well as the host pointer. The host
    // pointer is used by the runtime library to identify the current target
    // region, so it only has to be unique and not necessarily point to
    // anything. It could be the pointer to the outlined function that
    // implements the target region, but we aren't using that so that the
    // compiler doesn't need to keep that, and could therefore inline the host
    // function if proven worthwhile during optimization.

    // From this point on, we need to have an ID of the target region defined.
    assert(OutlinedFnID && "Invalid outlined function ID!");

    // Emit device ID if any.
    llvm::Value *DeviceID;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum =
        CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);

    // Return value of the runtime offloading call.
    llvm::Value *Return;

    llvm::Value *NumTeams = emitNumTeamsForTargetDirective(*this, CGF, D);
    llvm::Value *NumThreads = emitNumThreadsForTargetDirective(*this, CGF, D);

    bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
    // The target region is an outlined function launched by the runtime
    // via calls __tgt_target() or __tgt_target_teams().
    //
    // __tgt_target() launches a target region with one team and one thread,
    // executing a serial region.  This master thread may in turn launch
    // more threads within its team upon encountering a parallel region,
    // however, no additional teams can be launched on the device.
    //
    // __tgt_target_teams() launches a target region with one or more teams,
    // each with one or more threads.  This call is required for target
    // constructs such as:
    //  'target teams'
    //  'target' / 'teams'
    //  'target teams distribute parallel for'
    //  'target parallel'
    // and so on.
    //
    // Note that on the host and CPU targets, the runtime implementation of
    // these calls simply call the outlined function without forking threads.
    // The outlined functions themselves have runtime calls to
    // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
    // the compiler in emitTeamsCall() and emitParallelCall().
    //
    // In contrast, on the NVPTX target, the implementation of
    // __tgt_target_teams() launches a GPU kernel with the requested number
    // of teams and threads so no additional calls to the runtime are required.
    if (NumTeams) {
      // If we have NumTeams defined this means that we have an enclosed teams
      // region. Therefore we also expect to have NumThreads defined. These two
      // values should be defined in the presence of a teams directive,
      // regardless of having any clauses associated. If the user is using teams
      // but no clauses, these two values will be the default that should be
      // passed to the runtime library - a 32-bit integer with the value zero.
      assert(NumThreads && "Thread limit expression should be available along "
                           "with number of teams.");
      llvm::Value *OffloadingArgs[] = {DeviceID,
                                       OutlinedFnID,
                                       PointerNum,
                                       InputInfo.BasePointersArray.getPointer(),
                                       InputInfo.PointersArray.getPointer(),
                                       InputInfo.SizesArray.getPointer(),
                                       MapTypesArray,
                                       NumTeams,
                                       NumThreads};
      Return = CGF.EmitRuntimeCall(
          createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_teams_nowait
                                          : OMPRTL__tgt_target_teams),
          OffloadingArgs);
    } else {
      llvm::Value *OffloadingArgs[] = {DeviceID,
                                       OutlinedFnID,
                                       PointerNum,
                                       InputInfo.BasePointersArray.getPointer(),
                                       InputInfo.PointersArray.getPointer(),
                                       InputInfo.SizesArray.getPointer(),
                                       MapTypesArray};
      Return = CGF.EmitRuntimeCall(
          createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_nowait
                                          : OMPRTL__tgt_target),
          OffloadingArgs);
    }

    // Check the error code and execute the host version if required.
    // A nonzero return value from the __tgt_target* call means the offload
    // did not happen and the host fallback must run.
    llvm::BasicBlock *OffloadFailedBlock =
        CGF.createBasicBlock("omp_offload.failed");
    llvm::BasicBlock *OffloadContBlock =
        CGF.createBasicBlock("omp_offload.cont");
    llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
    CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);

    CGF.EmitBlock(OffloadFailedBlock);
    if (RequiresOuterTask) {
      // Inside the outer task the previously generated captures are stale;
      // regenerate them in the current (task) context.
      CapturedVars.clear();
      CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
    }
    emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars);
    CGF.EmitBranch(OffloadContBlock);

    CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
  };

  // Notify that the host version must be executed.
  auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars,
                    RequiresOuterTask](CodeGenFunction &CGF,
                                       PrePostActionTy &) {
    if (RequiresOuterTask) {
      CapturedVars.clear();
      CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
    }
    emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars);
  };

  // Builds the offloading arrays for all captures (and declare target link
  // globals), publishes them through InputInfo/MapTypesArray, and then runs
  // ThenGen either inline or wrapped in a task (for 'depend' clauses).
  auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
                          &CapturedVars, RequiresOuterTask,
                          &CS](CodeGenFunction &CGF, PrePostActionTy &) {
    // Fill up the arrays with all the captured variables.
    MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
    MappableExprsHandler::MapValuesArrayTy Pointers;
    MappableExprsHandler::MapValuesArrayTy Sizes;
    MappableExprsHandler::MapFlagsArrayTy MapTypes;

    // Get mappable expression information.
    MappableExprsHandler MEHandler(D, CGF);

    // Iterate captures, record fields and captured values in lockstep.
    auto RI = CS.getCapturedRecordDecl()->field_begin();
    auto CV = CapturedVars.begin();
    for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
                                              CE = CS.capture_end();
         CI != CE; ++CI, ++RI, ++CV) {
      MappableExprsHandler::MapBaseValuesArrayTy CurBasePointers;
      MappableExprsHandler::MapValuesArrayTy CurPointers;
      MappableExprsHandler::MapValuesArrayTy CurSizes;
      MappableExprsHandler::MapFlagsArrayTy CurMapTypes;
      MappableExprsHandler::StructRangeInfoTy PartialStruct;

      // VLA sizes are passed to the outlined region by copy and do not have map
      // information associated.
      if (CI->capturesVariableArrayType()) {
        CurBasePointers.push_back(*CV);
        CurPointers.push_back(*CV);
        CurSizes.push_back(CGF.getTypeSize(RI->getType()));
        // Copy to the device as an argument. No need to retrieve it.
        CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
                              MappableExprsHandler::OMP_MAP_TARGET_PARAM);
      } else {
        // If we have any information in the map clause, we use it, otherwise we
        // just do a default mapping.
        MEHandler.generateInfoForCapture(CI, *CV, CurBasePointers, CurPointers,
                                         CurSizes, CurMapTypes, PartialStruct);
        if (CurBasePointers.empty())
          MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers,
                                           CurPointers, CurSizes, CurMapTypes);
      }
      // We expect to have at least an element of information for this capture.
      assert(!CurBasePointers.empty() &&
             "Non-existing map pointer for capture!");
      assert(CurBasePointers.size() == CurPointers.size() &&
             CurBasePointers.size() == CurSizes.size() &&
             CurBasePointers.size() == CurMapTypes.size() &&
             "Inconsistent map information sizes!");

      // If there is an entry in PartialStruct it means we have a struct with
      // individual members mapped. Emit an extra combined entry.
      if (PartialStruct.Base.isValid())
        MEHandler.emitCombinedEntry(BasePointers, Pointers, Sizes, MapTypes,
                                    CurMapTypes, PartialStruct);

      // We need to append the results of this capture to what we already have.
      BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
      Pointers.append(CurPointers.begin(), CurPointers.end());
      Sizes.append(CurSizes.begin(), CurSizes.end());
      MapTypes.append(CurMapTypes.begin(), CurMapTypes.end());
    }
    // Map other list items in the map clause which are not captured variables
    // but "declare target link" global variables.
    MEHandler.generateInfoForDeclareTargetLink(BasePointers, Pointers, Sizes,
                                               MapTypes);

    TargetDataInfo Info;
    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
    emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
                                 Info.PointersArray, Info.SizesArray,
                                 Info.MapTypesArray, Info);
    InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
    InputInfo.BasePointersArray =
        Address(Info.BasePointersArray, CGM.getPointerAlign());
    InputInfo.PointersArray =
        Address(Info.PointersArray, CGM.getPointerAlign());
    InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
    MapTypesArray = Info.MapTypesArray;
    if (RequiresOuterTask)
      CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
    else
      emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
  };

  auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
                             CodeGenFunction &CGF, PrePostActionTy &) {
    if (RequiresOuterTask) {
      CodeGenFunction::OMPTargetDataInfo InputInfo;
      CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
    } else {
      emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
    }
  };

  // If we have a target function ID it means that we need to support
  // offloading, otherwise, just execute on the host. We need to execute on host
  // regardless of the conditional in the if clause if, e.g., the user do not
  // specify target triples.
  if (OutlinedFnID) {
    if (IfCond) {
      emitOMPIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
    } else {
      RegionCodeGenTy ThenRCG(TargetThenGen);
      ThenRCG(CGF);
    }
  } else {
    RegionCodeGenTy ElseRCG(TargetElseGen);
    ElseRCG(CGF);
  }
}
7921
// Recursively walk the statement tree rooted at \p S looking for OpenMP
// target execution directives and emit a device function for each one found.
// \p ParentName is the mangled name of the enclosing host function (or
// ctor/dtor); it takes part in the unique identification of each target
// region entry point.
void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
                                                    StringRef ParentName) {
  if (!S)
    return;

  // Codegen OMP target directives that offload compute to the device.
  bool RequiresDeviceCodegen =
      isa<OMPExecutableDirective>(S) &&
      isOpenMPTargetExecutionDirective(
          cast<OMPExecutableDirective>(S)->getDirectiveKind());

  if (RequiresDeviceCodegen) {
    const auto &E = *cast<OMPExecutableDirective>(S);
    unsigned DeviceID;
    unsigned FileID;
    unsigned Line;
    // Build the (device-id, file-id, line) triple that uniquely identifies
    // this target region entry.
    getTargetEntryUniqueInfo(CGM.getContext(), E.getLocStart(), DeviceID,
                             FileID, Line);

    // Is this a target region that should not be emitted as an entry point? If
    // so just signal we are done with this target region.
    if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID,
                                                            ParentName, Line))
      return;

    // Dispatch to the dedicated device-codegen entry point for each target
    // execution directive kind.
    switch (E.getDirectiveKind()) {
    case OMPD_target:
      CodeGenFunction::EmitOMPTargetDeviceFunction(CGM, ParentName,
                                                   cast<OMPTargetDirective>(E));
      break;
    case OMPD_target_parallel:
      CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
          CGM, ParentName, cast<OMPTargetParallelDirective>(E));
      break;
    case OMPD_target_teams:
      CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
          CGM, ParentName, cast<OMPTargetTeamsDirective>(E));
      break;
    case OMPD_target_teams_distribute:
      CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
          CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(E));
      break;
    case OMPD_target_teams_distribute_simd:
      CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
          CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(E));
      break;
    case OMPD_target_parallel_for:
      CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
          CGM, ParentName, cast<OMPTargetParallelForDirective>(E));
      break;
    case OMPD_target_parallel_for_simd:
      CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
          CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(E));
      break;
    case OMPD_target_simd:
      CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
          CGM, ParentName, cast<OMPTargetSimdDirective>(E));
      break;
    case OMPD_target_teams_distribute_parallel_for:
      CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
          CGM, ParentName,
          cast<OMPTargetTeamsDistributeParallelForDirective>(E));
      break;
    case OMPD_target_teams_distribute_parallel_for_simd:
      CodeGenFunction::
          EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
              CGM, ParentName,
              cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
      break;
    // The remaining directive kinds are not target execution directives, so
    // RequiresDeviceCodegen guarantees they cannot reach this switch.
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_unknown:
      llvm_unreachable("Unknown target directive for OpenMP device codegen.");
    }
    return;
  }

  // A non-target OpenMP directive: scan its captured body instead of its
  // children (the captured statement holds the user code).
  if (const auto *E = dyn_cast<OMPExecutableDirective>(S)) {
    if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
      return;

    scanForTargetRegionsFunctions(
        E->getInnermostCapturedStmt()->getCapturedStmt(), ParentName);
    return;
  }

  // If this is a lambda function, look into its body.
  if (const auto *L = dyn_cast<LambdaExpr>(S))
    S = L->getBody();

  // Keep looking for target regions recursively.
  for (const Stmt *II : S->children())
    scanForTargetRegionsFunctions(II, ParentName);
}
8056
8057 bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
8058   const auto *FD = cast<FunctionDecl>(GD.getDecl());
8059
8060   // If emitting code for the host, we do not process FD here. Instead we do
8061   // the normal code generation.
8062   if (!CGM.getLangOpts().OpenMPIsDevice)
8063     return false;
8064
8065   // Try to detect target regions in the function.
8066   scanForTargetRegionsFunctions(FD->getBody(), CGM.getMangledName(GD));
8067
8068   // Do not to emit function if it is not marked as declare target.
8069   return !isDeclareTargetDeclaration(FD) &&
8070          AlreadyEmittedTargetFunctions.count(FD->getCanonicalDecl()) == 0;
8071 }
8072
8073 bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
8074   if (!CGM.getLangOpts().OpenMPIsDevice)
8075     return false;
8076
8077   // Check if there are Ctors/Dtors in this declaration and look for target
8078   // regions in it. We use the complete variant to produce the kernel name
8079   // mangling.
8080   QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
8081   if (const auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
8082     for (const CXXConstructorDecl *Ctor : RD->ctors()) {
8083       StringRef ParentName =
8084           CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
8085       scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
8086     }
8087     if (const CXXDestructorDecl *Dtor = RD->getDestructor()) {
8088       StringRef ParentName =
8089           CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
8090       scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
8091     }
8092   }
8093
8094   // Do not to emit variable if it is not marked as declare target.
8095   llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
8096       isDeclareTargetDeclaration(cast<VarDecl>(GD.getDecl()));
8097   return !Res || *Res == OMPDeclareTargetDeclAttr::MT_Link;
8098 }
8099
8100 void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
8101                                                    llvm::Constant *Addr) {
8102   if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
8103           isDeclareTargetDeclaration(VD)) {
8104     OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
8105     StringRef VarName;
8106     CharUnits VarSize;
8107     llvm::GlobalValue::LinkageTypes Linkage;
8108     switch (*Res) {
8109     case OMPDeclareTargetDeclAttr::MT_To:
8110       Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
8111       VarName = CGM.getMangledName(VD);
8112       VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
8113       Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
8114       // Temp solution to prevent optimizations of the internal variables.
8115       if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
8116         std::string RefName = getName({VarName, "ref"});
8117         if (!CGM.GetGlobalValue(RefName)) {
8118           llvm::Constant *AddrRef =
8119               getOrCreateInternalVariable(Addr->getType(), RefName);
8120           auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
8121           GVAddrRef->setConstant(/*Val=*/true);
8122           GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
8123           GVAddrRef->setInitializer(Addr);
8124           CGM.addCompilerUsedGlobal(GVAddrRef);
8125         }
8126       }
8127       break;
8128     case OMPDeclareTargetDeclAttr::MT_Link:
8129       Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
8130       if (CGM.getLangOpts().OpenMPIsDevice) {
8131         VarName = Addr->getName();
8132         Addr = nullptr;
8133       } else {
8134         VarName = getAddrOfDeclareTargetLink(VD).getName();
8135         Addr =
8136             cast<llvm::Constant>(getAddrOfDeclareTargetLink(VD).getPointer());
8137       }
8138       VarSize = CGM.getPointerSize();
8139       Linkage = llvm::GlobalValue::WeakAnyLinkage;
8140       break;
8141     }
8142     OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
8143         VarName, Addr, VarSize, Flags, Linkage);
8144   }
8145 }
8146
8147 bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
8148   if (isa<FunctionDecl>(GD.getDecl()))
8149     return emitTargetFunctions(GD);
8150
8151   return emitTargetGlobalVariable(GD);
8152 }
8153
8154 CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
8155     CodeGenModule &CGM)
8156     : CGM(CGM) {
8157   if (CGM.getLangOpts().OpenMPIsDevice) {
8158     SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
8159     CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
8160   }
8161 }
8162
8163 CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
8164   if (CGM.getLangOpts().OpenMPIsDevice)
8165     CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
8166 }
8167
8168 bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
8169   if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
8170     return true;
8171
8172   const auto *D = cast<FunctionDecl>(GD.getDecl());
8173   const FunctionDecl *FD = D->getCanonicalDecl();
8174   // Do not to emit function if it is marked as declare target as it was already
8175   // emitted.
8176   if (isDeclareTargetDeclaration(D)) {
8177     if (D->hasBody() && AlreadyEmittedTargetFunctions.count(FD) == 0) {
8178       if (auto *F = dyn_cast_or_null<llvm::Function>(
8179               CGM.GetGlobalValue(CGM.getMangledName(GD))))
8180         return !F->isDeclaration();
8181       return false;
8182     }
8183     return true;
8184   }
8185
8186   return !AlreadyEmittedTargetFunctions.insert(FD).second;
8187 }
8188
8189 llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
8190   // If we have offloading in the current module, we need to emit the entries
8191   // now and register the offloading descriptor.
8192   createOffloadEntriesAndInfoMetadata();
8193
8194   // Create and register the offloading binary descriptors. This is the main
8195   // entity that captures all the information about offloading in the current
8196   // compilation unit.
8197   return createOffloadingBinaryDescriptorRegistration();
8198 }
8199
8200 void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
8201                                     const OMPExecutableDirective &D,
8202                                     SourceLocation Loc,
8203                                     llvm::Value *OutlinedFn,
8204                                     ArrayRef<llvm::Value *> CapturedVars) {
8205   if (!CGF.HaveInsertPoint())
8206     return;
8207
8208   llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
8209   CodeGenFunction::RunCleanupsScope Scope(CGF);
8210
8211   // Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn);
8212   llvm::Value *Args[] = {
8213       RTLoc,
8214       CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
8215       CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
8216   llvm::SmallVector<llvm::Value *, 16> RealArgs;
8217   RealArgs.append(std::begin(Args), std::end(Args));
8218   RealArgs.append(CapturedVars.begin(), CapturedVars.end());
8219
8220   llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams);
8221   CGF.EmitRuntimeCall(RTLFn, RealArgs);
8222 }
8223
8224 void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
8225                                          const Expr *NumTeams,
8226                                          const Expr *ThreadLimit,
8227                                          SourceLocation Loc) {
8228   if (!CGF.HaveInsertPoint())
8229     return;
8230
8231   llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
8232
8233   llvm::Value *NumTeamsVal =
8234       NumTeams
8235           ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
8236                                       CGF.CGM.Int32Ty, /* isSigned = */ true)
8237           : CGF.Builder.getInt32(0);
8238
8239   llvm::Value *ThreadLimitVal =
8240       ThreadLimit
8241           ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
8242                                       CGF.CGM.Int32Ty, /* isSigned = */ true)
8243           : CGF.Builder.getInt32(0);
8244
8245   // Build call __kmpc_push_num_teamss(&loc, global_tid, num_teams, thread_limit)
8246   llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
8247                                      ThreadLimitVal};
8248   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_teams),
8249                       PushNumTeamsArgs);
8250 }
8251
// Emit the runtime bracketing for an OpenMP 'target data' region:
// __tgt_target_data_begin(...) / region body / __tgt_target_data_end(...),
// honoring the 'if' and 'device' clauses. \p Info is populated by the opening
// call (offloading arrays) and reused by the closing call, so the two lambdas
// below must run with this exact ordering. When device pointers are
// privatized the body is emitted inside the then/else branches instead of in
// between the runtime calls.
void CGOpenMPRuntime::emitTargetDataCalls(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
  if (!CGF.HaveInsertPoint())
    return;

  // Action used to replace the default codegen action and turn privatization
  // off.
  PrePostActionTy NoPrivAction;

  // Generate the code for the opening of the data environment. Capture all the
  // arguments of the runtime call by reference because they are used in the
  // closing of the region.
  auto &&BeginThenGen = [this, &D, Device, &Info,
                         &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
    // Fill up the arrays with all the mapped variables.
    MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
    MappableExprsHandler::MapValuesArrayTy Pointers;
    MappableExprsHandler::MapValuesArrayTy Sizes;
    MappableExprsHandler::MapFlagsArrayTy MapTypes;

    // Get map clause information.
    MappableExprsHandler MCHandler(D, CGF);
    MCHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);

    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);

    llvm::Value *BasePointersArrayArg = nullptr;
    llvm::Value *PointersArrayArg = nullptr;
    llvm::Value *SizesArrayArg = nullptr;
    llvm::Value *MapTypesArrayArg = nullptr;
    emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
                                 SizesArrayArg, MapTypesArrayArg, Info);

    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);

    llvm::Value *OffloadingArgs[] = {
        DeviceID,         PointerNum,    BasePointersArrayArg,
        PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_begin),
                        OffloadingArgs);

    // If device pointer privatization is required, emit the body of the region
    // here. It will have to be duplicated: with and without privatization.
    if (!Info.CaptureDeviceAddrMap.empty())
      CodeGen(CGF);
  };

  // Generate code for the closing of the data region.
  auto &&EndThenGen = [this, Device, &Info](CodeGenFunction &CGF,
                                            PrePostActionTy &) {
    assert(Info.isValid() && "Invalid data environment closing arguments.");

    // Rebuild the array arguments from the Info filled in by BeginThenGen.
    llvm::Value *BasePointersArrayArg = nullptr;
    llvm::Value *PointersArrayArg = nullptr;
    llvm::Value *SizesArrayArg = nullptr;
    llvm::Value *MapTypesArrayArg = nullptr;
    emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
                                 SizesArrayArg, MapTypesArrayArg, Info);

    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);

    llvm::Value *OffloadingArgs[] = {
        DeviceID,         PointerNum,    BasePointersArrayArg,
        PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_end),
                        OffloadingArgs);
  };

  // If we need device pointer privatization, we need to emit the body of the
  // region with no privatization in the 'else' branch of the conditional.
  // Otherwise, we don't have to do anything.
  auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
                                                         PrePostActionTy &) {
    if (!Info.CaptureDeviceAddrMap.empty()) {
      CodeGen.setAction(NoPrivAction);
      CodeGen(CGF);
    }
  };

  // We don't have to do anything to close the region if the if clause evaluates
  // to false.
  auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};

  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
  } else {
    RegionCodeGenTy RCG(BeginThenGen);
    RCG(CGF);
  }

  // If we don't require privatization of device pointers, we emit the body in
  // between the runtime calls. This avoids duplicating the body code.
  if (Info.CaptureDeviceAddrMap.empty()) {
    CodeGen.setAction(NoPrivAction);
    CodeGen(CGF);
  }

  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, EndThenGen, EndElseGen);
  } else {
    RegionCodeGenTy RCG(EndThenGen);
    RCG(CGF);
  }
}
8378
// Emit a standalone data-motion directive ('target enter data', 'target exit
// data' or 'target update') as a single __tgt_target_data_* runtime call,
// honoring the 'if', 'device', 'nowait' and 'depend' clauses. TargetThenGen
// prepares the offloading arrays (writing InputInfo/MapTypesArray, which
// ThenGen reads through its by-reference captures), then emits ThenGen either
// inline or wrapped in a task when 'depend' clauses are present.
void CGOpenMPRuntime::emitTargetDataStandAloneCall(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device) {
  if (!CGF.HaveInsertPoint())
    return;

  assert((isa<OMPTargetEnterDataDirective>(D) ||
          isa<OMPTargetExitDataDirective>(D) ||
          isa<OMPTargetUpdateDirective>(D)) &&
         "Expecting either target enter, exit data, or update directives.");

  CodeGenFunction::OMPTargetDataInfo InputInfo;
  llvm::Value *MapTypesArray = nullptr;
  // Generate the code for the opening of the data environment.
  auto &&ThenGen = [this, &D, Device, &InputInfo,
                    &MapTypesArray](CodeGenFunction &CGF, PrePostActionTy &) {
    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Constant *PointerNum =
        CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);

    llvm::Value *OffloadingArgs[] = {DeviceID,
                                     PointerNum,
                                     InputInfo.BasePointersArray.getPointer(),
                                     InputInfo.PointersArray.getPointer(),
                                     InputInfo.SizesArray.getPointer(),
                                     MapTypesArray};

    // Select the right runtime function call for each expected standalone
    // directive.
    const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
    OpenMPRTLFunction RTLFn;
    switch (D.getDirectiveKind()) {
    case OMPD_target_enter_data:
      RTLFn = HasNowait ? OMPRTL__tgt_target_data_begin_nowait
                        : OMPRTL__tgt_target_data_begin;
      break;
    case OMPD_target_exit_data:
      RTLFn = HasNowait ? OMPRTL__tgt_target_data_end_nowait
                        : OMPRTL__tgt_target_data_end;
      break;
    case OMPD_target_update:
      RTLFn = HasNowait ? OMPRTL__tgt_target_data_update_nowait
                        : OMPRTL__tgt_target_data_update;
      break;
    // All other directive kinds are excluded by the assertion at the top of
    // this function, so reaching one of these cases is a front-end bug.
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_declare_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_target:
    case OMPD_target_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_unknown:
      llvm_unreachable("Unexpected standalone target data directive.");
      break;
    }
    CGF.EmitRuntimeCall(createRuntimeFunction(RTLFn), OffloadingArgs);
  };

  auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray](
                             CodeGenFunction &CGF, PrePostActionTy &) {
    // Fill up the arrays with all the mapped variables.
    MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
    MappableExprsHandler::MapValuesArrayTy Pointers;
    MappableExprsHandler::MapValuesArrayTy Sizes;
    MappableExprsHandler::MapFlagsArrayTy MapTypes;

    // Get map clause information.
    MappableExprsHandler MEHandler(D, CGF);
    MEHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);

    TargetDataInfo Info;
    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
    emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
                                 Info.PointersArray, Info.SizesArray,
                                 Info.MapTypesArray, Info);
    // Publish the array addresses for ThenGen through its captured locals.
    InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
    InputInfo.BasePointersArray =
        Address(Info.BasePointersArray, CGM.getPointerAlign());
    InputInfo.PointersArray =
        Address(Info.PointersArray, CGM.getPointerAlign());
    InputInfo.SizesArray =
        Address(Info.SizesArray, CGM.getPointerAlign());
    MapTypesArray = Info.MapTypesArray;
    // 'depend' clauses require the runtime call to be issued from a task.
    if (D.hasClausesOfKind<OMPDependClause>())
      CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
    else
      emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
  };

  // With an 'if' clause the data motion is skipped entirely when the
  // condition is false (empty else branch).
  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, TargetThenGen,
                    [](CodeGenFunction &CGF, PrePostActionTy &) {});
  } else {
    RegionCodeGenTy ThenRCG(TargetThenGen);
    ThenRCG(CGF);
  }
}
8528
namespace {
  /// Kind of parameter in a function with 'declare simd' directive.
  enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector };
  /// Attribute set of the parameter.
  struct ParamAttrTy {
    /// Mangling category of the parameter; plain vector by default.
    ParamKindTy Kind = Vector;
    /// For linear parameters: the stride (or stride-argument position for
    /// LinearWithVarStride); only emitted in the mangled name when non-zero.
    llvm::APSInt StrideOrArg;
    /// Alignment from an 'aligned' clause; zero means none was specified and
    /// no 'a<N>' token is emitted.
    llvm::APSInt Alignment;
  };
} // namespace
8539
8540 static unsigned evaluateCDTSize(const FunctionDecl *FD,
8541                                 ArrayRef<ParamAttrTy> ParamAttrs) {
8542   // Every vector variant of a SIMD-enabled function has a vector length (VLEN).
8543   // If OpenMP clause "simdlen" is used, the VLEN is the value of the argument
8544   // of that clause. The VLEN value must be power of 2.
8545   // In other case the notion of the function`s "characteristic data type" (CDT)
8546   // is used to compute the vector length.
8547   // CDT is defined in the following order:
8548   //   a) For non-void function, the CDT is the return type.
8549   //   b) If the function has any non-uniform, non-linear parameters, then the
8550   //   CDT is the type of the first such parameter.
8551   //   c) If the CDT determined by a) or b) above is struct, union, or class
8552   //   type which is pass-by-value (except for the type that maps to the
8553   //   built-in complex data type), the characteristic data type is int.
8554   //   d) If none of the above three cases is applicable, the CDT is int.
8555   // The VLEN is then determined based on the CDT and the size of vector
8556   // register of that ISA for which current vector version is generated. The
8557   // VLEN is computed using the formula below:
8558   //   VLEN  = sizeof(vector_register) / sizeof(CDT),
8559   // where vector register size specified in section 3.2.1 Registers and the
8560   // Stack Frame of original AMD64 ABI document.
8561   QualType RetType = FD->getReturnType();
8562   if (RetType.isNull())
8563     return 0;
8564   ASTContext &C = FD->getASTContext();
8565   QualType CDT;
8566   if (!RetType.isNull() && !RetType->isVoidType()) {
8567     CDT = RetType;
8568   } else {
8569     unsigned Offset = 0;
8570     if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
8571       if (ParamAttrs[Offset].Kind == Vector)
8572         CDT = C.getPointerType(C.getRecordType(MD->getParent()));
8573       ++Offset;
8574     }
8575     if (CDT.isNull()) {
8576       for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
8577         if (ParamAttrs[I + Offset].Kind == Vector) {
8578           CDT = FD->getParamDecl(I)->getType();
8579           break;
8580         }
8581       }
8582     }
8583   }
8584   if (CDT.isNull())
8585     CDT = C.IntTy;
8586   CDT = CDT->getCanonicalTypeUnqualified();
8587   if (CDT->isRecordType() || CDT->isUnionType())
8588     CDT = C.IntTy;
8589   return C.getTypeSize(CDT);
8590 }
8591
8592 static void
8593 emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
8594                            const llvm::APSInt &VLENVal,
8595                            ArrayRef<ParamAttrTy> ParamAttrs,
8596                            OMPDeclareSimdDeclAttr::BranchStateTy State) {
8597   struct ISADataTy {
8598     char ISA;
8599     unsigned VecRegSize;
8600   };
8601   ISADataTy ISAData[] = {
8602       {
8603           'b', 128
8604       }, // SSE
8605       {
8606           'c', 256
8607       }, // AVX
8608       {
8609           'd', 256
8610       }, // AVX2
8611       {
8612           'e', 512
8613       }, // AVX512
8614   };
8615   llvm::SmallVector<char, 2> Masked;
8616   switch (State) {
8617   case OMPDeclareSimdDeclAttr::BS_Undefined:
8618     Masked.push_back('N');
8619     Masked.push_back('M');
8620     break;
8621   case OMPDeclareSimdDeclAttr::BS_Notinbranch:
8622     Masked.push_back('N');
8623     break;
8624   case OMPDeclareSimdDeclAttr::BS_Inbranch:
8625     Masked.push_back('M');
8626     break;
8627   }
8628   for (char Mask : Masked) {
8629     for (const ISADataTy &Data : ISAData) {
8630       SmallString<256> Buffer;
8631       llvm::raw_svector_ostream Out(Buffer);
8632       Out << "_ZGV" << Data.ISA << Mask;
8633       if (!VLENVal) {
8634         Out << llvm::APSInt::getUnsigned(Data.VecRegSize /
8635                                          evaluateCDTSize(FD, ParamAttrs));
8636       } else {
8637         Out << VLENVal;
8638       }
8639       for (const ParamAttrTy &ParamAttr : ParamAttrs) {
8640         switch (ParamAttr.Kind){
8641         case LinearWithVarStride:
8642           Out << 's' << ParamAttr.StrideOrArg;
8643           break;
8644         case Linear:
8645           Out << 'l';
8646           if (!!ParamAttr.StrideOrArg)
8647             Out << ParamAttr.StrideOrArg;
8648           break;
8649         case Uniform:
8650           Out << 'u';
8651           break;
8652         case Vector:
8653           Out << 'v';
8654           break;
8655         }
8656         if (!!ParamAttr.Alignment)
8657           Out << 'a' << ParamAttr.Alignment;
8658       }
8659       Out << '_' << Fn->getName();
8660       Fn->addFnAttr(Out.str());
8661     }
8662   }
8663 }
8664
8665 void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
8666                                               llvm::Function *Fn) {
8667   ASTContext &C = CGM.getContext();
8668   FD = FD->getMostRecentDecl();
8669   // Map params to their positions in function decl.
8670   llvm::DenseMap<const Decl *, unsigned> ParamPositions;
8671   if (isa<CXXMethodDecl>(FD))
8672     ParamPositions.try_emplace(FD, 0);
8673   unsigned ParamPos = ParamPositions.size();
8674   for (const ParmVarDecl *P : FD->parameters()) {
8675     ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
8676     ++ParamPos;
8677   }
8678   while (FD) {
8679     for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
8680       llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
8681       // Mark uniform parameters.
8682       for (const Expr *E : Attr->uniforms()) {
8683         E = E->IgnoreParenImpCasts();
8684         unsigned Pos;
8685         if (isa<CXXThisExpr>(E)) {
8686           Pos = ParamPositions[FD];
8687         } else {
8688           const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
8689                                 ->getCanonicalDecl();
8690           Pos = ParamPositions[PVD];
8691         }
8692         ParamAttrs[Pos].Kind = Uniform;
8693       }
8694       // Get alignment info.
8695       auto NI = Attr->alignments_begin();
8696       for (const Expr *E : Attr->aligneds()) {
8697         E = E->IgnoreParenImpCasts();
8698         unsigned Pos;
8699         QualType ParmTy;
8700         if (isa<CXXThisExpr>(E)) {
8701           Pos = ParamPositions[FD];
8702           ParmTy = E->getType();
8703         } else {
8704           const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
8705                                 ->getCanonicalDecl();
8706           Pos = ParamPositions[PVD];
8707           ParmTy = PVD->getType();
8708         }
8709         ParamAttrs[Pos].Alignment =
8710             (*NI)
8711                 ? (*NI)->EvaluateKnownConstInt(C)
8712                 : llvm::APSInt::getUnsigned(
8713                       C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy))
8714                           .getQuantity());
8715         ++NI;
8716       }
8717       // Mark linear parameters.
8718       auto SI = Attr->steps_begin();
8719       auto MI = Attr->modifiers_begin();
8720       for (const Expr *E : Attr->linears()) {
8721         E = E->IgnoreParenImpCasts();
8722         unsigned Pos;
8723         if (isa<CXXThisExpr>(E)) {
8724           Pos = ParamPositions[FD];
8725         } else {
8726           const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
8727                                 ->getCanonicalDecl();
8728           Pos = ParamPositions[PVD];
8729         }
8730         ParamAttrTy &ParamAttr = ParamAttrs[Pos];
8731         ParamAttr.Kind = Linear;
8732         if (*SI) {
8733           if (!(*SI)->EvaluateAsInt(ParamAttr.StrideOrArg, C,
8734                                     Expr::SE_AllowSideEffects)) {
8735             if (const auto *DRE =
8736                     cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
8737               if (const auto *StridePVD = cast<ParmVarDecl>(DRE->getDecl())) {
8738                 ParamAttr.Kind = LinearWithVarStride;
8739                 ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
8740                     ParamPositions[StridePVD->getCanonicalDecl()]);
8741               }
8742             }
8743           }
8744         }
8745         ++SI;
8746         ++MI;
8747       }
8748       llvm::APSInt VLENVal;
8749       if (const Expr *VLEN = Attr->getSimdlen())
8750         VLENVal = VLEN->EvaluateKnownConstInt(C);
8751       OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
8752       if (CGM.getTriple().getArch() == llvm::Triple::x86 ||
8753           CGM.getTriple().getArch() == llvm::Triple::x86_64)
8754         emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
8755     }
8756     FD = FD->getPreviousDecl();
8757   }
8758 }
8759
8760 namespace {
8761 /// Cleanup action for doacross support.
8762 class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
8763 public:
8764   static const int DoacrossFinArgs = 2;
8765
8766 private:
8767   llvm::Value *RTLFn;
8768   llvm::Value *Args[DoacrossFinArgs];
8769
8770 public:
8771   DoacrossCleanupTy(llvm::Value *RTLFn, ArrayRef<llvm::Value *> CallArgs)
8772       : RTLFn(RTLFn) {
8773     assert(CallArgs.size() == DoacrossFinArgs);
8774     std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
8775   }
8776   void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
8777     if (!CGF.HaveInsertPoint())
8778       return;
8779     CGF.EmitRuntimeCall(RTLFn, Args);
8780   }
8781 };
8782 } // namespace
8783
/// Emit initialization of doacross loop information: build a kmp_dim
/// descriptor for the loop, call __kmpc_doacross_init, and register a
/// cleanup that calls __kmpc_doacross_fini on region exit.
void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
                                       const OMPLoopDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;

  ASTContext &C = CGM.getContext();
  // The runtime describes each dimension with signed 64-bit bounds.
  QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
  RecordDecl *RD;
  // Build (or reuse the cached) kmp_dim record type.
  if (KmpDimTy.isNull()) {
    // Build struct kmp_dim {  // loop bounds info casted to kmp_int64
    //  kmp_int64 lo; // lower
    //  kmp_int64 up; // upper
    //  kmp_int64 st; // stride
    // };
    RD = C.buildImplicitRecord("kmp_dim");
    RD->startDefinition();
    addFieldToRecordDecl(C, RD, Int64Ty);
    addFieldToRecordDecl(C, RD, Int64Ty);
    addFieldToRecordDecl(C, RD, Int64Ty);
    RD->completeDefinition();
    KmpDimTy = C.getRecordType(RD);
  } else {
    RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
  }

  // Zero-initialize the temporary: 'lo' stays 0, only 'up' and 'st' are set.
  Address DimsAddr = CGF.CreateMemTemp(KmpDimTy, "dims");
  CGF.EmitNullInitialization(DimsAddr, KmpDimTy);
  enum { LowerFD = 0, UpperFD, StrideFD };
  // Fill dims with data.
  LValue DimsLVal = CGF.MakeAddrLValue(DimsAddr, KmpDimTy);
  // dims.upper = num_iterations;
  LValue UpperLVal =
      CGF.EmitLValueForField(DimsLVal, *std::next(RD->field_begin(), UpperFD));
  // Convert the iteration count to kmp_int64 before storing it.
  llvm::Value *NumIterVal = CGF.EmitScalarConversion(
      CGF.EmitScalarExpr(D.getNumIterations()), D.getNumIterations()->getType(),
      Int64Ty, D.getNumIterations()->getExprLoc());
  CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
  // dims.stride = 1;
  LValue StrideLVal =
      CGF.EmitLValueForField(DimsLVal, *std::next(RD->field_begin(), StrideFD));
  CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
                        StrideLVal);

  // Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
  // kmp_int32 num_dims, struct kmp_dim * dims);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, D.getLocStart()),
                         getThreadID(CGF, D.getLocStart()),
                         llvm::ConstantInt::getSigned(CGM.Int32Ty, 1),
                         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                             DimsAddr.getPointer(), CGM.VoidPtrTy)};

  llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_init);
  CGF.EmitRuntimeCall(RTLFn, Args);
  // Push a cleanup so __kmpc_doacross_fini runs on both the normal and the
  // exceptional exit path of the region.
  llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
      emitUpdateLocation(CGF, D.getLocEnd()), getThreadID(CGF, D.getLocEnd())};
  llvm::Value *FiniRTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_fini);
  CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
                                             llvm::makeArrayRef(FiniArgs));
}
8843
8844 void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
8845                                           const OMPDependClause *C) {
8846   QualType Int64Ty =
8847       CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
8848   const Expr *CounterVal = C->getCounterValue();
8849   assert(CounterVal);
8850   llvm::Value *CntVal = CGF.EmitScalarConversion(CGF.EmitScalarExpr(CounterVal),
8851                                                  CounterVal->getType(), Int64Ty,
8852                                                  CounterVal->getExprLoc());
8853   Address CntAddr = CGF.CreateMemTemp(Int64Ty, ".cnt.addr");
8854   CGF.EmitStoreOfScalar(CntVal, CntAddr, /*Volatile=*/false, Int64Ty);
8855   llvm::Value *Args[] = {emitUpdateLocation(CGF, C->getLocStart()),
8856                          getThreadID(CGF, C->getLocStart()),
8857                          CntAddr.getPointer()};
8858   llvm::Value *RTLFn;
8859   if (C->getDependencyKind() == OMPC_DEPEND_source) {
8860     RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
8861   } else {
8862     assert(C->getDependencyKind() == OMPC_DEPEND_sink);
8863     RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_wait);
8864   }
8865   CGF.EmitRuntimeCall(RTLFn, Args);
8866 }
8867
8868 void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
8869                                llvm::Value *Callee,
8870                                ArrayRef<llvm::Value *> Args) const {
8871   assert(Loc.isValid() && "Outlined function call location must be valid.");
8872   auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
8873
8874   if (auto *Fn = dyn_cast<llvm::Function>(Callee)) {
8875     if (Fn->doesNotThrow()) {
8876       CGF.EmitNounwindRuntimeCall(Fn, Args);
8877       return;
8878     }
8879   }
8880   CGF.EmitRuntimeCall(Callee, Args);
8881 }
8882
// Default implementation: calling an outlined function is a plain emitCall;
// derived runtimes may override to adjust arguments or calling convention.
void CGOpenMPRuntime::emitOutlinedFunctionCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> Args) const {
  emitCall(CGF, Loc, OutlinedFn, Args);
}
8888
// Default implementation: the native and the target parameter coincide, so
// just return the native parameter's local address.
Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
                                             const VarDecl *NativeParam,
                                             const VarDecl *TargetParam) const {
  return CGF.GetAddrOfLocalVar(NativeParam);
}
8894
// Default implementation provides no special storage for local variables;
// returning an invalid Address tells the caller to use normal alloca logic.
Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
                                                   const VarDecl *VD) {
  return Address::invalid();
}
8899
// Outlining for 'parallel' requires the full runtime; unreachable in
// SIMD-only mode.
llvm::Value *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8905
// Outlining for 'teams' requires the full runtime; unreachable in
// SIMD-only mode.
llvm::Value *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8911
// Outlining for 'task' requires the full runtime; unreachable in
// SIMD-only mode.
llvm::Value *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    const VarDecl *PartIDVar, const VarDecl *TaskTVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    bool Tied, unsigned &NumberOfParts) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8919
// 'parallel' region calls require the full runtime; unreachable in
// SIMD-only mode.
void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
                                           SourceLocation Loc,
                                           llvm::Value *OutlinedFn,
                                           ArrayRef<llvm::Value *> CapturedVars,
                                           const Expr *IfCond) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8927
// 'critical' regions require the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitCriticalRegion(
    CodeGenFunction &CGF, StringRef CriticalName,
    const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
    const Expr *Hint) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8934
// 'master' regions require the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
                                           const RegionCodeGenTy &MasterOpGen,
                                           SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8940
// 'taskyield' requires the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
                                            SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8945
// 'taskgroup' regions require the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitTaskgroupRegion(
    CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen,
    SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8951
// 'single' regions require the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitSingleRegion(
    CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen,
    SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars,
    ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs,
    ArrayRef<const Expr *> AssignmentOps) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8959
// 'ordered' regions require the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF,
                                            const RegionCodeGenTy &OrderedOpGen,
                                            SourceLocation Loc,
                                            bool IsThreads) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8966
// Barriers require the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF,
                                          SourceLocation Loc,
                                          OpenMPDirectiveKind Kind,
                                          bool EmitChecks,
                                          bool ForceSimpleCall) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8974
// Dynamic loop scheduling requires the full runtime; unreachable in
// SIMD-only mode.
void CGOpenMPSIMDRuntime::emitForDispatchInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
    bool Ordered, const DispatchRTInput &DispatchValues) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8981
// Static loop scheduling requires the full runtime; unreachable in
// SIMD-only mode.
void CGOpenMPSIMDRuntime::emitForStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
    const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8987
// 'distribute' scheduling requires the full runtime; unreachable in
// SIMD-only mode.
void CGOpenMPSIMDRuntime::emitDistributeStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8993
// Ordered-iteration bookkeeping requires the full runtime; unreachable in
// SIMD-only mode.
void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
                                                     SourceLocation Loc,
                                                     unsigned IVSize,
                                                     bool IVSigned) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9000
// Static-loop finalization requires the full runtime; unreachable in
// SIMD-only mode.
void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              OpenMPDirectiveKind DKind) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9006
// Dynamic-loop chunk fetching requires the full runtime; unreachable in
// SIMD-only mode.
llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              unsigned IVSize, bool IVSigned,
                                              Address IL, Address LB,
                                              Address UB, Address ST) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9014
// The 'num_threads' clause requires the full runtime; unreachable in
// SIMD-only mode.
void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
                                               llvm::Value *NumThreads,
                                               SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9020
// The 'proc_bind' clause requires the full runtime; unreachable in
// SIMD-only mode.
void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF,
                                             OpenMPProcBindClauseKind ProcBind,
                                             SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9026
// Threadprivate storage requires the full runtime; unreachable in
// SIMD-only mode.
Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
                                                    const VarDecl *VD,
                                                    Address VDAddr,
                                                    SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9033
// Threadprivate definitions require the full runtime; unreachable in
// SIMD-only mode.
llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition(
    const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit,
    CodeGenFunction *CGF) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9039
// Artificial threadprivate storage requires the full runtime; unreachable in
// SIMD-only mode.
Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
    CodeGenFunction &CGF, QualType VarType, StringRef Name) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9044
// 'flush' requires the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
                                    ArrayRef<const Expr *> Vars,
                                    SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9050
// 'task' requires the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       const OMPExecutableDirective &D,
                                       llvm::Value *TaskFunction,
                                       QualType SharedsTy, Address Shareds,
                                       const Expr *IfCond,
                                       const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9059
// 'taskloop' requires the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitTaskLoopCall(
    CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
    llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
    const Expr *IfCond, const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9066
// In SIMD-only mode only "simple" reductions can occur (asserted below), and
// those are handled entirely by the base implementation without runtime calls.
void CGOpenMPSIMDRuntime::emitReduction(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
    ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
    ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
  assert(Options.SimpleReduction && "Only simple reduction is expected.");
  CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
                                 ReductionOps, Options);
}
9075
// Task reductions require the full runtime; unreachable in SIMD-only mode.
llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9081
// Task reductions require the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
                                                  SourceLocation Loc,
                                                  ReductionCodeGen &RCG,
                                                  unsigned N) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9088
// Task reductions require the full runtime; unreachable in SIMD-only mode.
Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
                                                  SourceLocation Loc,
                                                  llvm::Value *ReductionsPtr,
                                                  LValue SharedLVal) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9095
// 'taskwait' requires the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
                                           SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9100
// 'cancellation point' requires the full runtime; unreachable in
// SIMD-only mode.
void CGOpenMPSIMDRuntime::emitCancellationPointCall(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDirectiveKind CancelRegion) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9106
// 'cancel' requires the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF,
                                         SourceLocation Loc, const Expr *IfCond,
                                         OpenMPDirectiveKind CancelRegion) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9112
// Target (offload) outlining requires the full runtime; unreachable in
// SIMD-only mode.
void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9119
// 'target' calls require the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitTargetCall(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &D,
                                         llvm::Value *OutlinedFn,
                                         llvm::Value *OutlinedFnID,
                                         const Expr *IfCond, const Expr *Device) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9127
// Target function emission requires the full runtime; unreachable in
// SIMD-only mode.
bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9131
// Target global-variable emission requires the full runtime; unreachable in
// SIMD-only mode.
bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9135
// SIMD-only mode never takes ownership of a global: returning false signals
// that the global was not handled here.
bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) {
  return false;
}
9139
// SIMD-only mode has nothing to register with an offload runtime, so no
// registration function is emitted.
llvm::Function *CGOpenMPSIMDRuntime::emitRegistrationFunction() {
  return nullptr;
}
9143
// 'teams' calls require the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &D,
                                        SourceLocation Loc,
                                        llvm::Value *OutlinedFn,
                                        ArrayRef<llvm::Value *> CapturedVars) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9151
// The 'num_teams'/'thread_limit' clauses require the full runtime;
// unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
                                             const Expr *NumTeams,
                                             const Expr *ThreadLimit,
                                             SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9158
// 'target data' requires the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitTargetDataCalls(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9164
// Standalone target-data directives (enter/exit data, update) require the
// full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9170
// Doacross loops require the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
                                           const OMPLoopDirective &D) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9175
// Doacross ordering requires the full runtime; unreachable in SIMD-only mode.
void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
                                              const OMPDependClause *C) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9180
// Parameter translation is only needed for device codegen; unreachable in
// SIMD-only mode.
const VarDecl *
CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
                                        const VarDecl *NativeParam) const {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9186
// Parameter address mapping is only needed for device codegen; unreachable
// in SIMD-only mode.
Address
CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
                                         const VarDecl *NativeParam,
                                         const VarDecl *TargetParam) const {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9193