// Mirrored from the FreeBSD git repository (CyberLeo.Net gitweb view):
// contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp
// Branch state: "Merge clang 7.0.1 and several follow-up changes"
1 //===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This provides a class for OpenMP runtime code generation.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "CGCXXABI.h"
15 #include "CGCleanup.h"
16 #include "CGOpenMPRuntime.h"
17 #include "CGRecordLayout.h"
18 #include "CodeGenFunction.h"
19 #include "clang/CodeGen/ConstantInitBuilder.h"
20 #include "clang/AST/Decl.h"
21 #include "clang/AST/StmtOpenMP.h"
22 #include "clang/Basic/BitmaskEnum.h"
23 #include "llvm/ADT/ArrayRef.h"
24 #include "llvm/Bitcode/BitcodeReader.h"
25 #include "llvm/IR/CallSite.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/GlobalValue.h"
28 #include "llvm/IR/Value.h"
29 #include "llvm/Support/Format.h"
30 #include "llvm/Support/raw_ostream.h"
31 #include <cassert>
32
33 using namespace clang;
34 using namespace CodeGen;
35
36 namespace {
/// Base class for handling code generation inside OpenMP regions.
class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
public:
  /// Kinds of OpenMP regions used in codegen.
  enum CGOpenMPRegionKind {
    /// Region with outlined function for standalone 'parallel'
    /// directive.
    ParallelOutlinedRegion,
    /// Region with outlined function for standalone 'task' directive.
    TaskOutlinedRegion,
    /// Region for constructs that do not require function outlining,
    /// like 'for', 'sections', 'atomic' etc. directives.
    InlinedRegion,
    /// Region with outlined function for standalone 'target' directive.
    TargetRegion,
  };

  /// Constructor for regions that have an associated captured statement
  /// \p CS (outlined/task/target regions).
  CGOpenMPRegionInfo(const CapturedStmt &CS,
                     const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
        CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}

  /// Constructor for regions without a captured statement of their own
  /// (inlined regions delegate to the enclosing context).
  CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
        Kind(Kind), HasCancel(HasCancel) {}

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  virtual const VarDecl *getThreadIDVariable() const = 0;

  /// Emit the captured statement body.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;

  /// Get an LValue for the current ThreadID variable.
  /// \return LValue for thread id variable. This LValue always has type int32*.
  virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);

  /// Emit a task switching point; no-op by default, overridden for
  /// untied task regions.
  virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}

  /// Kind of this region (outlined/task/inlined/target).
  CGOpenMPRegionKind getRegionKind() const { return RegionKind; }

  /// OpenMP directive this region was created for.
  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  /// True if a cancellation may occur in this region.
  bool hasCancel() const { return HasCancel; }

  /// LLVM-style RTTI: every OpenMP region info is tagged CR_OpenMP.
  static bool classof(const CGCapturedStmtInfo *Info) {
    return Info->getKind() == CR_OpenMP;
  }

  ~CGOpenMPRegionInfo() override = default;

protected:
  /// Kind of this region.
  CGOpenMPRegionKind RegionKind;
  /// Callback that emits the body of the region.
  RegionCodeGenTy CodeGen;
  /// Directive that created this region.
  OpenMPDirectiveKind Kind;
  /// Whether cancellation is possible inside the region.
  bool HasCancel;
};
98
/// API for captured statement code generation in OpenMP constructs.
/// Region info for the outlined function of a standalone 'parallel'
/// directive.
class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
  /// \param ThreadIDVar Variable or parameter holding the global thread id;
  /// must not be null (asserted below).
  /// \param HelperName Name to use for the generated capture helper.
  CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
                             const RegionCodeGenTy &CodeGen,
                             OpenMPDirectiveKind Kind, bool HasCancel,
                             StringRef HelperName)
      : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
                           HasCancel),
        ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
    assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return HelperName; }

  /// LLVM-style RTTI: an OpenMP region info of kind ParallelOutlinedRegion.
  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
               ParallelOutlinedRegion;
  }

private:
  /// A variable or parameter storing global thread id for OpenMP
  /// constructs.
  const VarDecl *ThreadIDVar;
  /// Name of the generated capture helper function.
  StringRef HelperName;
};
131
/// API for captured statement code generation in OpenMP constructs.
/// Region info for the outlined function of a standalone 'task' directive.
class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
  /// Pre/post action implementing switch-based resumption for untied tasks:
  /// each task switching point adds a case to a switch over the task part id
  /// (stored through *PartIDVar), so a re-entered task body can jump back to
  /// where it previously left off.
  class UntiedTaskActionTy final : public PrePostActionTy {
    /// True if this action is for an untied task.
    bool Untied;
    /// Parameter holding a pointer to the current task part id.
    const VarDecl *PartIDVar;
    /// Codegen sequence to run at every task switching point.
    const RegionCodeGenTy UntiedCodeGen;
    /// Switch over the part id; built lazily in Enter().
    llvm::SwitchInst *UntiedSwitch = nullptr;

  public:
    UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
                       const RegionCodeGenTy &UntiedCodeGen)
        : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
    void Enter(CodeGenFunction &CGF) override {
      if (Untied) {
        // Emit task switching point.
        LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        llvm::Value *Res =
            CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
        // Default destination: task is done, leave through cleanups.
        llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
        UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
        CGF.EmitBlock(DoneBB);
        CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
        // Case 0: initial entry into the task body.
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        UntiedSwitch->addCase(CGF.Builder.getInt32(0),
                              CGF.Builder.GetInsertBlock());
        emitUntiedSwitch(CGF);
      }
    }
    /// Emit one task switching point: store the next part id, run the untied
    /// codegen sequence, branch out to the caller, and register a new switch
    /// case that resumes execution right after this point.
    void emitUntiedSwitch(CodeGenFunction &CGF) const {
      if (Untied) {
        LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        // Record which part to resume at on re-entry; the matching case with
        // this value is added below.
        CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              PartIdLVal);
        UntiedCodeGen(CGF);
        CodeGenFunction::JumpDest CurPoint =
            CGF.getJumpDestInCurrentScope(".untied.next.");
        CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              CGF.Builder.GetInsertBlock());
        CGF.EmitBranchThroughCleanup(CurPoint);
        CGF.EmitBlock(CurPoint.getBlock());
      }
    }
    /// Number of task parts emitted so far (== number of switch cases).
    unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
  };
  /// \param ThreadIDVar Variable or parameter holding the global thread id;
  /// must not be null (asserted below).
  /// \param Action Untied-task action; referenced, so it must outlive this
  /// region info.
  CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
                                 const VarDecl *ThreadIDVar,
                                 const RegionCodeGenTy &CodeGen,
                                 OpenMPDirectiveKind Kind, bool HasCancel,
                                 const UntiedTaskActionTy &Action)
      : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
        ThreadIDVar(ThreadIDVar), Action(Action) {
    assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }

  /// Get an LValue for the current ThreadID variable.
  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return ".omp_outlined."; }

  /// Delegate task switching point emission to the untied-task action.
  void emitUntiedSwitch(CodeGenFunction &CGF) override {
    Action.emitUntiedSwitch(CGF);
  }

  /// LLVM-style RTTI: an OpenMP region info of kind TaskOutlinedRegion.
  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
               TaskOutlinedRegion;
  }

private:
  /// A variable or parameter storing global thread id for OpenMP
  /// constructs.
  const VarDecl *ThreadIDVar;
  /// Action for emitting code for untied tasks.
  const UntiedTaskActionTy &Action;
};
220
/// API for inlined captured statement code generation in OpenMP
/// constructs. Most queries delegate to the enclosing (outer) region info,
/// if any; the previous CGCapturedStmtInfo is kept so it can be restored.
class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
public:
  /// \param OldCSI Captured-statement info that was active before this
  /// inlined region; may be null or a non-OpenMP info.
  CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
                            const RegionCodeGenTy &CodeGen,
                            OpenMPDirectiveKind Kind, bool HasCancel)
      : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
        OldCSI(OldCSI),
        OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}

  // Retrieve the value of the context parameter.
  llvm::Value *getContextValue() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getContextValue();
    llvm_unreachable("No context value for inlined OpenMP region");
  }

  // Set the context parameter value on the enclosing region.
  void setContextValue(llvm::Value *V) override {
    if (OuterRegionInfo) {
      OuterRegionInfo->setContextValue(V);
      return;
    }
    llvm_unreachable("No context value for inlined OpenMP region");
  }

  /// Lookup the captured field decl for a variable.
  const FieldDecl *lookup(const VarDecl *VD) const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->lookup(VD);
    // If there is no outer outlined region, no need to lookup in a list of
    // captured variables, we can use the original one.
    return nullptr;
  }

  /// Field decl capturing 'this', taken from the enclosing region (if any).
  FieldDecl *getThisFieldDecl() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThisFieldDecl();
    return nullptr;
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThreadIDVariable();
    return nullptr;
  }

  /// Get an LValue for the current ThreadID variable.
  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThreadIDVariableLValue(CGF);
    llvm_unreachable("No LValue for inlined OpenMP construct");
  }

  /// Get the name of the capture helper.
  /// NOTE(review): unlike the other delegating methods this consults the raw
  /// OldCSI (any CGCapturedStmtInfo), not the dyn_cast'ed OuterRegionInfo —
  /// confirm this asymmetry is intentional.
  StringRef getHelperName() const override {
    if (auto *OuterRegionInfo = getOldCSI())
      return OuterRegionInfo->getHelperName();
    llvm_unreachable("No helper name for inlined OpenMP construct");
  }

  /// Forward task switching point emission to the enclosing region, if any.
  void emitUntiedSwitch(CodeGenFunction &CGF) override {
    if (OuterRegionInfo)
      OuterRegionInfo->emitUntiedSwitch(CGF);
  }

  /// The captured-statement info that was active before this region.
  CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }

  /// LLVM-style RTTI: an OpenMP region info of kind InlinedRegion.
  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
  }

  ~CGOpenMPInlinedRegionInfo() override = default;

private:
  /// CodeGen info about outer OpenMP region.
  CodeGenFunction::CGCapturedStmtInfo *OldCSI;
  /// OldCSI downcast to an OpenMP region info, or null if it is not one.
  CGOpenMPRegionInfo *OuterRegionInfo;
};
303
/// API for captured statement code generation in OpenMP target
/// constructs. For these captures, implicit parameters are used instead of the
/// captured fields. The name of the target region has to be unique in a given
/// application so it is provided by the client, because only the client has
/// the information to generate that.
class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
public:
  /// \param HelperName Unique name of the target region, supplied by the
  /// caller.
  CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
                           const RegionCodeGenTy &CodeGen, StringRef HelperName)
      : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
                           /*HasCancel=*/false),
        HelperName(HelperName) {}

  /// This is unused for target regions because each starts executing
  /// with a single thread.
  const VarDecl *getThreadIDVariable() const override { return nullptr; }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return HelperName; }

  /// LLVM-style RTTI: an OpenMP region info of kind TargetRegion.
  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
  }

private:
  /// Unique name of the target region.
  StringRef HelperName;
};
332
/// Placeholder region-codegen callback for regions that only wrap expression
/// emission and therefore must never emit a statement body.
static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
  llvm_unreachable("No codegen for expressions");
}
336 /// API for generation of expressions captured in a innermost OpenMP
337 /// region.
338 class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
339 public:
340   CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
341       : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
342                                   OMPD_unknown,
343                                   /*HasCancel=*/false),
344         PrivScope(CGF) {
345     // Make sure the globals captured in the provided statement are local by
346     // using the privatization logic. We assume the same variable is not
347     // captured more than once.
348     for (const auto &C : CS.captures()) {
349       if (!C.capturesVariable() && !C.capturesVariableByCopy())
350         continue;
351
352       const VarDecl *VD = C.getCapturedVar();
353       if (VD->isLocalVarDeclOrParm())
354         continue;
355
356       DeclRefExpr DRE(const_cast<VarDecl *>(VD),
357                       /*RefersToEnclosingVariableOrCapture=*/false,
358                       VD->getType().getNonReferenceType(), VK_LValue,
359                       C.getLocation());
360       PrivScope.addPrivate(
361           VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(); });
362     }
363     (void)PrivScope.Privatize();
364   }
365
366   /// Lookup the captured field decl for a variable.
367   const FieldDecl *lookup(const VarDecl *VD) const override {
368     if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
369       return FD;
370     return nullptr;
371   }
372
373   /// Emit the captured statement body.
374   void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
375     llvm_unreachable("No body for expressions");
376   }
377
378   /// Get a variable or parameter for storing global thread id
379   /// inside OpenMP construct.
380   const VarDecl *getThreadIDVariable() const override {
381     llvm_unreachable("No thread id for expressions");
382   }
383
384   /// Get the name of the capture helper.
385   StringRef getHelperName() const override {
386     llvm_unreachable("No helper name for expressions");
387   }
388
389   static bool classof(const CGCapturedStmtInfo *Info) { return false; }
390
391 private:
392   /// Private scope to capture global variables.
393   CodeGenFunction::OMPPrivateScope PrivScope;
394 };
395
/// RAII for emitting code of OpenMP constructs. Installs a
/// CGOpenMPInlinedRegionInfo on the CodeGenFunction and saves/clears its
/// lambda-capture and block state for the duration of the region.
class InlinedOpenMPRegionRAII {
  CodeGenFunction &CGF;
  /// Saved lambda capture map of CGF; restored in the destructor.
  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
  /// Saved lambda 'this' capture of CGF; restored in the destructor.
  FieldDecl *LambdaThisCaptureField = nullptr;
  /// Saved block info of CGF; restored in the destructor.
  const CodeGen::CGBlockInfo *BlockInfo = nullptr;

public:
  /// Constructs region for combined constructs.
  /// \param CodeGen Code generation sequence for combined directives. Includes
  /// a list of functions used for code generation of implicitly inlined
  /// regions.
  InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
                          OpenMPDirectiveKind Kind, bool HasCancel)
      : CGF(CGF) {
    // Start emission for the construct.
    CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
        CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
    // Save current lambda/block capture state and clear it for the duration
    // of the region.
    std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
    LambdaThisCaptureField = CGF.LambdaThisCaptureField;
    CGF.LambdaThisCaptureField = nullptr;
    BlockInfo = CGF.BlockInfo;
    CGF.BlockInfo = nullptr;
  }

  ~InlinedOpenMPRegionRAII() {
    // Restore original CapturedStmtInfo only if we're done with code emission.
    auto *OldCSI =
        cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
    delete CGF.CapturedStmtInfo;
    CGF.CapturedStmtInfo = OldCSI;
    std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
    CGF.LambdaThisCaptureField = LambdaThisCaptureField;
    CGF.BlockInfo = BlockInfo;
  }
};
432
/// Values for bit flags used in the ident_t to describe the fields.
/// All enumerated elements are named and described in accordance with the code
/// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
enum OpenMPLocationFlags : unsigned {
  /// Use trampoline for internal microtask.
  OMP_IDENT_IMD = 0x01,
  /// Use c-style ident structure.
  OMP_IDENT_KMPC = 0x02,
  /// Atomic reduction option for kmpc_reduce.
  OMP_ATOMIC_REDUCE = 0x10,
  /// Explicit 'barrier' directive.
  OMP_IDENT_BARRIER_EXPL = 0x20,
  /// Implicit barrier in code.
  OMP_IDENT_BARRIER_IMPL = 0x40,
  /// Implicit barrier in 'for' directive.
  /// (Deliberately the same value as OMP_IDENT_BARRIER_IMPL, mirroring kmp.h.)
  OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
  /// Implicit barrier in 'sections' directive.
  OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
  /// Implicit barrier in 'single' directive.
  OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
  /// Call of __kmp_for_static_init for static loop.
  OMP_IDENT_WORK_LOOP = 0x200,
  /// Call of __kmp_for_static_init for sections.
  OMP_IDENT_WORK_SECTIONS = 0x400,
  /// Call of __kmp_for_static_init for distribute.
  OMP_IDENT_WORK_DISTRIBUTE = 0x800,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
};
461
/// Describes ident structure that describes a source location.
/// All descriptions are taken from
/// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
/// Original structure:
/// typedef struct ident {
///    kmp_int32 reserved_1;   /**<  might be used in Fortran;
///                                  see above  */
///    kmp_int32 flags;        /**<  also f.flags; KMP_IDENT_xxx flags;
///                                  KMP_IDENT_KMPC identifies this union
///                                  member  */
///    kmp_int32 reserved_2;   /**<  not really used in Fortran any more;
///                                  see above */
///#if USE_ITT_BUILD
///                            /*  but currently used for storing
///                                region-specific ITT */
///                            /*  contextual information. */
///#endif /* USE_ITT_BUILD */
///    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for
///                                 C++  */
///    char const *psource;    /**< String describing the source location.
///                            The string is composed of semi-colon separated
///                            fields which describe the source file,
///                            the function and a pair of line numbers that
///                            delimit the construct.
///                             */
/// } ident_t;
/// Indexes of the ident_t fields; the order must match the struct layout
/// documented above.
enum IdentFieldIndex {
  /// might be used in Fortran
  IdentField_Reserved_1,
  /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
  IdentField_Flags,
  /// Not really used in Fortran any more
  IdentField_Reserved_2,
  /// Source[4] in Fortran, do not use for C++
  IdentField_Reserved_3,
  /// String describing the source location. The string is composed of
  /// semi-colon separated fields which describe the source file, the function
  /// and a pair of line numbers that delimit the construct.
  IdentField_PSource
};
502
/// Schedule types for 'omp for' loops (these enumerators are taken from
/// the enum sched_type in kmp.h; values must stay in sync with the runtime).
enum OpenMPSchedType {
  /// Lower bound for default (unordered) versions.
  OMP_sch_lower = 32,
  OMP_sch_static_chunked = 33,
  OMP_sch_static = 34,
  OMP_sch_dynamic_chunked = 35,
  OMP_sch_guided_chunked = 36,
  OMP_sch_runtime = 37,
  OMP_sch_auto = 38,
  /// static with chunk adjustment (e.g., simd)
  OMP_sch_static_balanced_chunked = 45,
  /// Lower bound for 'ordered' versions.
  OMP_ord_lower = 64,
  OMP_ord_static_chunked = 65,
  OMP_ord_static = 66,
  OMP_ord_dynamic_chunked = 67,
  OMP_ord_guided_chunked = 68,
  OMP_ord_runtime = 69,
  OMP_ord_auto = 70,
  OMP_sch_default = OMP_sch_static,
  /// dist_schedule types
  OMP_dist_sch_static_chunked = 91,
  OMP_dist_sch_static = 92,
  /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
  /// These are single high bits, presumably OR'ed onto a base schedule value
  /// at the use sites — confirm when modifying.
  /// Set if the monotonic schedule modifier was present.
  OMP_sch_modifier_monotonic = (1 << 29),
  /// Set if the nonmonotonic schedule modifier was present.
  OMP_sch_modifier_nonmonotonic = (1 << 30),
};
534
/// IDs for the OpenMP runtime library entry points used by this file. The
/// doc comment on each enumerator is the C prototype of the corresponding
/// runtime call.
enum OpenMPRTLFunction {
  /// Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
  /// kmpc_micro microtask, ...);
  OMPRTL__kmpc_fork_call,
  /// Call to void *__kmpc_threadprivate_cached(ident_t *loc,
  /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
  OMPRTL__kmpc_threadprivate_cached,
  /// Call to void __kmpc_threadprivate_register( ident_t *,
  /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
  OMPRTL__kmpc_threadprivate_register,
  /// Call to kmp_int32 __kmpc_global_thread_num(ident_t *loc);
  OMPRTL__kmpc_global_thread_num,
  /// Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
  /// kmp_critical_name *crit);
  OMPRTL__kmpc_critical,
  /// Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
  /// global_tid, kmp_critical_name *crit, uintptr_t hint);
  OMPRTL__kmpc_critical_with_hint,
  /// Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
  /// kmp_critical_name *crit);
  OMPRTL__kmpc_end_critical,
  /// Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL__kmpc_cancel_barrier,
  /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_barrier,
  /// Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_for_static_fini,
  /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL__kmpc_serialized_parallel,
  /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL__kmpc_end_serialized_parallel,
  /// Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
  /// kmp_int32 num_threads);
  OMPRTL__kmpc_push_num_threads,
  /// Call to void __kmpc_flush(ident_t *loc);
  OMPRTL__kmpc_flush,
  /// Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_master,
  /// Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_end_master,
  /// Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
  /// int end_part);
  OMPRTL__kmpc_omp_taskyield,
  /// Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_single,
  /// Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_end_single,
  /// Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
  /// kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  /// kmp_routine_entry_t *task_entry);
  OMPRTL__kmpc_omp_task_alloc,
  /// Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
  /// new_task);
  OMPRTL__kmpc_omp_task,
  /// Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
  /// size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
  /// kmp_int32 didit);
  OMPRTL__kmpc_copyprivate,
  /// Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
  /// kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
  /// (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
  OMPRTL__kmpc_reduce,
  /// Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
  /// void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
  /// *lck);
  OMPRTL__kmpc_reduce_nowait,
  /// Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
  /// kmp_critical_name *lck);
  OMPRTL__kmpc_end_reduce,
  /// Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
  /// kmp_critical_name *lck);
  OMPRTL__kmpc_end_reduce_nowait,
  /// Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
  /// kmp_task_t * new_task);
  OMPRTL__kmpc_omp_task_begin_if0,
  /// Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
  /// kmp_task_t * new_task);
  OMPRTL__kmpc_omp_task_complete_if0,
  /// Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_ordered,
  /// Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_end_ordered,
  /// Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL__kmpc_omp_taskwait,
  /// Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_taskgroup,
  /// Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_end_taskgroup,
  /// Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
  /// int proc_bind);
  OMPRTL__kmpc_push_proc_bind,
  /// Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
  /// gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
  /// *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
  OMPRTL__kmpc_omp_task_with_deps,
  /// Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
  /// gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
  /// ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
  OMPRTL__kmpc_omp_wait_deps,
  /// Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 cncl_kind);
  OMPRTL__kmpc_cancellationpoint,
  /// Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
  /// kmp_int32 cncl_kind);
  OMPRTL__kmpc_cancel,
  /// Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
  /// kmp_int32 num_teams, kmp_int32 thread_limit);
  OMPRTL__kmpc_push_num_teams,
  /// Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
  /// microtask, ...);
  OMPRTL__kmpc_fork_teams,
  /// Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
  /// if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
  /// sched, kmp_uint64 grainsize, void *task_dup);
  OMPRTL__kmpc_taskloop,
  /// Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
  /// num_dims, struct kmp_dim *dims);
  OMPRTL__kmpc_doacross_init,
  /// Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
  OMPRTL__kmpc_doacross_fini,
  /// Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
  /// *vec);
  OMPRTL__kmpc_doacross_post,
  /// Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
  /// *vec);
  OMPRTL__kmpc_doacross_wait,
  /// Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
  /// *data);
  OMPRTL__kmpc_task_reduction_init,
  /// Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
  /// *d);
  OMPRTL__kmpc_task_reduction_get_th_data,

  //
  // Offloading related calls
  //
  /// Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
  /// arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  /// *arg_types);
  OMPRTL__tgt_target,
  /// Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
  /// int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  /// *arg_types);
  OMPRTL__tgt_target_nowait,
  /// Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
  /// int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  /// *arg_types, int32_t num_teams, int32_t thread_limit);
  OMPRTL__tgt_target_teams,
  /// Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
  /// *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
  /// *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
  OMPRTL__tgt_target_teams_nowait,
  /// Call to void __tgt_register_lib(__tgt_bin_desc *desc);
  OMPRTL__tgt_register_lib,
  /// Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
  OMPRTL__tgt_unregister_lib,
  /// Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
  /// void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_begin,
  /// Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
  /// arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  /// *arg_types);
  OMPRTL__tgt_target_data_begin_nowait,
  /// Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
  /// void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_end,
  /// Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
  /// arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  /// *arg_types);
  OMPRTL__tgt_target_data_end_nowait,
  /// Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
  /// void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_update,
  /// Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
  /// arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  /// *arg_types);
  OMPRTL__tgt_target_data_update_nowait,
};
718
719 /// A basic class for pre|post-action for advanced codegen sequence for OpenMP
720 /// region.
721 class CleanupTy final : public EHScopeStack::Cleanup {
722   PrePostActionTy *Action;
723
724 public:
725   explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
726   void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
727     if (!CGF.HaveInsertPoint())
728       return;
729     Action->Exit(CGF);
730   }
731 };
732
733 } // anonymous namespace
734
735 void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
736   CodeGenFunction::RunCleanupsScope Scope(CGF);
737   if (PrePostAction) {
738     CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
739     Callback(CodeGen, CGF, *PrePostAction);
740   } else {
741     PrePostActionTy Action;
742     Callback(CodeGen, CGF, Action);
743   }
744 }
745
746 /// Check if the combiner is a call to UDR combiner and if it is so return the
747 /// UDR decl used for reduction.
748 static const OMPDeclareReductionDecl *
749 getReductionInit(const Expr *ReductionOp) {
750   if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
751     if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
752       if (const auto *DRE =
753               dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
754         if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
755           return DRD;
756   return nullptr;
757 }
758
/// Emit initialization of the private copy \p Private of a reduction item
/// from the original shared variable \p Original.
/// If the user-defined reduction \p DRD has an explicit initializer, call the
/// emitted initializer function with omp_priv/omp_orig remapped to
/// \p Private / \p Original. Otherwise the private copy is filled from a
/// null constant of type \p Ty.
static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
                                             const OMPDeclareReductionDecl *DRD,
                                             const Expr *InitOp,
                                             Address Private, Address Original,
                                             QualType Ty) {
  if (DRD->getInitializer()) {
    // Get (or emit) the combiner/initializer function pair for this UDR.
    std::pair<llvm::Function *, llvm::Function *> Reduction =
        CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
    const auto *CE = cast<CallExpr>(InitOp);
    const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
    // InitOp is a call 'init(&priv, &orig)'; strip the address-of operators
    // to reach the DeclRefExprs of the two placeholder variables.
    const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
    const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
    const auto *LHSDRE =
        cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
    const auto *RHSDRE =
        cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
    // Remap the placeholder variables to the actual private/original
    // addresses for the duration of the call emission.
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
                            [=]() { return Private; });
    PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
                            [=]() { return Original; });
    (void)PrivateScope.Privatize();
    // Substitute the emitted initializer function for the opaque callee and
    // emit the (result-discarding) call.
    RValue Func = RValue::get(Reduction.second);
    CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
    CGF.EmitIgnoredExpr(InitOp);
  } else {
    // No explicit initializer: materialize a private constant global holding
    // the zero value of Ty and copy it into the private address.
    llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
    std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
    auto *GV = new llvm::GlobalVariable(
        CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage, Init, Name);
    LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
    RValue InitRVal;
    // Load the zero value according to Ty's evaluation kind.
    switch (CGF.getEvaluationKind(Ty)) {
    case TEK_Scalar:
      InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
      break;
    case TEK_Complex:
      InitRVal =
          RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
      break;
    case TEK_Aggregate:
      InitRVal = RValue::getAggregate(LV.getAddress());
      break;
    }
    // Wrap the loaded rvalue in an opaque expression so the generic
    // expression-to-memory emission can store it into Private.
    OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
    CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
    CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
                         /*IsInitializer=*/false);
  }
}
810
/// Emit initialization of arrays of complex types.
/// \param DestAddr Address of the array.
/// \param Type Type of array.
/// \param EmitDeclareReductionInit If true, \p Init is a UDR reduction op and
///        elements are initialized via emitInitWithReductionInitializer.
/// \param Init Initial expression of array.
/// \param DRD Declare-reduction decl, if any; when present the source array
///        element is walked in lockstep with the destination.
/// \param SrcAddr Address of the original array.
static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
                                 QualType Type, bool EmitDeclareReductionInit,
                                 const Expr *Init,
                                 const OMPDeclareReductionDecl *DRD,
                                 Address SrcAddr = Address::invalid()) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
  DestAddr =
      CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
  if (DRD)
    SrcAddr =
        CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = nullptr;
  if (DRD)
    SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
  // Skip the loop entirely for zero-length arrays.
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);

  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);

  // PHIs carry the current source/destination element pointers around the
  // loop; their back-edge incoming values are added after the body below.
  llvm::PHINode *SrcElementPHI = nullptr;
  Address SrcElementCurrent = Address::invalid();
  if (DRD) {
    SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
                                          "omp.arraycpy.srcElementPast");
    SrcElementPHI->addIncoming(SrcBegin, EntryBB);
    SrcElementCurrent =
        Address(SrcElementPHI,
                SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  }
  llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  {
    // Per-element cleanups must not leak out of the loop body.
    CodeGenFunction::RunCleanupsScope InitScope(CGF);
    if (EmitDeclareReductionInit) {
      emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
                                       SrcElementCurrent, ElementTy);
    } else
      CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
                           /*IsInitializer=*/false);
  }

  if (DRD) {
    // Shift the address forward by one element.
    // NOTE(review): the value name says "dest" but this advances the source
    // pointer; harmless (names are cosmetic in IR) but confusing.
    llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
        SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
    SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
  }

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());

  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
899
// Emit the lvalue for a shared reduction item (its lower bound for array
// sections) by delegating to the generic shared-lvalue emission.
LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
  return CGF.EmitOMPSharedLValue(E);
}
903
904 LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
905                                             const Expr *E) {
906   if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
907     return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
908   return LValue();
909 }
910
void ReductionCodeGen::emitAggregateInitialization(
    CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
    const OMPDeclareReductionDecl *DRD) {
  // Emit VarDecl with copy init for arrays.
  // Get the address of the original variable captured in current
  // captured region.
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  // Use the declare-reduction initializer when the UDR has one (or when the
  // private copy has no initializer of its own); otherwise fall back to the
  // private variable's default initializer.
  bool EmitDeclareReductionInit =
      DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
  EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
                       EmitDeclareReductionInit,
                       EmitDeclareReductionInit ? ClausesData[N].ReductionOp
                                                : PrivateVD->getInit(),
                       DRD, SharedLVal.getAddress());
}
927
928 ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
929                                    ArrayRef<const Expr *> Privates,
930                                    ArrayRef<const Expr *> ReductionOps) {
931   ClausesData.reserve(Shareds.size());
932   SharedAddresses.reserve(Shareds.size());
933   Sizes.reserve(Shareds.size());
934   BaseDecls.reserve(Shareds.size());
935   auto IPriv = Privates.begin();
936   auto IRed = ReductionOps.begin();
937   for (const Expr *Ref : Shareds) {
938     ClausesData.emplace_back(Ref, *IPriv, *IRed);
939     std::advance(IPriv, 1);
940     std::advance(IRed, 1);
941   }
942 }
943
944 void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
945   assert(SharedAddresses.size() == N &&
946          "Number of generated lvalues must be exactly N.");
947   LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
948   LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
949   SharedAddresses.emplace_back(First, Second);
950 }
951
// Compute and cache the size of reduction item N as a (size-in-chars,
// size-in-elements) pair. For variably modified private types the element
// count is also bound to the VLA size expression so the private type can be
// emitted.
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
  if (!PrivateType->isVariablyModifiedType()) {
    // Fixed-size type: only the byte size is needed; no element count.
    Sizes.emplace_back(
        CGF.getTypeSize(
            SharedAddresses[N].first.getType().getNonReferenceType()),
        nullptr);
    return;
  }
  llvm::Value *Size;
  llvm::Value *SizeInChars;
  auto *ElemType =
      cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
          ->getElementType();
  auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
  if (AsArraySection) {
    // Element count = (UB - LB) + 1, computed from the cached lower/upper
    // bound pointers; byte size = count * sizeof(element).
    Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(),
                                     SharedAddresses[N].first.getPointer());
    Size = CGF.Builder.CreateNUWAdd(
        Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
    SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
  } else {
    // Whole variable: take the type size and derive the element count.
    SizeInChars = CGF.getTypeSize(
        SharedAddresses[N].first.getType().getNonReferenceType());
    Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
  }
  Sizes.emplace_back(SizeInChars, Size);
  // Bind the computed element count to the VLA size expression and emit the
  // variably modified private type.
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}
989
// Re-emit the variably modified private type of reduction item N with an
// externally supplied element count \p Size (e.g. one reloaded from a task
// descriptor). No-op for fixed-size types, where \p Size must be null.
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
                                         llvm::Value *Size) {
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  if (!PrivateType->isVariablyModifiedType()) {
    assert(!Size && !Sizes[N].second &&
           "Size should be nullptr for non-variably modified reduction "
           "items.");
    return;
  }
  // Bind Size to the VLA size expression, then emit the type.
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}
1008
// Emit initialization of the private copy of reduction item N, choosing, in
// order: aggregate (element-wise) init for array types, the user-defined
// reduction initializer, or the private variable's own non-trivial
// initializer (only if \p DefaultInit declines to handle it).
void ReductionCodeGen::emitInitialization(
    CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
    llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
  assert(SharedAddresses.size() > N && "No variable was generated");
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  const OMPDeclareReductionDecl *DRD =
      getReductionInit(ClausesData[N].ReductionOp);
  QualType PrivateType = PrivateVD->getType();
  // Retype both addresses to the in-memory representations of the private
  // and shared types before emitting any stores.
  PrivateAddr = CGF.Builder.CreateElementBitCast(
      PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
  QualType SharedType = SharedAddresses[N].first.getType();
  SharedLVal = CGF.MakeAddrLValue(
      CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(),
                                       CGF.ConvertTypeForMem(SharedType)),
      SharedType, SharedAddresses[N].first.getBaseInfo(),
      CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
  if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
    emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
  } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
    emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
                                     PrivateAddr, SharedLVal.getAddress(),
                                     SharedLVal.getType());
  } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
             !CGF.isTrivialInitializer(PrivateVD->getInit())) {
    CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
                         PrivateVD->getType().getQualifiers(),
                         /*IsInitializer=*/false);
  }
}
1039
1040 bool ReductionCodeGen::needCleanups(unsigned N) {
1041   const auto *PrivateVD =
1042       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1043   QualType PrivateType = PrivateVD->getType();
1044   QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1045   return DTorKind != QualType::DK_none;
1046 }
1047
1048 void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
1049                                     Address PrivateAddr) {
1050   const auto *PrivateVD =
1051       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1052   QualType PrivateType = PrivateVD->getType();
1053   QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1054   if (needCleanups(N)) {
1055     PrivateAddr = CGF.Builder.CreateElementBitCast(
1056         PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1057     CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
1058   }
1059 }
1060
/// Load through the pointer/reference indirections of \p BaseTy until the
/// element type \p ElTy is reached, then retype the resulting address to
/// \p ElTy's in-memory representation.
static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          LValue BaseLV) {
  BaseTy = BaseTy.getNonReferenceType();
  // Peel one level of indirection per iteration until the base type matches
  // the element type.
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
      BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
    } else {
      LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
      BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
    }
    BaseTy = BaseTy->getPointeeType();
  }
  // Retype the final address, preserving base and TBAA info of the lvalue.
  return CGF.MakeAddrLValue(
      CGF.Builder.CreateElementBitCast(BaseLV.getAddress(),
                                       CGF.ConvertTypeForMem(ElTy)),
      BaseLV.getType(), BaseLV.getBaseInfo(),
      CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}
1080
/// Rebuild the chain of indirections described by \p BaseTy around the raw
/// pointer \p Addr: one memory temporary is created per pointer/reference
/// level, each storing the pointer to the next level, and the outermost
/// temporary is returned. If no indirection is needed, \p Addr is returned
/// directly (cast to \p BaseLVType, with \p BaseLVAlignment).
static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
                          llvm::Value *Addr) {
  Address Tmp = Address::invalid();
  Address TopTmp = Address::invalid();
  Address MostTopTmp = Address::invalid();
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    // Allocate a temporary for this level and chain it into the previous
    // one; remember the first (outermost) temporary to return it.
    Tmp = CGF.CreateMemTemp(BaseTy);
    if (TopTmp.isValid())
      CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
    else
      MostTopTmp = Tmp;
    TopTmp = Tmp;
    BaseTy = BaseTy->getPointeeType();
  }
  llvm::Type *Ty = BaseLVType;
  if (Tmp.isValid())
    Ty = Tmp.getElementType();
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
  if (Tmp.isValid()) {
    // Store the casted pointer into the innermost temporary.
    CGF.Builder.CreateStore(Addr, Tmp);
    return MostTopTmp;
  }
  return Address(Addr, BaseLVAlignment);
}
1108
1109 static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
1110   const VarDecl *OrigVD = nullptr;
1111   if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
1112     const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
1113     while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
1114       Base = TempOASE->getBase()->IgnoreParenImpCasts();
1115     while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1116       Base = TempASE->getBase()->IgnoreParenImpCasts();
1117     DE = cast<DeclRefExpr>(Base);
1118     OrigVD = cast<VarDecl>(DE->getDecl());
1119   } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
1120     const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
1121     while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1122       Base = TempASE->getBase()->IgnoreParenImpCasts();
1123     DE = cast<DeclRefExpr>(Base);
1124     OrigVD = cast<VarDecl>(DE->getDecl());
1125   }
1126   return OrigVD;
1127 }
1128
// For reduction items that are array sections/subscripts, shift the private
// address so that base-relative addressing maps onto the private copy: the
// private pointer is offset by the distance between the base declaration's
// begin and the shared section's start, then wrapped back into the base
// declaration's indirection shape. Plain variables are returned unchanged.
Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                                               Address PrivateAddr) {
  const DeclRefExpr *DE;
  if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
    BaseDecls.emplace_back(OrigVD);
    LValue OriginalBaseLValue = CGF.EmitLValue(DE);
    LValue BaseLValue =
        loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
                    OriginalBaseLValue);
    // Element offset of the base from the shared section's lower bound
    // (may be negative).
    llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
        BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
    llvm::Value *PrivatePointer =
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            PrivateAddr.getPointer(),
            SharedAddresses[N].first.getAddress().getType());
    llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
    // Re-wrap the adjusted pointer in the base declaration's indirections.
    return castToBase(CGF, OrigVD->getType(),
                      SharedAddresses[N].first.getType(),
                      OriginalBaseLValue.getAddress().getType(),
                      OriginalBaseLValue.getAlignment(), Ptr);
  }
  BaseDecls.emplace_back(
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
  return PrivateAddr;
}
1154
1155 bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
1156   const OMPDeclareReductionDecl *DRD =
1157       getReductionInit(ClausesData[N].ReductionOp);
1158   return DRD && DRD->getInitializer();
1159 }
1160
// The thread-id variable is captured as a pointer (kmp_int32 *); load
// through it to produce an lvalue for the pointed-to value.
LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
  return CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(getThreadIDVariable()),
      getThreadIDVariable()->getType()->castAs<PointerType>());
}
1166
// Emit the region body under a terminate scope, so that any exception that
// tries to escape the structured block terminates instead of unwinding.
void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
  if (!CGF.HaveInsertPoint())
    return;
  // 1.2.2 OpenMP Language Terminology
  // Structured block - An executable statement with a single entry at the
  // top and a single exit at the bottom.
  // The point of exit cannot be a branch out of the structured block.
  // longjmp() and throw() must not violate the entry/exit criteria.
  CGF.EHStack.pushTerminate();
  CodeGen(CGF);
  CGF.EHStack.popTerminate();
}
1179
// In task outlined regions the thread id is captured by value, so its local
// storage can be addressed directly (no pointer load needed).
LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
    CodeGenFunction &CGF) {
  return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
                            getThreadIDVariable()->getType(),
                            AlignmentSource::Decl);
}
1186
1187 static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
1188                                        QualType FieldTy) {
1189   auto *Field = FieldDecl::Create(
1190       C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
1191       C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
1192       /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
1193   Field->setAccess(AS_public);
1194   DC->addDecl(Field);
1195   return Field;
1196 }
1197
// Construct the runtime helper: builds the implicit 'ident_t' record (four
// int32 fields plus a void* source-location string), the critical-name
// array type, and loads any offloading metadata from the module.
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
                                 StringRef Separator)
    : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
      OffloadEntriesInfoManager(CGM) {
  ASTContext &C = CGM.getContext();
  RecordDecl *RD = C.buildImplicitRecord("ident_t");
  QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
  RD->startDefinition();
  // reserved_1
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  // flags
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  // reserved_2
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  // reserved_3
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  // psource
  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  RD->completeDefinition();
  IdentQTy = C.getRecordType(RD);
  IdentTy = CGM.getTypes().ConvertRecordDeclType(RD);
  // Critical section names are arrays of 8 x i32.
  KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);

  loadOffloadInfoMetadata();
}
1223
// Reset per-module state: drop all cached internal runtime variables.
void CGOpenMPRuntime::clear() {
  InternalVars.clear();
}
1227
1228 std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
1229   SmallString<128> Buffer;
1230   llvm::raw_svector_ostream OS(Buffer);
1231   StringRef Sep = FirstSeparator;
1232   for (StringRef Part : Parts) {
1233     OS << Sep << Part;
1234     Sep = Separator;
1235   }
1236   return OS.str();
1237 }
1238
/// Emit the outlined function for a declare-reduction combiner or
/// initializer: 'void .omp_combiner.(Ty *in, Ty *out)' (or .omp_initializer.)
/// with \p In / \p Out remapped onto the dereferenced parameters, then emits
/// \p CombinerInitializer (if non-null) as the body.
static llvm::Function *
emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
                          const Expr *CombinerInitializer, const VarDecl *In,
                          const VarDecl *Out, bool IsCombiner) {
  // void .omp_combiner.(Ty *in, Ty *out);
  ASTContext &C = CGM.getContext();
  QualType PtrTy = C.getPointerType(Ty).withRestrict();
  FunctionArgList Args;
  ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
                               /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
                              /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  Args.push_back(&OmpOutParm);
  Args.push_back(&OmpInParm);
  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName(
      {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  // Force inlining of the (typically tiny) combiner/initializer body.
  Fn->removeFnAttr(llvm::Attribute::NoInline);
  Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
  Fn->addFnAttr(llvm::Attribute::AlwaysInline);
  CodeGenFunction CGF(CGM);
  // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
  // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
                    Out->getLocation());
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
  Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
    return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
        .getAddress();
  });
  Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
  Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
    return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
        .getAddress();
  });
  (void)Scope.Privatize();
  // For initializers without an explicit init expression, emit the private
  // variable's own non-trivial initializer (if any) into omp_priv.
  if (!IsCombiner && Out->hasInit() &&
      !CGF.isTrivialInitializer(Out->getInit())) {
    CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
                         Out->getType().getQualifiers(),
                         /*IsInitializer=*/true);
  }
  if (CombinerInitializer)
    CGF.EmitIgnoredExpr(CombinerInitializer);
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}
1293
// Emit (once per decl) the combiner and, if present, the initializer
// function for a user-defined reduction \p D and cache them in UDRMap. When
// called during function emission (\p CGF non-null), the decl is also
// recorded per-function for later cleanup.
void CGOpenMPRuntime::emitUserDefinedReduction(
    CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
  if (UDRMap.count(D) > 0)
    return;
  ASTContext &C = CGM.getContext();
  // Lazily cache the identifiers of the standard placeholder variables.
  if (!In || !Out) {
    In = &C.Idents.get("omp_in");
    Out = &C.Idents.get("omp_out");
  }
  llvm::Function *Combiner = emitCombinerOrInitializer(
      CGM, D->getType(), D->getCombiner(), cast<VarDecl>(D->lookup(In).front()),
      cast<VarDecl>(D->lookup(Out).front()),
      /*IsCombiner=*/true);
  llvm::Function *Initializer = nullptr;
  if (const Expr *Init = D->getInitializer()) {
    if (!Priv || !Orig) {
      Priv = &C.Idents.get("omp_priv");
      Orig = &C.Idents.get("omp_orig");
    }
    // Only call-style initializers ('initializer(foo(...))') pass the init
    // expression through; direct-init is handled via omp_priv's own init.
    Initializer = emitCombinerOrInitializer(
        CGM, D->getType(),
        D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
                                                                     : nullptr,
        cast<VarDecl>(D->lookup(Orig).front()),
        cast<VarDecl>(D->lookup(Priv).front()),
        /*IsCombiner=*/false);
  }
  UDRMap.try_emplace(D, Combiner, Initializer);
  if (CGF) {
    auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
    Decls.second.push_back(D);
  }
}
1327
1328 std::pair<llvm::Function *, llvm::Function *>
1329 CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
1330   auto I = UDRMap.find(D);
1331   if (I != UDRMap.end())
1332     return I->second;
1333   emitUserDefinedReduction(/*CGF=*/nullptr, D);
1334   return UDRMap.lookup(D);
1335 }
1336
/// Outline the captured statement \p CS of a 'parallel'/'teams' region into
/// a separate function named via \p OutlinedHelperName, propagating whether
/// the enclosing directive supports cancellation.
static llvm::Value *emitParallelOrTeamsOutlinedFunction(
    CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
    const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
    const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
  assert(ThreadIDVar->getType()->isPointerType() &&
         "thread id variable must be of type kmp_int32 *");
  CodeGenFunction CGF(CGM, true);
  // Only the parallel-based directives checked below can carry a 'cancel'
  // construct; record it so cancellation exit points are emitted.
  bool HasCancel = false;
  if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
    HasCancel = OPD->hasCancel();
  else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
    HasCancel = OPSD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD =
               dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD =
               dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  // Install the region info so the captured-statement emitter uses the
  // region's codegen callback for the body.
  CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
                                    HasCancel, OutlinedHelperName);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
}
1366
1367 llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
1368     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1369     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1370   const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1371   return emitParallelOrTeamsOutlinedFunction(
1372       CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1373 }
1374
1375 llvm::Value *CGOpenMPRuntime::emitTeamsOutlinedFunction(
1376     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1377     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1378   const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1379   return emitParallelOrTeamsOutlinedFunction(
1380       CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1381 }
1382
// Outline the body of a 'task'/'taskloop' directive. For untied tasks, an
// action is installed that after each task part emits a call to
// __kmpc_omp_task with the task descriptor, and the number of generated
// parts is reported back through \p NumberOfParts.
llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    const VarDecl *PartIDVar, const VarDecl *TaskTVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    bool Tied, unsigned &NumberOfParts) {
  auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
                                              PrePostActionTy &) {
    // Emit '__kmpc_omp_task(loc, tid, task_t*)' for the current task.
    llvm::Value *ThreadID = getThreadID(CGF, D.getLocStart());
    llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getLocStart());
    llvm::Value *TaskArgs[] = {
        UpLoc, ThreadID,
        CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
                                    TaskTVar->getType()->castAs<PointerType>())
            .getPointer()};
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
  };
  CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
                                                            UntiedCodeGen);
  CodeGen.setAction(Action);
  assert(!ThreadIDVar->getType()->isPointerType() &&
         "thread id variable must be of type kmp_int32 for tasks");
  // Pick the captured statement matching the directive kind.
  const OpenMPDirectiveKind Region =
      isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
                                                      : OMPD_task;
  const CapturedStmt *CS = D.getCapturedStmt(Region);
  // Only plain 'task' directives can carry a 'cancel'.
  const auto *TD = dyn_cast<OMPTaskDirective>(&D);
  CodeGenFunction CGF(CGM, true);
  CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
                                        InnermostKind,
                                        TD ? TD->hasCancel() : false, Action);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  llvm::Value *Res = CGF.GenerateCapturedStmtFunction(*CS);
  if (!Tied)
    NumberOfParts = Action.getNumberOfParts();
  return Res;
}
1419
1420 static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
1421                              const RecordDecl *RD, const CGRecordLayout &RL,
1422                              ArrayRef<llvm::Constant *> Data) {
1423   llvm::StructType *StructTy = RL.getLLVMType();
1424   unsigned PrevIdx = 0;
1425   ConstantInitBuilder CIBuilder(CGM);
1426   auto DI = Data.begin();
1427   for (const FieldDecl *FD : RD->fields()) {
1428     unsigned Idx = RL.getLLVMFieldNo(FD);
1429     // Fill the alignment.
1430     for (unsigned I = PrevIdx; I < Idx; ++I)
1431       Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
1432     PrevIdx = Idx + 1;
1433     Fields.add(*DI);
1434     ++DI;
1435   }
1436 }
1437
1438 template <class... As>
1439 static llvm::GlobalVariable *
1440 createConstantGlobalStruct(CodeGenModule &CGM, QualType Ty,
1441                            ArrayRef<llvm::Constant *> Data, const Twine &Name,
1442                            As &&... Args) {
1443   const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1444   const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1445   ConstantInitBuilder CIBuilder(CGM);
1446   ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
1447   buildStructValue(Fields, CGM, RD, RL, Data);
1448   return Fields.finishAndCreateGlobal(
1449       Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty),
1450       /*isConstant=*/true, std::forward<As>(Args)...);
1451 }
1452
1453 template <typename T>
1454 static void
1455 createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
1456                                          ArrayRef<llvm::Constant *> Data,
1457                                          T &Parent) {
1458   const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1459   const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1460   ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
1461   buildStructValue(Fields, CGM, RD, RL, Data);
1462   Fields.finishAndAddTo(Parent);
1463 }
1464
1465 Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
1466   CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
1467   llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
1468   if (!Entry) {
1469     if (!DefaultOpenMPPSource) {
1470       // Initialize default location for psource field of ident_t structure of
1471       // all ident_t objects. Format is ";file;function;line;column;;".
1472       // Taken from
1473       // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
1474       DefaultOpenMPPSource =
1475           CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
1476       DefaultOpenMPPSource =
1477           llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
1478     }
1479
1480     llvm::Constant *Data[] = {llvm::ConstantInt::getNullValue(CGM.Int32Ty),
1481                               llvm::ConstantInt::get(CGM.Int32Ty, Flags),
1482                               llvm::ConstantInt::getNullValue(CGM.Int32Ty),
1483                               llvm::ConstantInt::getNullValue(CGM.Int32Ty),
1484                               DefaultOpenMPPSource};
1485     llvm::GlobalValue *DefaultOpenMPLocation = createConstantGlobalStruct(
1486         CGM, IdentQTy, Data, "", llvm::GlobalValue::PrivateLinkage);
1487     DefaultOpenMPLocation->setUnnamedAddr(
1488         llvm::GlobalValue::UnnamedAddr::Global);
1489
1490     OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
1491   }
1492   return Address(Entry, Align);
1493 }
1494
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
                                                 SourceLocation Loc,
                                                 unsigned Flags) {
  // Emits (or reuses) the ident_t location object passed to OpenMP runtime
  // calls, updating its psource field with the presumed source location.
  Flags |= OMP_IDENT_KMPC;
  // If no debug info is generated - return global default location.
  if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
      Loc.isInvalid())
    return getOrCreateDefaultLocation(Flags).getPointer();

  assert(CGF.CurFn && "No function in current CodeGenFunction.");

  CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
  Address LocValue = Address::invalid();
  // Reuse the per-function ".kmpc_loc.addr" alloca if one was created before.
  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
  if (I != OpenMPLocThreadIDMap.end())
    LocValue = Address(I->second.DebugLoc, Align);

  // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
  // GetOpenMPThreadID was called before this routine.
  if (!LocValue.isValid()) {
    // Generate "ident_t .kmpc_loc.addr;"
    Address AI = CGF.CreateMemTemp(IdentQTy, ".kmpc_loc.addr");
    auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
    Elem.second.DebugLoc = AI.getPointer();
    LocValue = AI;

    // Copy the template default location into the alloca at the function's
    // entry (AllocaInsertPt) so it dominates all uses.
    CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
    CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
    CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
                             CGF.getTypeSize(IdentQTy));
  }

  // char **psource = &.kmpc_loc_<flags>.addr.psource;
  LValue Base = CGF.MakeAddrLValue(LocValue, IdentQTy);
  auto Fields = cast<RecordDecl>(IdentQTy->getAsTagDecl())->field_begin();
  LValue PSource =
      CGF.EmitLValueForField(Base, *std::next(Fields, IdentField_PSource));

  // Cache the ";file;function;line;column;;" string per raw source location.
  llvm::Value *OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
  if (OMPDebugLoc == nullptr) {
    SmallString<128> Buffer2;
    llvm::raw_svector_ostream OS2(Buffer2);
    // Build debug location
    PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
    OS2 << ";" << PLoc.getFilename() << ";";
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
      OS2 << FD->getQualifiedNameAsString();
    OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
    OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
    OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
  }
  // *psource = ";<File>;<Function>;<Line>;<Column>;;";
  CGF.EmitStoreOfScalar(OMPDebugLoc, PSource);

  // Our callers always pass this to a runtime function, so for
  // convenience, go ahead and return a naked pointer.
  return LocValue.getPointer();
}
1553
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
                                          SourceLocation Loc) {
  // Returns the OpenMP global thread id, preferring (in order): the value
  // cached for this function, the thread-id argument of an outlined region,
  // and finally a call to __kmpc_global_thread_num emitted at the entry.
  assert(CGF.CurFn && "No function in current CodeGenFunction.");

  llvm::Value *ThreadID = nullptr;
  // Check whether we've already cached a load of the thread id in this
  // function.
  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
  if (I != OpenMPLocThreadIDMap.end()) {
    ThreadID = I->second.ThreadID;
    if (ThreadID != nullptr)
      return ThreadID;
  }
  // If exceptions are enabled, do not use parameter to avoid possible crash.
  if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
      !CGF.getLangOpts().CXXExceptions ||
      CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
    if (auto *OMPRegionInfo =
            dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
      if (OMPRegionInfo->getThreadIDVariable()) {
        // Check if this an outlined function with thread id passed as argument.
        LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
        ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
        // If value loaded in entry block, cache it and use it everywhere in
        // function.
        if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
          auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
          Elem.second.ThreadID = ThreadID;
        }
        return ThreadID;
      }
    }
  }

  // This is not an outlined function region - need to call __kmpc_int32
  // kmpc_global_thread_num(ident_t *loc).
  // Generate thread id value and cache this value for use across the
  // function.
  // The call is emitted at AllocaInsertPt so it dominates every later use.
  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
  CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
  llvm::CallInst *Call = CGF.Builder.CreateCall(
      createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
      emitUpdateLocation(CGF, Loc));
  Call->setCallingConv(CGF.getRuntimeCC());
  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
  Elem.second.ThreadID = Call;
  return Call;
}
1602
1603 void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
1604   assert(CGF.CurFn && "No function in current CodeGenFunction.");
1605   if (OpenMPLocThreadIDMap.count(CGF.CurFn))
1606     OpenMPLocThreadIDMap.erase(CGF.CurFn);
1607   if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1608     for(auto *D : FunctionUDRMap[CGF.CurFn])
1609       UDRMap.erase(D);
1610     FunctionUDRMap.erase(CGF.CurFn);
1611   }
1612 }
1613
1614 llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
1615   return IdentTy->getPointerTo();
1616 }
1617
1618 llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
1619   if (!Kmpc_MicroTy) {
1620     // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1621     llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1622                                  llvm::PointerType::getUnqual(CGM.Int32Ty)};
1623     Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1624   }
1625   return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1626 }
1627
1628 llvm::Constant *
1629 CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
1630   llvm::Constant *RTLFn = nullptr;
1631   switch (static_cast<OpenMPRTLFunction>(Function)) {
1632   case OMPRTL__kmpc_fork_call: {
1633     // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
1634     // microtask, ...);
1635     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1636                                 getKmpc_MicroPointerTy()};
1637     auto *FnTy =
1638         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1639     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
1640     break;
1641   }
1642   case OMPRTL__kmpc_global_thread_num: {
1643     // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
1644     llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1645     auto *FnTy =
1646         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1647     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
1648     break;
1649   }
1650   case OMPRTL__kmpc_threadprivate_cached: {
1651     // Build void *__kmpc_threadprivate_cached(ident_t *loc,
1652     // kmp_int32 global_tid, void *data, size_t size, void ***cache);
1653     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1654                                 CGM.VoidPtrTy, CGM.SizeTy,
1655                                 CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
1656     auto *FnTy =
1657         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
1658     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
1659     break;
1660   }
1661   case OMPRTL__kmpc_critical: {
1662     // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
1663     // kmp_critical_name *crit);
1664     llvm::Type *TypeParams[] = {
1665         getIdentTyPointerTy(), CGM.Int32Ty,
1666         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1667     auto *FnTy =
1668         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1669     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
1670     break;
1671   }
1672   case OMPRTL__kmpc_critical_with_hint: {
1673     // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
1674     // kmp_critical_name *crit, uintptr_t hint);
1675     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1676                                 llvm::PointerType::getUnqual(KmpCriticalNameTy),
1677                                 CGM.IntPtrTy};
1678     auto *FnTy =
1679         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1680     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
1681     break;
1682   }
1683   case OMPRTL__kmpc_threadprivate_register: {
1684     // Build void __kmpc_threadprivate_register(ident_t *, void *data,
1685     // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
1686     // typedef void *(*kmpc_ctor)(void *);
1687     auto *KmpcCtorTy =
1688         llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
1689                                 /*isVarArg*/ false)->getPointerTo();
1690     // typedef void *(*kmpc_cctor)(void *, void *);
1691     llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1692     auto *KmpcCopyCtorTy =
1693         llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
1694                                 /*isVarArg*/ false)
1695             ->getPointerTo();
1696     // typedef void (*kmpc_dtor)(void *);
1697     auto *KmpcDtorTy =
1698         llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
1699             ->getPointerTo();
1700     llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
1701                               KmpcCopyCtorTy, KmpcDtorTy};
1702     auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
1703                                         /*isVarArg*/ false);
1704     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
1705     break;
1706   }
1707   case OMPRTL__kmpc_end_critical: {
1708     // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
1709     // kmp_critical_name *crit);
1710     llvm::Type *TypeParams[] = {
1711         getIdentTyPointerTy(), CGM.Int32Ty,
1712         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1713     auto *FnTy =
1714         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1715     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
1716     break;
1717   }
1718   case OMPRTL__kmpc_cancel_barrier: {
1719     // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
1720     // global_tid);
1721     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1722     auto *FnTy =
1723         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1724     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
1725     break;
1726   }
1727   case OMPRTL__kmpc_barrier: {
1728     // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1729     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1730     auto *FnTy =
1731         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1732     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1733     break;
1734   }
1735   case OMPRTL__kmpc_for_static_fini: {
1736     // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
1737     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1738     auto *FnTy =
1739         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1740     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
1741     break;
1742   }
1743   case OMPRTL__kmpc_push_num_threads: {
1744     // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
1745     // kmp_int32 num_threads)
1746     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1747                                 CGM.Int32Ty};
1748     auto *FnTy =
1749         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1750     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
1751     break;
1752   }
1753   case OMPRTL__kmpc_serialized_parallel: {
1754     // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1755     // global_tid);
1756     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1757     auto *FnTy =
1758         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1759     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1760     break;
1761   }
1762   case OMPRTL__kmpc_end_serialized_parallel: {
1763     // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1764     // global_tid);
1765     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1766     auto *FnTy =
1767         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1768     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1769     break;
1770   }
1771   case OMPRTL__kmpc_flush: {
1772     // Build void __kmpc_flush(ident_t *loc);
1773     llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1774     auto *FnTy =
1775         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1776     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
1777     break;
1778   }
1779   case OMPRTL__kmpc_master: {
1780     // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
1781     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1782     auto *FnTy =
1783         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1784     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
1785     break;
1786   }
1787   case OMPRTL__kmpc_end_master: {
1788     // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
1789     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1790     auto *FnTy =
1791         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1792     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
1793     break;
1794   }
1795   case OMPRTL__kmpc_omp_taskyield: {
1796     // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
1797     // int end_part);
1798     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1799     auto *FnTy =
1800         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1801     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
1802     break;
1803   }
1804   case OMPRTL__kmpc_single: {
1805     // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
1806     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1807     auto *FnTy =
1808         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1809     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
1810     break;
1811   }
1812   case OMPRTL__kmpc_end_single: {
1813     // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
1814     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1815     auto *FnTy =
1816         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1817     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
1818     break;
1819   }
1820   case OMPRTL__kmpc_omp_task_alloc: {
1821     // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
1822     // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1823     // kmp_routine_entry_t *task_entry);
1824     assert(KmpRoutineEntryPtrTy != nullptr &&
1825            "Type kmp_routine_entry_t must be created.");
1826     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1827                                 CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
1828     // Return void * and then cast to particular kmp_task_t type.
1829     auto *FnTy =
1830         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1831     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
1832     break;
1833   }
1834   case OMPRTL__kmpc_omp_task: {
1835     // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1836     // *new_task);
1837     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1838                                 CGM.VoidPtrTy};
1839     auto *FnTy =
1840         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1841     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
1842     break;
1843   }
1844   case OMPRTL__kmpc_copyprivate: {
1845     // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
1846     // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
1847     // kmp_int32 didit);
1848     llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1849     auto *CpyFnTy =
1850         llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
1851     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
1852                                 CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
1853                                 CGM.Int32Ty};
1854     auto *FnTy =
1855         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1856     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
1857     break;
1858   }
1859   case OMPRTL__kmpc_reduce: {
1860     // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
1861     // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
1862     // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
1863     llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1864     auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1865                                                /*isVarArg=*/false);
1866     llvm::Type *TypeParams[] = {
1867         getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1868         CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1869         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1870     auto *FnTy =
1871         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1872     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
1873     break;
1874   }
1875   case OMPRTL__kmpc_reduce_nowait: {
1876     // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
1877     // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
1878     // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
1879     // *lck);
1880     llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1881     auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1882                                                /*isVarArg=*/false);
1883     llvm::Type *TypeParams[] = {
1884         getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1885         CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1886         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1887     auto *FnTy =
1888         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1889     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
1890     break;
1891   }
1892   case OMPRTL__kmpc_end_reduce: {
1893     // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
1894     // kmp_critical_name *lck);
1895     llvm::Type *TypeParams[] = {
1896         getIdentTyPointerTy(), CGM.Int32Ty,
1897         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1898     auto *FnTy =
1899         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1900     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
1901     break;
1902   }
1903   case OMPRTL__kmpc_end_reduce_nowait: {
1904     // Build __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
1905     // kmp_critical_name *lck);
1906     llvm::Type *TypeParams[] = {
1907         getIdentTyPointerTy(), CGM.Int32Ty,
1908         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1909     auto *FnTy =
1910         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1911     RTLFn =
1912         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
1913     break;
1914   }
1915   case OMPRTL__kmpc_omp_task_begin_if0: {
1916     // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1917     // *new_task);
1918     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1919                                 CGM.VoidPtrTy};
1920     auto *FnTy =
1921         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1922     RTLFn =
1923         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
1924     break;
1925   }
1926   case OMPRTL__kmpc_omp_task_complete_if0: {
1927     // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1928     // *new_task);
1929     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1930                                 CGM.VoidPtrTy};
1931     auto *FnTy =
1932         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1933     RTLFn = CGM.CreateRuntimeFunction(FnTy,
1934                                       /*Name=*/"__kmpc_omp_task_complete_if0");
1935     break;
1936   }
1937   case OMPRTL__kmpc_ordered: {
1938     // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
1939     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1940     auto *FnTy =
1941         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1942     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
1943     break;
1944   }
1945   case OMPRTL__kmpc_end_ordered: {
1946     // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
1947     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1948     auto *FnTy =
1949         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1950     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
1951     break;
1952   }
1953   case OMPRTL__kmpc_omp_taskwait: {
1954     // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
1955     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1956     auto *FnTy =
1957         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1958     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
1959     break;
1960   }
1961   case OMPRTL__kmpc_taskgroup: {
1962     // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
1963     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1964     auto *FnTy =
1965         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1966     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
1967     break;
1968   }
1969   case OMPRTL__kmpc_end_taskgroup: {
1970     // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
1971     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1972     auto *FnTy =
1973         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1974     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
1975     break;
1976   }
1977   case OMPRTL__kmpc_push_proc_bind: {
1978     // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
1979     // int proc_bind)
1980     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1981     auto *FnTy =
1982         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1983     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
1984     break;
1985   }
1986   case OMPRTL__kmpc_omp_task_with_deps: {
1987     // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
1988     // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
1989     // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
1990     llvm::Type *TypeParams[] = {
1991         getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
1992         CGM.VoidPtrTy,         CGM.Int32Ty, CGM.VoidPtrTy};
1993     auto *FnTy =
1994         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1995     RTLFn =
1996         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
1997     break;
1998   }
1999   case OMPRTL__kmpc_omp_wait_deps: {
2000     // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
2001     // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
2002     // kmp_depend_info_t *noalias_dep_list);
2003     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2004                                 CGM.Int32Ty,           CGM.VoidPtrTy,
2005                                 CGM.Int32Ty,           CGM.VoidPtrTy};
2006     auto *FnTy =
2007         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2008     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
2009     break;
2010   }
2011   case OMPRTL__kmpc_cancellationpoint: {
2012     // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
2013     // global_tid, kmp_int32 cncl_kind)
2014     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2015     auto *FnTy =
2016         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2017     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
2018     break;
2019   }
2020   case OMPRTL__kmpc_cancel: {
2021     // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
2022     // kmp_int32 cncl_kind)
2023     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2024     auto *FnTy =
2025         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2026     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
2027     break;
2028   }
2029   case OMPRTL__kmpc_push_num_teams: {
2030     // Build void kmpc_push_num_teams (ident_t loc, kmp_int32 global_tid,
2031     // kmp_int32 num_teams, kmp_int32 num_threads)
2032     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
2033         CGM.Int32Ty};
2034     auto *FnTy =
2035         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2036     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
2037     break;
2038   }
2039   case OMPRTL__kmpc_fork_teams: {
2040     // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
2041     // microtask, ...);
2042     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2043                                 getKmpc_MicroPointerTy()};
2044     auto *FnTy =
2045         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
2046     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
2047     break;
2048   }
2049   case OMPRTL__kmpc_taskloop: {
2050     // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
2051     // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
2052     // sched, kmp_uint64 grainsize, void *task_dup);
2053     llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2054                                 CGM.IntTy,
2055                                 CGM.VoidPtrTy,
2056                                 CGM.IntTy,
2057                                 CGM.Int64Ty->getPointerTo(),
2058                                 CGM.Int64Ty->getPointerTo(),
2059                                 CGM.Int64Ty,
2060                                 CGM.IntTy,
2061                                 CGM.IntTy,
2062                                 CGM.Int64Ty,
2063                                 CGM.VoidPtrTy};
2064     auto *FnTy =
2065         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2066     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
2067     break;
2068   }
2069   case OMPRTL__kmpc_doacross_init: {
2070     // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
2071     // num_dims, struct kmp_dim *dims);
2072     llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2073                                 CGM.Int32Ty,
2074                                 CGM.Int32Ty,
2075                                 CGM.VoidPtrTy};
2076     auto *FnTy =
2077         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2078     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
2079     break;
2080   }
2081   case OMPRTL__kmpc_doacross_fini: {
2082     // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
2083     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2084     auto *FnTy =
2085         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2086     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
2087     break;
2088   }
2089   case OMPRTL__kmpc_doacross_post: {
2090     // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
2091     // *vec);
2092     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2093                                 CGM.Int64Ty->getPointerTo()};
2094     auto *FnTy =
2095         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2096     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
2097     break;
2098   }
2099   case OMPRTL__kmpc_doacross_wait: {
2100     // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
2101     // *vec);
2102     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2103                                 CGM.Int64Ty->getPointerTo()};
2104     auto *FnTy =
2105         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2106     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
2107     break;
2108   }
2109   case OMPRTL__kmpc_task_reduction_init: {
2110     // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
2111     // *data);
2112     llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
2113     auto *FnTy =
2114         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2115     RTLFn =
2116         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
2117     break;
2118   }
2119   case OMPRTL__kmpc_task_reduction_get_th_data: {
2120     // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
2121     // *d);
2122     llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2123     auto *FnTy =
2124         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2125     RTLFn = CGM.CreateRuntimeFunction(
2126         FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
2127     break;
2128   }
2129   case OMPRTL__tgt_target: {
2130     // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
2131     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2132     // *arg_types);
2133     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2134                                 CGM.VoidPtrTy,
2135                                 CGM.Int32Ty,
2136                                 CGM.VoidPtrPtrTy,
2137                                 CGM.VoidPtrPtrTy,
2138                                 CGM.SizeTy->getPointerTo(),
2139                                 CGM.Int64Ty->getPointerTo()};
2140     auto *FnTy =
2141         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2142     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
2143     break;
2144   }
2145   case OMPRTL__tgt_target_nowait: {
2146     // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
2147     // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2148     // int64_t *arg_types);
2149     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2150                                 CGM.VoidPtrTy,
2151                                 CGM.Int32Ty,
2152                                 CGM.VoidPtrPtrTy,
2153                                 CGM.VoidPtrPtrTy,
2154                                 CGM.SizeTy->getPointerTo(),
2155                                 CGM.Int64Ty->getPointerTo()};
2156     auto *FnTy =
2157         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2158     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
2159     break;
2160   }
2161   case OMPRTL__tgt_target_teams: {
2162     // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
2163     // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2164     // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2165     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2166                                 CGM.VoidPtrTy,
2167                                 CGM.Int32Ty,
2168                                 CGM.VoidPtrPtrTy,
2169                                 CGM.VoidPtrPtrTy,
2170                                 CGM.SizeTy->getPointerTo(),
2171                                 CGM.Int64Ty->getPointerTo(),
2172                                 CGM.Int32Ty,
2173                                 CGM.Int32Ty};
2174     auto *FnTy =
2175         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2176     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
2177     break;
2178   }
2179   case OMPRTL__tgt_target_teams_nowait: {
2180     // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
2181     // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
2182     // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2183     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2184                                 CGM.VoidPtrTy,
2185                                 CGM.Int32Ty,
2186                                 CGM.VoidPtrPtrTy,
2187                                 CGM.VoidPtrPtrTy,
2188                                 CGM.SizeTy->getPointerTo(),
2189                                 CGM.Int64Ty->getPointerTo(),
2190                                 CGM.Int32Ty,
2191                                 CGM.Int32Ty};
2192     auto *FnTy =
2193         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2194     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
2195     break;
2196   }
2197   case OMPRTL__tgt_register_lib: {
2198     // Build void __tgt_register_lib(__tgt_bin_desc *desc);
2199     QualType ParamTy =
2200         CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2201     llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2202     auto *FnTy =
2203         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2204     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
2205     break;
2206   }
2207   case OMPRTL__tgt_unregister_lib: {
2208     // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
2209     QualType ParamTy =
2210         CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2211     llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2212     auto *FnTy =
2213         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2214     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
2215     break;
2216   }
2217   case OMPRTL__tgt_target_data_begin: {
2218     // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
2219     // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2220     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2221                                 CGM.Int32Ty,
2222                                 CGM.VoidPtrPtrTy,
2223                                 CGM.VoidPtrPtrTy,
2224                                 CGM.SizeTy->getPointerTo(),
2225                                 CGM.Int64Ty->getPointerTo()};
2226     auto *FnTy =
2227         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2228     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
2229     break;
2230   }
2231   case OMPRTL__tgt_target_data_begin_nowait: {
2232     // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
2233     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2234     // *arg_types);
2235     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2236                                 CGM.Int32Ty,
2237                                 CGM.VoidPtrPtrTy,
2238                                 CGM.VoidPtrPtrTy,
2239                                 CGM.SizeTy->getPointerTo(),
2240                                 CGM.Int64Ty->getPointerTo()};
2241     auto *FnTy =
2242         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2243     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
2244     break;
2245   }
2246   case OMPRTL__tgt_target_data_end: {
2247     // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
2248     // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2249     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2250                                 CGM.Int32Ty,
2251                                 CGM.VoidPtrPtrTy,
2252                                 CGM.VoidPtrPtrTy,
2253                                 CGM.SizeTy->getPointerTo(),
2254                                 CGM.Int64Ty->getPointerTo()};
2255     auto *FnTy =
2256         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2257     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
2258     break;
2259   }
2260   case OMPRTL__tgt_target_data_end_nowait: {
2261     // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
2262     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2263     // *arg_types);
2264     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2265                                 CGM.Int32Ty,
2266                                 CGM.VoidPtrPtrTy,
2267                                 CGM.VoidPtrPtrTy,
2268                                 CGM.SizeTy->getPointerTo(),
2269                                 CGM.Int64Ty->getPointerTo()};
2270     auto *FnTy =
2271         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2272     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
2273     break;
2274   }
2275   case OMPRTL__tgt_target_data_update: {
2276     // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
2277     // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2278     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2279                                 CGM.Int32Ty,
2280                                 CGM.VoidPtrPtrTy,
2281                                 CGM.VoidPtrPtrTy,
2282                                 CGM.SizeTy->getPointerTo(),
2283                                 CGM.Int64Ty->getPointerTo()};
2284     auto *FnTy =
2285         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2286     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
2287     break;
2288   }
2289   case OMPRTL__tgt_target_data_update_nowait: {
2290     // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
2291     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2292     // *arg_types);
2293     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2294                                 CGM.Int32Ty,
2295                                 CGM.VoidPtrPtrTy,
2296                                 CGM.VoidPtrPtrTy,
2297                                 CGM.SizeTy->getPointerTo(),
2298                                 CGM.Int64Ty->getPointerTo()};
2299     auto *FnTy =
2300         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2301     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
2302     break;
2303   }
2304   }
2305   assert(RTLFn && "Unable to find OpenMP runtime function");
2306   return RTLFn;
2307 }
2308
2309 llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
2310                                                              bool IVSigned) {
2311   assert((IVSize == 32 || IVSize == 64) &&
2312          "IV size is not compatible with the omp runtime");
2313   StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
2314                                             : "__kmpc_for_static_init_4u")
2315                                 : (IVSigned ? "__kmpc_for_static_init_8"
2316                                             : "__kmpc_for_static_init_8u");
2317   llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2318   auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2319   llvm::Type *TypeParams[] = {
2320     getIdentTyPointerTy(),                     // loc
2321     CGM.Int32Ty,                               // tid
2322     CGM.Int32Ty,                               // schedtype
2323     llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2324     PtrTy,                                     // p_lower
2325     PtrTy,                                     // p_upper
2326     PtrTy,                                     // p_stride
2327     ITy,                                       // incr
2328     ITy                                        // chunk
2329   };
2330   auto *FnTy =
2331       llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2332   return CGM.CreateRuntimeFunction(FnTy, Name);
2333 }
2334
2335 llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
2336                                                             bool IVSigned) {
2337   assert((IVSize == 32 || IVSize == 64) &&
2338          "IV size is not compatible with the omp runtime");
2339   StringRef Name =
2340       IVSize == 32
2341           ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
2342           : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
2343   llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2344   llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
2345                                CGM.Int32Ty,           // tid
2346                                CGM.Int32Ty,           // schedtype
2347                                ITy,                   // lower
2348                                ITy,                   // upper
2349                                ITy,                   // stride
2350                                ITy                    // chunk
2351   };
2352   auto *FnTy =
2353       llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2354   return CGM.CreateRuntimeFunction(FnTy, Name);
2355 }
2356
2357 llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
2358                                                             bool IVSigned) {
2359   assert((IVSize == 32 || IVSize == 64) &&
2360          "IV size is not compatible with the omp runtime");
2361   StringRef Name =
2362       IVSize == 32
2363           ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
2364           : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
2365   llvm::Type *TypeParams[] = {
2366       getIdentTyPointerTy(), // loc
2367       CGM.Int32Ty,           // tid
2368   };
2369   auto *FnTy =
2370       llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2371   return CGM.CreateRuntimeFunction(FnTy, Name);
2372 }
2373
2374 llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
2375                                                             bool IVSigned) {
2376   assert((IVSize == 32 || IVSize == 64) &&
2377          "IV size is not compatible with the omp runtime");
2378   StringRef Name =
2379       IVSize == 32
2380           ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
2381           : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
2382   llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2383   auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2384   llvm::Type *TypeParams[] = {
2385     getIdentTyPointerTy(),                     // loc
2386     CGM.Int32Ty,                               // tid
2387     llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2388     PtrTy,                                     // p_lower
2389     PtrTy,                                     // p_upper
2390     PtrTy                                      // p_stride
2391   };
2392   auto *FnTy =
2393       llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2394   return CGM.CreateRuntimeFunction(FnTy, Name);
2395 }
2396
2397 Address CGOpenMPRuntime::getAddrOfDeclareTargetLink(const VarDecl *VD) {
2398   if (CGM.getLangOpts().OpenMPSimd)
2399     return Address::invalid();
2400   llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2401       OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2402   if (Res && *Res == OMPDeclareTargetDeclAttr::MT_Link) {
2403     SmallString<64> PtrName;
2404     {
2405       llvm::raw_svector_ostream OS(PtrName);
2406       OS << CGM.getMangledName(GlobalDecl(VD)) << "_decl_tgt_link_ptr";
2407     }
2408     llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
2409     if (!Ptr) {
2410       QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
2411       Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
2412                                         PtrName);
2413       if (!CGM.getLangOpts().OpenMPIsDevice) {
2414         auto *GV = cast<llvm::GlobalVariable>(Ptr);
2415         GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
2416         GV->setInitializer(CGM.GetAddrOfGlobal(VD));
2417       }
2418       CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ptr));
2419       registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
2420     }
2421     return Address(Ptr, CGM.getContext().getDeclAlign(VD));
2422   }
2423   return Address::invalid();
2424 }
2425
2426 llvm::Constant *
2427 CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
2428   assert(!CGM.getLangOpts().OpenMPUseTLS ||
2429          !CGM.getContext().getTargetInfo().isTLSSupported());
2430   // Lookup the entry, lazily creating it if necessary.
2431   std::string Suffix = getName({"cache", ""});
2432   return getOrCreateInternalVariable(
2433       CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
2434 }
2435
2436 Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
2437                                                 const VarDecl *VD,
2438                                                 Address VDAddr,
2439                                                 SourceLocation Loc) {
2440   if (CGM.getLangOpts().OpenMPUseTLS &&
2441       CGM.getContext().getTargetInfo().isTLSSupported())
2442     return VDAddr;
2443
2444   llvm::Type *VarTy = VDAddr.getElementType();
2445   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2446                          CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2447                                                        CGM.Int8PtrTy),
2448                          CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
2449                          getOrCreateThreadPrivateCache(VD)};
2450   return Address(CGF.EmitRuntimeCall(
2451       createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2452                  VDAddr.getAlignment());
2453 }
2454
2455 void CGOpenMPRuntime::emitThreadPrivateVarInit(
2456     CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
2457     llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
2458   // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
2459   // library.
2460   llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
2461   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
2462                       OMPLoc);
2463   // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
2464   // to register constructor/destructor for variable.
2465   llvm::Value *Args[] = {
2466       OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
2467       Ctor, CopyCtor, Dtor};
2468   CGF.EmitRuntimeCall(
2469       createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
2470 }
2471
/// Emit the runtime registration (and, if needed, the helper functions) for a
/// threadprivate variable \p VD living at \p VDAddr. When called outside any
/// function (\p CGF == nullptr), returns a freshly created initializer
/// function that performs the registration; otherwise emits the registration
/// into \p CGF and returns nullptr. No-op (returns nullptr) when the target
/// supports native TLS, when \p VD has no visible definition, or when the
/// variable was already processed.
llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
    const VarDecl *VD, Address VDAddr, SourceLocation Loc,
    bool PerformInit, CodeGenFunction *CGF) {
  // With native TLS the variable is emitted as an ordinary TLS global and no
  // runtime registration is required.
  if (CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getContext().getTargetInfo().isTLSSupported())
    return nullptr;

  VD = VD->getDefinition(CGM.getContext());
  // Process each defined threadprivate variable at most once.
  if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
    ThreadPrivateWithDefinition.insert(VD);
    QualType ASTTy = VD->getType();

    llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
    const Expr *Init = VD->getAnyInitializer();
    if (CGM.getLangOpts().CPlusPlus && PerformInit) {
      // Generate function that re-emits the declaration's initializer into the
      // threadprivate copy of the variable VD. Its signature is
      // void *ctor(void *dst): it initializes *dst and returns dst.
      CodeGenFunction CtorCGF(CGM);
      FunctionArgList Args;
      ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
                            /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
      Args.push_back(&Dst);

      const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
          CGM.getContext().VoidPtrTy, Args);
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      std::string Name = getName({"__kmpc_global_ctor_", ""});
      llvm::Function *Fn =
          CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
      CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
                            Args, Loc, Loc);
      // Load the destination pointer argument and reinterpret it as a
      // pointer to the variable's type before emitting the initializer.
      llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
          CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      Address Arg = Address(ArgVal, VDAddr.getAlignment());
      Arg = CtorCGF.Builder.CreateElementBitCast(
          Arg, CtorCGF.ConvertTypeForMem(ASTTy));
      CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
                               /*IsInitializer=*/true);
      // Return the (unmodified) destination pointer, as the runtime expects.
      ArgVal = CtorCGF.EmitLoadOfScalar(
          CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
      CtorCGF.FinishFunction();
      Ctor = Fn;
    }
    if (VD->getType().isDestructedType() != QualType::DK_none) {
      // Generate function that emits destructor call for the threadprivate copy
      // of the variable VD. Its signature is void dtor(void *obj).
      CodeGenFunction DtorCGF(CGM);
      FunctionArgList Args;
      ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
                            /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
      Args.push_back(&Dst);

      const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
          CGM.getContext().VoidTy, Args);
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      std::string Name = getName({"__kmpc_global_dtor_", ""});
      llvm::Function *Fn =
          CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
      auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
      DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
                            Loc, Loc);
      // Create a scope with an artificial location for the body of this function.
      auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
      llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
          DtorCGF.GetAddrOfLocalVar(&Dst),
          /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
      DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
                          DtorCGF.getDestroyer(ASTTy.isDestructedType()),
                          DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
      DtorCGF.FinishFunction();
      Dtor = Fn;
    }
    // Do not emit init function if it is not required.
    if (!Ctor && !Dtor)
      return nullptr;

    llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
                                               /*isVarArg=*/false)
                           ->getPointerTo();
    // Copying constructor for the threadprivate variable.
    // Must be NULL - reserved by runtime, but currently it requires that this
    // parameter is always NULL. Otherwise it fires assertion.
    CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
    // The register call takes all three pointers; substitute typed nulls for
    // the helpers that were not generated.
    if (Ctor == nullptr) {
      auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
                                             /*isVarArg=*/false)
                         ->getPointerTo();
      Ctor = llvm::Constant::getNullValue(CtorTy);
    }
    if (Dtor == nullptr) {
      auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
                                             /*isVarArg=*/false)
                         ->getPointerTo();
      Dtor = llvm::Constant::getNullValue(DtorTy);
    }
    if (!CGF) {
      // Not inside a function: wrap the registration in a dedicated init
      // function and hand it back to the caller to schedule.
      auto *InitFunctionTy =
          llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
      std::string Name = getName({"__omp_threadprivate_init_", ""});
      llvm::Function *InitFunction = CGM.CreateGlobalInitOrDestructFunction(
          InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
      CodeGenFunction InitCGF(CGM);
      FunctionArgList ArgList;
      InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
                            CGM.getTypes().arrangeNullaryFunction(), ArgList,
                            Loc, Loc);
      emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
      InitCGF.FinishFunction();
      return InitFunction;
    }
    // Inside a function: emit the registration in place.
    emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
  }
  return nullptr;
}
2592
2593 /// Obtain information that uniquely identifies a target entry. This
2594 /// consists of the file and device IDs as well as line number associated with
2595 /// the relevant entry source location.
2596 static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
2597                                      unsigned &DeviceID, unsigned &FileID,
2598                                      unsigned &LineNum) {
2599   SourceManager &SM = C.getSourceManager();
2600
2601   // The loc should be always valid and have a file ID (the user cannot use
2602   // #pragma directives in macros)
2603
2604   assert(Loc.isValid() && "Source location is expected to be always valid.");
2605
2606   PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2607   assert(PLoc.isValid() && "Source location is expected to be always valid.");
2608
2609   llvm::sys::fs::UniqueID ID;
2610   if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
2611     SM.getDiagnostics().Report(diag::err_cannot_open_file)
2612         << PLoc.getFilename() << EC.message();
2613
2614   DeviceID = ID.getDevice();
2615   FileID = ID.getFile();
2616   LineNum = PLoc.getLine();
2617 }
2618
2619 bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
2620                                                      llvm::GlobalVariable *Addr,
2621                                                      bool PerformInit) {
2622   Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2623       OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2624   if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link)
2625     return false;
2626   VD = VD->getDefinition(CGM.getContext());
2627   if (VD && !DeclareTargetWithDefinition.insert(VD).second)
2628     return CGM.getLangOpts().OpenMPIsDevice;
2629
2630   QualType ASTTy = VD->getType();
2631
2632   SourceLocation Loc = VD->getCanonicalDecl()->getLocStart();
2633   // Produce the unique prefix to identify the new target regions. We use
2634   // the source location of the variable declaration which we know to not
2635   // conflict with any target region.
2636   unsigned DeviceID;
2637   unsigned FileID;
2638   unsigned Line;
2639   getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
2640   SmallString<128> Buffer, Out;
2641   {
2642     llvm::raw_svector_ostream OS(Buffer);
2643     OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
2644        << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
2645   }
2646
2647   const Expr *Init = VD->getAnyInitializer();
2648   if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2649     llvm::Constant *Ctor;
2650     llvm::Constant *ID;
2651     if (CGM.getLangOpts().OpenMPIsDevice) {
2652       // Generate function that re-emits the declaration's initializer into
2653       // the threadprivate copy of the variable VD
2654       CodeGenFunction CtorCGF(CGM);
2655
2656       const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2657       llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2658       llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2659           FTy, Twine(Buffer, "_ctor"), FI, Loc);
2660       auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
2661       CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2662                             FunctionArgList(), Loc, Loc);
2663       auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
2664       CtorCGF.EmitAnyExprToMem(Init,
2665                                Address(Addr, CGM.getContext().getDeclAlign(VD)),
2666                                Init->getType().getQualifiers(),
2667                                /*IsInitializer=*/true);
2668       CtorCGF.FinishFunction();
2669       Ctor = Fn;
2670       ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2671       CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
2672     } else {
2673       Ctor = new llvm::GlobalVariable(
2674           CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2675           llvm::GlobalValue::PrivateLinkage,
2676           llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
2677       ID = Ctor;
2678     }
2679
2680     // Register the information for the entry associated with the constructor.
2681     Out.clear();
2682     OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
2683         DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
2684         ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
2685   }
2686   if (VD->getType().isDestructedType() != QualType::DK_none) {
2687     llvm::Constant *Dtor;
2688     llvm::Constant *ID;
2689     if (CGM.getLangOpts().OpenMPIsDevice) {
2690       // Generate function that emits destructor call for the threadprivate
2691       // copy of the variable VD
2692       CodeGenFunction DtorCGF(CGM);
2693
2694       const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2695       llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2696       llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2697           FTy, Twine(Buffer, "_dtor"), FI, Loc);
2698       auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2699       DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2700                             FunctionArgList(), Loc, Loc);
2701       // Create a scope with an artificial location for the body of this
2702       // function.
2703       auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2704       DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
2705                           ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2706                           DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2707       DtorCGF.FinishFunction();
2708       Dtor = Fn;
2709       ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2710       CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
2711     } else {
2712       Dtor = new llvm::GlobalVariable(
2713           CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2714           llvm::GlobalValue::PrivateLinkage,
2715           llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
2716       ID = Dtor;
2717     }
2718     // Register the information for the entry associated with the destructor.
2719     Out.clear();
2720     OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
2721         DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
2722         ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
2723   }
2724   return CGM.getLangOpts().OpenMPIsDevice;
2725 }
2726
2727 Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
2728                                                           QualType VarType,
2729                                                           StringRef Name) {
2730   std::string Suffix = getName({"artificial", ""});
2731   std::string CacheSuffix = getName({"cache", ""});
2732   llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
2733   llvm::Value *GAddr =
2734       getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
2735   llvm::Value *Args[] = {
2736       emitUpdateLocation(CGF, SourceLocation()),
2737       getThreadID(CGF, SourceLocation()),
2738       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
2739       CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
2740                                 /*IsSigned=*/false),
2741       getOrCreateInternalVariable(
2742           CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
2743   return Address(
2744       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2745           CGF.EmitRuntimeCall(
2746               createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2747           VarLVType->getPointerTo(/*AddrSpace=*/0)),
2748       CGM.getPointerAlign());
2749 }
2750
2751 void CGOpenMPRuntime::emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
2752                                       const RegionCodeGenTy &ThenGen,
2753                                       const RegionCodeGenTy &ElseGen) {
2754   CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
2755
2756   // If the condition constant folds and can be elided, try to avoid emitting
2757   // the condition and the dead arm of the if/else.
2758   bool CondConstant;
2759   if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
2760     if (CondConstant)
2761       ThenGen(CGF);
2762     else
2763       ElseGen(CGF);
2764     return;
2765   }
2766
2767   // Otherwise, the condition did not fold, or we couldn't elide it.  Just
2768   // emit the conditional branch.
2769   llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
2770   llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
2771   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
2772   CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
2773
2774   // Emit the 'then' code.
2775   CGF.EmitBlock(ThenBlock);
2776   ThenGen(CGF);
2777   CGF.EmitBranch(ContBlock);
2778   // Emit the 'else' code if present.
2779   // There is no need to emit line number for unconditional branch.
2780   (void)ApplyDebugLocation::CreateEmpty(CGF);
2781   CGF.EmitBlock(ElseBlock);
2782   ElseGen(CGF);
2783   // There is no need to emit line number for unconditional branch.
2784   (void)ApplyDebugLocation::CreateEmpty(CGF);
2785   CGF.EmitBranch(ContBlock);
2786   // Emit the continuation block for code after the if.
2787   CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
2788 }
2789
/// Emits the runtime call(s) implementing an OpenMP 'parallel' region.
/// \param OutlinedFn Outlined function containing the region body.
/// \param CapturedVars Captured values forwarded to \p OutlinedFn.
/// \param IfCond Expression of the 'if' clause, or null when absent.
/// Without an 'if' clause the forked version is emitted unconditionally;
/// otherwise a runtime check of the condition selects between the forked
/// (__kmpc_fork_call) and the serialized version of the region.
void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       llvm::Value *OutlinedFn,
                                       ArrayRef<llvm::Value *> CapturedVars,
                                       const Expr *IfCond) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
                                                     PrePostActionTy &) {
    // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    llvm::Value *Args[] = {
        RTLoc,
        CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
        CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
    llvm::SmallVector<llvm::Value *, 16> RealArgs;
    RealArgs.append(std::begin(Args), std::end(Args));
    // Captured values are passed as trailing variadic arguments.
    RealArgs.append(CapturedVars.begin(), CapturedVars.end());

    llvm::Value *RTLFn = RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
    CGF.EmitRuntimeCall(RTLFn, RealArgs);
  };
  auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
                                                          PrePostActionTy &) {
    // Serialized version: the current thread executes the region body itself.
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
    // Build calls:
    // __kmpc_serialized_parallel(&Loc, GTid);
    llvm::Value *Args[] = {RTLoc, ThreadID};
    CGF.EmitRuntimeCall(
        RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);

    // OutlinedFn(&GTid, &zero, CapturedStruct);
    Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                        /*Name*/ ".zero.addr");
    CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
    // ThreadId for serialized parallels is 0.
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
    RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);

    // __kmpc_end_serialized_parallel(&Loc, GTid);
    llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
    CGF.EmitRuntimeCall(
        RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
        EndArgs);
  };
  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
  } else {
    // No 'if' clause: the forked version runs unconditionally.
    RegionCodeGenTy ThenRCG(ThenGen);
    ThenRCG(CGF);
  }
}
2846
2847 // If we're inside an (outlined) parallel region, use the region info's
2848 // thread-ID variable (it is passed in a first argument of the outlined function
2849 // as "kmp_int32 *gtid"). Otherwise, if we're not inside parallel region, but in
2850 // regular serial code region, get thread ID by calling kmp_int32
2851 // kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and
2852 // return the address of that temp.
2853 Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
2854                                              SourceLocation Loc) {
2855   if (auto *OMPRegionInfo =
2856           dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2857     if (OMPRegionInfo->getThreadIDVariable())
2858       return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
2859
2860   llvm::Value *ThreadID = getThreadID(CGF, Loc);
2861   QualType Int32Ty =
2862       CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
2863   Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
2864   CGF.EmitStoreOfScalar(ThreadID,
2865                         CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
2866
2867   return ThreadIDTemp;
2868 }
2869
2870 llvm::Constant *
2871 CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
2872                                              const llvm::Twine &Name) {
2873   SmallString<256> Buffer;
2874   llvm::raw_svector_ostream Out(Buffer);
2875   Out << Name;
2876   StringRef RuntimeName = Out.str();
2877   auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
2878   if (Elem.second) {
2879     assert(Elem.second->getType()->getPointerElementType() == Ty &&
2880            "OMP internal variable has different type than requested");
2881     return &*Elem.second;
2882   }
2883
2884   return Elem.second = new llvm::GlobalVariable(
2885              CGM.getModule(), Ty, /*IsConstant*/ false,
2886              llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
2887              Elem.first());
2888 }
2889
2890 llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
2891   std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
2892   std::string Name = getName({Prefix, "var"});
2893   return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
2894 }
2895
namespace {
/// Common pre(post)-action for different OpenMP constructs.
/// Emits a runtime "enter" call before the region body and an "exit" call
/// after it. When \p Conditional is true, the enter call's non-zero result
/// guards the body, and the client must call Done() after the region to
/// close the guarded range.
class CommonActionTy final : public PrePostActionTy {
  /// Runtime function invoked before the region body.
  llvm::Value *EnterCallee;
  ArrayRef<llvm::Value *> EnterArgs;
  /// Runtime function invoked after the region body.
  llvm::Value *ExitCallee;
  ArrayRef<llvm::Value *> ExitArgs;
  /// Whether the body is guarded by the enter call's return value.
  bool Conditional;
  /// Continuation block used when Conditional is true; set in Enter().
  llvm::BasicBlock *ContBlock = nullptr;

public:
  CommonActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
                 llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
                 bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      // Skip the region body when the enter call returned zero.
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  /// Closes the conditional range opened by Enter(); only meaningful when
  /// Conditional is true (ContBlock is set there).
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};
} // anonymous namespace
2933
/// Emits an OpenMP 'critical' region guarded by the named runtime lock.
/// \param CriticalName Name of the critical section; selects the lock via
/// getCriticalRegionLock().
/// \param Hint Optional 'hint' clause expression; when present the
/// __kmpc_critical_with_hint entry point is used instead of __kmpc_critical.
void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
                                         StringRef CriticalName,
                                         const RegionCodeGenTy &CriticalOpGen,
                                         SourceLocation Loc, const Expr *Hint) {
  // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
  // CriticalOpGen();
  // __kmpc_end_critical(ident_t *, gtid, Lock);
  // Prepare arguments and build a call to __kmpc_critical
  if (!CGF.HaveInsertPoint())
    return;
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
                         getCriticalRegionLock(CriticalName)};
  // The hint value, if any, is appended to the enter call's arguments only;
  // __kmpc_end_critical takes the plain three-argument list.
  llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
                                                std::end(Args));
  if (Hint) {
    EnterArgs.push_back(CGF.Builder.CreateIntCast(
        CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
  }
  CommonActionTy Action(
      createRuntimeFunction(Hint ? OMPRTL__kmpc_critical_with_hint
                                 : OMPRTL__kmpc_critical),
      EnterArgs, createRuntimeFunction(OMPRTL__kmpc_end_critical), Args);
  CriticalOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
}
2959
2960 void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
2961                                        const RegionCodeGenTy &MasterOpGen,
2962                                        SourceLocation Loc) {
2963   if (!CGF.HaveInsertPoint())
2964     return;
2965   // if(__kmpc_master(ident_t *, gtid)) {
2966   //   MasterOpGen();
2967   //   __kmpc_end_master(ident_t *, gtid);
2968   // }
2969   // Prepare arguments and build a call to __kmpc_master
2970   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2971   CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
2972                         createRuntimeFunction(OMPRTL__kmpc_end_master), Args,
2973                         /*Conditional=*/true);
2974   MasterOpGen.setAction(Action);
2975   emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
2976   Action.Done(CGF);
2977 }
2978
2979 void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
2980                                         SourceLocation Loc) {
2981   if (!CGF.HaveInsertPoint())
2982     return;
2983   // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
2984   llvm::Value *Args[] = {
2985       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2986       llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
2987   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args);
2988   if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2989     Region->emitUntiedSwitch(CGF);
2990 }
2991
2992 void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
2993                                           const RegionCodeGenTy &TaskgroupOpGen,
2994                                           SourceLocation Loc) {
2995   if (!CGF.HaveInsertPoint())
2996     return;
2997   // __kmpc_taskgroup(ident_t *, gtid);
2998   // TaskgroupOpGen();
2999   // __kmpc_end_taskgroup(ident_t *, gtid);
3000   // Prepare arguments and build a call to __kmpc_taskgroup
3001   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3002   CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
3003                         createRuntimeFunction(OMPRTL__kmpc_end_taskgroup),
3004                         Args);
3005   TaskgroupOpGen.setAction(Action);
3006   emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
3007 }
3008
3009 /// Given an array of pointers to variables, project the address of a
3010 /// given variable.
3011 static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
3012                                       unsigned Index, const VarDecl *Var) {
3013   // Pull out the pointer to the variable.
3014   Address PtrAddr =
3015       CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize());
3016   llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
3017
3018   Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
3019   Addr = CGF.Builder.CreateElementBitCast(
3020       Addr, CGF.ConvertTypeForMem(Var->getType()));
3021   return Addr;
3022 }
3023
/// Emits the copy function passed to __kmpc_copyprivate, used to broadcast
/// the values of 'copyprivate' variables from the thread that executed the
/// 'single' region to the other threads.
/// \param ArgsType Pointer type of the void*[n] array holding the
/// variables' addresses.
/// \param CopyprivateVars References to the copyprivate variables.
/// \param DestExprs Destination pseudo-variables of the copy assignments.
/// \param SrcExprs Source pseudo-variables of the copy assignments.
/// \param AssignmentOps Copy/assignment expressions, one per variable.
/// \returns The generated "void copy_func(void *LHSArg, void *RHSArg)".
static llvm::Value *emitCopyprivateCopyFunction(
    CodeGenModule &CGM, llvm::Type *ArgsType,
    ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
    ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
    SourceLocation Loc) {
  ASTContext &C = CGM.getContext();
  // void copy_func(void *LHSArg, void *RHSArg);
  FunctionArgList Args;
  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  Args.push_back(&LHSArg);
  Args.push_back(&RHSArg);
  const auto &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  std::string Name =
      CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
                                    llvm::GlobalValue::InternalLinkage, Name,
                                    &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
  // Dest = (void*[n])(LHSArg);
  // Src = (void*[n])(RHSArg);
  Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
      ArgsType), CGF.getPointerAlign());
  Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
      ArgsType), CGF.getPointerAlign());
  // *(Type0*)Dst[0] = *(Type0*)Src[0];
  // *(Type1*)Dst[1] = *(Type1*)Src[1];
  // ...
  // *(Typen*)Dst[n] = *(Typen*)Src[n];
  for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
    const auto *DestVar =
        cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
    Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);

    const auto *SrcVar =
        cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
    Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);

    // Copy element I using the precomputed assignment expression.
    const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
    QualType Type = VD->getType();
    CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
  }
  CGF.FinishFunction();
  return Fn;
}
3077
/// Emits an OpenMP 'single' region, including the broadcast of 'copyprivate'
/// variables from the executing thread to the other threads via
/// __kmpc_copyprivate.
/// \param CopyprivateVars Variables listed in 'copyprivate' clauses.
/// \param SrcExprs Source pseudo-variables of the copy assignments.
/// \param DstExprs Destination pseudo-variables of the copy assignments.
/// \param AssignmentOps Copy/assignment expressions, one per variable.
void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
                                       const RegionCodeGenTy &SingleOpGen,
                                       SourceLocation Loc,
                                       ArrayRef<const Expr *> CopyprivateVars,
                                       ArrayRef<const Expr *> SrcExprs,
                                       ArrayRef<const Expr *> DstExprs,
                                       ArrayRef<const Expr *> AssignmentOps) {
  if (!CGF.HaveInsertPoint())
    return;
  assert(CopyprivateVars.size() == SrcExprs.size() &&
         CopyprivateVars.size() == DstExprs.size() &&
         CopyprivateVars.size() == AssignmentOps.size());
  ASTContext &C = CGM.getContext();
  // int32 did_it = 0;
  // if(__kmpc_single(ident_t *, gtid)) {
  //   SingleOpGen();
  //   __kmpc_end_single(ident_t *, gtid);
  //   did_it = 1;
  // }
  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
  // <copy_func>, did_it);

  // 'did_it' records whether this thread was the one that ran the region;
  // it is only needed when copyprivate values must be broadcast.
  Address DidIt = Address::invalid();
  if (!CopyprivateVars.empty()) {
    // int32 did_it = 0;
    QualType KmpInt32Ty =
        C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
    DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
    CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
  }
  // Prepare arguments and build a call to __kmpc_single
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
                        createRuntimeFunction(OMPRTL__kmpc_end_single), Args,
                        /*Conditional=*/true);
  SingleOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
  if (DidIt.isValid()) {
    // did_it = 1;
    CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
  }
  Action.Done(CGF);
  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
  // <copy_func>, did_it);
  if (DidIt.isValid()) {
    llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
    QualType CopyprivateArrayTy =
        C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
                               /*IndexTypeQuals=*/0);
    // Create a list of all private variables for copyprivate.
    Address CopyprivateList =
        CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
    for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
      Address Elem = CGF.Builder.CreateConstArrayGEP(
          CopyprivateList, I, CGF.getPointerSize());
      CGF.Builder.CreateStore(
          CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
              CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
          Elem);
    }
    // Build function that copies private values from single region to all other
    // threads in the corresponding parallel region.
    llvm::Value *CpyFn = emitCopyprivateCopyFunction(
        CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
        CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
    llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
    Address CL =
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
                                                      CGF.VoidPtrTy);
    llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
    llvm::Value *Args[] = {
        emitUpdateLocation(CGF, Loc), // ident_t *<loc>
        getThreadID(CGF, Loc),        // i32 <gtid>
        BufSize,                      // size_t <buf_size>
        CL.getPointer(),              // void *<copyprivate list>
        CpyFn,                        // void (*) (void *, void *) <copy_func>
        DidItVal                      // i32 did_it
    };
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args);
  }
}
3159
3160 void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
3161                                         const RegionCodeGenTy &OrderedOpGen,
3162                                         SourceLocation Loc, bool IsThreads) {
3163   if (!CGF.HaveInsertPoint())
3164     return;
3165   // __kmpc_ordered(ident_t *, gtid);
3166   // OrderedOpGen();
3167   // __kmpc_end_ordered(ident_t *, gtid);
3168   // Prepare arguments and build a call to __kmpc_ordered
3169   if (IsThreads) {
3170     llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3171     CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
3172                           createRuntimeFunction(OMPRTL__kmpc_end_ordered),
3173                           Args);
3174     OrderedOpGen.setAction(Action);
3175     emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
3176     return;
3177   }
3178   emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
3179 }
3180
/// Emits an explicit or implicit OpenMP barrier.
/// \param Kind The directive the barrier is emitted for; it selects the
/// OMP_IDENT_BARRIER_* flag encoded in the ident_t location argument.
/// \param EmitChecks Whether to emit the branch that exits the construct
/// when a cancellable barrier reports cancellation.
/// \param ForceSimpleCall Force a plain __kmpc_barrier even inside a region
/// that has cancellation.
void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
                                      OpenMPDirectiveKind Kind, bool EmitChecks,
                                      bool ForceSimpleCall) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_cancel_barrier(loc, thread_id);
  // Build call __kmpc_barrier(loc, thread_id);
  // Map the directive kind onto the barrier flag for the ident_t.
  unsigned Flags;
  if (Kind == OMPD_for)
    Flags = OMP_IDENT_BARRIER_IMPL_FOR;
  else if (Kind == OMPD_sections)
    Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
  else if (Kind == OMPD_single)
    Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
  else if (Kind == OMPD_barrier)
    Flags = OMP_IDENT_BARRIER_EXPL;
  else
    Flags = OMP_IDENT_BARRIER_IMPL;
  // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
  // thread_id);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
                         getThreadID(CGF, Loc)};
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    // Inside a region with cancellation the barrier must also report whether
    // cancellation was requested, so use the cancellable variant.
    if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
      llvm::Value *Result = CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
      if (EmitChecks) {
        // if (__kmpc_cancel_barrier()) {
        //   exit from construct;
        // }
        llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
        llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
        llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
        CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
        CGF.EmitBlock(ExitBB);
        //   exit from construct;
        CodeGenFunction::JumpDest CancelDestination =
            CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
        CGF.EmitBranchThroughCleanup(CancelDestination);
        CGF.EmitBlock(ContBB, /*IsFinished=*/true);
      }
      return;
    }
  }
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
}
3228
3229 /// Map the OpenMP loop schedule to the runtime enumeration.
3230 static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
3231                                           bool Chunked, bool Ordered) {
3232   switch (ScheduleKind) {
3233   case OMPC_SCHEDULE_static:
3234     return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
3235                    : (Ordered ? OMP_ord_static : OMP_sch_static);
3236   case OMPC_SCHEDULE_dynamic:
3237     return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
3238   case OMPC_SCHEDULE_guided:
3239     return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
3240   case OMPC_SCHEDULE_runtime:
3241     return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
3242   case OMPC_SCHEDULE_auto:
3243     return Ordered ? OMP_ord_auto : OMP_sch_auto;
3244   case OMPC_SCHEDULE_unknown:
3245     assert(!Chunked && "chunk was specified but schedule kind not known");
3246     return Ordered ? OMP_ord_static : OMP_sch_static;
3247   }
3248   llvm_unreachable("Unexpected runtime schedule");
3249 }
3250
3251 /// Map the OpenMP distribute schedule to the runtime enumeration.
3252 static OpenMPSchedType
3253 getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
3254   // only static is allowed for dist_schedule
3255   return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
3256 }
3257
3258 bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
3259                                          bool Chunked) const {
3260   OpenMPSchedType Schedule =
3261       getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
3262   return Schedule == OMP_sch_static;
3263 }
3264
3265 bool CGOpenMPRuntime::isStaticNonchunked(
3266     OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
3267   OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
3268   return Schedule == OMP_dist_sch_static;
3269 }
3270
3271
3272 bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
3273   OpenMPSchedType Schedule =
3274       getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
3275   assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
3276   return Schedule != OMP_sch_static;
3277 }
3278
3279 static int addMonoNonMonoModifier(OpenMPSchedType Schedule,
3280                                   OpenMPScheduleClauseModifier M1,
3281                                   OpenMPScheduleClauseModifier M2) {
3282   int Modifier = 0;
3283   switch (M1) {
3284   case OMPC_SCHEDULE_MODIFIER_monotonic:
3285     Modifier = OMP_sch_modifier_monotonic;
3286     break;
3287   case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3288     Modifier = OMP_sch_modifier_nonmonotonic;
3289     break;
3290   case OMPC_SCHEDULE_MODIFIER_simd:
3291     if (Schedule == OMP_sch_static_chunked)
3292       Schedule = OMP_sch_static_balanced_chunked;
3293     break;
3294   case OMPC_SCHEDULE_MODIFIER_last:
3295   case OMPC_SCHEDULE_MODIFIER_unknown:
3296     break;
3297   }
3298   switch (M2) {
3299   case OMPC_SCHEDULE_MODIFIER_monotonic:
3300     Modifier = OMP_sch_modifier_monotonic;
3301     break;
3302   case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3303     Modifier = OMP_sch_modifier_nonmonotonic;
3304     break;
3305   case OMPC_SCHEDULE_MODIFIER_simd:
3306     if (Schedule == OMP_sch_static_chunked)
3307       Schedule = OMP_sch_static_balanced_chunked;
3308     break;
3309   case OMPC_SCHEDULE_MODIFIER_last:
3310   case OMPC_SCHEDULE_MODIFIER_unknown:
3311     break;
3312   }
3313   return Schedule | Modifier;
3314 }
3315
/// Emits the __kmpc_dispatch_init_* call that starts a dynamically
/// scheduled worksharing loop.
/// \param ScheduleKind Schedule kind plus its two clause modifiers.
/// \param IVSize Bit width (32 or 64) of the loop iteration variable.
/// \param IVSigned Whether the iteration variable is signed.
/// \param Ordered Whether the loop has an 'ordered' clause.
/// \param DispatchValues Lower/upper bounds and optional chunk value.
void CGOpenMPRuntime::emitForDispatchInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
    bool Ordered, const DispatchRTInput &DispatchValues) {
  if (!CGF.HaveInsertPoint())
    return;
  OpenMPSchedType Schedule = getRuntimeSchedule(
      ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
  // Static schedules go through the static-init path instead, unless the
  // 'ordered' clause forces dynamic dispatch.
  assert(Ordered ||
         (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
          Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
          Schedule != OMP_sch_static_balanced_chunked));
  // Call __kmpc_dispatch_init(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
  //          kmp_int[32|64] lower, kmp_int[32|64] upper,
  //          kmp_int[32|64] stride, kmp_int[32|64] chunk);

  // If the Chunk was not specified in the clause - use default value 1.
  llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
                                            : CGF.Builder.getIntN(IVSize, 1);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      CGF.Builder.getInt32(addMonoNonMonoModifier(
          Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
      DispatchValues.LB,                                // Lower
      DispatchValues.UB,                                // Upper
      CGF.Builder.getIntN(IVSize, 1),                   // Stride
      Chunk                                             // Chunk
  };
  CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
}
3347
3348 static void emitForStaticInitCall(
3349     CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
3350     llvm::Constant *ForStaticInitFunction, OpenMPSchedType Schedule,
3351     OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
3352     const CGOpenMPRuntime::StaticRTInput &Values) {
3353   if (!CGF.HaveInsertPoint())
3354     return;
3355
3356   assert(!Values.Ordered);
3357   assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
3358          Schedule == OMP_sch_static_balanced_chunked ||
3359          Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
3360          Schedule == OMP_dist_sch_static ||
3361          Schedule == OMP_dist_sch_static_chunked);
3362
3363   // Call __kmpc_for_static_init(
3364   //          ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
3365   //          kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
3366   //          kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
3367   //          kmp_int[32|64] incr, kmp_int[32|64] chunk);
3368   llvm::Value *Chunk = Values.Chunk;
3369   if (Chunk == nullptr) {
3370     assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
3371             Schedule == OMP_dist_sch_static) &&
3372            "expected static non-chunked schedule");
3373     // If the Chunk was not specified in the clause - use default value 1.
3374     Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
3375   } else {
3376     assert((Schedule == OMP_sch_static_chunked ||
3377             Schedule == OMP_sch_static_balanced_chunked ||
3378             Schedule == OMP_ord_static_chunked ||
3379             Schedule == OMP_dist_sch_static_chunked) &&
3380            "expected static chunked schedule");
3381   }
3382   llvm::Value *Args[] = {
3383       UpdateLocation,
3384       ThreadId,
3385       CGF.Builder.getInt32(addMonoNonMonoModifier(Schedule, M1,
3386                                                   M2)), // Schedule type
3387       Values.IL.getPointer(),                           // &isLastIter
3388       Values.LB.getPointer(),                           // &LB
3389       Values.UB.getPointer(),                           // &UB
3390       Values.ST.getPointer(),                           // &Stride
3391       CGF.Builder.getIntN(Values.IVSize, 1),            // Incr
3392       Chunk                                             // Chunk
3393   };
3394   CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
3395 }
3396
3397 void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
3398                                         SourceLocation Loc,
3399                                         OpenMPDirectiveKind DKind,
3400                                         const OpenMPScheduleTy &ScheduleKind,
3401                                         const StaticRTInput &Values) {
3402   OpenMPSchedType ScheduleNum = getRuntimeSchedule(
3403       ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
3404   assert(isOpenMPWorksharingDirective(DKind) &&
3405          "Expected loop-based or sections-based directive.");
3406   llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
3407                                              isOpenMPLoopDirective(DKind)
3408                                                  ? OMP_IDENT_WORK_LOOP
3409                                                  : OMP_IDENT_WORK_SECTIONS);
3410   llvm::Value *ThreadId = getThreadID(CGF, Loc);
3411   llvm::Constant *StaticInitFunction =
3412       createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3413   emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3414                         ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
3415 }
3416
3417 void CGOpenMPRuntime::emitDistributeStaticInit(
3418     CodeGenFunction &CGF, SourceLocation Loc,
3419     OpenMPDistScheduleClauseKind SchedKind,
3420     const CGOpenMPRuntime::StaticRTInput &Values) {
3421   OpenMPSchedType ScheduleNum =
3422       getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
3423   llvm::Value *UpdatedLocation =
3424       emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
3425   llvm::Value *ThreadId = getThreadID(CGF, Loc);
3426   llvm::Constant *StaticInitFunction =
3427       createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3428   emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3429                         ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
3430                         OMPC_SCHEDULE_MODIFIER_unknown, Values);
3431 }
3432
3433 void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
3434                                           SourceLocation Loc,
3435                                           OpenMPDirectiveKind DKind) {
3436   if (!CGF.HaveInsertPoint())
3437     return;
3438   // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
3439   llvm::Value *Args[] = {
3440       emitUpdateLocation(CGF, Loc,
3441                          isOpenMPDistributeDirective(DKind)
3442                              ? OMP_IDENT_WORK_DISTRIBUTE
3443                              : isOpenMPLoopDirective(DKind)
3444                                    ? OMP_IDENT_WORK_LOOP
3445                                    : OMP_IDENT_WORK_SECTIONS),
3446       getThreadID(CGF, Loc)};
3447   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
3448                       Args);
3449 }
3450
3451 void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
3452                                                  SourceLocation Loc,
3453                                                  unsigned IVSize,
3454                                                  bool IVSigned) {
3455   if (!CGF.HaveInsertPoint())
3456     return;
3457   // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
3458   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3459   CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
3460 }
3461
3462 llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
3463                                           SourceLocation Loc, unsigned IVSize,
3464                                           bool IVSigned, Address IL,
3465                                           Address LB, Address UB,
3466                                           Address ST) {
3467   // Call __kmpc_dispatch_next(
3468   //          ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
3469   //          kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
3470   //          kmp_int[32|64] *p_stride);
3471   llvm::Value *Args[] = {
3472       emitUpdateLocation(CGF, Loc),
3473       getThreadID(CGF, Loc),
3474       IL.getPointer(), // &isLastIter
3475       LB.getPointer(), // &Lower
3476       UB.getPointer(), // &Upper
3477       ST.getPointer()  // &Stride
3478   };
3479   llvm::Value *Call =
3480       CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
3481   return CGF.EmitScalarConversion(
3482       Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
3483       CGF.getContext().BoolTy, Loc);
3484 }
3485
3486 void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
3487                                            llvm::Value *NumThreads,
3488                                            SourceLocation Loc) {
3489   if (!CGF.HaveInsertPoint())
3490     return;
3491   // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
3492   llvm::Value *Args[] = {
3493       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3494       CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
3495   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
3496                       Args);
3497 }
3498
3499 void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
3500                                          OpenMPProcBindClauseKind ProcBind,
3501                                          SourceLocation Loc) {
3502   if (!CGF.HaveInsertPoint())
3503     return;
3504   // Constants for proc bind value accepted by the runtime.
3505   enum ProcBindTy {
3506     ProcBindFalse = 0,
3507     ProcBindTrue,
3508     ProcBindMaster,
3509     ProcBindClose,
3510     ProcBindSpread,
3511     ProcBindIntel,
3512     ProcBindDefault
3513   } RuntimeProcBind;
3514   switch (ProcBind) {
3515   case OMPC_PROC_BIND_master:
3516     RuntimeProcBind = ProcBindMaster;
3517     break;
3518   case OMPC_PROC_BIND_close:
3519     RuntimeProcBind = ProcBindClose;
3520     break;
3521   case OMPC_PROC_BIND_spread:
3522     RuntimeProcBind = ProcBindSpread;
3523     break;
3524   case OMPC_PROC_BIND_unknown:
3525     llvm_unreachable("Unsupported proc_bind value.");
3526   }
3527   // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
3528   llvm::Value *Args[] = {
3529       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3530       llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
3531   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
3532 }
3533
3534 void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
3535                                 SourceLocation Loc) {
3536   if (!CGF.HaveInsertPoint())
3537     return;
3538   // Build call void __kmpc_flush(ident_t *loc)
3539   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
3540                       emitUpdateLocation(CGF, Loc));
3541 }
3542
namespace {
/// Indexes of fields for type kmp_task_t.
/// The order must match the field layout of the runtime's kmp_task_t
/// structure as built by this file's task codegen.
enum KmpTaskTFields {
  /// List of shared variables.
  KmpTaskTShareds,
  /// Task routine.
  KmpTaskTRoutine,
  /// Partition id for the untied tasks.
  KmpTaskTPartId,
  /// Function with call of destructors for private variables.
  /// NOTE(review): Data1/Data2 presumably mirror the kmp_cmplrdata_t union
  /// slots in the runtime's kmp.h — confirm against the runtime headers.
  Data1,
  /// Task priority.
  Data2,
  /// (Taskloops only) Lower bound.
  KmpTaskTLowerBound,
  /// (Taskloops only) Upper bound.
  KmpTaskTUpperBound,
  /// (Taskloops only) Stride.
  KmpTaskTStride,
  /// (Taskloops only) Is last iteration flag.
  KmpTaskTLastIter,
  /// (Taskloops only) Reduction data.
  KmpTaskTReductions,
};
} // anonymous namespace
3568
3569 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
3570   return OffloadEntriesTargetRegion.empty() &&
3571          OffloadEntriesDeviceGlobalVar.empty();
3572 }
3573
3574 /// Initialize target region entry.
3575 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3576     initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3577                                     StringRef ParentName, unsigned LineNum,
3578                                     unsigned Order) {
3579   assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3580                                              "only required for the device "
3581                                              "code generation.");
3582   OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
3583       OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
3584                                    OMPTargetRegionEntryTargetRegion);
3585   ++OffloadingEntriesNum;
3586 }
3587
3588 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3589     registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3590                                   StringRef ParentName, unsigned LineNum,
3591                                   llvm::Constant *Addr, llvm::Constant *ID,
3592                                   OMPTargetRegionEntryKind Flags) {
3593   // If we are emitting code for a target, the entry is already initialized,
3594   // only has to be registered.
3595   if (CGM.getLangOpts().OpenMPIsDevice) {
3596     if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum)) {
3597       unsigned DiagID = CGM.getDiags().getCustomDiagID(
3598           DiagnosticsEngine::Error,
3599           "Unable to find target region on line '%0' in the device code.");
3600       CGM.getDiags().Report(DiagID) << LineNum;
3601       return;
3602     }
3603     auto &Entry =
3604         OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
3605     assert(Entry.isValid() && "Entry not initialized!");
3606     Entry.setAddress(Addr);
3607     Entry.setID(ID);
3608     Entry.setFlags(Flags);
3609   } else {
3610     OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
3611     OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
3612     ++OffloadingEntriesNum;
3613   }
3614 }
3615
3616 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
3617     unsigned DeviceID, unsigned FileID, StringRef ParentName,
3618     unsigned LineNum) const {
3619   auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
3620   if (PerDevice == OffloadEntriesTargetRegion.end())
3621     return false;
3622   auto PerFile = PerDevice->second.find(FileID);
3623   if (PerFile == PerDevice->second.end())
3624     return false;
3625   auto PerParentName = PerFile->second.find(ParentName);
3626   if (PerParentName == PerFile->second.end())
3627     return false;
3628   auto PerLine = PerParentName->second.find(LineNum);
3629   if (PerLine == PerParentName->second.end())
3630     return false;
3631   // Fail if this entry is already registered.
3632   if (PerLine->second.getAddress() || PerLine->second.getID())
3633     return false;
3634   return true;
3635 }
3636
3637 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
3638     const OffloadTargetRegionEntryInfoActTy &Action) {
3639   // Scan all target region entries and perform the provided action.
3640   for (const auto &D : OffloadEntriesTargetRegion)
3641     for (const auto &F : D.second)
3642       for (const auto &P : F.second)
3643         for (const auto &L : P.second)
3644           Action(D.first, F.first, P.first(), L.first, L.second);
3645 }
3646
3647 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3648     initializeDeviceGlobalVarEntryInfo(StringRef Name,
3649                                        OMPTargetGlobalVarEntryKind Flags,
3650                                        unsigned Order) {
3651   assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3652                                              "only required for the device "
3653                                              "code generation.");
3654   OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
3655   ++OffloadingEntriesNum;
3656 }
3657
3658 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3659     registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
3660                                      CharUnits VarSize,
3661                                      OMPTargetGlobalVarEntryKind Flags,
3662                                      llvm::GlobalValue::LinkageTypes Linkage) {
3663   if (CGM.getLangOpts().OpenMPIsDevice) {
3664     auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
3665     assert(Entry.isValid() && Entry.getFlags() == Flags &&
3666            "Entry not initialized!");
3667     assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
3668            "Resetting with the new address.");
3669     if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName))
3670       return;
3671     Entry.setAddress(Addr);
3672     Entry.setVarSize(VarSize);
3673     Entry.setLinkage(Linkage);
3674   } else {
3675     if (hasDeviceGlobalVarEntryInfo(VarName))
3676       return;
3677     OffloadEntriesDeviceGlobalVar.try_emplace(
3678         VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
3679     ++OffloadingEntriesNum;
3680   }
3681 }
3682
3683 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3684     actOnDeviceGlobalVarEntriesInfo(
3685         const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
3686   // Scan all target region entries and perform the provided action.
3687   for (const auto &E : OffloadEntriesDeviceGlobalVar)
3688     Action(E.getKey(), E.getValue());
3689 }
3690
llvm::Function *
CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
  // If we don't have entries or if we are emitting code for the device, we
  // don't need to do anything.
  if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
    return nullptr;

  llvm::Module &M = CGM.getModule();
  ASTContext &C = CGM.getContext();

  // Get list of devices we care about
  const std::vector<llvm::Triple> &Devices = CGM.getLangOpts().OMPTargetTriples;

  // We should be creating an offloading descriptor only if there are devices
  // specified.
  assert(!Devices.empty() && "No OpenMP offloading devices??");

  // Create the external variables that will point to the begin and end of the
  // host entries section. These will be defined by the linker.
  llvm::Type *OffloadEntryTy =
      CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
  std::string EntriesBeginName = getName({"omp_offloading", "entries_begin"});
  auto *HostEntriesBegin = new llvm::GlobalVariable(
      M, OffloadEntryTy, /*isConstant=*/true,
      llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
      EntriesBeginName);
  std::string EntriesEndName = getName({"omp_offloading", "entries_end"});
  auto *HostEntriesEnd =
      new llvm::GlobalVariable(M, OffloadEntryTy, /*isConstant=*/true,
                               llvm::GlobalValue::ExternalLinkage,
                               /*Initializer=*/nullptr, EntriesEndName);

  // Create all device images
  auto *DeviceImageTy = cast<llvm::StructType>(
      CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
  ConstantInitBuilder DeviceImagesBuilder(CGM);
  ConstantArrayBuilder DeviceImagesEntries =
      DeviceImagesBuilder.beginArray(DeviceImageTy);

  // One __tgt_device_image per target triple; the img_start/img_end symbols
  // are external-weak and expected to be provided by the offload linker.
  for (const llvm::Triple &Device : Devices) {
    StringRef T = Device.getTriple();
    std::string BeginName = getName({"omp_offloading", "img_start", ""});
    auto *ImgBegin = new llvm::GlobalVariable(
        M, CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::ExternalWeakLinkage,
        /*Initializer=*/nullptr, Twine(BeginName).concat(T));
    std::string EndName = getName({"omp_offloading", "img_end", ""});
    auto *ImgEnd = new llvm::GlobalVariable(
        M, CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::ExternalWeakLinkage,
        /*Initializer=*/nullptr, Twine(EndName).concat(T));

    llvm::Constant *Data[] = {ImgBegin, ImgEnd, HostEntriesBegin,
                              HostEntriesEnd};
    createConstantGlobalStructAndAddToParent(CGM, getTgtDeviceImageQTy(), Data,
                                             DeviceImagesEntries);
  }

  // Create device images global array.
  std::string ImagesName = getName({"omp_offloading", "device_images"});
  llvm::GlobalVariable *DeviceImages =
      DeviceImagesEntries.finishAndCreateGlobal(ImagesName,
                                                CGM.getPointerAlign(),
                                                /*isConstant=*/true);
  DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // This is a Zero array to be used in the creation of the constant expressions
  llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
                             llvm::Constant::getNullValue(CGM.Int32Ty)};

  // Create the target region descriptor (__tgt_bin_desc): number of images,
  // pointer to the image array, and the host entries range.
  llvm::Constant *Data[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
      llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
                                           DeviceImages, Index),
      HostEntriesBegin, HostEntriesEnd};
  std::string Descriptor = getName({"omp_offloading", "descriptor"});
  llvm::GlobalVariable *Desc = createConstantGlobalStruct(
      CGM, getTgtBinaryDescriptorQTy(), Data, Descriptor);

  // Emit code to register or unregister the descriptor at execution
  // startup or closing, respectively.

  // Unregistration function: calls __tgt_unregister_lib(Desc); its dummy
  // void* parameter matches the destructor signature expected by
  // registerGlobalDtor below.
  llvm::Function *UnRegFn;
  {
    FunctionArgList Args;
    ImplicitParamDecl DummyPtr(C, C.VoidPtrTy, ImplicitParamDecl::Other);
    Args.push_back(&DummyPtr);

    CodeGenFunction CGF(CGM);
    // Disable debug info for global (de-)initializer because they are not part
    // of some particular construct.
    CGF.disableDebugInfo();
    const auto &FI =
        CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
    std::string UnregName = getName({"omp_offloading", "descriptor_unreg"});
    UnRegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, UnregName, FI);
    CGF.StartFunction(GlobalDecl(), C.VoidTy, UnRegFn, FI, Args);
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
                        Desc);
    CGF.FinishFunction();
  }
  // Registration function: calls __tgt_register_lib(Desc) and arranges for
  // UnRegFn to run at program exit.
  llvm::Function *RegFn;
  {
    CodeGenFunction CGF(CGM);
    // Disable debug info for global (de-)initializer because they are not part
    // of some particular construct.
    CGF.disableDebugInfo();
    const auto &FI = CGM.getTypes().arrangeNullaryFunction();
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
    std::string Descriptor = getName({"omp_offloading", "descriptor_reg"});
    RegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, Descriptor, FI);
    CGF.StartFunction(GlobalDecl(), C.VoidTy, RegFn, FI, FunctionArgList());
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_lib), Desc);
    // Create a variable to drive the registration and unregistration of the
    // descriptor, so we can reuse the logic that emits Ctors and Dtors.
    ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(),
                                  SourceLocation(), nullptr, C.CharTy,
                                  ImplicitParamDecl::Other);
    CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
    CGF.FinishFunction();
  }
  if (CGM.supportsCOMDAT()) {
    // It is sufficient to call registration function only once, so create a
    // COMDAT group for registration/unregistration functions and associated
    // data. That would reduce startup time and code size. Registration
    // function serves as a COMDAT group key.
    llvm::Comdat *ComdatKey = M.getOrInsertComdat(RegFn->getName());
    RegFn->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
    RegFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
    RegFn->setComdat(ComdatKey);
    UnRegFn->setComdat(ComdatKey);
    DeviceImages->setComdat(ComdatKey);
    Desc->setComdat(ComdatKey);
  }
  // NOTE(review): the caller presumably emits RegFn as a global constructor —
  // confirm at the call site.
  return RegFn;
}
3829
3830 void CGOpenMPRuntime::createOffloadEntry(
3831     llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
3832     llvm::GlobalValue::LinkageTypes Linkage) {
3833   StringRef Name = Addr->getName();
3834   llvm::Module &M = CGM.getModule();
3835   llvm::LLVMContext &C = M.getContext();
3836
3837   // Create constant string with the name.
3838   llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
3839
3840   std::string StringName = getName({"omp_offloading", "entry_name"});
3841   auto *Str = new llvm::GlobalVariable(
3842       M, StrPtrInit->getType(), /*isConstant=*/true,
3843       llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
3844   Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3845
3846   llvm::Constant *Data[] = {llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy),
3847                             llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy),
3848                             llvm::ConstantInt::get(CGM.SizeTy, Size),
3849                             llvm::ConstantInt::get(CGM.Int32Ty, Flags),
3850                             llvm::ConstantInt::get(CGM.Int32Ty, 0)};
3851   std::string EntryName = getName({"omp_offloading", "entry", ""});
3852   llvm::GlobalVariable *Entry = createConstantGlobalStruct(
3853       CGM, getTgtOffloadEntryQTy(), Data, Twine(EntryName).concat(Name),
3854       llvm::GlobalValue::WeakAnyLinkage);
3855
3856   // The entry has to be created in the section the linker expects it to be.
3857   std::string Section = getName({"omp_offloading", "entries"});
3858   Entry->setSection(Section);
3859 }
3860
3861 void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
3862   // Emit the offloading entries and metadata so that the device codegen side
3863   // can easily figure out what to emit. The produced metadata looks like
3864   // this:
3865   //
3866   // !omp_offload.info = !{!1, ...}
3867   //
3868   // Right now we only generate metadata for function that contain target
3869   // regions.
3870
3871   // If we do not have entries, we don't need to do anything.
3872   if (OffloadEntriesInfoManager.empty())
3873     return;
3874
3875   llvm::Module &M = CGM.getModule();
3876   llvm::LLVMContext &C = M.getContext();
3877   SmallVector<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
3878       OrderedEntries(OffloadEntriesInfoManager.size());
3879
3880   // Auxiliary methods to create metadata values and strings.
3881   auto &&GetMDInt = [this](unsigned V) {
3882     return llvm::ConstantAsMetadata::get(
3883         llvm::ConstantInt::get(CGM.Int32Ty, V));
3884   };
3885
3886   auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
3887
3888   // Create the offloading info metadata node.
3889   llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
3890
3891   // Create function that emits metadata for each target region entry;
3892   auto &&TargetRegionMetadataEmitter =
3893       [&C, MD, &OrderedEntries, &GetMDInt, &GetMDString](
3894           unsigned DeviceID, unsigned FileID, StringRef ParentName,
3895           unsigned Line,
3896           const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
3897         // Generate metadata for target regions. Each entry of this metadata
3898         // contains:
3899         // - Entry 0 -> Kind of this type of metadata (0).
3900         // - Entry 1 -> Device ID of the file where the entry was identified.
3901         // - Entry 2 -> File ID of the file where the entry was identified.
3902         // - Entry 3 -> Mangled name of the function where the entry was
3903         // identified.
3904         // - Entry 4 -> Line in the file where the entry was identified.
3905         // - Entry 5 -> Order the entry was created.
3906         // The first element of the metadata node is the kind.
3907         llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
3908                                  GetMDInt(FileID),      GetMDString(ParentName),
3909                                  GetMDInt(Line),        GetMDInt(E.getOrder())};
3910
3911         // Save this entry in the right position of the ordered entries array.
3912         OrderedEntries[E.getOrder()] = &E;
3913
3914         // Add metadata to the named metadata node.
3915         MD->addOperand(llvm::MDNode::get(C, Ops));
3916       };
3917
3918   OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
3919       TargetRegionMetadataEmitter);
3920
3921   // Create function that emits metadata for each device global variable entry;
3922   auto &&DeviceGlobalVarMetadataEmitter =
3923       [&C, &OrderedEntries, &GetMDInt, &GetMDString,
3924        MD](StringRef MangledName,
3925            const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar
3926                &E) {
3927         // Generate metadata for global variables. Each entry of this metadata
3928         // contains:
3929         // - Entry 0 -> Kind of this type of metadata (1).
3930         // - Entry 1 -> Mangled name of the variable.
3931         // - Entry 2 -> Declare target kind.
3932         // - Entry 3 -> Order the entry was created.
3933         // The first element of the metadata node is the kind.
3934         llvm::Metadata *Ops[] = {
3935             GetMDInt(E.getKind()), GetMDString(MangledName),
3936             GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
3937
3938         // Save this entry in the right position of the ordered entries array.
3939         OrderedEntries[E.getOrder()] = &E;
3940
3941         // Add metadata to the named metadata node.
3942         MD->addOperand(llvm::MDNode::get(C, Ops));
3943       };
3944
3945   OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
3946       DeviceGlobalVarMetadataEmitter);
3947
3948   for (const auto *E : OrderedEntries) {
3949     assert(E && "All ordered entries must exist!");
3950     if (const auto *CE =
3951             dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
3952                 E)) {
3953       if (!CE->getID() || !CE->getAddress()) {
3954         unsigned DiagID = CGM.getDiags().getCustomDiagID(
3955             DiagnosticsEngine::Error,
3956             "Offloading entry for target region is incorrect: either the "
3957             "address or the ID is invalid.");
3958         CGM.getDiags().Report(DiagID);
3959         continue;
3960       }
3961       createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
3962                          CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
3963     } else if (const auto *CE =
3964                    dyn_cast<OffloadEntriesInfoManagerTy::
3965                                 OffloadEntryInfoDeviceGlobalVar>(E)) {
3966       OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags =
3967           static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
3968               CE->getFlags());
3969       switch (Flags) {
3970       case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: {
3971         if (!CE->getAddress()) {
3972           unsigned DiagID = CGM.getDiags().getCustomDiagID(
3973               DiagnosticsEngine::Error,
3974               "Offloading entry for declare target variable is incorrect: the "
3975               "address is invalid.");
3976           CGM.getDiags().Report(DiagID);
3977           continue;
3978         }
3979         break;
3980       }
3981       case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink:
3982         assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
3983                 (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
3984                "Declaret target link address is set.");
3985         if (CGM.getLangOpts().OpenMPIsDevice)
3986           continue;
3987         if (!CE->getAddress()) {
3988           unsigned DiagID = CGM.getDiags().getCustomDiagID(
3989               DiagnosticsEngine::Error,
3990               "Offloading entry for declare target variable is incorrect: the "
3991               "address is invalid.");
3992           CGM.getDiags().Report(DiagID);
3993           continue;
3994         }
3995         break;
3996       }
3997       createOffloadEntry(CE->getAddress(), CE->getAddress(),
3998                          CE->getVarSize().getQuantity(), Flags,
3999                          CE->getLinkage());
4000     } else {
4001       llvm_unreachable("Unsupported entry kind.");
4002     }
4003   }
4004 }
4005
/// Loads all the offload entries information from the host IR
/// metadata.
void CGOpenMPRuntime::loadOffloadInfoMetadata() {
  // If we are in target mode, load the metadata from the host IR. This code has
  // to match the metadata creation in createOffloadEntriesAndInfoMetadata().

  // Only device compilations read host metadata.
  if (!CGM.getLangOpts().OpenMPIsDevice)
    return;

  // Nothing to do if no host IR file was provided on the command line.
  if (CGM.getLangOpts().OMPHostIRFile.empty())
    return;

  auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
  if (auto EC = Buf.getError()) {
    CGM.getDiags().Report(diag::err_cannot_open_file)
        << CGM.getLangOpts().OMPHostIRFile << EC.message();
    return;
  }

  // Parse the host IR into a throwaway module in a local context; we only
  // need its named metadata, not the module itself.
  llvm::LLVMContext C;
  auto ME = expectedToErrorOrAndEmitErrors(
      C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));

  if (auto EC = ME.getError()) {
    unsigned DiagID = CGM.getDiags().getCustomDiagID(
        DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
    CGM.getDiags().Report(DiagID)
        << CGM.getLangOpts().OMPHostIRFile << EC.message();
    return;
  }

  // The host side records every offload entry under this named metadata node.
  llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
  if (!MD)
    return;

  for (llvm::MDNode *MN : MD->operands()) {
    // Helper to read an integer metadata operand at index Idx.
    auto &&GetMDInt = [MN](unsigned Idx) {
      auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
      return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
    };

    // Helper to read a string metadata operand at index Idx.
    auto &&GetMDString = [MN](unsigned Idx) {
      auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
      return V->getString();
    };

    // Operand 0 carries the entry kind; the remaining operand layout depends
    // on it and mirrors what the emitters wrote on the host side.
    switch (GetMDInt(0)) {
    default:
      llvm_unreachable("Unexpected metadata!");
      break;
    case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
        OffloadingEntryInfoTargetRegion:
      OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
          /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
          /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
          /*Order=*/GetMDInt(5));
      break;
    case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
        OffloadingEntryInfoDeviceGlobalVar:
      OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo(
          /*MangledName=*/GetMDString(1),
          static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
              /*Flags=*/GetMDInt(2)),
          /*Order=*/GetMDInt(3));
      break;
    }
  }
}
4074
4075 void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
4076   if (!KmpRoutineEntryPtrTy) {
4077     // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
4078     ASTContext &C = CGM.getContext();
4079     QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
4080     FunctionProtoType::ExtProtoInfo EPI;
4081     KmpRoutineEntryPtrQTy = C.getPointerType(
4082         C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
4083     KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
4084   }
4085 }
4086
4087 QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
4088   // Make sure the type of the entry is already created. This is the type we
4089   // have to create:
4090   // struct __tgt_offload_entry{
4091   //   void      *addr;       // Pointer to the offload entry info.
4092   //                          // (function or global)
4093   //   char      *name;       // Name of the function or global.
4094   //   size_t     size;       // Size of the entry info (0 if it a function).
4095   //   int32_t    flags;      // Flags associated with the entry, e.g. 'link'.
4096   //   int32_t    reserved;   // Reserved, to use by the runtime library.
4097   // };
4098   if (TgtOffloadEntryQTy.isNull()) {
4099     ASTContext &C = CGM.getContext();
4100     RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
4101     RD->startDefinition();
4102     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4103     addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
4104     addFieldToRecordDecl(C, RD, C.getSizeType());
4105     addFieldToRecordDecl(
4106         C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4107     addFieldToRecordDecl(
4108         C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4109     RD->completeDefinition();
4110     RD->addAttr(PackedAttr::CreateImplicit(C));
4111     TgtOffloadEntryQTy = C.getRecordType(RD);
4112   }
4113   return TgtOffloadEntryQTy;
4114 }
4115
4116 QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
4117   // These are the types we need to build:
4118   // struct __tgt_device_image{
4119   // void   *ImageStart;       // Pointer to the target code start.
4120   // void   *ImageEnd;         // Pointer to the target code end.
4121   // // We also add the host entries to the device image, as it may be useful
4122   // // for the target runtime to have access to that information.
4123   // __tgt_offload_entry  *EntriesBegin;   // Begin of the table with all
4124   //                                       // the entries.
4125   // __tgt_offload_entry  *EntriesEnd;     // End of the table with all the
4126   //                                       // entries (non inclusive).
4127   // };
4128   if (TgtDeviceImageQTy.isNull()) {
4129     ASTContext &C = CGM.getContext();
4130     RecordDecl *RD = C.buildImplicitRecord("__tgt_device_image");
4131     RD->startDefinition();
4132     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4133     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4134     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4135     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4136     RD->completeDefinition();
4137     TgtDeviceImageQTy = C.getRecordType(RD);
4138   }
4139   return TgtDeviceImageQTy;
4140 }
4141
4142 QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
4143   // struct __tgt_bin_desc{
4144   //   int32_t              NumDevices;      // Number of devices supported.
4145   //   __tgt_device_image   *DeviceImages;   // Arrays of device images
4146   //                                         // (one per device).
4147   //   __tgt_offload_entry  *EntriesBegin;   // Begin of the table with all the
4148   //                                         // entries.
4149   //   __tgt_offload_entry  *EntriesEnd;     // End of the table with all the
4150   //                                         // entries (non inclusive).
4151   // };
4152   if (TgtBinaryDescriptorQTy.isNull()) {
4153     ASTContext &C = CGM.getContext();
4154     RecordDecl *RD = C.buildImplicitRecord("__tgt_bin_desc");
4155     RD->startDefinition();
4156     addFieldToRecordDecl(
4157         C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4158     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtDeviceImageQTy()));
4159     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4160     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4161     RD->completeDefinition();
4162     TgtBinaryDescriptorQTy = C.getRecordType(RD);
4163   }
4164   return TgtBinaryDescriptorQTy;
4165 }
4166
4167 namespace {
4168 struct PrivateHelpersTy {
4169   PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
4170                    const VarDecl *PrivateElemInit)
4171       : Original(Original), PrivateCopy(PrivateCopy),
4172         PrivateElemInit(PrivateElemInit) {}
4173   const VarDecl *Original;
4174   const VarDecl *PrivateCopy;
4175   const VarDecl *PrivateElemInit;
4176 };
4177 typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
4178 } // anonymous namespace
4179
4180 static RecordDecl *
4181 createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
4182   if (!Privates.empty()) {
4183     ASTContext &C = CGM.getContext();
4184     // Build struct .kmp_privates_t. {
4185     //         /*  private vars  */
4186     //       };
4187     RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
4188     RD->startDefinition();
4189     for (const auto &Pair : Privates) {
4190       const VarDecl *VD = Pair.second.Original;
4191       QualType Type = VD->getType().getNonReferenceType();
4192       FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
4193       if (VD->hasAttrs()) {
4194         for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
4195              E(VD->getAttrs().end());
4196              I != E; ++I)
4197           FD->addAttr(*I);
4198       }
4199     }
4200     RD->completeDefinition();
4201     return RD;
4202   }
4203   return nullptr;
4204 }
4205
4206 static RecordDecl *
4207 createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
4208                          QualType KmpInt32Ty,
4209                          QualType KmpRoutineEntryPointerQTy) {
4210   ASTContext &C = CGM.getContext();
4211   // Build struct kmp_task_t {
4212   //         void *              shareds;
4213   //         kmp_routine_entry_t routine;
4214   //         kmp_int32           part_id;
4215   //         kmp_cmplrdata_t data1;
4216   //         kmp_cmplrdata_t data2;
4217   // For taskloops additional fields:
4218   //         kmp_uint64          lb;
4219   //         kmp_uint64          ub;
4220   //         kmp_int64           st;
4221   //         kmp_int32           liter;
4222   //         void *              reductions;
4223   //       };
4224   RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
4225   UD->startDefinition();
4226   addFieldToRecordDecl(C, UD, KmpInt32Ty);
4227   addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
4228   UD->completeDefinition();
4229   QualType KmpCmplrdataTy = C.getRecordType(UD);
4230   RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
4231   RD->startDefinition();
4232   addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4233   addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
4234   addFieldToRecordDecl(C, RD, KmpInt32Ty);
4235   addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
4236   addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
4237   if (isOpenMPTaskLoopDirective(Kind)) {
4238     QualType KmpUInt64Ty =
4239         CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
4240     QualType KmpInt64Ty =
4241         CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
4242     addFieldToRecordDecl(C, RD, KmpUInt64Ty);
4243     addFieldToRecordDecl(C, RD, KmpUInt64Ty);
4244     addFieldToRecordDecl(C, RD, KmpInt64Ty);
4245     addFieldToRecordDecl(C, RD, KmpInt32Ty);
4246     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4247   }
4248   RD->completeDefinition();
4249   return RD;
4250 }
4251
4252 static RecordDecl *
4253 createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
4254                                      ArrayRef<PrivateDataTy> Privates) {
4255   ASTContext &C = CGM.getContext();
4256   // Build struct kmp_task_t_with_privates {
4257   //         kmp_task_t task_data;
4258   //         .kmp_privates_t. privates;
4259   //       };
4260   RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
4261   RD->startDefinition();
4262   addFieldToRecordDecl(C, RD, KmpTaskTQTy);
4263   if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
4264     addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
4265   RD->completeDefinition();
4266   return RD;
4267 }
4268
/// Emit a proxy function which accepts kmp_task_t as the second
/// argument.
/// \code
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
///   For taskloops:
///   tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
///   tt->reductions, tt->shareds);
///   return 0;
/// }
/// \endcode
/// \param Kind Directive kind; taskloop directives get the extra bound/stride
/// arguments shown above.
/// \param TaskFunction The outlined task body the proxy forwards to.
/// \param TaskPrivatesMap The privates-mapping function passed through as-is.
/// \return The newly created internal-linkage proxy function.
static llvm::Value *
emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
                      OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
                      QualType KmpTaskTWithPrivatesPtrQTy,
                      QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
                      QualType SharedsPtrTy, llvm::Value *TaskFunction,
                      llvm::Value *TaskPrivatesMap) {
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  // Fixed signature: kmp_int32 (kmp_int32 gtid, kmp_task_t_with_privates *tt).
  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                KmpTaskTWithPrivatesPtrQTy.withRestrict(),
                                ImplicitParamDecl::Other);
  Args.push_back(&GtidArg);
  Args.push_back(&TaskTypeArg);
  const auto &TaskEntryFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  llvm::FunctionType *TaskEntryTy =
      CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
  auto *TaskEntry = llvm::Function::Create(
      TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
  TaskEntry->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
                    Loc, Loc);

  // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
  // tt,
  // For taskloops:
  // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
  // tt->task_data.shareds);
  llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
      CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
  LValue TDBase = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskTypeArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  const auto *KmpTaskTWithPrivatesQTyRD =
      cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  // Base is the embedded kmp_task_t (first field of the wrapper record).
  LValue Base =
      CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
  const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  // part_id is passed to the task body by address, not by value.
  auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
  LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
  llvm::Value *PartidParam = PartIdLVal.getPointer();

  // Load tt->shareds and cast it to the shareds record pointer type.
  auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
  LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
  llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.EmitLoadOfScalar(SharedsLVal, Loc),
      CGF.ConvertTypeForMem(SharedsPtrTy));

  // &tt->privates when the wrapper has a privates field, null otherwise.
  auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
  llvm::Value *PrivatesParam;
  if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
    LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
    PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        PrivatesLVal.getPointer(), CGF.VoidPtrTy);
  } else {
    PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }

  // Arguments common to task and taskloop entries.
  llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
                               TaskPrivatesMap,
                               CGF.Builder
                                   .CreatePointerBitCastOrAddrSpaceCast(
                                       TDBase.getAddress(), CGF.VoidPtrTy)
                                   .getPointer()};
  SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
                                          std::end(CommonArgs));
  if (isOpenMPTaskLoopDirective(Kind)) {
    // Taskloops additionally pass lb, ub, st, liter and reductions by value.
    auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
    LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
    llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
    auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
    LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
    llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
    auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
    LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
    llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
    auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
    LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
    llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
    auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
    LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
    llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
    CallArgs.push_back(LBParam);
    CallArgs.push_back(UBParam);
    CallArgs.push_back(StParam);
    CallArgs.push_back(LIParam);
    CallArgs.push_back(RParam);
  }
  CallArgs.push_back(SharedsParam);

  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
                                                  CallArgs);
  // The proxy always returns 0.
  CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
                             CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
  CGF.FinishFunction();
  return TaskEntry;
}
4383
/// Emit a function that destroys the privates embedded in a
/// kmp_task_t_with_privates instance: it takes (kmp_int32 gtid,
/// kmp_task_t_with_privates *tt), locates the privates record (second field
/// of the wrapper), and pushes a destruction cleanup for every field whose
/// type requires destruction.
/// \return The newly created internal-linkage destructor function.
static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
                                            SourceLocation Loc,
                                            QualType KmpInt32Ty,
                                            QualType KmpTaskTWithPrivatesPtrQTy,
                                            QualType KmpTaskTWithPrivatesQTy) {
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  // Same fixed signature as the task entry proxy.
  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                KmpTaskTWithPrivatesPtrQTy.withRestrict(),
                                ImplicitParamDecl::Other);
  Args.push_back(&GtidArg);
  Args.push_back(&TaskTypeArg);
  const auto &DestructorFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  llvm::FunctionType *DestructorFnTy =
      CGM.getTypes().GetFunctionType(DestructorFnInfo);
  std::string Name =
      CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
  auto *DestructorFn =
      llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
                             Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
                                    DestructorFnInfo);
  DestructorFn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
                    Args, Loc, Loc);

  LValue Base = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskTypeArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  const auto *KmpTaskTWithPrivatesQTyRD =
      cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  // The privates record is the field following the embedded kmp_task_t.
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  Base = CGF.EmitLValueForField(Base, *FI);
  for (const auto *Field :
       cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
    // Only fields with non-trivial destruction need a cleanup; the cleanups
    // run when the function body finishes.
    if (QualType::DestructionKind DtorKind =
            Field->getType().isDestructedType()) {
      LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
      CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
    }
  }
  CGF.FinishFunction();
  return DestructorFn;
}
4432
4433 /// Emit a privates mapping function for correct handling of private and
4434 /// firstprivate variables.
4435 /// \code
4436 /// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
4437 /// **noalias priv1,...,  <tyn> **noalias privn) {
4438 ///   *priv1 = &.privates.priv1;
4439 ///   ...;
4440 ///   *privn = &.privates.privn;
4441 /// }
4442 /// \endcode
4443 static llvm::Value *
4444 emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
4445                                ArrayRef<const Expr *> PrivateVars,
4446                                ArrayRef<const Expr *> FirstprivateVars,
4447                                ArrayRef<const Expr *> LastprivateVars,
4448                                QualType PrivatesQTy,
4449                                ArrayRef<PrivateDataTy> Privates) {
4450   ASTContext &C = CGM.getContext();
4451   FunctionArgList Args;
4452   ImplicitParamDecl TaskPrivatesArg(
4453       C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4454       C.getPointerType(PrivatesQTy).withConst().withRestrict(),
4455       ImplicitParamDecl::Other);
4456   Args.push_back(&TaskPrivatesArg);
4457   llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos;
4458   unsigned Counter = 1;
4459   for (const Expr *E : PrivateVars) {
4460     Args.push_back(ImplicitParamDecl::Create(
4461         C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4462         C.getPointerType(C.getPointerType(E->getType()))
4463             .withConst()
4464             .withRestrict(),
4465         ImplicitParamDecl::Other));
4466     const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4467     PrivateVarsPos[VD] = Counter;
4468     ++Counter;
4469   }
4470   for (const Expr *E : FirstprivateVars) {
4471     Args.push_back(ImplicitParamDecl::Create(
4472         C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4473         C.getPointerType(C.getPointerType(E->getType()))
4474             .withConst()
4475             .withRestrict(),
4476         ImplicitParamDecl::Other));
4477     const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4478     PrivateVarsPos[VD] = Counter;
4479     ++Counter;
4480   }
4481   for (const Expr *E : LastprivateVars) {
4482     Args.push_back(ImplicitParamDecl::Create(
4483         C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4484         C.getPointerType(C.getPointerType(E->getType()))
4485             .withConst()
4486             .withRestrict(),
4487         ImplicitParamDecl::Other));
4488     const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4489     PrivateVarsPos[VD] = Counter;
4490     ++Counter;
4491   }
4492   const auto &TaskPrivatesMapFnInfo =
4493       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4494   llvm::FunctionType *TaskPrivatesMapTy =
4495       CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
4496   std::string Name =
4497       CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
4498   auto *TaskPrivatesMap = llvm::Function::Create(
4499       TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
4500       &CGM.getModule());
4501   CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
4502                                     TaskPrivatesMapFnInfo);
4503   TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
4504   TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
4505   TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
4506   CodeGenFunction CGF(CGM);
4507   CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
4508                     TaskPrivatesMapFnInfo, Args, Loc, Loc);
4509
4510   // *privi = &.privates.privi;
4511   LValue Base = CGF.EmitLoadOfPointerLValue(
4512       CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
4513       TaskPrivatesArg.getType()->castAs<PointerType>());
4514   const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
4515   Counter = 0;
4516   for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
4517     LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
4518     const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
4519     LValue RefLVal =
4520         CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
4521     LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
4522         RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
4523     CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal);
4524     ++Counter;
4525   }
4526   CGF.FinishFunction();
4527   return TaskPrivatesMap;
4528 }
4529
4530 static bool stable_sort_comparator(const PrivateDataTy P1,
4531                                    const PrivateDataTy P2) {
4532   return P1.first > P2.first;
4533 }
4534
/// Emit initialization for private variables in task-based directives.
/// \param KmpTaskSharedsPtr Pointer to the shareds record of the task (may be
/// invalid for target tasks without captured firstprivates).
/// \param TDBase LValue of the kmp_task_t_with_privates instance.
/// \param Data Clause data; only FirstprivateVars emptiness is consulted here.
/// \param Privates Entries in the field order of the privates record.
/// \param ForDup True when emitting inside the task_dup function for
/// taskloops; in that case only non-trivial constructor initializers are
/// (re-)emitted.
static void emitPrivatesInit(CodeGenFunction &CGF,
                             const OMPExecutableDirective &D,
                             Address KmpTaskSharedsPtr, LValue TDBase,
                             const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                             QualType SharedsTy, QualType SharedsPtrTy,
                             const OMPTaskDataTy &Data,
                             ArrayRef<PrivateDataTy> Privates, bool ForDup) {
  ASTContext &C = CGF.getContext();
  // The privates record is the field after the embedded kmp_task_t.
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
  OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
                                 ? OMPD_taskloop
                                 : OMPD_task;
  const CapturedStmt &CS = *D.getCapturedStmt(Kind);
  CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
  LValue SrcBase;
  bool IsTargetTask =
      isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  // For target-based directives skip 3 firstprivate arrays BasePointersArray,
  // PointersArray and SizesArray. The original variables for these arrays are
  // not captured and we get their addresses explicitly.
  if ((!IsTargetTask && !Data.FirstprivateVars.empty()) ||
      (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
    SrcBase = CGF.MakeAddrLValue(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
        SharedsTy);
  }
  // Walk the privates record fields in lockstep with the Privates array.
  FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
  for (const PrivateDataTy &Pair : Privates) {
    const VarDecl *VD = Pair.second.PrivateCopy;
    const Expr *Init = VD->getAnyInitializer();
    // When duplicating (ForDup), only re-run non-trivial constructor
    // initializers; everything else was already handled at creation.
    if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
                             !CGF.isTrivialInitializer(Init)))) {
      LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
      if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
        // Firstprivate: initialize from the shared (captured) source value.
        const VarDecl *OriginalVD = Pair.second.Original;
        // Check if the variable is the target-based BasePointersArray,
        // PointersArray or SizesArray.
        LValue SharedRefLValue;
        QualType Type = OriginalVD->getType();
        const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
        if (IsTargetTask && !SharedField) {
          assert(isa<ImplicitParamDecl>(OriginalVD) &&
                 isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
                 cast<CapturedDecl>(OriginalVD->getDeclContext())
                         ->getNumParams() == 0 &&
                 isa<TranslationUnitDecl>(
                     cast<CapturedDecl>(OriginalVD->getDeclContext())
                         ->getDeclContext()) &&
                 "Expected artificial target data variable.");
          SharedRefLValue =
              CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
        } else {
          SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
          // Re-wrap with the declared alignment of the original variable.
          SharedRefLValue = CGF.MakeAddrLValue(
              Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
              SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
              SharedRefLValue.getTBAAInfo());
        }
        if (Type->isArrayType()) {
          // Initialize firstprivate array.
          if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
            // Perform simple memcpy.
            CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
          } else {
            // Initialize firstprivate array using element-by-element
            // initialization.
            CGF.EmitOMPAggregateAssign(
                PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
                [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
                                                  Address SrcElement) {
                  // Clean up any temporaries needed by the initialization.
                  CodeGenFunction::OMPPrivateScope InitScope(CGF);
                  InitScope.addPrivate(
                      Elem, [SrcElement]() -> Address { return SrcElement; });
                  (void)InitScope.Privatize();
                  // Emit initialization for single element.
                  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
                      CGF, &CapturesInfo);
                  CGF.EmitAnyExprToMem(Init, DestElement,
                                       Init->getType().getQualifiers(),
                                       /*IsInitializer=*/false);
                });
          }
        } else {
          // Scalar/class firstprivate: privatize the source element, then
          // emit the initializer with the captured-statement info in scope.
          CodeGenFunction::OMPPrivateScope InitScope(CGF);
          InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
            return SharedRefLValue.getAddress();
          });
          (void)InitScope.Privatize();
          CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
          CGF.EmitExprAsInit(Init, VD, PrivateLValue,
                             /*capturedByInit=*/false);
        }
      } else {
        // Plain private: emit the default initializer, if any.
        CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
      }
    }
    ++FI;
  }
}
4639
4640 /// Check if duplication function is required for taskloops.
4641 static bool checkInitIsRequired(CodeGenFunction &CGF,
4642                                 ArrayRef<PrivateDataTy> Privates) {
4643   bool InitRequired = false;
4644   for (const PrivateDataTy &Pair : Privates) {
4645     const VarDecl *VD = Pair.second.PrivateCopy;
4646     const Expr *Init = VD->getAnyInitializer();
4647     InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
4648                                     !CGF.isTrivialInitializer(Init));
4649     if (InitRequired)
4650       break;
4651   }
4652   return InitRequired;
4653 }
4654
4655
/// Emit task_dup function (for initialization of
/// private/firstprivate/lastprivate vars and last_iter flag)
/// \code
/// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
/// lastpriv) {
/// // setup lastprivate flag
///    task_dst->last = lastpriv;
/// // could be constructor calls here...
/// }
/// \endcode
/// \param WithLastIter If true, also store the 'lastpriv' argument into the
/// last-iteration field of the destination task.
/// \return The generated internal-linkage duplication function.
static llvm::Value *
emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
                    const OMPExecutableDirective &D,
                    QualType KmpTaskTWithPrivatesPtrQTy,
                    const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                    const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
                    QualType SharedsPtrTy, const OMPTaskDataTy &Data,
                    ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
  ASTContext &C = CGM.getContext();
  // Build the three implicit parameters: destination task, source task and
  // the lastprivate flag.
  FunctionArgList Args;
  ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                           KmpTaskTWithPrivatesPtrQTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                           KmpTaskTWithPrivatesPtrQTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
                                ImplicitParamDecl::Other);
  Args.push_back(&DstArg);
  Args.push_back(&SrcArg);
  Args.push_back(&LastprivArg);
  // Create the internal-linkage function '.omp_task_dup.' and start emitting
  // its body.
  const auto &TaskDupFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
  auto *TaskDup = llvm::Function::Create(
      TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
  TaskDup->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
                    Loc);

  LValue TDBase = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&DstArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  // task_dst->liter = lastpriv;
  if (WithLastIter) {
    auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
    LValue Base = CGF.EmitLValueForField(
        TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
    LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
    llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
        CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
    CGF.EmitStoreOfScalar(Lastpriv, LILVal);
  }

  // Emit initial values for private copies (if any).
  assert(!Privates.empty());
  Address KmpTaskSharedsPtr = Address::invalid();
  // The shareds pointer is only needed when there are firstprivates to copy
  // from; it is loaded out of the *source* task.
  if (!Data.FirstprivateVars.empty()) {
    LValue TDBase = CGF.EmitLoadOfPointerLValue(
        CGF.GetAddrOfLocalVar(&SrcArg),
        KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
    LValue Base = CGF.EmitLValueForField(
        TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
    KmpTaskSharedsPtr = Address(
        CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
                                 Base, *std::next(KmpTaskTQTyRD->field_begin(),
                                                  KmpTaskTShareds)),
                             Loc),
        CGF.getNaturalTypeAlignment(SharedsTy));
  }
  // Initialize the private copies in the destination task (ForDup mode).
  emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
                   SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
  CGF.FinishFunction();
  return TaskDup;
}
4734
4735 /// Checks if destructor function is required to be generated.
4736 /// \return true if cleanups are required, false otherwise.
4737 static bool
4738 checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
4739   bool NeedsCleanup = false;
4740   auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
4741   const auto *PrivateRD = cast<RecordDecl>(FI->getType()->getAsTagDecl());
4742   for (const FieldDecl *FD : PrivateRD->fields()) {
4743     NeedsCleanup = NeedsCleanup || FD->getType().isDestructedType();
4744     if (NeedsCleanup)
4745       break;
4746   }
4747   return NeedsCleanup;
4748 }
4749
// Common setup for 'task'/'taskloop'/'target' task emission: builds the
// kmp_task_t-with-privates record, allocates the task via
// __kmpc_omp_task_alloc, copies shareds, initializes private copies, and
// fills in destructor/priority data. The caller uses the returned
// TaskResultTy to emit the actual task-spawning runtime call.
CGOpenMPRuntime::TaskResultTy
CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
                              const OMPExecutableDirective &D,
                              llvm::Value *TaskFunction, QualType SharedsTy,
                              Address Shareds, const OMPTaskDataTy &Data) {
  ASTContext &C = CGM.getContext();
  llvm::SmallVector<PrivateDataTy, 4> Privates;
  // Aggregate privates and sort them by the alignment.
  // First the plain privates (no element initializer)...
  auto I = Data.PrivateCopies.begin();
  for (const Expr *E : Data.PrivateVars) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
                         /*PrivateElemInit=*/nullptr));
    ++I;
  }
  // ...then firstprivates, which carry an element-init variable...
  I = Data.FirstprivateCopies.begin();
  auto IElemInitRef = Data.FirstprivateInits.begin();
  for (const Expr *E : Data.FirstprivateVars) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(
            VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
            cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
    ++I;
    ++IElemInitRef;
  }
  // ...and finally lastprivates (again without an element initializer).
  I = Data.LastprivateCopies.begin();
  for (const Expr *E : Data.LastprivateVars) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
                         /*PrivateElemInit=*/nullptr));
    ++I;
  }
  std::stable_sort(Privates.begin(), Privates.end(), stable_sort_comparator);
  QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
  // Build type kmp_routine_entry_t (if not built yet).
  emitKmpRoutineEntryT(KmpInt32Ty);
  // Build type kmp_task_t (if not built yet). Taskloop directives use a
  // distinct (cached) record type from plain task/target directives.
  if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
    if (SavedKmpTaskloopTQTy.isNull()) {
      SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
          CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
    }
    KmpTaskTQTy = SavedKmpTaskloopTQTy;
  } else {
    assert((D.getDirectiveKind() == OMPD_task ||
            isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
            isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
           "Expected taskloop, task or target directive");
    if (SavedKmpTaskTQTy.isNull()) {
      SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
          CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
    }
    KmpTaskTQTy = SavedKmpTaskTQTy;
  }
  const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  // Build particular struct kmp_task_t for the given task.
  const RecordDecl *KmpTaskTWithPrivatesQTyRD =
      createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
  QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
  QualType KmpTaskTWithPrivatesPtrQTy =
      C.getPointerType(KmpTaskTWithPrivatesQTy);
  llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
  llvm::Type *KmpTaskTWithPrivatesPtrTy =
      KmpTaskTWithPrivatesTy->getPointerTo();
  llvm::Value *KmpTaskTWithPrivatesTySize =
      CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
  QualType SharedsPtrTy = C.getPointerType(SharedsTy);

  // Emit initial values for private copies (if any).
  // Build (or null out) the privates-mapping function; its type is taken from
  // the 4th parameter of the outlined task function.
  llvm::Value *TaskPrivatesMap = nullptr;
  llvm::Type *TaskPrivatesMapTy =
      std::next(cast<llvm::Function>(TaskFunction)->arg_begin(), 3)->getType();
  if (!Privates.empty()) {
    auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
    TaskPrivatesMap = emitTaskPrivateMappingFunction(
        CGM, Loc, Data.PrivateVars, Data.FirstprivateVars, Data.LastprivateVars,
        FI->getType(), Privates);
    TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TaskPrivatesMap, TaskPrivatesMapTy);
  } else {
    TaskPrivatesMap = llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(TaskPrivatesMapTy));
  }
  // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
  // kmp_task_t *tt);
  llvm::Value *TaskEntry = emitProxyTaskFunction(
      CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
      KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
      TaskPrivatesMap);

  // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  // kmp_routine_entry_t *task_entry);
  // Task flags. Format is taken from
  // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h,
  // description of kmp_tasking_flags struct.
  enum {
    TiedFlag = 0x1,
    FinalFlag = 0x2,
    DestructorsFlag = 0x8,
    PriorityFlag = 0x20
  };
  unsigned Flags = Data.Tied ? TiedFlag : 0;
  bool NeedsCleanup = false;
  if (!Privates.empty()) {
    NeedsCleanup = checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD);
    if (NeedsCleanup)
      Flags = Flags | DestructorsFlag;
  }
  if (Data.Priority.getInt())
    Flags = Flags | PriorityFlag;
  // The 'final' clause may be a runtime condition (select) or a constant.
  llvm::Value *TaskFlags =
      Data.Final.getPointer()
          ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
                                     CGF.Builder.getInt32(FinalFlag),
                                     CGF.Builder.getInt32(/*C=*/0))
          : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
  TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
  llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
  llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
                              getThreadID(CGF, Loc), TaskFlags,
                              KmpTaskTWithPrivatesTySize, SharedsSize,
                              CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                                  TaskEntry, KmpRoutineEntryPtrTy)};
  llvm::Value *NewTask = CGF.EmitRuntimeCall(
      createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
  llvm::Value *NewTaskNewTaskTTy =
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          NewTask, KmpTaskTWithPrivatesPtrTy);
  LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
                                               KmpTaskTWithPrivatesQTy);
  LValue TDBase =
      CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
  // Fill the data in the resulting kmp_task_t record.
  // Copy shareds if there are any.
  Address KmpTaskSharedsPtr = Address::invalid();
  if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
    KmpTaskSharedsPtr =
        Address(CGF.EmitLoadOfScalar(
                    CGF.EmitLValueForField(
                        TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
                                           KmpTaskTShareds)),
                    Loc),
                CGF.getNaturalTypeAlignment(SharedsTy));
    LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
    LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
    CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
  }
  // Emit initial values for private copies (if any).
  TaskResultTy Result;
  if (!Privates.empty()) {
    emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
                     SharedsTy, SharedsPtrTy, Data, Privates,
                     /*ForDup=*/false);
    // Taskloops additionally need a duplication function when there are
    // lastprivates or non-trivial firstprivate initializers.
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
      Result.TaskDupFn = emitTaskDupFunction(
          CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
          KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
          /*WithLastIter=*/!Data.LastprivateVars.empty());
    }
  }
  // Fields of union "kmp_cmplrdata_t" for destructors and priority.
  enum { Priority = 0, Destructors = 1 };
  // Provide pointer to function with destructors for privates.
  auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
  const RecordDecl *KmpCmplrdataUD =
      (*FI)->getType()->getAsUnionType()->getDecl();
  if (NeedsCleanup) {
    llvm::Value *DestructorFn = emitDestructorsFunction(
        CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
        KmpTaskTWithPrivatesQTy);
    LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
    LValue DestructorsLV = CGF.EmitLValueForField(
        Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
    CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                              DestructorFn, KmpRoutineEntryPtrTy),
                          DestructorsLV);
  }
  // Set priority.
  if (Data.Priority.getInt()) {
    LValue Data2LV = CGF.EmitLValueForField(
        TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
    LValue PriorityLV = CGF.EmitLValueForField(
        Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
    CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
  }
  // Package everything the caller needs to emit the spawning call.
  Result.NewTask = NewTask;
  Result.TaskEntry = TaskEntry;
  Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
  Result.TDBase = TDBase;
  Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
  return Result;
}
4950
// Emit code for an OpenMP 'task' construct: allocate and initialize the task
// via emitTaskInit, materialize any depend-clause entries, then spawn the
// task (__kmpc_omp_task[_with_deps]) or, under a false 'if' clause, execute
// it immediately between __kmpc_omp_task_begin_if0/_complete_if0.
void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                                   const OMPExecutableDirective &D,
                                   llvm::Value *TaskFunction,
                                   QualType SharedsTy, Address Shareds,
                                   const Expr *IfCond,
                                   const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;

  TaskResultTy Result =
      emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  llvm::Value *NewTask = Result.NewTask;
  llvm::Value *TaskEntry = Result.TaskEntry;
  llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
  LValue TDBase = Result.TDBase;
  const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
  ASTContext &C = CGM.getContext();
  // Process list of dependences.
  Address DependenciesArray = Address::invalid();
  unsigned NumDependencies = Data.Dependences.size();
  if (NumDependencies) {
    // Dependence kind for RTL.
    enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3 };
    enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
    RecordDecl *KmpDependInfoRD;
    QualType FlagsTy =
        C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
    llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
    // Build (once, cached in KmpDependInfoTy) the implicit record
    // kmp_depend_info { intptr_t base_addr; size_t len; <flags> }.
    if (KmpDependInfoTy.isNull()) {
      KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
      KmpDependInfoRD->startDefinition();
      addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
      addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
      addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
      KmpDependInfoRD->completeDefinition();
      KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
    } else {
      KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
    }
    CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
    // Define type kmp_depend_info[<Dependences.size()>];
    QualType KmpDependInfoArrayTy = C.getConstantArrayType(
        KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
        ArrayType::Normal, /*IndexTypeQuals=*/0);
    // kmp_depend_info[<Dependences.size()>] deps;
    DependenciesArray =
        CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
    // Fill one kmp_depend_info entry per depend-clause item.
    for (unsigned I = 0; I < NumDependencies; ++I) {
      const Expr *E = Data.Dependences[I].second;
      LValue Addr = CGF.EmitLValue(E);
      llvm::Value *Size;
      QualType Ty = E->getType();
      if (const auto *ASE =
              dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
        // For an array section, compute the byte length as
        // (&upper_bound + 1) - &lower_bound.
        LValue UpAddrLVal =
            CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
        llvm::Value *UpAddr =
            CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
        llvm::Value *LowIntPtr =
            CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
        llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
        Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
      } else {
        Size = CGF.getTypeSize(Ty);
      }
      LValue Base = CGF.MakeAddrLValue(
          CGF.Builder.CreateConstArrayGEP(DependenciesArray, I, DependencySize),
          KmpDependInfoTy);
      // deps[i].base_addr = &<Dependences[i].second>;
      LValue BaseAddrLVal = CGF.EmitLValueForField(
          Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
      CGF.EmitStoreOfScalar(
          CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
          BaseAddrLVal);
      // deps[i].len = sizeof(<Dependences[i].second>);
      LValue LenLVal = CGF.EmitLValueForField(
          Base, *std::next(KmpDependInfoRD->field_begin(), Len));
      CGF.EmitStoreOfScalar(Size, LenLVal);
      // deps[i].flags = <Dependences[i].first>;
      RTLDependenceKindTy DepKind;
      switch (Data.Dependences[I].first) {
      case OMPC_DEPEND_in:
        DepKind = DepIn;
        break;
      // Out and InOut dependencies must use the same code.
      case OMPC_DEPEND_out:
      case OMPC_DEPEND_inout:
        DepKind = DepInOut;
        break;
      case OMPC_DEPEND_source:
      case OMPC_DEPEND_sink:
      case OMPC_DEPEND_unknown:
        llvm_unreachable("Unknown task dependence type");
      }
      LValue FlagsLVal = CGF.EmitLValueForField(
          Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
      CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
                            FlagsLVal);
    }
    // Decay the array to a void* pointing at its first element for the RTL.
    DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
        CGF.VoidPtrTy);
  }

  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  // libcall.
  // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
  // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
  // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence
  // list is not empty
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
  llvm::Value *DepTaskArgs[7];
  if (NumDependencies) {
    DepTaskArgs[0] = UpLoc;
    DepTaskArgs[1] = ThreadID;
    DepTaskArgs[2] = NewTask;
    DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
    DepTaskArgs[4] = DependenciesArray.getPointer();
    DepTaskArgs[5] = CGF.Builder.getInt32(0);
    DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  // 'then' branch: actually enqueue the task (taken when there is no 'if'
  // clause, or when its condition is true).
  auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, NumDependencies,
                        &TaskArgs,
                        &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
    // Untied tasks start at part id 0.
    if (!Data.Tied) {
      auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
      LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
      CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
    }
    if (NumDependencies) {
      CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps), DepTaskArgs);
    } else {
      CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
                          TaskArgs);
    }
    // Check if parent region is untied and build return for untied task;
    if (auto *Region =
            dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
      Region->emitUntiedSwitch(CGF);
  };

  llvm::Value *DepWaitTaskArgs[6];
  if (NumDependencies) {
    DepWaitTaskArgs[0] = UpLoc;
    DepWaitTaskArgs[1] = ThreadID;
    DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
    DepWaitTaskArgs[3] = DependenciesArray.getPointer();
    DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
    DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  // 'else' branch: the 'if' clause evaluated to false, so wait on the
  // dependences (if any) and run the task body immediately (undeferred).
  auto &&ElseCodeGen = [&TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
                        NumDependencies, &DepWaitTaskArgs,
                        Loc](CodeGenFunction &CGF, PrePostActionTy &) {
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    CodeGenFunction::RunCleanupsScope LocalScope(CGF);
    // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
    // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
    // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
    // is specified.
    if (NumDependencies)
      CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
                          DepWaitTaskArgs);
    // Call proxy_task_entry(gtid, new_task);
    auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
                      Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
      Action.Enter(CGF);
      llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
                                                          OutlinedFnArgs);
    };

    // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    RegionCodeGenTy RCG(CodeGen);
    CommonActionTy Action(
        RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0), TaskArgs,
        RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0), TaskArgs);
    RCG.setAction(Action);
    RCG(CGF);
  };

  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
  } else {
    RegionCodeGenTy ThenRCG(ThenCodeGen);
    ThenRCG(CGF);
  }
}
5144
// Emit code for an OpenMP 'taskloop' construct: initialize the task via
// emitTaskInit, store the loop bounds/stride and reductions pointer into the
// task record, then issue the __kmpc_taskloop runtime call.
void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       const OMPLoopDirective &D,
                                       llvm::Value *TaskFunction,
                                       QualType SharedsTy, Address Shareds,
                                       const Expr *IfCond,
                                       const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;
  TaskResultTy Result =
      emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  // libcall.
  // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
  // sched, kmp_uint64 grainsize, void *task_dup);
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  // Unlike 'task', the 'if' clause is passed to the runtime as an int
  // argument instead of branching in the generated code.
  llvm::Value *IfVal;
  if (IfCond) {
    IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
                                      /*isSigned=*/true);
  } else {
    IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
  }

  // Initialize the lower-bound, upper-bound and stride fields of the task
  // record from the loop directive's helper variables.
  LValue LBLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
  const auto *LBVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
  CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
                       /*IsInitializer=*/true);
  LValue UBLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
  const auto *UBVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
  CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
                       /*IsInitializer=*/true);
  LValue StLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
  const auto *StVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
  CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
                       /*IsInitializer=*/true);
  // Store reductions address.
  LValue RedLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
  if (Data.Reductions) {
    CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
  } else {
    CGF.EmitNullInitialization(RedLVal.getAddress(),
                               CGF.getContext().VoidPtrTy);
  }
  // 'sched' values understood by __kmpc_taskloop.
  enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
  llvm::Value *TaskArgs[] = {
      UpLoc,
      ThreadID,
      Result.NewTask,
      IfVal,
      LBLVal.getPointer(),
      UBLVal.getPointer(),
      CGF.EmitLoadOfScalar(StLVal, Loc),
      llvm::ConstantInt::getNullValue(
          CGF.IntTy), // Always 0 because taskgroup emitted by the compiler
      llvm::ConstantInt::getSigned(
          CGF.IntTy, Data.Schedule.getPointer()
                         ? Data.Schedule.getInt() ? NumTasks : Grainsize
                         : NoSchedule),
      Data.Schedule.getPointer()
          ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
                                      /*isSigned=*/false)
          : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
      Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                             Result.TaskDupFn, CGF.VoidPtrTy)
                       : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskloop), TaskArgs);
}
5225
/// Emit reduction operation for each element of array (required for
/// array sections) LHS op = RHS.
/// \param Type Type of array.
/// \param LHSVar Variable on the left side of the reduction operation
/// (references element of array in original variable).
/// \param RHSVar Variable on the right side of the reduction operation
/// (references element of array in original variable).
/// \param RedOpGen Generator of reduction operation with use of LHSVar and
/// RHSVar.
/// \param XExpr, EExpr, UpExpr Optional expressions forwarded unchanged to
/// \p RedOpGen for each element.
static void EmitOMPAggregateReduction(
    CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
    const VarDecl *RHSVar,
    const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
                                  const Expr *, const Expr *)> &RedOpGen,
    const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
    const Expr *UpExpr = nullptr) {
  // Perform element-by-element initialization.
  QualType ElementTy;
  Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
  Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);

  llvm::Value *RHSBegin = RHSAddr.getPointer();
  llvm::Value *LHSBegin = LHSAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
  // Skip the loop entirely for a zero-length array.
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);

  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);

  // PHI nodes track the current source and destination element pointers
  // across loop iterations.
  llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
      RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  RHSElementPHI->addIncoming(RHSBegin, EntryBB);
  Address RHSElementCurrent =
      Address(RHSElementPHI,
              RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
      LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
  LHSElementPHI->addIncoming(LHSBegin, EntryBB);
  Address LHSElementCurrent =
      Address(LHSElementPHI,
              LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  // Temporarily remap LHSVar/RHSVar to the current elements so RedOpGen
  // operates on them.
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; });
  Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; });
  Scope.Privatize();
  RedOpGen(CGF, XExpr, EExpr, UpExpr);
  Scope.ForceCleanup();

  // Shift the address forward by one element.
  llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
      LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
      RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
  RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());

  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
5305
5306 /// Emit reduction combiner. If the combiner is a simple expression emit it as
5307 /// is, otherwise consider it as combiner of UDR decl and emit it as a call of
5308 /// UDR combiner function.
5309 static void emitReductionCombiner(CodeGenFunction &CGF,
5310                                   const Expr *ReductionOp) {
5311   if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
5312     if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
5313       if (const auto *DRE =
5314               dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
5315         if (const auto *DRD =
5316                 dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
5317           std::pair<llvm::Function *, llvm::Function *> Reduction =
5318               CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
5319           RValue Func = RValue::get(Reduction.first);
5320           CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
5321           CGF.EmitIgnoredExpr(ReductionOp);
5322           return;
5323         }
5324   CGF.EmitIgnoredExpr(ReductionOp);
5325 }
5326
/// Emits the outlined reduction function
/// 'void reduction_func(void *lhs[<n>], void *rhs[<n>])' that combines each
/// RHS item into the corresponding LHS item with the matching reduction op.
llvm::Value *CGOpenMPRuntime::emitReductionFunction(
    CodeGenModule &CGM, SourceLocation Loc, llvm::Type *ArgsType,
    ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
  ASTContext &C = CGM.getContext();

  // void reduction_func(void *LHSArg, void *RHSArg);
  FunctionArgList Args;
  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  Args.push_back(&LHSArg);
  Args.push_back(&RHSArg);
  const auto &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  std::string Name = getName({"omp", "reduction", "reduction_func"});
  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
                                    llvm::GlobalValue::InternalLinkage, Name,
                                    &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);

  // Dst = (void*[n])(LHSArg);
  // Src = (void*[n])(RHSArg);
  Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
      ArgsType), CGF.getPointerAlign());
  Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
      ArgsType), CGF.getPointerAlign());

  //  ...
  //  *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
  //  ...
  // Remap each LHS/RHS variable to its slot in the incoming void* arrays so
  // the reduction ops (written in terms of the variables) act on the slots.
  // Idx tracks the array slot and may run ahead of I because variably sized
  // items occupy an extra slot holding their dynamic size.
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
    const auto *RHSVar =
        cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
    Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() {
      return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
    });
    const auto *LHSVar =
        cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
    Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() {
      return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
    });
    QualType PrivTy = (*IPriv)->getType();
    if (PrivTy->isVariablyModifiedType()) {
      // Get array size and emit VLA type.
      // The size was stashed as a pointer-sized integer in the next slot.
      ++Idx;
      Address Elem =
          CGF.Builder.CreateConstArrayGEP(LHS, Idx, CGF.getPointerSize());
      llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
      const VariableArrayType *VLA =
          CGF.getContext().getAsVariableArrayType(PrivTy);
      const auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
      CodeGenFunction::OpaqueValueMapping OpaqueMap(
          CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
      CGF.EmitVariablyModifiedType(PrivTy);
    }
  }
  Scope.Privatize();
  IPriv = Privates.begin();
  auto ILHS = LHSExprs.begin();
  auto IRHS = RHSExprs.begin();
  for (const Expr *E : ReductionOps) {
    if ((*IPriv)->getType()->isArrayType()) {
      // Emit reduction for array section.
      const auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      const auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      EmitOMPAggregateReduction(
          CGF, (*IPriv)->getType(), LHSVar, RHSVar,
          [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
            emitReductionCombiner(CGF, E);
          });
    } else {
      // Emit reduction for array subscript or single variable.
      emitReductionCombiner(CGF, E);
    }
    ++IPriv;
    ++ILHS;
    ++IRHS;
  }
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}
5419
5420 void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
5421                                                   const Expr *ReductionOp,
5422                                                   const Expr *PrivateRef,
5423                                                   const DeclRefExpr *LHS,
5424                                                   const DeclRefExpr *RHS) {
5425   if (PrivateRef->getType()->isArrayType()) {
5426     // Emit reduction for array section.
5427     const auto *LHSVar = cast<VarDecl>(LHS->getDecl());
5428     const auto *RHSVar = cast<VarDecl>(RHS->getDecl());
5429     EmitOMPAggregateReduction(
5430         CGF, PrivateRef->getType(), LHSVar, RHSVar,
5431         [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5432           emitReductionCombiner(CGF, ReductionOp);
5433         });
5434   } else {
5435     // Emit reduction for array subscript or single variable.
5436     emitReductionCombiner(CGF, ReductionOp);
5437   }
5438 }
5439
/// Emits code for an OpenMP 'reduction' clause: builds the list of reduction
/// items, the outlined reduce_func, and the __kmpc_reduce{_nowait} call whose
/// result selects between the non-atomic (case 1) and atomic (case 2)
/// combining paths.
void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
                                    ArrayRef<const Expr *> Privates,
                                    ArrayRef<const Expr *> LHSExprs,
                                    ArrayRef<const Expr *> RHSExprs,
                                    ArrayRef<const Expr *> ReductionOps,
                                    ReductionOptionsTy Options) {
  if (!CGF.HaveInsertPoint())
    return;

  bool WithNowait = Options.WithNowait;
  bool SimpleReduction = Options.SimpleReduction;

  // Next code should be emitted for reduction:
  //
  // static kmp_critical_name lock = { 0 };
  //
  // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
  //  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
  //  ...
  //  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
  //  *(Type<n>-1*)rhs[<n>-1]);
  // }
  //
  // ...
  // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
  // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  // RedList, reduce_func, &<lock>)) {
  // case 1:
  //  ...
  //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  //  ...
  // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  // break;
  // case 2:
  //  ...
  //  Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  //  ...
  // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
  // break;
  // default:;
  // }
  //
  // if SimpleReduction is true, only the next code is generated:
  //  ...
  //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  //  ...

  ASTContext &C = CGM.getContext();

  if (SimpleReduction) {
    // No runtime coordination needed: combine each item in place.
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (const Expr *E : ReductionOps) {
      emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                  cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
    return;
  }

  // 1. Build a list of reduction variables.
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
  auto Size = RHSExprs.size();
  for (const Expr *E : Privates) {
    if (E->getType()->isVariablyModifiedType())
      // Reserve place for array size.
      ++Size;
  }
  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
  QualType ReductionArrayTy =
      C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
                             /*IndexTypeQuals=*/0);
  Address ReductionList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
    Address Elem =
      CGF.Builder.CreateConstArrayGEP(ReductionList, Idx, CGF.getPointerSize());
    CGF.Builder.CreateStore(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
        Elem);
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
      // Store array size.
      // The dynamic element count is smuggled in the next slot as a
      // pointer-sized integer; reduce_func reads it back with ptrtoint.
      ++Idx;
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
                                             CGF.getPointerSize());
      llvm::Value *Size = CGF.Builder.CreateIntCast(
          CGF.getVLASize(
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
              .NumElts,
          CGF.SizeTy, /*isSigned=*/false);
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
                              Elem);
    }
  }

  // 2. Emit reduce_func().
  llvm::Value *ReductionFn = emitReductionFunction(
      CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
      Privates, LHSExprs, RHSExprs, ReductionOps);

  // 3. Create static kmp_critical_name lock = { 0 };
  std::string Name = getName({"reduction"});
  llvm::Value *Lock = getCriticalRegionLock(Name);

  // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  // RedList, reduce_func, &<lock>);
  llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
  llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      ReductionList.getPointer(), CGF.VoidPtrTy);
  llvm::Value *Args[] = {
      IdentTLoc,                             // ident_t *<loc>
      ThreadId,                              // i32 <gtid>
      CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
      ReductionArrayTySize,                  // size_type sizeof(RedList)
      RL,                                    // void *RedList
      ReductionFn, // void (*) (void *, void *) <reduce_func>
      Lock         // kmp_critical_name *&<lock>
  };
  llvm::Value *Res = CGF.EmitRuntimeCall(
      createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
                                       : OMPRTL__kmpc_reduce),
      Args);

  // 5. Build switch(res)
  llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
  llvm::SwitchInst *SwInst =
      CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);

  // 6. Build case 1:
  //  ...
  //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  //  ...
  // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  // break;
  llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
  SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
  CGF.EmitBlock(Case1BB);

  // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  llvm::Value *EndArgs[] = {
      IdentTLoc, // ident_t *<loc>
      ThreadId,  // i32 <gtid>
      Lock       // kmp_critical_name *&<lock>
  };
  auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (const Expr *E : ReductionOps) {
      RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                     cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
  };
  // The CommonActionTy ensures __kmpc_end_reduce{_nowait} is called on exit
  // from the combining region (including cleanup paths).
  RegionCodeGenTy RCG(CodeGen);
  CommonActionTy Action(
      nullptr, llvm::None,
      createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
                                       : OMPRTL__kmpc_end_reduce),
      EndArgs);
  RCG.setAction(Action);
  RCG(CGF);

  CGF.EmitBranch(DefaultBB);

  // 7. Build case 2:
  //  ...
  //  Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  //  ...
  // break;
  llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
  SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
  CGF.EmitBlock(Case2BB);

  auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
                             CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    auto IPriv = Privates.begin();
    for (const Expr *E : ReductionOps) {
      const Expr *XExpr = nullptr;
      const Expr *EExpr = nullptr;
      const Expr *UpExpr = nullptr;
      BinaryOperatorKind BO = BO_Comma;
      // Note: the inner BO deliberately shadows the opcode variable above;
      // only assignments are decomposed into X/Update parts.
      if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
        if (BO->getOpcode() == BO_Assign) {
          XExpr = BO->getLHS();
          UpExpr = BO->getRHS();
        }
      }
      // Try to emit update expression as a simple atomic.
      const Expr *RHSExpr = UpExpr;
      if (RHSExpr) {
        // Analyze RHS part of the whole expression.
        if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(
                RHSExpr->IgnoreParenImpCasts())) {
          // If this is a conditional operator, analyze its condition for
          // min/max reduction operator.
          RHSExpr = ACO->getCond();
        }
        if (const auto *BORHS =
                dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
          EExpr = BORHS->getRHS();
          BO = BORHS->getOpcode();
        }
      }
      if (XExpr) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
        auto &&AtomicRedGen = [BO, VD,
                               Loc](CodeGenFunction &CGF, const Expr *XExpr,
                                    const Expr *EExpr, const Expr *UpExpr) {
          LValue X = CGF.EmitLValue(XExpr);
          RValue E;
          if (EExpr)
            E = CGF.EmitAnyExpr(EExpr);
          CGF.EmitOMPAtomicSimpleUpdateExpr(
              X, E, BO, /*IsXLHSInRHSPart=*/true,
              llvm::AtomicOrdering::Monotonic, Loc,
              [&CGF, UpExpr, VD, Loc](RValue XRValue) {
                // The update expression reads VD; remap it to a temporary
                // holding the value loaded atomically from X.
                CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
                PrivateScope.addPrivate(
                    VD, [&CGF, VD, XRValue, Loc]() {
                      Address LHSTemp = CGF.CreateMemTemp(VD->getType());
                      CGF.emitOMPSimpleStore(
                          CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
                          VD->getType().getNonReferenceType(), Loc);
                      return LHSTemp;
                    });
                (void)PrivateScope.Privatize();
                return CGF.EmitAnyExpr(UpExpr);
              });
        };
        if ((*IPriv)->getType()->isArrayType()) {
          // Emit atomic reduction for array section.
          const auto *RHSVar =
              cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
          EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
                                    AtomicRedGen, XExpr, EExpr, UpExpr);
        } else {
          // Emit atomic reduction for array subscript or single variable.
          AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
        }
      } else {
        // Emit as a critical region.
        auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
                                           const Expr *, const Expr *) {
          CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
          std::string Name = RT.getName({"atomic_reduction"});
          RT.emitCriticalRegion(
              CGF, Name,
              [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
                Action.Enter(CGF);
                emitReductionCombiner(CGF, E);
              },
              Loc);
        };
        if ((*IPriv)->getType()->isArrayType()) {
          const auto *LHSVar =
              cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
          const auto *RHSVar =
              cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
          EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
                                    CritRedGen);
        } else {
          CritRedGen(CGF, nullptr, nullptr, nullptr);
        }
      }
      ++ILHS;
      ++IRHS;
      ++IPriv;
    }
  };
  RegionCodeGenTy AtomicRCG(AtomicCodeGen);
  if (!WithNowait) {
    // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
    llvm::Value *EndArgs[] = {
        IdentTLoc, // ident_t *<loc>
        ThreadId,  // i32 <gtid>
        Lock       // kmp_critical_name *&<lock>
    };
    CommonActionTy Action(nullptr, llvm::None,
                          createRuntimeFunction(OMPRTL__kmpc_end_reduce),
                          EndArgs);
    AtomicRCG.setAction(Action);
    AtomicRCG(CGF);
  } else {
    // The nowait variant requires no matching end call in the atomic path.
    AtomicRCG(CGF);
  }

  CGF.EmitBranch(DefaultBB);
  CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
}
5745
5746 /// Generates unique name for artificial threadprivate variables.
5747 /// Format is: <Prefix> "." <Decl_mangled_name> "_" "<Decl_start_loc_raw_enc>"
5748 static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
5749                                       const Expr *Ref) {
5750   SmallString<256> Buffer;
5751   llvm::raw_svector_ostream Out(Buffer);
5752   const clang::DeclRefExpr *DE;
5753   const VarDecl *D = ::getBaseDecl(Ref, DE);
5754   if (!D)
5755     D = cast<VarDecl>(cast<DeclRefExpr>(Ref)->getDecl());
5756   D = D->getCanonicalDecl();
5757   std::string Name = CGM.getOpenMPRuntime().getName(
5758       {D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
5759   Out << Prefix << Name << "_"
5760       << D->getCanonicalDecl()->getLocStart().getRawEncoding();
5761   return Out.str();
5762 }
5763
/// Emits reduction initializer function:
/// \code
/// void @.red_init(void* %arg) {
/// %0 = bitcast void* %arg to <type>*
/// store <type> <init>, <type>* %0
/// ret void
/// }
/// \endcode
static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N) {
  ASTContext &C = CGM.getContext();
  // Single parameter: void *arg, the private copy to be initialized.
  FunctionArgList Args;
  ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                          ImplicitParamDecl::Other);
  Args.emplace_back(&Param);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_init", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  Address PrivateAddr = CGF.EmitLoadOfPointer(
      CGF.GetAddrOfLocalVar(&Param),
      C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  LValue SharedLVal;
  // If initializer uses initializer from declare reduction construct, emit a
  // pointer to the address of the original reduction item (required by
  // reduction initializer)
  if (RCG.usesReductionInitializer(N)) {
    Address SharedAddr =
        CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
            CGF, CGM.getContext().VoidPtrTy,
            generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
    SharedAddr = CGF.EmitLoadOfPointer(
        SharedAddr,
        CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
    SharedLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
  } else {
    // No UDR initializer: the shared item is not needed, pass a null lvalue.
    SharedLVal = CGF.MakeNaturalAlignAddrLValue(
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        CGM.getContext().VoidPtrTy);
  }
  // Emit the initializer:
  // %0 = bitcast void* %arg to <type>*
  // store <type> <init>, <type>* %0
  RCG.emitInitialization(CGF, N, PrivateAddr, SharedLVal,
                         [](CodeGenFunction &) { return false; });
  CGF.FinishFunction();
  return Fn;
}
5830
/// Emits reduction combiner function:
/// \code
/// void @.red_comb(void* %arg0, void* %arg1) {
/// %lhs = bitcast void* %arg0 to <type>*
/// %rhs = bitcast void* %arg1 to <type>*
/// %2 = <ReductionOp>(<type>* %lhs, <type>* %rhs)
/// store <type> %2, <type>* %lhs
/// ret void
/// }
/// \endcode
static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N,
                                           const Expr *ReductionOp,
                                           const Expr *LHS, const Expr *RHS,
                                           const Expr *PrivateRef) {
  ASTContext &C = CGM.getContext();
  const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
  const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
  // Two parameters: the in/out item (%arg0) and the in item (%arg1).
  FunctionArgList Args;
  ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                               C.VoidPtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                            ImplicitParamDecl::Other);
  Args.emplace_back(&ParamInOut);
  Args.emplace_back(&ParamIn);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_comb", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  // Remap lhs and rhs variables to the addresses of the function arguments.
  // %lhs = bitcast void* %arg0 to <type>*
  // %rhs = bitcast void* %arg1 to <type>*
  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() {
    // Pull out the pointer to the variable.
    Address PtrAddr = CGF.EmitLoadOfPointer(
        CGF.GetAddrOfLocalVar(&ParamInOut),
        C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
    return CGF.Builder.CreateElementBitCast(
        PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
  });
  PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() {
    // Pull out the pointer to the variable.
    Address PtrAddr = CGF.EmitLoadOfPointer(
        CGF.GetAddrOfLocalVar(&ParamIn),
        C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
    return CGF.Builder.CreateElementBitCast(
        PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
  });
  PrivateScope.Privatize();
  // Emit the combiner body:
  // %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
  // store <type> %2, <type>* %lhs
  CGM.getOpenMPRuntime().emitSingleReductionCombiner(
      CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
      cast<DeclRefExpr>(RHS));
  CGF.FinishFunction();
  return Fn;
}
5908
5909 /// Emits reduction finalizer function:
5910 /// \code
5911 /// void @.red_fini(void* %arg) {
5912 /// %0 = bitcast void* %arg to <type>*
5913 /// <destroy>(<type>* %0)
5914 /// ret void
5915 /// }
5916 /// \endcode
5917 static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
5918                                            SourceLocation Loc,
5919                                            ReductionCodeGen &RCG, unsigned N) {
5920   if (!RCG.needCleanups(N))
5921     return nullptr;
5922   ASTContext &C = CGM.getContext();
5923   FunctionArgList Args;
5924   ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5925                           ImplicitParamDecl::Other);
5926   Args.emplace_back(&Param);
5927   const auto &FnInfo =
5928       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5929   llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5930   std::string Name = CGM.getOpenMPRuntime().getName({"red_fini", ""});
5931   auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5932                                     Name, &CGM.getModule());
5933   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
5934   Fn->setDoesNotRecurse();
5935   CodeGenFunction CGF(CGM);
5936   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
5937   Address PrivateAddr = CGF.EmitLoadOfPointer(
5938       CGF.GetAddrOfLocalVar(&Param),
5939       C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5940   llvm::Value *Size = nullptr;
5941   // If the size of the reduction item is non-constant, load it from global
5942   // threadprivate variable.
5943   if (RCG.getSizes(N).second) {
5944     Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5945         CGF, CGM.getContext().getSizeType(),
5946         generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
5947     Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5948                                 CGM.getContext().getSizeType(), Loc);
5949   }
5950   RCG.emitAggregateType(CGF, N, Size);
5951   // Emit the finalizer body:
5952   // <destroy>(<type>* %0)
5953   RCG.emitCleanups(CGF, N, PrivateAddr);
5954   CGF.FinishFunction();
5955   return Fn;
5956 }
5957
/// Emit initialization of task reduction data for a taskgroup: builds an
/// array of kmp_task_red_input_t descriptors (one per reduction item) and
/// passes it to __kmpc_task_reduction_init, returning the taskgroup data
/// pointer produced by the runtime (or nullptr if there is nothing to do).
llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  // Nothing to emit without an insertion point or without reduction items.
  if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
    return nullptr;

  // Build typedef struct:
  // kmp_task_red_input {
  //   void *reduce_shar; // shared reduction item
  //   size_t reduce_size; // size of data item
  //   void *reduce_init; // data initialization routine
  //   void *reduce_fini; // data finalization routine
  //   void *reduce_comb; // data combiner routine
  //   kmp_task_red_flags_t flags; // flags for additional info from compiler
  // } kmp_task_red_input_t;
  // NOTE: the fields below are added in this exact order so the record layout
  // matches the runtime's expectation of kmp_task_red_input_t.
  ASTContext &C = CGM.getContext();
  RecordDecl *RD = C.buildImplicitRecord("kmp_task_red_input_t");
  RD->startDefinition();
  const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
  const FieldDecl *InitFD  = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *FlagsFD = addFieldToRecordDecl(
      C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
  RD->completeDefinition();
  QualType RDType = C.getRecordType(RD);
  unsigned Size = Data.ReductionVars.size();
  llvm::APInt ArraySize(/*numBits=*/64, Size);
  QualType ArrayRDType = C.getConstantArrayType(
      RDType, ArraySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
  // kmp_task_red_input_t .rd_input.[Size];
  Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
  ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionCopies,
                       Data.ReductionOps);
  // Fill in one descriptor per reduction item.
  for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
    // kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
    llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
                           llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
    llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
        TaskRedInput.getPointer(), Idxs,
        /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
        ".rd_input.gep.");
    LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
    // ElemLVal.reduce_shar = &Shareds[Cnt];
    LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
    RCG.emitSharedLValue(CGF, Cnt);
    llvm::Value *CastedShared =
        CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer());
    CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
    RCG.emitAggregateType(CGF, Cnt);
    llvm::Value *SizeValInChars;
    llvm::Value *SizeVal;
    std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
    // We use delayed creation/initialization for VLAs, array sections and
    // custom reduction initializations. It is required because runtime does not
    // provide the way to pass the sizes of VLAs/array sections to
    // initializer/combiner/finalizer functions and does not pass the pointer to
    // original reduction item to the initializer. Instead threadprivate global
    // variables are used to store these values and use them in the functions.
    // A non-null SizeVal means the size is not a compile-time constant.
    bool DelayedCreation = !!SizeVal;
    SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
                                               /*isSigned=*/false);
    LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
    CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
    // ElemLVal.reduce_init = init;
    LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
    llvm::Value *InitAddr =
        CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
    CGF.EmitStoreOfScalar(InitAddr, InitLVal);
    // A custom initializer also requires delayed creation (see comment above).
    DelayedCreation = DelayedCreation || RCG.usesReductionInitializer(Cnt);
    // ElemLVal.reduce_fini = fini;
    LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
    llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
    // No finalizer is emitted for items without cleanups; store null then.
    llvm::Value *FiniAddr = Fini
                                ? CGF.EmitCastToVoidPtr(Fini)
                                : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
    CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
    // ElemLVal.reduce_comb = comb;
    LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
    llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
        CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
        RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
    CGF.EmitStoreOfScalar(CombAddr, CombLVal);
    // ElemLVal.flags = 0;
    // Flag value 1 tells the runtime to use delayed creation for this item.
    LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
    if (DelayedCreation) {
      CGF.EmitStoreOfScalar(
          llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*IsSigned=*/true),
          FlagsLVal);
    } else
      CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
  }
  // Build call void *__kmpc_task_reduction_init(int gtid, int num_data, void
  // *data);
  llvm::Value *Args[] = {
      CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
                                /*isSigned=*/true),
      llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
                                                      CGM.VoidPtrTy)};
  return CGF.EmitRuntimeCall(
      createRuntimeFunction(OMPRTL__kmpc_task_reduction_init), Args);
}
6062
6063 void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
6064                                               SourceLocation Loc,
6065                                               ReductionCodeGen &RCG,
6066                                               unsigned N) {
6067   auto Sizes = RCG.getSizes(N);
6068   // Emit threadprivate global variable if the type is non-constant
6069   // (Sizes.second = nullptr).
6070   if (Sizes.second) {
6071     llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy,
6072                                                      /*isSigned=*/false);
6073     Address SizeAddr = getAddrOfArtificialThreadPrivate(
6074         CGF, CGM.getContext().getSizeType(),
6075         generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
6076     CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
6077   }
6078   // Store address of the original reduction item if custom initializer is used.
6079   if (RCG.usesReductionInitializer(N)) {
6080     Address SharedAddr = getAddrOfArtificialThreadPrivate(
6081         CGF, CGM.getContext().VoidPtrTy,
6082         generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
6083     CGF.Builder.CreateStore(
6084         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
6085             RCG.getSharedLValue(N).getPointer(), CGM.VoidPtrTy),
6086         SharedAddr, /*IsVolatile=*/false);
6087   }
6088 }
6089
6090 Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
6091                                               SourceLocation Loc,
6092                                               llvm::Value *ReductionsPtr,
6093                                               LValue SharedLVal) {
6094   // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
6095   // *d);
6096   llvm::Value *Args[] = {
6097       CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
6098                                 /*isSigned=*/true),
6099       ReductionsPtr,
6100       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(SharedLVal.getPointer(),
6101                                                       CGM.VoidPtrTy)};
6102   return Address(
6103       CGF.EmitRuntimeCall(
6104           createRuntimeFunction(OMPRTL__kmpc_task_reduction_get_th_data), Args),
6105       SharedLVal.getAlignment());
6106 }
6107
6108 void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
6109                                        SourceLocation Loc) {
6110   if (!CGF.HaveInsertPoint())
6111     return;
6112   // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
6113   // global_tid);
6114   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
6115   // Ignore return result until untied tasks are supported.
6116   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
6117   if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
6118     Region->emitUntiedSwitch(CGF);
6119 }
6120
6121 void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
6122                                            OpenMPDirectiveKind InnerKind,
6123                                            const RegionCodeGenTy &CodeGen,
6124                                            bool HasCancel) {
6125   if (!CGF.HaveInsertPoint())
6126     return;
6127   InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel);
6128   CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
6129 }
6130
namespace {
/// Cancellation kind constants passed as the kmp_int32 'cncl_kind' argument
/// to the __kmpc_cancel/__kmpc_cancellationpoint runtime entry points.
/// NOTE(review): the numeric values appear to mirror the runtime's ABI —
/// do not renumber without confirming against the OpenMP runtime headers.
enum RTCancelKind {
  CancelNoreq = 0,
  CancelParallel = 1,
  CancelLoop = 2,
  CancelSections = 3,
  CancelTaskgroup = 4
};
} // anonymous namespace
6140
6141 static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
6142   RTCancelKind CancelKind = CancelNoreq;
6143   if (CancelRegion == OMPD_parallel)
6144     CancelKind = CancelParallel;
6145   else if (CancelRegion == OMPD_for)
6146     CancelKind = CancelLoop;
6147   else if (CancelRegion == OMPD_sections)
6148     CancelKind = CancelSections;
6149   else {
6150     assert(CancelRegion == OMPD_taskgroup);
6151     CancelKind = CancelTaskgroup;
6152   }
6153   return CancelKind;
6154 }
6155
/// Emit code for the 'cancellation point' directive: call the runtime and, if
/// it reports a pending cancellation, branch out of the enclosing construct
/// through the cleanup machinery.
void CGOpenMPRuntime::emitCancellationPointCall(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDirectiveKind CancelRegion) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
  // global_tid, kmp_int32 cncl_kind);
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    // For 'cancellation point taskgroup', the task region info may not have a
    // cancel. This may instead happen in another adjacent task.
    if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
      llvm::Value *Args[] = {
          emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
          CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // Ignore return result until untied tasks are supported.
      llvm::Value *Result = CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_cancellationpoint), Args);
      // if (__kmpc_cancellationpoint()) {
      //   exit from construct;
      // }
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
      llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
      // Non-zero runtime result means cancellation was requested.
      llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
      CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
      CGF.EmitBlock(ExitBB);
      // exit from construct;
      // Branch through any pending cleanups (destructors etc.) to the cancel
      // destination of the innermost cancellable region.
      CodeGenFunction::JumpDest CancelDest =
          CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
      CGF.EmitBranchThroughCleanup(CancelDest);
      CGF.EmitBlock(ContBB, /*IsFinished=*/true);
    }
  }
}
6190
/// Emit code for the 'cancel' directive: call __kmpc_cancel (guarded by the
/// 'if' clause condition when present) and, when cancellation is activated,
/// branch out of the enclosing construct through the cleanup machinery.
void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                                     const Expr *IfCond,
                                     OpenMPDirectiveKind CancelRegion) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 cncl_kind);
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    // Codegen for the cancel call itself; emitted either unconditionally or
    // as the 'then' branch of the 'if' clause below. Captures by value since
    // the lambda may be invoked through emitOMPIfClause.
    auto &&ThenGen = [Loc, CancelRegion, OMPRegionInfo](CodeGenFunction &CGF,
                                                        PrePostActionTy &) {
      CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
      llvm::Value *Args[] = {
          RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
          CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // Ignore return result until untied tasks are supported.
      llvm::Value *Result = CGF.EmitRuntimeCall(
          RT.createRuntimeFunction(OMPRTL__kmpc_cancel), Args);
      // if (__kmpc_cancel()) {
      //   exit from construct;
      // }
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
      llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
      // Non-zero runtime result means cancellation was activated.
      llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
      CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
      CGF.EmitBlock(ExitBB);
      // exit from construct;
      // Branch through pending cleanups to the cancel destination of the
      // innermost cancellable region.
      CodeGenFunction::JumpDest CancelDest =
          CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
      CGF.EmitBranchThroughCleanup(CancelDest);
      CGF.EmitBlock(ContBB, /*IsFinished=*/true);
    };
    if (IfCond) {
      // 'if' clause present: cancel only when the condition is true; the
      // else-branch is a no-op.
      emitOMPIfClause(CGF, IfCond, ThenGen,
                      [](CodeGenFunction &, PrePostActionTy &) {});
    } else {
      RegionCodeGenTy ThenRCG(ThenGen);
      ThenRCG(CGF);
    }
  }
}
6232
/// Emit the outlined function and (optionally) the offload entry ID for a
/// target region. Thin wrapper over emitTargetOutlinedFunctionHelper that
/// only validates the parent function name.
void CGOpenMPRuntime::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  assert(!ParentName.empty() && "Invalid target region parent name!");
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
}
6241
/// Shared implementation for emitting a target region's outlined function:
/// computes a unique entry name from the source location, generates the
/// function from the captured statement, creates the region ID (function
/// address on device, anonymous global on host) and registers the offload
/// entry.
void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  // Create a unique name for the entry function using the source location
  // information of the current target region. The name will be something like:
  //
  // __omp_offloading_DD_FFFF_PP_lBB
  //
  // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
  // mangled name of the function that encloses the target region and BB is the
  // line number of the target region.

  unsigned DeviceID;
  unsigned FileID;
  unsigned Line;
  getTargetEntryUniqueInfo(CGM.getContext(), D.getLocStart(), DeviceID, FileID,
                           Line);
  SmallString<64> EntryFnName;
  {
    llvm::raw_svector_ostream OS(EntryFnName);
    OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
       << llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
  }

  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);

  // Outline the captured target body into a function with the name computed
  // above; CGInfo routes body emission through the provided CodeGen callback.
  CodeGenFunction CGF(CGM, true);
  CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);

  OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);

  // If this target outline function is not an offload entry, we don't need to
  // register it.
  if (!IsOffloadEntry)
    return;

  // The target region ID is used by the runtime library to identify the current
  // target region, so it only has to be unique and not necessarily point to
  // anything. It could be the pointer to the outlined function that implements
  // the target region, but we aren't using that so that the compiler doesn't
  // need to keep that, and could therefore inline the host function if proven
  // worthwhile during optimization. In the other hand, if emitting code for the
  // device, the ID has to be the function address so that it can retrieved from
  // the offloading entry and launched by the runtime library. We also mark the
  // outlined function to have external linkage in case we are emitting code for
  // the device, because these functions will be entry points to the device.

  if (CGM.getLangOpts().OpenMPIsDevice) {
    OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
    OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
    OutlinedFn->setDSOLocal(false);
  } else {
    // Host side: a one-byte constant global whose address serves as the
    // unique region ID.
    std::string Name = getName({EntryFnName, "region_id"});
    OutlinedFnID = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::WeakAnyLinkage,
        llvm::Constant::getNullValue(CGM.Int8Ty), Name);
  }

  // Register the information for the entry associated with this target region.
  OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
      DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
      OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
}
6308
6309 /// discard all CompoundStmts intervening between two constructs
6310 static const Stmt *ignoreCompoundStmts(const Stmt *Body) {
6311   while (const auto *CS = dyn_cast_or_null<CompoundStmt>(Body))
6312     Body = CS->body_front();
6313
6314   return Body;
6315 }
6316
/// Emit the number of teams for a target directive.  Inspect the num_teams
/// clause associated with a teams construct combined or closely nested
/// with the target directive.
///
/// Emit a team of size one for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
static llvm::Value *
emitNumTeamsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
                               CodeGenFunction &CGF,
                               const OMPExecutableDirective &D) {
  assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
                                              "teams directive expected to be "
                                              "emitted only for the host!");

  CGBuilderTy &Bld = CGF.Builder;

  // If the target directive is combined with a teams directive:
  //   Return the value in the num_teams clause, if any.
  //   Otherwise, return 0 to denote the runtime default.
  if (isOpenMPTeamsDirective(D.getDirectiveKind())) {
    if (const auto *NumTeamsClause = D.getSingleClause<OMPNumTeamsClause>()) {
      // Evaluate the clause expression in its own cleanup scope so any
      // temporaries are destroyed before the runtime call.
      CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
      llvm::Value *NumTeams = CGF.EmitScalarExpr(NumTeamsClause->getNumTeams(),
                                                 /*IgnoreResultAssign*/ true);
      return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
                               /*IsSigned=*/true);
    }

    // The default value is 0.
    return Bld.getInt32(0);
  }

  // If the target directive is combined with a parallel directive but not a
  // teams directive, start one team.
  if (isOpenMPParallelDirective(D.getDirectiveKind()))
    return Bld.getInt32(1);

  // If the current target region has a teams region enclosed, we need to get
  // the number of teams to pass to the runtime function call. This is done
  // by generating the expression in a inlined region. This is required because
  // the expression is captured in the enclosing target environment when the
  // teams directive is not combined with target.

  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);

  if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
          ignoreCompoundStmts(CS.getCapturedStmt()))) {
    if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
      if (const auto *NTE = TeamsDir->getSingleClause<OMPNumTeamsClause>()) {
        // Emit the clause expression through the inner-expression capture
        // machinery so captured variables resolve correctly.
        CGOpenMPInnerExprInfo CGInfo(CGF, CS);
        CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
        llvm::Value *NumTeams = CGF.EmitScalarExpr(NTE->getNumTeams());
        return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
                                 /*IsSigned=*/true);
      }

      // If we have an enclosed teams directive but no num_teams clause we use
      // the default value 0.
      return Bld.getInt32(0);
    }
  }

  // No teams associated with the directive.
  return nullptr;
}
6384
/// Emit the number of threads for a target directive.  Inspect the
/// thread_limit clause associated with a teams construct combined or closely
/// nested with the target directive.
///
/// Emit the num_threads clause for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
static llvm::Value *
emitNumThreadsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
                                 CodeGenFunction &CGF,
                                 const OMPExecutableDirective &D) {
  assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
                                              "teams directive expected to be "
                                              "emitted only for the host!");

  CGBuilderTy &Bld = CGF.Builder;

  //
  // If the target directive is combined with a teams directive:
  //   Return the value in the thread_limit clause, if any.
  //
  // If the target directive is combined with a parallel directive:
  //   Return the value in the num_threads clause, if any.
  //
  // If both clauses are set, select the minimum of the two.
  //
  // If neither teams or parallel combined directives set the number of threads
  // in a team, return 0 to denote the runtime default.
  //
  // If this is not a teams directive return nullptr.

  if (isOpenMPTeamsDirective(D.getDirectiveKind()) ||
      isOpenMPParallelDirective(D.getDirectiveKind())) {
    llvm::Value *DefaultThreadLimitVal = Bld.getInt32(0);
    llvm::Value *NumThreadsVal = nullptr;
    llvm::Value *ThreadLimitVal = nullptr;

    if (const auto *ThreadLimitClause =
            D.getSingleClause<OMPThreadLimitClause>()) {
      // Evaluate the clause expression in its own cleanup scope so any
      // temporaries are destroyed right after evaluation.
      CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
      llvm::Value *ThreadLimit =
          CGF.EmitScalarExpr(ThreadLimitClause->getThreadLimit(),
                             /*IgnoreResultAssign*/ true);
      ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty,
                                         /*IsSigned=*/true);
    }

    if (const auto *NumThreadsClause =
            D.getSingleClause<OMPNumThreadsClause>()) {
      CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
      llvm::Value *NumThreads =
          CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                             /*IgnoreResultAssign*/ true);
      NumThreadsVal =
          Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*IsSigned=*/true);
    }

    // Select the lesser of thread_limit and num_threads.
    // Note: the comparison is signed, matching the signed int casts above.
    if (NumThreadsVal)
      ThreadLimitVal = ThreadLimitVal
                           ? Bld.CreateSelect(Bld.CreateICmpSLT(NumThreadsVal,
                                                                ThreadLimitVal),
                                              NumThreadsVal, ThreadLimitVal)
                           : NumThreadsVal;

    // Set default value passed to the runtime if either teams or a target
    // parallel type directive is found but no clause is specified.
    if (!ThreadLimitVal)
      ThreadLimitVal = DefaultThreadLimitVal;

    return ThreadLimitVal;
  }

  // If the current target region has a teams region enclosed, we need to get
  // the thread limit to pass to the runtime function call. This is done
  // by generating the expression in a inlined region. This is required because
  // the expression is captured in the enclosing target environment when the
  // teams directive is not combined with target.

  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);

  if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
          ignoreCompoundStmts(CS.getCapturedStmt()))) {
    if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
      if (const auto *TLE = TeamsDir->getSingleClause<OMPThreadLimitClause>()) {
        // Emit the clause expression through the inner-expression capture
        // machinery so captured variables resolve correctly.
        CGOpenMPInnerExprInfo CGInfo(CGF, CS);
        CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
        llvm::Value *ThreadLimit = CGF.EmitScalarExpr(TLE->getThreadLimit());
        return CGF.Builder.CreateIntCast(ThreadLimit, CGF.Int32Ty,
                                         /*IsSigned=*/true);
      }

      // If we have an enclosed teams directive but no thread_limit clause we
      // use the default value 0.
      return CGF.Builder.getInt32(0);
    }
  }

  // No teams associated with the directive.
  return nullptr;
}
6487
6488 namespace {
6489 LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
6490
6491 // Utility to handle information from clauses associated with a given
6492 // construct that use mappable expressions (e.g. 'map' clause, 'to' clause).
6493 // It provides a convenient interface to obtain the information and generate
6494 // code for that information.
6495 class MappableExprsHandler {
6496 public:
6497   /// Values for bit flags used to specify the mapping type for
6498   /// offloading.
6499   enum OpenMPOffloadMappingFlags : uint64_t {
6500     /// No flags
6501     OMP_MAP_NONE = 0x0,
6502     /// Allocate memory on the device and move data from host to device.
6503     OMP_MAP_TO = 0x01,
6504     /// Allocate memory on the device and move data from device to host.
6505     OMP_MAP_FROM = 0x02,
6506     /// Always perform the requested mapping action on the element, even
6507     /// if it was already mapped before.
6508     OMP_MAP_ALWAYS = 0x04,
6509     /// Delete the element from the device environment, ignoring the
6510     /// current reference count associated with the element.
6511     OMP_MAP_DELETE = 0x08,
6512     /// The element being mapped is a pointer-pointee pair; both the
6513     /// pointer and the pointee should be mapped.
6514     OMP_MAP_PTR_AND_OBJ = 0x10,
6515     /// This flags signals that the base address of an entry should be
6516     /// passed to the target kernel as an argument.
6517     OMP_MAP_TARGET_PARAM = 0x20,
6518     /// Signal that the runtime library has to return the device pointer
6519     /// in the current position for the data being mapped. Used when we have the
6520     /// use_device_ptr clause.
6521     OMP_MAP_RETURN_PARAM = 0x40,
6522     /// This flag signals that the reference being passed is a pointer to
6523     /// private data.
6524     OMP_MAP_PRIVATE = 0x80,
6525     /// Pass the element to the device by value.
6526     OMP_MAP_LITERAL = 0x100,
6527     /// Implicit map
6528     OMP_MAP_IMPLICIT = 0x200,
6529     /// The 16 MSBs of the flags indicate whether the entry is member of some
6530     /// struct/class.
6531     OMP_MAP_MEMBER_OF = 0xffff000000000000,
6532     LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ OMP_MAP_MEMBER_OF),
6533   };
6534
6535   /// Class that associates information with a base pointer to be passed to the
6536   /// runtime library.
6537   class BasePointerInfo {
6538     /// The base pointer.
6539     llvm::Value *Ptr = nullptr;
6540     /// The base declaration that refers to this device pointer, or null if
6541     /// there is none.
6542     const ValueDecl *DevPtrDecl = nullptr;
6543
6544   public:
6545     BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
6546         : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
6547     llvm::Value *operator*() const { return Ptr; }
6548     const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
6549     void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
6550   };
6551
6552   using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
6553   using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
6554   using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;
6555
6556   /// Map between a struct and the its lowest & highest elements which have been
6557   /// mapped.
6558   /// [ValueDecl *] --> {LE(FieldIndex, Pointer),
6559   ///                    HE(FieldIndex, Pointer)}
6560   struct StructRangeInfoTy {
6561     std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> LowestElem = {
6562         0, Address::invalid()};
6563     std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> HighestElem = {
6564         0, Address::invalid()};
6565     Address Base = Address::invalid();
6566   };
6567
6568 private:
6569   /// Kind that defines how a device pointer has to be returned.
6570   struct MapInfo {
6571     OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
6572     OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
6573     OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
6574     bool ReturnDevicePointer = false;
6575     bool IsImplicit = false;
6576
6577     MapInfo() = default;
6578     MapInfo(
6579         OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
6580         OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
6581         bool ReturnDevicePointer, bool IsImplicit)
6582         : Components(Components), MapType(MapType),
6583           MapTypeModifier(MapTypeModifier),
6584           ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit) {}
6585   };
6586
6587   /// If use_device_ptr is used on a pointer which is a struct member and there
6588   /// is no map information about it, then emission of that entry is deferred
6589   /// until the whole struct has been processed.
6590   struct DeferredDevicePtrEntryTy {
6591     const Expr *IE = nullptr;
6592     const ValueDecl *VD = nullptr;
6593
6594     DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD)
6595         : IE(IE), VD(VD) {}
6596   };
6597
6598   /// Directive from where the map clauses were extracted.
6599   const OMPExecutableDirective &CurDir;
6600
6601   /// Function the directive is being generated for.
6602   CodeGenFunction &CGF;
6603
6604   /// Set of all first private variables in the current directive.
6605   llvm::SmallPtrSet<const VarDecl *, 8> FirstPrivateDecls;
6606
6607   /// Map between device pointer declarations and their expression components.
6608   /// The key value for declarations in 'this' is null.
6609   llvm::DenseMap<
6610       const ValueDecl *,
6611       SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
6612       DevPointersMap;
6613
6614   llvm::Value *getExprTypeSize(const Expr *E) const {
6615     QualType ExprTy = E->getType().getCanonicalType();
6616
6617     // Reference types are ignored for mapping purposes.
6618     if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
6619       ExprTy = RefTy->getPointeeType().getCanonicalType();
6620
6621     // Given that an array section is considered a built-in type, we need to
6622     // do the calculation based on the length of the section instead of relying
6623     // on CGF.getTypeSize(E->getType()).
6624     if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
6625       QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
6626                             OAE->getBase()->IgnoreParenImpCasts())
6627                             .getCanonicalType();
6628
6629       // If there is no length associated with the expression, that means we
6630       // are using the whole length of the base.
6631       if (!OAE->getLength() && OAE->getColonLoc().isValid())
6632         return CGF.getTypeSize(BaseTy);
6633
6634       llvm::Value *ElemSize;
6635       if (const auto *PTy = BaseTy->getAs<PointerType>()) {
6636         ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
6637       } else {
6638         const auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
6639         assert(ATy && "Expecting array type if not a pointer type.");
6640         ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
6641       }
6642
6643       // If we don't have a length at this point, that is because we have an
6644       // array section with a single element.
6645       if (!OAE->getLength())
6646         return ElemSize;
6647
6648       llvm::Value *LengthVal = CGF.EmitScalarExpr(OAE->getLength());
6649       LengthVal =
6650           CGF.Builder.CreateIntCast(LengthVal, CGF.SizeTy, /*isSigned=*/false);
6651       return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
6652     }
6653     return CGF.getTypeSize(ExprTy);
6654   }
6655
6656   /// Return the corresponding bits for a given map clause modifier. Add
6657   /// a flag marking the map as a pointer if requested. Add a flag marking the
6658   /// map as the first one of a series of maps that relate to the same map
6659   /// expression.
6660   OpenMPOffloadMappingFlags getMapTypeBits(OpenMPMapClauseKind MapType,
6661                                            OpenMPMapClauseKind MapTypeModifier,
6662                                            bool IsImplicit, bool AddPtrFlag,
6663                                            bool AddIsTargetParamFlag) const {
6664     OpenMPOffloadMappingFlags Bits =
6665         IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE;
6666     switch (MapType) {
6667     case OMPC_MAP_alloc:
6668     case OMPC_MAP_release:
6669       // alloc and release is the default behavior in the runtime library,  i.e.
6670       // if we don't pass any bits alloc/release that is what the runtime is
6671       // going to do. Therefore, we don't need to signal anything for these two
6672       // type modifiers.
6673       break;
6674     case OMPC_MAP_to:
6675       Bits |= OMP_MAP_TO;
6676       break;
6677     case OMPC_MAP_from:
6678       Bits |= OMP_MAP_FROM;
6679       break;
6680     case OMPC_MAP_tofrom:
6681       Bits |= OMP_MAP_TO | OMP_MAP_FROM;
6682       break;
6683     case OMPC_MAP_delete:
6684       Bits |= OMP_MAP_DELETE;
6685       break;
6686     case OMPC_MAP_always:
6687     case OMPC_MAP_unknown:
6688       llvm_unreachable("Unexpected map type!");
6689     }
6690     if (AddPtrFlag)
6691       Bits |= OMP_MAP_PTR_AND_OBJ;
6692     if (AddIsTargetParamFlag)
6693       Bits |= OMP_MAP_TARGET_PARAM;
6694     if (MapTypeModifier == OMPC_MAP_always)
6695       Bits |= OMP_MAP_ALWAYS;
6696     return Bits;
6697   }
6698
6699   /// Return true if the provided expression is a final array section. A
6700   /// final array section, is one whose length can't be proved to be one.
6701   bool isFinalArraySectionExpression(const Expr *E) const {
6702     const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
6703
6704     // It is not an array section and therefore not a unity-size one.
6705     if (!OASE)
6706       return false;
6707
6708     // An array section with no colon always refer to a single element.
6709     if (OASE->getColonLoc().isInvalid())
6710       return false;
6711
6712     const Expr *Length = OASE->getLength();
6713
6714     // If we don't have a length we have to check if the array has size 1
6715     // for this dimension. Also, we should always expect a length if the
6716     // base type is pointer.
6717     if (!Length) {
6718       QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
6719                              OASE->getBase()->IgnoreParenImpCasts())
6720                              .getCanonicalType();
6721       if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
6722         return ATy->getSize().getSExtValue() != 1;
6723       // If we don't have a constant dimension length, we have to consider
6724       // the current section as having any size, so it is not necessarily
6725       // unitary. If it happen to be unity size, that's user fault.
6726       return true;
6727     }
6728
6729     // Check if the length evaluates to 1.
6730     llvm::APSInt ConstLength;
6731     if (!Length->EvaluateAsInt(ConstLength, CGF.getContext()))
6732       return true; // Can have more that size 1.
6733
6734     return ConstLength.getSExtValue() != 1;
6735   }
6736
  /// Generate the base pointers, section pointers, sizes and map type
  /// bits for the provided map type, map modifier, and expression components.
  /// \a IsFirstComponentList should be set to true if the provided set of
  /// components is the first associated with a capture.
  /// \param MapType Kind of the originating map clause (to/from/tofrom/...).
  /// \param MapTypeModifier Modifier of that clause (e.g. 'always').
  /// \param Components Component list of the mapped expression, stored from
  ///        the whole expression down to its base; it is walked in reverse.
  /// \param BasePointers [out] One base pointer per generated entry.
  /// \param Pointers [out] One section pointer per generated entry.
  /// \param Sizes [out] Size in bytes of each generated entry.
  /// \param Types [out] Mapping flags of each generated entry.
  /// \param PartialStruct [in,out] Updated with the base and lowest/highest
  ///        mapped members when the components designate struct members.
  /// \param IsImplicit True if the map clause was implicitly generated.
  void generateInfoForComponentList(
      OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
      OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
      MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
      MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
      StructRangeInfoTy &PartialStruct, bool IsFirstComponentList,
      bool IsImplicit) const {
    // The following summarizes what has to be generated for each map and the
    // types below. The generated information is expressed in this order:
    // base pointer, section pointer, size, flags
    // (to add to the ones that come from the map type and modifier).
    //
    // double d;
    // int i[100];
    // float *p;
    //
    // struct S1 {
    //   int i;
    //   float f[50];
    // }
    // struct S2 {
    //   int i;
    //   float f[50];
    //   S1 s;
    //   double *p;
    //   struct S2 *ps;
    // }
    // S2 s;
    // S2 *ps;
    //
    // map(d)
    // &d, &d, sizeof(double), TARGET_PARAM | TO | FROM
    //
    // map(i)
    // &i, &i, 100*sizeof(int), TARGET_PARAM | TO | FROM
    //
    // map(i[1:23])
    // &i(=&i[0]), &i[1], 23*sizeof(int), TARGET_PARAM | TO | FROM
    //
    // map(p)
    // &p, &p, sizeof(float*), TARGET_PARAM | TO | FROM
    //
    // map(p[1:24])
    // p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM
    //
    // map(s)
    // &s, &s, sizeof(S2), TARGET_PARAM | TO | FROM
    //
    // map(s.i)
    // &s, &(s.i), sizeof(int), TARGET_PARAM | TO | FROM
    //
    // map(s.s.f)
    // &s, &(s.s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
    //
    // map(s.p)
    // &s, &(s.p), sizeof(double*), TARGET_PARAM | TO | FROM
    //
    // map(to: s.p[:22])
    // &s, &(s.p), sizeof(double*), TARGET_PARAM (*)
    // &s, &(s.p), sizeof(double*), MEMBER_OF(1) (**)
    // &(s.p), &(s.p[0]), 22*sizeof(double),
    //   MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
    // (*) alloc space for struct members, only this is a target parameter
    // (**) map the pointer (nothing to be mapped in this example) (the compiler
    //      optimizes this entry out, same in the examples below)
    // (***) map the pointee (map: to)
    //
    // map(s.ps)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM | TO | FROM
    //
    // map(from: s.ps->s.i)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ  | FROM
    //
    // map(to: s.ps->ps)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ  | TO
    //
    // map(s.ps->ps->ps)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
    //
    // map(to: s.ps->ps->s.f[:22])
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
    //
    // map(ps)
    // &ps, &ps, sizeof(S2*), TARGET_PARAM | TO | FROM
    //
    // map(ps->i)
    // ps, &(ps->i), sizeof(int), TARGET_PARAM | TO | FROM
    //
    // map(ps->s.f)
    // ps, &(ps->s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
    //
    // map(from: ps->p)
    // ps, &(ps->p), sizeof(double*), TARGET_PARAM | FROM
    //
    // map(to: ps->p[:22])
    // ps, &(ps->p), sizeof(double*), TARGET_PARAM
    // ps, &(ps->p), sizeof(double*), MEMBER_OF(1)
    // &(ps->p), &(ps->p[0]), 22*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | TO
    //
    // map(ps->ps)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM | TO | FROM
    //
    // map(from: ps->ps->s.i)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    //
    // map(from: ps->ps->ps)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    //
    // map(ps->ps->ps->ps)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
    //
    // map(to: ps->ps->ps->s.f[:22])
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
    //
    // map(to: s.f[:22]) map(from: s.p[:33])
    // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1) +
    //     sizeof(double*) (**), TARGET_PARAM
    // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | TO
    // &s, &(s.p), sizeof(double*), MEMBER_OF(1)
    // &(s.p), &(s.p[0]), 33*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    // (*) allocate contiguous space needed to fit all mapped members even if
    //     we allocate space for members not mapped (in this example,
    //     s.f[22..49] and s.s are not mapped, yet we must allocate space for
    //     them as well because they fall between &s.f[0] and &s.p)
    //
    // map(from: s.f[:22]) map(to: ps->p[:33])
    // &s, &(s.f[0]), 22*sizeof(float), TARGET_PARAM | FROM
    // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->p), sizeof(double*), MEMBER_OF(2) (*)
    // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(2) | PTR_AND_OBJ | TO
    // (*) the struct this entry pertains to is the 2nd element in the list of
    //     arguments, hence MEMBER_OF(2)
    //
    // map(from: s.f[:22], s.s) map(to: ps->p[:33])
    // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1), TARGET_PARAM
    // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | FROM
    // &s, &(s.s), sizeof(struct S1), MEMBER_OF(1) | FROM
    // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->p), sizeof(double*), MEMBER_OF(4) (*)
    // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(4) | PTR_AND_OBJ | TO
    // (*) the struct this entry pertains to is the 4th element in the list
    //     of arguments, hence MEMBER_OF(4)

    // Track if the map information being generated is the first for a capture.
    bool IsCaptureFirstInfo = IsFirstComponentList;
    bool IsLink = false; // Is this variable a "declare target link"?

    // Scan the components from the base to the complete expression.
    auto CI = Components.rbegin();
    auto CE = Components.rend();
    auto I = CI;

    // Track if the map information being generated is the first for a list of
    // components.
    bool IsExpressionFirstInfo = true;
    Address BP = Address::invalid();

    if (isa<MemberExpr>(I->getAssociatedExpression())) {
      // The base is the 'this' pointer. The content of the pointer is going
      // to be the base of the field being mapped.
      BP = CGF.LoadCXXThisAddress();
    } else {
      // The base is the reference to the variable.
      // BP = &Var.
      BP = CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();
      if (const auto *VD =
              dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
        if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
                OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
          if (*Res == OMPDeclareTargetDeclAttr::MT_Link) {
            // "declare target link" variables are mapped through the address
            // of their link pointer instead of their own address.
            IsLink = true;
            BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
          }
      }

      // If the variable is a pointer and is being dereferenced (i.e. is not
      // the last component), the base has to be the pointer itself, not its
      // reference. References are ignored for mapping purposes.
      QualType Ty =
          I->getAssociatedDeclaration()->getType().getNonReferenceType();
      if (Ty->isAnyPointerType() && std::next(I) != CE) {
        BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());

        // We do not need to generate individual map information for the
        // pointer, it can be associated with the combined storage.
        ++I;
      }
    }

    // Track whether a component of the list should be marked as MEMBER_OF some
    // combined entry (for partial structs). Only the first PTR_AND_OBJ entry
    // in a component list should be marked as MEMBER_OF, all subsequent entries
    // do not belong to the base struct. E.g.
    // struct S2 s;
    // s.ps->ps->ps->f[:]
    //   (1) (2) (3) (4)
    // ps(1) is a member pointer, ps(2) is a pointee of ps(1), so it is a
    // PTR_AND_OBJ entry; the PTR is ps(1), so MEMBER_OF the base struct. ps(3)
    // is the pointee of ps(2) which is not member of struct s, so it should not
    // be marked as such (it is still PTR_AND_OBJ).
    // The variable is initialized to false so that PTR_AND_OBJ entries which
    // are not struct members are not considered (e.g. array of pointers to
    // data).
    bool ShouldBeMemberOf = false;

    // Variable keeping track of whether or not we have encountered a component
    // in the component list which is a member expression. Useful when we have a
    // pointer or a final array section, in which case it is the previous
    // component in the list which tells us whether we have a member expression.
    // E.g. X.f[:]
    // While processing the final array section "[:]" it is "f" which tells us
    // whether we are dealing with a member of a declared struct.
    const MemberExpr *EncounteredME = nullptr;

    for (; I != CE; ++I) {
      // If the current component is member of a struct (parent struct) mark it.
      if (!EncounteredME) {
        EncounteredME = dyn_cast<MemberExpr>(I->getAssociatedExpression());
        // If we encounter a PTR_AND_OBJ entry from now on it should be marked
        // as MEMBER_OF the parent struct.
        if (EncounteredME)
          ShouldBeMemberOf = true;
      }

      auto Next = std::next(I);

      // We need to generate the addresses and sizes if this is the last
      // component, if the component is a pointer or if it is an array section
      // whose length can't be proved to be one. If this is a pointer, it
      // becomes the base address for the following components.

      // A final array section, is one whose length can't be proved to be one.
      bool IsFinalArraySection =
          isFinalArraySectionExpression(I->getAssociatedExpression());

      // Get information on whether the element is a pointer. Have to do a
      // special treatment for array sections given that they are built-in
      // types.
      const auto *OASE =
          dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
      bool IsPointer =
          (OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
                       .getCanonicalType()
                       ->isAnyPointerType()) ||
          I->getAssociatedExpression()->getType()->isAnyPointerType();

      if (Next == CE || IsPointer || IsFinalArraySection) {
        // If this is not the last component, we expect the pointer to be
        // associated with an array expression or member expression.
        assert((Next == CE ||
                isa<MemberExpr>(Next->getAssociatedExpression()) ||
                isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
                isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) &&
               "Unexpected expression");

        // Address of the element/section this component designates, and its
        // size in bytes.
        Address LB =
            CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();
        llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());

        // If this component is a pointer inside the base struct then we don't
        // need to create any entry for it - it will be combined with the object
        // it is pointing to into a single PTR_AND_OBJ entry.
        bool IsMemberPointer =
            IsPointer && EncounteredME &&
            (dyn_cast<MemberExpr>(I->getAssociatedExpression()) ==
             EncounteredME);
        if (!IsMemberPointer) {
          BasePointers.push_back(BP.getPointer());
          Pointers.push_back(LB.getPointer());
          Sizes.push_back(Size);

          // We need to add a pointer flag for each map that comes from the
          // same expression except for the first one. We also need to signal
          // this map is the first one that relates with the current capture
          // (there is a set of entries for each capture).
          OpenMPOffloadMappingFlags Flags = getMapTypeBits(
              MapType, MapTypeModifier, IsImplicit,
              !IsExpressionFirstInfo || IsLink, IsCaptureFirstInfo && !IsLink);

          if (!IsExpressionFirstInfo) {
            // If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
            // then we reset the TO/FROM/ALWAYS/DELETE flags.
            if (IsPointer)
              Flags &= ~(OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS |
                         OMP_MAP_DELETE);

            if (ShouldBeMemberOf) {
              // Set placeholder value MEMBER_OF=FFFF to indicate that the flag
              // should be later updated with the correct value of MEMBER_OF.
              Flags |= OMP_MAP_MEMBER_OF;
              // From now on, all subsequent PTR_AND_OBJ entries should not be
              // marked as MEMBER_OF.
              ShouldBeMemberOf = false;
            }
          }

          Types.push_back(Flags);
        }

        // If we have encountered a member expression so far, keep track of the
        // mapped member. If the parent is "*this", then the value declaration
        // is nullptr.
        if (EncounteredME) {
          const auto *FD = dyn_cast<FieldDecl>(EncounteredME->getMemberDecl());
          unsigned FieldIndex = FD->getFieldIndex();

          // Update info about the lowest and highest elements for this struct
          if (!PartialStruct.Base.isValid()) {
            PartialStruct.LowestElem = {FieldIndex, LB};
            PartialStruct.HighestElem = {FieldIndex, LB};
            PartialStruct.Base = BP;
          } else if (FieldIndex < PartialStruct.LowestElem.first) {
            PartialStruct.LowestElem = {FieldIndex, LB};
          } else if (FieldIndex > PartialStruct.HighestElem.first) {
            PartialStruct.HighestElem = {FieldIndex, LB};
          }
        }

        // If we have a final array section, we are done with this expression.
        if (IsFinalArraySection)
          break;

        // The pointer becomes the base for the next element.
        if (Next != CE)
          BP = LB;

        IsExpressionFirstInfo = false;
        IsCaptureFirstInfo = false;
      }
    }
  }
7092
7093   /// Return the adjusted map modifiers if the declaration a capture refers to
7094   /// appears in a first-private clause. This is expected to be used only with
7095   /// directives that start with 'target'.
7096   MappableExprsHandler::OpenMPOffloadMappingFlags
7097   getMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap) const {
7098     assert(Cap.capturesVariable() && "Expected capture by reference only!");
7099
7100     // A first private variable captured by reference will use only the
7101     // 'private ptr' and 'map to' flag. Return the right flags if the captured
7102     // declaration is known as first-private in this handler.
7103     if (FirstPrivateDecls.count(Cap.getCapturedVar()))
7104       return MappableExprsHandler::OMP_MAP_PRIVATE |
7105              MappableExprsHandler::OMP_MAP_TO;
7106     return MappableExprsHandler::OMP_MAP_TO |
7107            MappableExprsHandler::OMP_MAP_FROM;
7108   }
7109
7110   static OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position) {
7111     // Member of is given by the 16 MSB of the flag, so rotate by 48 bits.
7112     return static_cast<OpenMPOffloadMappingFlags>(((uint64_t)Position + 1)
7113                                                   << 48);
7114   }
7115
7116   static void setCorrectMemberOfFlag(OpenMPOffloadMappingFlags &Flags,
7117                                      OpenMPOffloadMappingFlags MemberOfFlag) {
7118     // If the entry is PTR_AND_OBJ but has not been marked with the special
7119     // placeholder value 0xFFFF in the MEMBER_OF field, then it should not be
7120     // marked as MEMBER_OF.
7121     if ((Flags & OMP_MAP_PTR_AND_OBJ) &&
7122         ((Flags & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF))
7123       return;
7124
7125     // Reset the placeholder value to prepare the flag for the assignment of the
7126     // proper MEMBER_OF value.
7127     Flags &= ~OMP_MAP_MEMBER_OF;
7128     Flags |= MemberOfFlag;
7129   }
7130
7131 public:
7132   MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
7133       : CurDir(Dir), CGF(CGF) {
7134     // Extract firstprivate clause information.
7135     for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
7136       for (const auto *D : C->varlists())
7137         FirstPrivateDecls.insert(
7138             cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
7139     // Extract device pointer clause information.
7140     for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
7141       for (auto L : C->component_lists())
7142         DevPointersMap[L.first].push_back(L.second);
7143   }
7144
7145   /// Generate code for the combined entry if we have a partially mapped struct
7146   /// and take care of the mapping flags of the arguments corresponding to
7147   /// individual struct members.
7148   void emitCombinedEntry(MapBaseValuesArrayTy &BasePointers,
7149                          MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
7150                          MapFlagsArrayTy &Types, MapFlagsArrayTy &CurTypes,
7151                          const StructRangeInfoTy &PartialStruct) const {
7152     // Base is the base of the struct
7153     BasePointers.push_back(PartialStruct.Base.getPointer());
7154     // Pointer is the address of the lowest element
7155     llvm::Value *LB = PartialStruct.LowestElem.second.getPointer();
7156     Pointers.push_back(LB);
7157     // Size is (addr of {highest+1} element) - (addr of lowest element)
7158     llvm::Value *HB = PartialStruct.HighestElem.second.getPointer();
7159     llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(HB, /*Idx0=*/1);
7160     llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
7161     llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
7162     llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
7163     llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.SizeTy,
7164                                                   /*isSinged=*/false);
7165     Sizes.push_back(Size);
7166     // Map type is always TARGET_PARAM
7167     Types.push_back(OMP_MAP_TARGET_PARAM);
7168     // Remove TARGET_PARAM flag from the first element
7169     (*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM;
7170
7171     // All other current entries will be MEMBER_OF the combined entry
7172     // (except for PTR_AND_OBJ entries which do not have a placeholder value
7173     // 0xFFFF in the MEMBER_OF field).
7174     OpenMPOffloadMappingFlags MemberOfFlag =
7175         getMemberOfFlag(BasePointers.size() - 1);
7176     for (auto &M : CurTypes)
7177       setCorrectMemberOfFlag(M, MemberOfFlag);
7178   }
7179
7180   /// Generate all the base pointers, section pointers, sizes and map
7181   /// types for the extracted mappable expressions. Also, for each item that
7182   /// relates with a device pointer, a pair of the relevant declaration and
7183   /// index where it occurs is appended to the device pointers info array.
7184   void generateAllInfo(MapBaseValuesArrayTy &BasePointers,
7185                        MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
7186                        MapFlagsArrayTy &Types) const {
7187     // We have to process the component lists that relate with the same
7188     // declaration in a single chunk so that we can generate the map flags
7189     // correctly. Therefore, we organize all lists in a map.
7190     llvm::MapVector<const ValueDecl *, SmallVector<MapInfo, 8>> Info;
7191
7192     // Helper function to fill the information map for the different supported
7193     // clauses.
7194     auto &&InfoGen = [&Info](
7195         const ValueDecl *D,
7196         OMPClauseMappableExprCommon::MappableExprComponentListRef L,
7197         OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier,
7198         bool ReturnDevicePointer, bool IsImplicit) {
7199       const ValueDecl *VD =
7200           D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
7201       Info[VD].emplace_back(L, MapType, MapModifier, ReturnDevicePointer,
7202                             IsImplicit);
7203     };
7204
7205     // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
7206     for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
7207       for (const auto &L : C->component_lists()) {
7208         InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier(),
7209             /*ReturnDevicePointer=*/false, C->isImplicit());
7210       }
7211     for (const auto *C : this->CurDir.getClausesOfKind<OMPToClause>())
7212       for (const auto &L : C->component_lists()) {
7213         InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown,
7214             /*ReturnDevicePointer=*/false, C->isImplicit());
7215       }
7216     for (const auto *C : this->CurDir.getClausesOfKind<OMPFromClause>())
7217       for (const auto &L : C->component_lists()) {
7218         InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown,
7219             /*ReturnDevicePointer=*/false, C->isImplicit());
7220       }
7221
7222     // Look at the use_device_ptr clause information and mark the existing map
7223     // entries as such. If there is no map information for an entry in the
7224     // use_device_ptr list, we create one with map type 'alloc' and zero size
7225     // section. It is the user fault if that was not mapped before. If there is
7226     // no map information and the pointer is a struct member, then we defer the
7227     // emission of that entry until the whole struct has been processed.
7228     llvm::MapVector<const ValueDecl *, SmallVector<DeferredDevicePtrEntryTy, 4>>
7229         DeferredInfo;
7230
7231     // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
7232     for (const auto *C :
7233         this->CurDir.getClausesOfKind<OMPUseDevicePtrClause>()) {
7234       for (const auto &L : C->component_lists()) {
7235         assert(!L.second.empty() && "Not expecting empty list of components!");
7236         const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
7237         VD = cast<ValueDecl>(VD->getCanonicalDecl());
7238         const Expr *IE = L.second.back().getAssociatedExpression();
7239         // If the first component is a member expression, we have to look into
7240         // 'this', which maps to null in the map of map information. Otherwise
7241         // look directly for the information.
7242         auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
7243
7244         // We potentially have map information for this declaration already.
7245         // Look for the first set of components that refer to it.
7246         if (It != Info.end()) {
7247           auto CI = std::find_if(
7248               It->second.begin(), It->second.end(), [VD](const MapInfo &MI) {
7249                 return MI.Components.back().getAssociatedDeclaration() == VD;
7250               });
7251           // If we found a map entry, signal that the pointer has to be returned
7252           // and move on to the next declaration.
7253           if (CI != It->second.end()) {
7254             CI->ReturnDevicePointer = true;
7255             continue;
7256           }
7257         }
7258
7259         // We didn't find any match in our map information - generate a zero
7260         // size array section - if the pointer is a struct member we defer this
7261         // action until the whole struct has been processed.
7262         // FIXME: MSVC 2013 seems to require this-> to find member CGF.
7263         if (isa<MemberExpr>(IE)) {
7264           // Insert the pointer into Info to be processed by
7265           // generateInfoForComponentList. Because it is a member pointer
7266           // without a pointee, no entry will be generated for it, therefore
7267           // we need to generate one after the whole struct has been processed.
7268           // Nonetheless, generateInfoForComponentList must be called to take
7269           // the pointer into account for the calculation of the range of the
7270           // partial struct.
7271           InfoGen(nullptr, L.second, OMPC_MAP_unknown, OMPC_MAP_unknown,
7272                   /*ReturnDevicePointer=*/false, C->isImplicit());
7273           DeferredInfo[nullptr].emplace_back(IE, VD);
7274         } else {
7275           llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
7276               this->CGF.EmitLValue(IE), IE->getExprLoc());
7277           BasePointers.emplace_back(Ptr, VD);
7278           Pointers.push_back(Ptr);
7279           Sizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
7280           Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
7281         }
7282       }
7283     }
7284
7285     for (const auto &M : Info) {
7286       // We need to know when we generate information for the first component
7287       // associated with a capture, because the mapping flags depend on it.
7288       bool IsFirstComponentList = true;
7289
7290       // Temporary versions of arrays
7291       MapBaseValuesArrayTy CurBasePointers;
7292       MapValuesArrayTy CurPointers;
7293       MapValuesArrayTy CurSizes;
7294       MapFlagsArrayTy CurTypes;
7295       StructRangeInfoTy PartialStruct;
7296
7297       for (const MapInfo &L : M.second) {
7298         assert(!L.Components.empty() &&
7299                "Not expecting declaration with no component lists.");
7300
7301         // Remember the current base pointer index.
7302         unsigned CurrentBasePointersIdx = CurBasePointers.size();
7303         // FIXME: MSVC 2013 seems to require this-> to find the member method.
7304         this->generateInfoForComponentList(
7305             L.MapType, L.MapTypeModifier, L.Components, CurBasePointers,
7306             CurPointers, CurSizes, CurTypes, PartialStruct,
7307             IsFirstComponentList, L.IsImplicit);
7308
7309         // If this entry relates with a device pointer, set the relevant
7310         // declaration and add the 'return pointer' flag.
7311         if (L.ReturnDevicePointer) {
7312           assert(CurBasePointers.size() > CurrentBasePointersIdx &&
7313                  "Unexpected number of mapped base pointers.");
7314
7315           const ValueDecl *RelevantVD =
7316               L.Components.back().getAssociatedDeclaration();
7317           assert(RelevantVD &&
7318                  "No relevant declaration related with device pointer??");
7319
7320           CurBasePointers[CurrentBasePointersIdx].setDevicePtrDecl(RelevantVD);
7321           CurTypes[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
7322         }
7323         IsFirstComponentList = false;
7324       }
7325
7326       // Append any pending zero-length pointers which are struct members and
7327       // used with use_device_ptr.
7328       auto CI = DeferredInfo.find(M.first);
7329       if (CI != DeferredInfo.end()) {
7330         for (const DeferredDevicePtrEntryTy &L : CI->second) {
7331           llvm::Value *BasePtr = this->CGF.EmitLValue(L.IE).getPointer();
7332           llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
7333               this->CGF.EmitLValue(L.IE), L.IE->getExprLoc());
7334           CurBasePointers.emplace_back(BasePtr, L.VD);
7335           CurPointers.push_back(Ptr);
7336           CurSizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
7337           // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
7338           // value MEMBER_OF=FFFF so that the entry is later updated with the
7339           // correct value of MEMBER_OF.
7340           CurTypes.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
7341                              OMP_MAP_MEMBER_OF);
7342         }
7343       }
7344
7345       // If there is an entry in PartialStruct it means we have a struct with
7346       // individual members mapped. Emit an extra combined entry.
7347       if (PartialStruct.Base.isValid())
7348         emitCombinedEntry(BasePointers, Pointers, Sizes, Types, CurTypes,
7349                           PartialStruct);
7350
7351       // We need to append the results of this capture to what we already have.
7352       BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
7353       Pointers.append(CurPointers.begin(), CurPointers.end());
7354       Sizes.append(CurSizes.begin(), CurSizes.end());
7355       Types.append(CurTypes.begin(), CurTypes.end());
7356     }
7357   }
7358
7359   /// Generate the base pointers, section pointers, sizes and map types
7360   /// associated to a given capture.
7361   void generateInfoForCapture(const CapturedStmt::Capture *Cap,
7362                               llvm::Value *Arg,
7363                               MapBaseValuesArrayTy &BasePointers,
7364                               MapValuesArrayTy &Pointers,
7365                               MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
7366                               StructRangeInfoTy &PartialStruct) const {
7367     assert(!Cap->capturesVariableArrayType() &&
7368            "Not expecting to generate map info for a variable array type!");
7369
7370     // We need to know when we generating information for the first component
7371     // associated with a capture, because the mapping flags depend on it.
7372     bool IsFirstComponentList = true;
7373
7374     const ValueDecl *VD = Cap->capturesThis()
7375                               ? nullptr
7376                               : Cap->getCapturedVar()->getCanonicalDecl();
7377
7378     // If this declaration appears in a is_device_ptr clause we just have to
7379     // pass the pointer by value. If it is a reference to a declaration, we just
7380     // pass its value.
7381     if (DevPointersMap.count(VD)) {
7382       BasePointers.emplace_back(Arg, VD);
7383       Pointers.push_back(Arg);
7384       Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
7385       Types.push_back(OMP_MAP_LITERAL | OMP_MAP_TARGET_PARAM);
7386       return;
7387     }
7388
7389     // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
7390     for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
7391       for (const auto &L : C->decl_component_lists(VD)) {
7392         assert(L.first == VD &&
7393                "We got information for the wrong declaration??");
7394         assert(!L.second.empty() &&
7395                "Not expecting declaration with no component lists.");
7396         generateInfoForComponentList(C->getMapType(), C->getMapTypeModifier(),
7397                                      L.second, BasePointers, Pointers, Sizes,
7398                                      Types, PartialStruct, IsFirstComponentList,
7399                                      C->isImplicit());
7400         IsFirstComponentList = false;
7401       }
7402   }
7403
7404   /// Generate the base pointers, section pointers, sizes and map types
7405   /// associated with the declare target link variables.
7406   void generateInfoForDeclareTargetLink(MapBaseValuesArrayTy &BasePointers,
7407                                         MapValuesArrayTy &Pointers,
7408                                         MapValuesArrayTy &Sizes,
7409                                         MapFlagsArrayTy &Types) const {
7410     // Map other list items in the map clause which are not captured variables
7411     // but "declare target link" global variables.,
7412     for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>()) {
7413       for (const auto &L : C->component_lists()) {
7414         if (!L.first)
7415           continue;
7416         const auto *VD = dyn_cast<VarDecl>(L.first);
7417         if (!VD)
7418           continue;
7419         llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
7420             OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
7421         if (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link)
7422           continue;
7423         StructRangeInfoTy PartialStruct;
7424         generateInfoForComponentList(
7425             C->getMapType(), C->getMapTypeModifier(), L.second, BasePointers,
7426             Pointers, Sizes, Types, PartialStruct,
7427             /*IsFirstComponentList=*/true, C->isImplicit());
7428         assert(!PartialStruct.Base.isValid() &&
7429                "No partial structs for declare target link expected.");
7430       }
7431     }
7432   }
7433
7434   /// Generate the default map information for a given capture \a CI,
7435   /// record field declaration \a RI and captured value \a CV.
7436   void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
7437                               const FieldDecl &RI, llvm::Value *CV,
7438                               MapBaseValuesArrayTy &CurBasePointers,
7439                               MapValuesArrayTy &CurPointers,
7440                               MapValuesArrayTy &CurSizes,
7441                               MapFlagsArrayTy &CurMapTypes) const {
7442     // Do the default mapping.
7443     if (CI.capturesThis()) {
7444       CurBasePointers.push_back(CV);
7445       CurPointers.push_back(CV);
7446       const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
7447       CurSizes.push_back(CGF.getTypeSize(PtrTy->getPointeeType()));
7448       // Default map type.
7449       CurMapTypes.push_back(OMP_MAP_TO | OMP_MAP_FROM);
7450     } else if (CI.capturesVariableByCopy()) {
7451       CurBasePointers.push_back(CV);
7452       CurPointers.push_back(CV);
7453       if (!RI.getType()->isAnyPointerType()) {
7454         // We have to signal to the runtime captures passed by value that are
7455         // not pointers.
7456         CurMapTypes.push_back(OMP_MAP_LITERAL);
7457         CurSizes.push_back(CGF.getTypeSize(RI.getType()));
7458       } else {
7459         // Pointers are implicitly mapped with a zero size and no flags
7460         // (other than first map that is added for all implicit maps).
7461         CurMapTypes.push_back(OMP_MAP_NONE);
7462         CurSizes.push_back(llvm::Constant::getNullValue(CGF.SizeTy));
7463       }
7464     } else {
7465       assert(CI.capturesVariable() && "Expected captured reference.");
7466       CurBasePointers.push_back(CV);
7467       CurPointers.push_back(CV);
7468
7469       const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
7470       QualType ElementType = PtrTy->getPointeeType();
7471       CurSizes.push_back(CGF.getTypeSize(ElementType));
7472       // The default map type for a scalar/complex type is 'to' because by
7473       // default the value doesn't have to be retrieved. For an aggregate
7474       // type, the default is 'tofrom'.
7475       CurMapTypes.push_back(getMapModifiersForPrivateClauses(CI));
7476     }
7477     // Every default map produces a single argument which is a target parameter.
7478     CurMapTypes.back() |= OMP_MAP_TARGET_PARAM;
7479
7480     // Add flag stating this is an implicit map.
7481     CurMapTypes.back() |= OMP_MAP_IMPLICIT;
7482   }
7483 };
7484
/// Device IDs with a reserved meaning for the offloading runtime calls
/// emitted below (passed as the first argument of __tgt_target*).
enum OpenMPOffloadingReservedDeviceIDs {
  /// Device ID if the device was not defined, runtime should get it
  /// from environment variables in the spec.
  OMP_DEVICEID_UNDEF = -1,
};
7490 } // anonymous namespace
7491
/// Emit the arrays used to pass the captures and map information to the
/// offloading runtime library. If there is no map or capture information,
/// return nullptr by reference.
///
/// Populates \a Info with the emitted base-pointer, pointer, size and
/// map-type arrays. Sizes and map types that are compile-time constants are
/// emitted as private constant globals; only runtime-evaluated sizes require
/// a per-call stack array that is filled with stores.
static void
emitOffloadingArrays(CodeGenFunction &CGF,
                     MappableExprsHandler::MapBaseValuesArrayTy &BasePointers,
                     MappableExprsHandler::MapValuesArrayTy &Pointers,
                     MappableExprsHandler::MapValuesArrayTy &Sizes,
                     MappableExprsHandler::MapFlagsArrayTy &MapTypes,
                     CGOpenMPRuntime::TargetDataInfo &Info) {
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGF.getContext();

  // Reset the array information.
  Info.clearArrayInfo();
  Info.NumberOfPtrs = BasePointers.size();

  if (Info.NumberOfPtrs) {
    // Detect if we have any capture size requiring runtime evaluation of the
    // size so that a constant array could be eventually used.
    bool hasRuntimeEvaluationCaptureSize = false;
    for (llvm::Value *S : Sizes)
      if (!isa<llvm::Constant>(S)) {
        hasRuntimeEvaluationCaptureSize = true;
        break;
      }

    // All the arrays have one element per mapped pointer.
    llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
    QualType PointerArrayType =
        Ctx.getConstantArrayType(Ctx.VoidPtrTy, PointerNumAP, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0);

    // Base pointers and pointers always depend on runtime values, so they are
    // emitted as stack temporaries filled with stores below.
    Info.BasePointersArray =
        CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
    Info.PointersArray =
        CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();

    // If we don't have any VLA types or other types that require runtime
    // evaluation, we can use a constant array for the map sizes, otherwise we
    // need to fill up the arrays as we do for the pointers.
    if (hasRuntimeEvaluationCaptureSize) {
      QualType SizeArrayType = Ctx.getConstantArrayType(
          Ctx.getSizeType(), PointerNumAP, ArrayType::Normal,
          /*IndexTypeQuals=*/0);
      Info.SizesArray =
          CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
    } else {
      // We expect all the sizes to be constant, so we collect them to create
      // a constant array.
      SmallVector<llvm::Constant *, 16> ConstSizes;
      for (llvm::Value *S : Sizes)
        ConstSizes.push_back(cast<llvm::Constant>(S));

      auto *SizesArrayInit = llvm::ConstantArray::get(
          llvm::ArrayType::get(CGM.SizeTy, ConstSizes.size()), ConstSizes);
      std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
      auto *SizesArrayGbl = new llvm::GlobalVariable(
          CGM.getModule(), SizesArrayInit->getType(),
          /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
          SizesArrayInit, Name);
      SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      Info.SizesArray = SizesArrayGbl;
    }

    // The map types are always constant so we don't need to generate code to
    // fill arrays. Instead, we create an array constant.
    SmallVector<uint64_t, 4> Mapping(MapTypes.size(), 0);
    llvm::copy(MapTypes, Mapping.begin());
    llvm::Constant *MapTypesArrayInit =
        llvm::ConstantDataArray::get(CGF.Builder.getContext(), Mapping);
    std::string MaptypesName =
        CGM.getOpenMPRuntime().getName({"offload_maptypes"});
    auto *MapTypesArrayGbl = new llvm::GlobalVariable(
        CGM.getModule(), MapTypesArrayInit->getType(),
        /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
        MapTypesArrayInit, MaptypesName);
    MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    Info.MapTypesArray = MapTypesArrayGbl;

    // Fill the base-pointer and pointer arrays (and the sizes array if it is
    // not a constant global) element by element.
    for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
      llvm::Value *BPVal = *BasePointers[I];
      llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
          llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
          Info.BasePointersArray, 0, I);
      // Cast the slot so the store can use the value's own pointer type.
      BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
      Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
      CGF.Builder.CreateStore(BPVal, BPAddr);

      // Record where device-pointer captures live so use_device_ptr /
      // is_device_ptr handling can find them later.
      if (Info.requiresDevicePointerInfo())
        if (const ValueDecl *DevVD = BasePointers[I].getDevicePtrDecl())
          Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);

      llvm::Value *PVal = Pointers[I];
      llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
          llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
          Info.PointersArray, 0, I);
      P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
      Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
      CGF.Builder.CreateStore(PVal, PAddr);

      if (hasRuntimeEvaluationCaptureSize) {
        llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
            llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs),
            Info.SizesArray,
            /*Idx0=*/0,
            /*Idx1=*/I);
        Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
        CGF.Builder.CreateStore(
            CGF.Builder.CreateIntCast(Sizes[I], CGM.SizeTy, /*isSigned=*/true),
            SAddr);
      }
    }
  }
}
7608 /// Emit the arguments to be passed to the runtime library based on the
7609 /// arrays of pointers, sizes and map types.
7610 static void emitOffloadingArraysArgument(
7611     CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
7612     llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
7613     llvm::Value *&MapTypesArrayArg, CGOpenMPRuntime::TargetDataInfo &Info) {
7614   CodeGenModule &CGM = CGF.CGM;
7615   if (Info.NumberOfPtrs) {
7616     BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7617         llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
7618         Info.BasePointersArray,
7619         /*Idx0=*/0, /*Idx1=*/0);
7620     PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7621         llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
7622         Info.PointersArray,
7623         /*Idx0=*/0,
7624         /*Idx1=*/0);
7625     SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7626         llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs), Info.SizesArray,
7627         /*Idx0=*/0, /*Idx1=*/0);
7628     MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7629         llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
7630         Info.MapTypesArray,
7631         /*Idx0=*/0,
7632         /*Idx1=*/0);
7633   } else {
7634     BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
7635     PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
7636     SizesArrayArg = llvm::ConstantPointerNull::get(CGM.SizeTy->getPointerTo());
7637     MapTypesArrayArg =
7638         llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
7639   }
7640 }
7641
/// Emit the code that launches a target region.
///
/// \param D The target-related executable directive being emitted.
/// \param OutlinedFn Host fallback version of the target region.
/// \param OutlinedFnID Unique ID identifying the region to the offloading
///        runtime; null when no device code was generated, in which case only
///        the host version is executed.
/// \param IfCond Condition of an 'if' clause, if present; when it evaluates
///        to false the host version is executed.
/// \param Device Expression of a 'device' clause, if present.
void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     llvm::Value *OutlinedFn,
                                     llvm::Value *OutlinedFnID,
                                     const Expr *IfCond, const Expr *Device) {
  if (!CGF.HaveInsertPoint())
    return;

  assert(OutlinedFn && "Invalid outlined function!");

  // A depend clause requires the target invocation to be wrapped in an outer
  // task so the dependencies can be honored.
  const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>();
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
  auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
                                            PrePostActionTy &) {
    CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
  };
  emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen);

  // InputInfo and MapTypesArray are filled by TargetThenGen below and read by
  // ThenGen (which may run inside a task body), so they must outlive both.
  CodeGenFunction::OMPTargetDataInfo InputInfo;
  llvm::Value *MapTypesArray = nullptr;
  // Fill up the pointer arrays and transfer execution to the device.
  auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
                    &MapTypesArray, &CS, RequiresOuterTask,
                    &CapturedVars](CodeGenFunction &CGF, PrePostActionTy &) {
    // On top of the arrays that were filled up, the target offloading call
    // takes as arguments the device id as well as the host pointer. The host
    // pointer is used by the runtime library to identify the current target
    // region, so it only has to be unique and not necessarily point to
    // anything. It could be the pointer to the outlined function that
    // implements the target region, but we aren't using that so that the
    // compiler doesn't need to keep that, and could therefore inline the host
    // function if proven worthwhile during optimization.

    // From this point on, we need to have an ID of the target region defined.
    assert(OutlinedFnID && "Invalid outlined function ID!");

    // Emit device ID if any.
    llvm::Value *DeviceID;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      // No device clause: let the runtime pick the default device.
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum =
        CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);

    // Return value of the runtime offloading call.
    llvm::Value *Return;

    llvm::Value *NumTeams = emitNumTeamsForTargetDirective(*this, CGF, D);
    llvm::Value *NumThreads = emitNumThreadsForTargetDirective(*this, CGF, D);

    bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
    // The target region is an outlined function launched by the runtime
    // via calls __tgt_target() or __tgt_target_teams().
    //
    // __tgt_target() launches a target region with one team and one thread,
    // executing a serial region.  This master thread may in turn launch
    // more threads within its team upon encountering a parallel region,
    // however, no additional teams can be launched on the device.
    //
    // __tgt_target_teams() launches a target region with one or more teams,
    // each with one or more threads.  This call is required for target
    // constructs such as:
    //  'target teams'
    //  'target' / 'teams'
    //  'target teams distribute parallel for'
    //  'target parallel'
    // and so on.
    //
    // Note that on the host and CPU targets, the runtime implementation of
    // these calls simply call the outlined function without forking threads.
    // The outlined functions themselves have runtime calls to
    // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
    // the compiler in emitTeamsCall() and emitParallelCall().
    //
    // In contrast, on the NVPTX target, the implementation of
    // __tgt_target_teams() launches a GPU kernel with the requested number
    // of teams and threads so no additional calls to the runtime are required.
    if (NumTeams) {
      // If we have NumTeams defined this means that we have an enclosed teams
      // region. Therefore we also expect to have NumThreads defined. These two
      // values should be defined in the presence of a teams directive,
      // regardless of having any clauses associated. If the user is using teams
      // but no clauses, these two values will be the default that should be
      // passed to the runtime library - a 32-bit integer with the value zero.
      assert(NumThreads && "Thread limit expression should be available along "
                           "with number of teams.");
      llvm::Value *OffloadingArgs[] = {DeviceID,
                                       OutlinedFnID,
                                       PointerNum,
                                       InputInfo.BasePointersArray.getPointer(),
                                       InputInfo.PointersArray.getPointer(),
                                       InputInfo.SizesArray.getPointer(),
                                       MapTypesArray,
                                       NumTeams,
                                       NumThreads};
      Return = CGF.EmitRuntimeCall(
          createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_teams_nowait
                                          : OMPRTL__tgt_target_teams),
          OffloadingArgs);
    } else {
      llvm::Value *OffloadingArgs[] = {DeviceID,
                                       OutlinedFnID,
                                       PointerNum,
                                       InputInfo.BasePointersArray.getPointer(),
                                       InputInfo.PointersArray.getPointer(),
                                       InputInfo.SizesArray.getPointer(),
                                       MapTypesArray};
      Return = CGF.EmitRuntimeCall(
          createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_nowait
                                          : OMPRTL__tgt_target),
          OffloadingArgs);
    }

    // Check the error code and execute the host version if required.
    llvm::BasicBlock *OffloadFailedBlock =
        CGF.createBasicBlock("omp_offload.failed");
    llvm::BasicBlock *OffloadContBlock =
        CGF.createBasicBlock("omp_offload.cont");
    // A non-zero return value from the runtime means offloading failed.
    llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
    CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);

    CGF.EmitBlock(OffloadFailedBlock);
    if (RequiresOuterTask) {
      // Re-capture the variables in the task context before calling the host
      // fallback.
      CapturedVars.clear();
      CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
    }
    emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars);
    CGF.EmitBranch(OffloadContBlock);

    CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
  };

  // Notify that the host version must be executed.
  auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars,
                    RequiresOuterTask](CodeGenFunction &CGF,
                                       PrePostActionTy &) {
    if (RequiresOuterTask) {
      CapturedVars.clear();
      CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
    }
    emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars);
  };

  // Emits the offloading arrays for the captures/map clauses and then runs
  // ThenGen (directly, or wrapped in a task when a depend clause is present).
  auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
                          &CapturedVars, RequiresOuterTask,
                          &CS](CodeGenFunction &CGF, PrePostActionTy &) {
    // Fill up the arrays with all the captured variables.
    MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
    MappableExprsHandler::MapValuesArrayTy Pointers;
    MappableExprsHandler::MapValuesArrayTy Sizes;
    MappableExprsHandler::MapFlagsArrayTy MapTypes;

    // Get mappable expression information.
    MappableExprsHandler MEHandler(D, CGF);

    auto RI = CS.getCapturedRecordDecl()->field_begin();
    auto CV = CapturedVars.begin();
    for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
                                              CE = CS.capture_end();
         CI != CE; ++CI, ++RI, ++CV) {
      MappableExprsHandler::MapBaseValuesArrayTy CurBasePointers;
      MappableExprsHandler::MapValuesArrayTy CurPointers;
      MappableExprsHandler::MapValuesArrayTy CurSizes;
      MappableExprsHandler::MapFlagsArrayTy CurMapTypes;
      MappableExprsHandler::StructRangeInfoTy PartialStruct;

      // VLA sizes are passed to the outlined region by copy and do not have map
      // information associated.
      if (CI->capturesVariableArrayType()) {
        CurBasePointers.push_back(*CV);
        CurPointers.push_back(*CV);
        CurSizes.push_back(CGF.getTypeSize(RI->getType()));
        // Copy to the device as an argument. No need to retrieve it.
        CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
                              MappableExprsHandler::OMP_MAP_TARGET_PARAM);
      } else {
        // If we have any information in the map clause, we use it, otherwise we
        // just do a default mapping.
        MEHandler.generateInfoForCapture(CI, *CV, CurBasePointers, CurPointers,
                                         CurSizes, CurMapTypes, PartialStruct);
        if (CurBasePointers.empty())
          MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers,
                                           CurPointers, CurSizes, CurMapTypes);
      }
      // We expect to have at least an element of information for this capture.
      assert(!CurBasePointers.empty() &&
             "Non-existing map pointer for capture!");
      assert(CurBasePointers.size() == CurPointers.size() &&
             CurBasePointers.size() == CurSizes.size() &&
             CurBasePointers.size() == CurMapTypes.size() &&
             "Inconsistent map information sizes!");

      // If there is an entry in PartialStruct it means we have a struct with
      // individual members mapped. Emit an extra combined entry.
      if (PartialStruct.Base.isValid())
        MEHandler.emitCombinedEntry(BasePointers, Pointers, Sizes, MapTypes,
                                    CurMapTypes, PartialStruct);

      // We need to append the results of this capture to what we already have.
      BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
      Pointers.append(CurPointers.begin(), CurPointers.end());
      Sizes.append(CurSizes.begin(), CurSizes.end());
      MapTypes.append(CurMapTypes.begin(), CurMapTypes.end());
    }
    // Map other list items in the map clause which are not captured variables
    // but "declare target link" global variables.
    MEHandler.generateInfoForDeclareTargetLink(BasePointers, Pointers, Sizes,
                                               MapTypes);

    TargetDataInfo Info;
    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
    emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
                                 Info.PointersArray, Info.SizesArray,
                                 Info.MapTypesArray, Info);
    // Publish the arrays to ThenGen via the captured-by-reference locals.
    InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
    InputInfo.BasePointersArray =
        Address(Info.BasePointersArray, CGM.getPointerAlign());
    InputInfo.PointersArray =
        Address(Info.PointersArray, CGM.getPointerAlign());
    InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
    MapTypesArray = Info.MapTypesArray;
    if (RequiresOuterTask)
      CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
    else
      emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
  };

  // Host-only path, possibly wrapped in a task for depend clauses.
  auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
                             CodeGenFunction &CGF, PrePostActionTy &) {
    if (RequiresOuterTask) {
      CodeGenFunction::OMPTargetDataInfo InputInfo;
      CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
    } else {
      emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
    }
  };

  // If we have a target function ID it means that we need to support
  // offloading, otherwise, just execute on the host. We need to execute on host
  // regardless of the conditional in the if clause if, e.g., the user do not
  // specify target triples.
  if (OutlinedFnID) {
    if (IfCond) {
      emitOMPIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
    } else {
      RegionCodeGenTy ThenRCG(TargetThenGen);
      ThenRCG(CGF);
    }
  } else {
    RegionCodeGenTy ElseRCG(TargetElseGen);
    ElseRCG(CGF);
  }
}
7902
/// Recursively scan \p S (a statement from the body of the function whose
/// mangled name is \p ParentName) looking for OpenMP target execution
/// directives, and emit a device function for each one found.
void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
                                                    StringRef ParentName) {
  if (!S)
    return;

  // Codegen OMP target directives that offload compute to the device.
  bool RequiresDeviceCodegen =
      isa<OMPExecutableDirective>(S) &&
      isOpenMPTargetExecutionDirective(
          cast<OMPExecutableDirective>(S)->getDirectiveKind());

  if (RequiresDeviceCodegen) {
    const auto &E = *cast<OMPExecutableDirective>(S);
    unsigned DeviceID;
    unsigned FileID;
    unsigned Line;
    // Target regions are uniquely identified by the parent function name
    // plus the device/file/line triple of the directive's start location.
    getTargetEntryUniqueInfo(CGM.getContext(), E.getLocStart(), DeviceID,
                             FileID, Line);

    // Is this a target region that should not be emitted as an entry point? If
    // so just signal we are done with this target region.
    if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID,
                                                            ParentName, Line))
      return;

    // Dispatch on the exact combined directive kind so the matching
    // device-function emitter is invoked.
    switch (E.getDirectiveKind()) {
    case OMPD_target:
      CodeGenFunction::EmitOMPTargetDeviceFunction(CGM, ParentName,
                                                   cast<OMPTargetDirective>(E));
      break;
    case OMPD_target_parallel:
      CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
          CGM, ParentName, cast<OMPTargetParallelDirective>(E));
      break;
    case OMPD_target_teams:
      CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
          CGM, ParentName, cast<OMPTargetTeamsDirective>(E));
      break;
    case OMPD_target_teams_distribute:
      CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
          CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(E));
      break;
    case OMPD_target_teams_distribute_simd:
      CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
          CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(E));
      break;
    case OMPD_target_parallel_for:
      CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
          CGM, ParentName, cast<OMPTargetParallelForDirective>(E));
      break;
    case OMPD_target_parallel_for_simd:
      CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
          CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(E));
      break;
    case OMPD_target_simd:
      CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
          CGM, ParentName, cast<OMPTargetSimdDirective>(E));
      break;
    case OMPD_target_teams_distribute_parallel_for:
      CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
          CGM, ParentName,
          cast<OMPTargetTeamsDistributeParallelForDirective>(E));
      break;
    case OMPD_target_teams_distribute_parallel_for_simd:
      CodeGenFunction::
          EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
              CGM, ParentName,
              cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
      break;
    // None of the remaining directives is a target execution directive, so
    // reaching any of them here means this switch disagrees with
    // isOpenMPTargetExecutionDirective() above.
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_unknown:
      llvm_unreachable("Unknown target directive for OpenMP device codegen.");
    }
    return;
  }

  if (const auto *E = dyn_cast<OMPExecutableDirective>(S)) {
    if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
      return;

    // For any other executable directive, nested target regions can only
    // appear inside the innermost captured statement.
    scanForTargetRegionsFunctions(
        E->getInnermostCapturedStmt()->getCapturedStmt(), ParentName);
    return;
  }

  // If this is a lambda function, look into its body.
  if (const auto *L = dyn_cast<LambdaExpr>(S))
    S = L->getBody();

  // Keep looking for target regions recursively.
  for (const Stmt *II : S->children())
    scanForTargetRegionsFunctions(II, ParentName);
}
8037
8038 bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
8039   const auto *FD = cast<FunctionDecl>(GD.getDecl());
8040
8041   // If emitting code for the host, we do not process FD here. Instead we do
8042   // the normal code generation.
8043   if (!CGM.getLangOpts().OpenMPIsDevice)
8044     return false;
8045
8046   // Try to detect target regions in the function.
8047   scanForTargetRegionsFunctions(FD->getBody(), CGM.getMangledName(GD));
8048
8049   // Do not to emit function if it is not marked as declare target.
8050   return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD) &&
8051          AlreadyEmittedTargetFunctions.count(FD->getCanonicalDecl()) == 0;
8052 }
8053
8054 bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
8055   if (!CGM.getLangOpts().OpenMPIsDevice)
8056     return false;
8057
8058   // Check if there are Ctors/Dtors in this declaration and look for target
8059   // regions in it. We use the complete variant to produce the kernel name
8060   // mangling.
8061   QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
8062   if (const auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
8063     for (const CXXConstructorDecl *Ctor : RD->ctors()) {
8064       StringRef ParentName =
8065           CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
8066       scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
8067     }
8068     if (const CXXDestructorDecl *Dtor = RD->getDestructor()) {
8069       StringRef ParentName =
8070           CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
8071       scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
8072     }
8073   }
8074
8075   // Do not to emit variable if it is not marked as declare target.
8076   llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
8077       OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
8078           cast<VarDecl>(GD.getDecl()));
8079   return !Res || *Res == OMPDeclareTargetDeclAttr::MT_Link;
8080 }
8081
8082 void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
8083                                                    llvm::Constant *Addr) {
8084   if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
8085           OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
8086     OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
8087     StringRef VarName;
8088     CharUnits VarSize;
8089     llvm::GlobalValue::LinkageTypes Linkage;
8090     switch (*Res) {
8091     case OMPDeclareTargetDeclAttr::MT_To:
8092       Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
8093       VarName = CGM.getMangledName(VD);
8094       VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
8095       Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
8096       // Temp solution to prevent optimizations of the internal variables.
8097       if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
8098         std::string RefName = getName({VarName, "ref"});
8099         if (!CGM.GetGlobalValue(RefName)) {
8100           llvm::Constant *AddrRef =
8101               getOrCreateInternalVariable(Addr->getType(), RefName);
8102           auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
8103           GVAddrRef->setConstant(/*Val=*/true);
8104           GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
8105           GVAddrRef->setInitializer(Addr);
8106           CGM.addCompilerUsedGlobal(GVAddrRef);
8107         }
8108       }
8109       break;
8110     case OMPDeclareTargetDeclAttr::MT_Link:
8111       Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
8112       if (CGM.getLangOpts().OpenMPIsDevice) {
8113         VarName = Addr->getName();
8114         Addr = nullptr;
8115       } else {
8116         VarName = getAddrOfDeclareTargetLink(VD).getName();
8117         Addr =
8118             cast<llvm::Constant>(getAddrOfDeclareTargetLink(VD).getPointer());
8119       }
8120       VarSize = CGM.getPointerSize();
8121       Linkage = llvm::GlobalValue::WeakAnyLinkage;
8122       break;
8123     }
8124     OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
8125         VarName, Addr, VarSize, Flags, Linkage);
8126   }
8127 }
8128
8129 bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
8130   if (isa<FunctionDecl>(GD.getDecl()))
8131     return emitTargetFunctions(GD);
8132
8133   return emitTargetGlobalVariable(GD);
8134 }
8135
8136 CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
8137     CodeGenModule &CGM)
8138     : CGM(CGM) {
8139   if (CGM.getLangOpts().OpenMPIsDevice) {
8140     SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
8141     CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
8142   }
8143 }
8144
8145 CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
8146   if (CGM.getLangOpts().OpenMPIsDevice)
8147     CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
8148 }
8149
8150 bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
8151   if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
8152     return true;
8153
8154   const auto *D = cast<FunctionDecl>(GD.getDecl());
8155   const FunctionDecl *FD = D->getCanonicalDecl();
8156   // Do not to emit function if it is marked as declare target as it was already
8157   // emitted.
8158   if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
8159     if (D->hasBody() && AlreadyEmittedTargetFunctions.count(FD) == 0) {
8160       if (auto *F = dyn_cast_or_null<llvm::Function>(
8161               CGM.GetGlobalValue(CGM.getMangledName(GD))))
8162         return !F->isDeclaration();
8163       return false;
8164     }
8165     return true;
8166   }
8167
8168   return !AlreadyEmittedTargetFunctions.insert(FD).second;
8169 }
8170
8171 llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
8172   // If we have offloading in the current module, we need to emit the entries
8173   // now and register the offloading descriptor.
8174   createOffloadEntriesAndInfoMetadata();
8175
8176   // Create and register the offloading binary descriptors. This is the main
8177   // entity that captures all the information about offloading in the current
8178   // compilation unit.
8179   return createOffloadingBinaryDescriptorRegistration();
8180 }
8181
8182 void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
8183                                     const OMPExecutableDirective &D,
8184                                     SourceLocation Loc,
8185                                     llvm::Value *OutlinedFn,
8186                                     ArrayRef<llvm::Value *> CapturedVars) {
8187   if (!CGF.HaveInsertPoint())
8188     return;
8189
8190   llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
8191   CodeGenFunction::RunCleanupsScope Scope(CGF);
8192
8193   // Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn);
8194   llvm::Value *Args[] = {
8195       RTLoc,
8196       CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
8197       CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
8198   llvm::SmallVector<llvm::Value *, 16> RealArgs;
8199   RealArgs.append(std::begin(Args), std::end(Args));
8200   RealArgs.append(CapturedVars.begin(), CapturedVars.end());
8201
8202   llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams);
8203   CGF.EmitRuntimeCall(RTLFn, RealArgs);
8204 }
8205
8206 void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
8207                                          const Expr *NumTeams,
8208                                          const Expr *ThreadLimit,
8209                                          SourceLocation Loc) {
8210   if (!CGF.HaveInsertPoint())
8211     return;
8212
8213   llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
8214
8215   llvm::Value *NumTeamsVal =
8216       NumTeams
8217           ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
8218                                       CGF.CGM.Int32Ty, /* isSigned = */ true)
8219           : CGF.Builder.getInt32(0);
8220
8221   llvm::Value *ThreadLimitVal =
8222       ThreadLimit
8223           ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
8224                                       CGF.CGM.Int32Ty, /* isSigned = */ true)
8225           : CGF.Builder.getInt32(0);
8226
8227   // Build call __kmpc_push_num_teamss(&loc, global_tid, num_teams, thread_limit)
8228   llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
8229                                      ThreadLimitVal};
8230   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_teams),
8231                       PushNumTeamsArgs);
8232 }
8233
/// Emit the begin/end runtime calls (and the enclosed region body) for a
/// 'target data' construct: __tgt_target_data_begin before the body and
/// __tgt_target_data_end after it, both guarded by the 'if' clause when one
/// is present. The body is emitted exactly once unless device-pointer
/// privatization forces a privatized and a non-privatized copy.
void CGOpenMPRuntime::emitTargetDataCalls(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
  if (!CGF.HaveInsertPoint())
    return;

  // Action used to replace the default codegen action and turn privatization
  // off.
  PrePostActionTy NoPrivAction;

  // Generate the code for the opening of the data environment. Capture all the
  // arguments of the runtime call by reference because they are used in the
  // closing of the region.
  auto &&BeginThenGen = [this, &D, Device, &Info,
                         &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
    // Fill up the arrays with all the mapped variables.
    MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
    MappableExprsHandler::MapValuesArrayTy Pointers;
    MappableExprsHandler::MapValuesArrayTy Sizes;
    MappableExprsHandler::MapFlagsArrayTy MapTypes;

    // Get map clause information.
    MappableExprsHandler MCHandler(D, CGF);
    MCHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);

    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);

    llvm::Value *BasePointersArrayArg = nullptr;
    llvm::Value *PointersArrayArg = nullptr;
    llvm::Value *SizesArrayArg = nullptr;
    llvm::Value *MapTypesArrayArg = nullptr;
    emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
                                 SizesArrayArg, MapTypesArrayArg, Info);

    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);

    llvm::Value *OffloadingArgs[] = {
        DeviceID,         PointerNum,    BasePointersArrayArg,
        PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_begin),
                        OffloadingArgs);

    // If device pointer privatization is required, emit the body of the region
    // here. It will have to be duplicated: with and without privatization.
    if (!Info.CaptureDeviceAddrMap.empty())
      CodeGen(CGF);
  };

  // Generate code for the closing of the data region.
  auto &&EndThenGen = [this, Device, &Info](CodeGenFunction &CGF,
                                            PrePostActionTy &) {
    assert(Info.isValid() && "Invalid data environment closing arguments.");

    // Re-derive the array arguments recorded in Info by BeginThenGen.
    llvm::Value *BasePointersArrayArg = nullptr;
    llvm::Value *PointersArrayArg = nullptr;
    llvm::Value *SizesArrayArg = nullptr;
    llvm::Value *MapTypesArrayArg = nullptr;
    emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
                                 SizesArrayArg, MapTypesArrayArg, Info);

    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);

    llvm::Value *OffloadingArgs[] = {
        DeviceID,         PointerNum,    BasePointersArrayArg,
        PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_end),
                        OffloadingArgs);
  };

  // If we need device pointer privatization, we need to emit the body of the
  // region with no privatization in the 'else' branch of the conditional.
  // Otherwise, we don't have to do anything.
  auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
                                                         PrePostActionTy &) {
    if (!Info.CaptureDeviceAddrMap.empty()) {
      CodeGen.setAction(NoPrivAction);
      CodeGen(CGF);
    }
  };

  // We don't have to do anything to close the region if the if clause evaluates
  // to false.
  auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};

  // Open the data environment (conditionally, if an 'if' clause is present).
  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
  } else {
    RegionCodeGenTy RCG(BeginThenGen);
    RCG(CGF);
  }

  // If we don't require privatization of device pointers, we emit the body in
  // between the runtime calls. This avoids duplicating the body code.
  if (Info.CaptureDeviceAddrMap.empty()) {
    CodeGen.setAction(NoPrivAction);
    CodeGen(CGF);
  }

  // Close the data environment (again conditional on the 'if' clause).
  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, EndThenGen, EndElseGen);
  } else {
    RegionCodeGenTy RCG(EndThenGen);
    RCG(CGF);
  }
}
8360
/// Emit the single runtime call implementing a standalone target data
/// directive ('target enter data', 'target exit data', or 'target update'),
/// optionally guarded by an 'if' clause and wrapped in a task when a
/// 'depend' clause is present.
void CGOpenMPRuntime::emitTargetDataStandAloneCall(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device) {
  if (!CGF.HaveInsertPoint())
    return;

  assert((isa<OMPTargetEnterDataDirective>(D) ||
          isa<OMPTargetExitDataDirective>(D) ||
          isa<OMPTargetUpdateDirective>(D)) &&
         "Expecting either target enter, exit data, or update directives.");

  // InputInfo and MapTypesArray are filled in by TargetThenGen below before
  // ThenGen runs, hence the by-reference captures.
  CodeGenFunction::OMPTargetDataInfo InputInfo;
  llvm::Value *MapTypesArray = nullptr;
  // Generate the code for the opening of the data environment.
  auto &&ThenGen = [this, &D, Device, &InputInfo,
                    &MapTypesArray](CodeGenFunction &CGF, PrePostActionTy &) {
    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Constant *PointerNum =
        CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);

    llvm::Value *OffloadingArgs[] = {DeviceID,
                                     PointerNum,
                                     InputInfo.BasePointersArray.getPointer(),
                                     InputInfo.PointersArray.getPointer(),
                                     InputInfo.SizesArray.getPointer(),
                                     MapTypesArray};

    // Select the right runtime function call for each expected standalone
    // directive.
    const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
    OpenMPRTLFunction RTLFn;
    switch (D.getDirectiveKind()) {
    case OMPD_target_enter_data:
      RTLFn = HasNowait ? OMPRTL__tgt_target_data_begin_nowait
                        : OMPRTL__tgt_target_data_begin;
      break;
    case OMPD_target_exit_data:
      RTLFn = HasNowait ? OMPRTL__tgt_target_data_end_nowait
                        : OMPRTL__tgt_target_data_end;
      break;
    case OMPD_target_update:
      RTLFn = HasNowait ? OMPRTL__tgt_target_data_update_nowait
                        : OMPRTL__tgt_target_data_update;
      break;
    // Every other directive kind contradicts the assert at the top of this
    // function; list them explicitly so the switch stays exhaustive.
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_declare_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_target:
    case OMPD_target_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_unknown:
      llvm_unreachable("Unexpected standalone target data directive.");
      break;
    }
    CGF.EmitRuntimeCall(createRuntimeFunction(RTLFn), OffloadingArgs);
  };

  // Build the offloading arrays from the map clauses and then run ThenGen
  // (directly or wrapped in a task when a 'depend' clause is present).
  auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray](
                             CodeGenFunction &CGF, PrePostActionTy &) {
    // Fill up the arrays with all the mapped variables.
    MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
    MappableExprsHandler::MapValuesArrayTy Pointers;
    MappableExprsHandler::MapValuesArrayTy Sizes;
    MappableExprsHandler::MapFlagsArrayTy MapTypes;

    // Get map clause information.
    MappableExprsHandler MEHandler(D, CGF);
    MEHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);

    TargetDataInfo Info;
    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
    emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
                                 Info.PointersArray, Info.SizesArray,
                                 Info.MapTypesArray, Info);
    InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
    InputInfo.BasePointersArray =
        Address(Info.BasePointersArray, CGM.getPointerAlign());
    InputInfo.PointersArray =
        Address(Info.PointersArray, CGM.getPointerAlign());
    InputInfo.SizesArray =
        Address(Info.SizesArray, CGM.getPointerAlign());
    MapTypesArray = Info.MapTypesArray;
    if (D.hasClausesOfKind<OMPDependClause>())
      CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
    else
      emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
  };

  // With an 'if' clause that evaluates to false nothing is emitted at all.
  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, TargetThenGen,
                    [](CodeGenFunction &CGF, PrePostActionTy &) {});
  } else {
    RegionCodeGenTy ThenRCG(TargetThenGen);
    ThenRCG(CGF);
  }
}
8510
namespace {
  /// Kind of parameter in a function with 'declare simd' directive.
  enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector };
  /// Attribute set of the parameter.
  struct ParamAttrTy {
    /// Defaults to Vector: a parameter with no explicit clause.
    ParamKindTy Kind = Vector;
    /// Value mangled after 's'/'l' for linear parameters (stride, or the
    /// referenced position for a variable stride); zero means "no value".
    llvm::APSInt StrideOrArg;
    /// Alignment from the 'aligned' clause; zero when absent.
    llvm::APSInt Alignment;
  };
} // namespace
8521
8522 static unsigned evaluateCDTSize(const FunctionDecl *FD,
8523                                 ArrayRef<ParamAttrTy> ParamAttrs) {
8524   // Every vector variant of a SIMD-enabled function has a vector length (VLEN).
8525   // If OpenMP clause "simdlen" is used, the VLEN is the value of the argument
8526   // of that clause. The VLEN value must be power of 2.
8527   // In other case the notion of the function`s "characteristic data type" (CDT)
8528   // is used to compute the vector length.
8529   // CDT is defined in the following order:
8530   //   a) For non-void function, the CDT is the return type.
8531   //   b) If the function has any non-uniform, non-linear parameters, then the
8532   //   CDT is the type of the first such parameter.
8533   //   c) If the CDT determined by a) or b) above is struct, union, or class
8534   //   type which is pass-by-value (except for the type that maps to the
8535   //   built-in complex data type), the characteristic data type is int.
8536   //   d) If none of the above three cases is applicable, the CDT is int.
8537   // The VLEN is then determined based on the CDT and the size of vector
8538   // register of that ISA for which current vector version is generated. The
8539   // VLEN is computed using the formula below:
8540   //   VLEN  = sizeof(vector_register) / sizeof(CDT),
8541   // where vector register size specified in section 3.2.1 Registers and the
8542   // Stack Frame of original AMD64 ABI document.
8543   QualType RetType = FD->getReturnType();
8544   if (RetType.isNull())
8545     return 0;
8546   ASTContext &C = FD->getASTContext();
8547   QualType CDT;
8548   if (!RetType.isNull() && !RetType->isVoidType()) {
8549     CDT = RetType;
8550   } else {
8551     unsigned Offset = 0;
8552     if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
8553       if (ParamAttrs[Offset].Kind == Vector)
8554         CDT = C.getPointerType(C.getRecordType(MD->getParent()));
8555       ++Offset;
8556     }
8557     if (CDT.isNull()) {
8558       for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
8559         if (ParamAttrs[I + Offset].Kind == Vector) {
8560           CDT = FD->getParamDecl(I)->getType();
8561           break;
8562         }
8563       }
8564     }
8565   }
8566   if (CDT.isNull())
8567     CDT = C.IntTy;
8568   CDT = CDT->getCanonicalTypeUnqualified();
8569   if (CDT->isRecordType() || CDT->isUnionType())
8570     CDT = C.IntTy;
8571   return C.getTypeSize(CDT);
8572 }
8573
8574 static void
8575 emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
8576                            const llvm::APSInt &VLENVal,
8577                            ArrayRef<ParamAttrTy> ParamAttrs,
8578                            OMPDeclareSimdDeclAttr::BranchStateTy State) {
8579   struct ISADataTy {
8580     char ISA;
8581     unsigned VecRegSize;
8582   };
8583   ISADataTy ISAData[] = {
8584       {
8585           'b', 128
8586       }, // SSE
8587       {
8588           'c', 256
8589       }, // AVX
8590       {
8591           'd', 256
8592       }, // AVX2
8593       {
8594           'e', 512
8595       }, // AVX512
8596   };
8597   llvm::SmallVector<char, 2> Masked;
8598   switch (State) {
8599   case OMPDeclareSimdDeclAttr::BS_Undefined:
8600     Masked.push_back('N');
8601     Masked.push_back('M');
8602     break;
8603   case OMPDeclareSimdDeclAttr::BS_Notinbranch:
8604     Masked.push_back('N');
8605     break;
8606   case OMPDeclareSimdDeclAttr::BS_Inbranch:
8607     Masked.push_back('M');
8608     break;
8609   }
8610   for (char Mask : Masked) {
8611     for (const ISADataTy &Data : ISAData) {
8612       SmallString<256> Buffer;
8613       llvm::raw_svector_ostream Out(Buffer);
8614       Out << "_ZGV" << Data.ISA << Mask;
8615       if (!VLENVal) {
8616         Out << llvm::APSInt::getUnsigned(Data.VecRegSize /
8617                                          evaluateCDTSize(FD, ParamAttrs));
8618       } else {
8619         Out << VLENVal;
8620       }
8621       for (const ParamAttrTy &ParamAttr : ParamAttrs) {
8622         switch (ParamAttr.Kind){
8623         case LinearWithVarStride:
8624           Out << 's' << ParamAttr.StrideOrArg;
8625           break;
8626         case Linear:
8627           Out << 'l';
8628           if (!!ParamAttr.StrideOrArg)
8629             Out << ParamAttr.StrideOrArg;
8630           break;
8631         case Uniform:
8632           Out << 'u';
8633           break;
8634         case Vector:
8635           Out << 'v';
8636           break;
8637         }
8638         if (!!ParamAttr.Alignment)
8639           Out << 'a' << ParamAttr.Alignment;
8640       }
8641       Out << '_' << Fn->getName();
8642       Fn->addFnAttr(Out.str());
8643     }
8644   }
8645 }
8646
8647 void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
8648                                               llvm::Function *Fn) {
8649   ASTContext &C = CGM.getContext();
8650   FD = FD->getMostRecentDecl();
8651   // Map params to their positions in function decl.
8652   llvm::DenseMap<const Decl *, unsigned> ParamPositions;
8653   if (isa<CXXMethodDecl>(FD))
8654     ParamPositions.try_emplace(FD, 0);
8655   unsigned ParamPos = ParamPositions.size();
8656   for (const ParmVarDecl *P : FD->parameters()) {
8657     ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
8658     ++ParamPos;
8659   }
8660   while (FD) {
8661     for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
8662       llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
8663       // Mark uniform parameters.
8664       for (const Expr *E : Attr->uniforms()) {
8665         E = E->IgnoreParenImpCasts();
8666         unsigned Pos;
8667         if (isa<CXXThisExpr>(E)) {
8668           Pos = ParamPositions[FD];
8669         } else {
8670           const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
8671                                 ->getCanonicalDecl();
8672           Pos = ParamPositions[PVD];
8673         }
8674         ParamAttrs[Pos].Kind = Uniform;
8675       }
8676       // Get alignment info.
8677       auto NI = Attr->alignments_begin();
8678       for (const Expr *E : Attr->aligneds()) {
8679         E = E->IgnoreParenImpCasts();
8680         unsigned Pos;
8681         QualType ParmTy;
8682         if (isa<CXXThisExpr>(E)) {
8683           Pos = ParamPositions[FD];
8684           ParmTy = E->getType();
8685         } else {
8686           const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
8687                                 ->getCanonicalDecl();
8688           Pos = ParamPositions[PVD];
8689           ParmTy = PVD->getType();
8690         }
8691         ParamAttrs[Pos].Alignment =
8692             (*NI)
8693                 ? (*NI)->EvaluateKnownConstInt(C)
8694                 : llvm::APSInt::getUnsigned(
8695                       C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy))
8696                           .getQuantity());
8697         ++NI;
8698       }
8699       // Mark linear parameters.
8700       auto SI = Attr->steps_begin();
8701       auto MI = Attr->modifiers_begin();
8702       for (const Expr *E : Attr->linears()) {
8703         E = E->IgnoreParenImpCasts();
8704         unsigned Pos;
8705         if (isa<CXXThisExpr>(E)) {
8706           Pos = ParamPositions[FD];
8707         } else {
8708           const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
8709                                 ->getCanonicalDecl();
8710           Pos = ParamPositions[PVD];
8711         }
8712         ParamAttrTy &ParamAttr = ParamAttrs[Pos];
8713         ParamAttr.Kind = Linear;
8714         if (*SI) {
8715           if (!(*SI)->EvaluateAsInt(ParamAttr.StrideOrArg, C,
8716                                     Expr::SE_AllowSideEffects)) {
8717             if (const auto *DRE =
8718                     cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
8719               if (const auto *StridePVD = cast<ParmVarDecl>(DRE->getDecl())) {
8720                 ParamAttr.Kind = LinearWithVarStride;
8721                 ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
8722                     ParamPositions[StridePVD->getCanonicalDecl()]);
8723               }
8724             }
8725           }
8726         }
8727         ++SI;
8728         ++MI;
8729       }
8730       llvm::APSInt VLENVal;
8731       if (const Expr *VLEN = Attr->getSimdlen())
8732         VLENVal = VLEN->EvaluateKnownConstInt(C);
8733       OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
8734       if (CGM.getTriple().getArch() == llvm::Triple::x86 ||
8735           CGM.getTriple().getArch() == llvm::Triple::x86_64)
8736         emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
8737     }
8738     FD = FD->getPreviousDecl();
8739   }
8740 }
8741
8742 namespace {
8743 /// Cleanup action for doacross support.
8744 class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
8745 public:
8746   static const int DoacrossFinArgs = 2;
8747
8748 private:
8749   llvm::Value *RTLFn;
8750   llvm::Value *Args[DoacrossFinArgs];
8751
8752 public:
8753   DoacrossCleanupTy(llvm::Value *RTLFn, ArrayRef<llvm::Value *> CallArgs)
8754       : RTLFn(RTLFn) {
8755     assert(CallArgs.size() == DoacrossFinArgs);
8756     std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
8757   }
8758   void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
8759     if (!CGF.HaveInsertPoint())
8760       return;
8761     CGF.EmitRuntimeCall(RTLFn, Args);
8762   }
8763 };
8764 } // namespace
8765
/// Emit initialization for a doacross loop nest: builds a stack array of
/// kmp_dim descriptors (one per collapsed loop in \p NumIterations), calls
/// __kmpc_doacross_init on it, and pushes a cleanup that emits
/// __kmpc_doacross_fini on region exit.
void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
                                       const OMPLoopDirective &D,
                                       ArrayRef<Expr *> NumIterations) {
  if (!CGF.HaveInsertPoint())
    return;

  ASTContext &C = CGM.getContext();
  QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
  RecordDecl *RD;
  // Build the kmp_dim record type on first use and cache it in KmpDimTy.
  if (KmpDimTy.isNull()) {
    // Build struct kmp_dim {  // loop bounds info casted to kmp_int64
    //  kmp_int64 lo; // lower
    //  kmp_int64 up; // upper
    //  kmp_int64 st; // stride
    // };
    RD = C.buildImplicitRecord("kmp_dim");
    RD->startDefinition();
    addFieldToRecordDecl(C, RD, Int64Ty);
    addFieldToRecordDecl(C, RD, Int64Ty);
    addFieldToRecordDecl(C, RD, Int64Ty);
    RD->completeDefinition();
    KmpDimTy = C.getRecordType(RD);
  } else {
    RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
  }
  llvm::APInt Size(/*numBits=*/32, NumIterations.size());
  QualType ArrayTy =
      C.getConstantArrayType(KmpDimTy, Size, ArrayType::Normal, 0);

  // kmp_dim dims[NumIterations.size()], zero-initialized: the lower bounds
  // stay at the 0 written by EmitNullInitialization below.
  Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
  CGF.EmitNullInitialization(DimsAddr, ArrayTy);
  enum { LowerFD = 0, UpperFD, StrideFD };
  // Fill dims with data.
  for (unsigned I = 0, E = NumIterations.size(); I < E; ++I) {
    LValue DimsLVal =
        CGF.MakeAddrLValue(CGF.Builder.CreateConstArrayGEP(
                               DimsAddr, I, C.getTypeSizeInChars(KmpDimTy)),
                           KmpDimTy);
    // dims.upper = num_iterations;
    LValue UpperLVal = CGF.EmitLValueForField(
        DimsLVal, *std::next(RD->field_begin(), UpperFD));
    // The iteration count is converted to kmp_int64 before being stored.
    llvm::Value *NumIterVal =
        CGF.EmitScalarConversion(CGF.EmitScalarExpr(NumIterations[I]),
                                 D.getNumIterations()->getType(), Int64Ty,
                                 D.getNumIterations()->getExprLoc());
    CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
    // dims.stride = 1;
    LValue StrideLVal = CGF.EmitLValueForField(
        DimsLVal, *std::next(RD->field_begin(), StrideFD));
    CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
                          StrideLVal);
  }

  // Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
  // kmp_int32 num_dims, struct kmp_dim * dims);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, D.getLocStart()),
      getThreadID(CGF, D.getLocStart()),
      llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          CGF.Builder
              .CreateConstArrayGEP(DimsAddr, 0, C.getTypeSizeInChars(KmpDimTy))
              .getPointer(),
          CGM.VoidPtrTy)};

  llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_init);
  CGF.EmitRuntimeCall(RTLFn, Args);
  // Ensure __kmpc_doacross_fini runs on both normal and EH exits; the
  // cleanup copies its arguments, so the locals here may go out of scope.
  llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
      emitUpdateLocation(CGF, D.getLocEnd()), getThreadID(CGF, D.getLocEnd())};
  llvm::Value *FiniRTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_fini);
  CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
                                             llvm::makeArrayRef(FiniArgs));
}
8839
/// Emit an 'ordered depend(source)' or 'ordered depend(sink)' construct:
/// stores the loop counters from the depend clause into a kmp_int64 array
/// and passes it to __kmpc_doacross_post (source) or __kmpc_doacross_wait
/// (sink).
void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
                                          const OMPDependClause *C) {
  QualType Int64Ty =
      CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
  llvm::APInt Size(/*numBits=*/32, C->getNumLoops());
  QualType ArrayTy = CGM.getContext().getConstantArrayType(
      Int64Ty, Size, ArrayType::Normal, 0);
  // kmp_int64 .cnt.addr[getNumLoops()] — one counter per loop level.
  Address CntAddr = CGF.CreateMemTemp(ArrayTy, ".cnt.addr");
  for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I) {
    const Expr *CounterVal = C->getLoopData(I);
    assert(CounterVal);
    // Each counter is widened/converted to kmp_int64 before being stored.
    llvm::Value *CntVal = CGF.EmitScalarConversion(
        CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
        CounterVal->getExprLoc());
    CGF.EmitStoreOfScalar(
        CntVal,
        CGF.Builder.CreateConstArrayGEP(
            CntAddr, I, CGM.getContext().getTypeSizeInChars(Int64Ty)),
        /*Volatile=*/false, Int64Ty);
  }
  // Args: (ident_t *loc, kmp_int32 gtid, kmp_int64 *vec).
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, C->getLocStart()),
      getThreadID(CGF, C->getLocStart()),
      CGF.Builder
          .CreateConstArrayGEP(CntAddr, 0,
                               CGM.getContext().getTypeSizeInChars(Int64Ty))
          .getPointer()};
  llvm::Value *RTLFn;
  if (C->getDependencyKind() == OMPC_DEPEND_source) {
    RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
  } else {
    // Only 'source' and 'sink' dependency kinds reach doacross codegen.
    assert(C->getDependencyKind() == OMPC_DEPEND_sink);
    RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_wait);
  }
  CGF.EmitRuntimeCall(RTLFn, Args);
}
8876
8877 void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
8878                                llvm::Value *Callee,
8879                                ArrayRef<llvm::Value *> Args) const {
8880   assert(Loc.isValid() && "Outlined function call location must be valid.");
8881   auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
8882
8883   if (auto *Fn = dyn_cast<llvm::Function>(Callee)) {
8884     if (Fn->doesNotThrow()) {
8885       CGF.EmitNounwindRuntimeCall(Fn, Args);
8886       return;
8887     }
8888   }
8889   CGF.EmitRuntimeCall(Callee, Args);
8890 }
8891
/// Default lowering of a call to an outlined OpenMP region function:
/// simply forwards to emitCall with the arguments unchanged.
void CGOpenMPRuntime::emitOutlinedFunctionCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> Args) const {
  emitCall(CGF, Loc, OutlinedFn, Args);
}
8897
/// Default implementation: ignores \p TargetParam and returns the address
/// of the native parameter's local variable.
Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
                                             const VarDecl *NativeParam,
                                             const VarDecl *TargetParam) const {
  return CGF.GetAddrOfLocalVar(NativeParam);
}
8903
/// The base runtime provides no special storage for local variables and
/// returns Address::invalid() for every declaration.
Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
                                                   const VarDecl *VD) {
  return Address::invalid();
}
8908
// CGOpenMPSIMDRuntime stubs: in SIMD-only mode no parallel/teams/task
// regions are outlined, so reaching any of these emitters is a front-end
// bug and traps via llvm_unreachable.
llvm::Value *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

llvm::Value *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

llvm::Value *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    const VarDecl *PartIDVar, const VarDecl *TaskTVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    bool Tied, unsigned &NumberOfParts) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8928
// Threading-construct stubs for SIMD-only mode: parallel calls, critical/
// master/single/ordered/taskgroup regions, taskyield and barriers are never
// emitted, so each body traps.
void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
                                           SourceLocation Loc,
                                           llvm::Value *OutlinedFn,
                                           ArrayRef<llvm::Value *> CapturedVars,
                                           const Expr *IfCond) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitCriticalRegion(
    CodeGenFunction &CGF, StringRef CriticalName,
    const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
    const Expr *Hint) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
                                           const RegionCodeGenTy &MasterOpGen,
                                           SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
                                            SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskgroupRegion(
    CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen,
    SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitSingleRegion(
    CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen,
    SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars,
    ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs,
    ArrayRef<const Expr *> AssignmentOps) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF,
                                            const RegionCodeGenTy &OrderedOpGen,
                                            SourceLocation Loc,
                                            bool IsThreads) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF,
                                          SourceLocation Loc,
                                          OpenMPDirectiveKind Kind,
                                          bool EmitChecks,
                                          bool ForceSimpleCall) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
8983
// Worksharing-loop stubs for SIMD-only mode: dynamic/static loop init,
// ordered-iteration finalization, static finish and the dispatch 'next'
// query are all unreachable.
void CGOpenMPSIMDRuntime::emitForDispatchInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
    bool Ordered, const DispatchRTInput &DispatchValues) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitForStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
    const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitDistributeStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
                                                     SourceLocation Loc,
                                                     unsigned IVSize,
                                                     bool IVSigned) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              OpenMPDirectiveKind DKind) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              unsigned IVSize, bool IVSigned,
                                              Address IL, Address LB,
                                              Address UB, Address ST) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9023
// Clause/threadprivate stubs for SIMD-only mode: num_threads, proc_bind,
// threadprivate variables and explicit flushes require the full runtime
// and are unreachable here.
void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
                                               llvm::Value *NumThreads,
                                               SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF,
                                             OpenMPProcBindClauseKind ProcBind,
                                             SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
                                                    const VarDecl *VD,
                                                    Address VDAddr,
                                                    SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition(
    const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit,
    CodeGenFunction *CGF) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
    CodeGenFunction &CGF, QualType VarType, StringRef Name) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
                                    ArrayRef<const Expr *> Vars,
                                    SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9059
// Task stubs for SIMD-only mode; only the simple-reduction path of
// emitReduction below is expected to be reachable.
void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       const OMPExecutableDirective &D,
                                       llvm::Value *TaskFunction,
                                       QualType SharedsTy, Address Shareds,
                                       const Expr *IfCond,
                                       const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskLoopCall(
    CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
    llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
    const Expr *IfCond, const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

/// In SIMD-only mode only simple reductions are expected (see assert);
/// delegate to the base runtime's implementation for that case.
void CGOpenMPSIMDRuntime::emitReduction(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
    ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
    ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
  assert(Options.SimpleReduction && "Only simple reduction is expected.");
  CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
                                 ReductionOps, Options);
}
9084
// Task-reduction, taskwait and cancellation stubs for SIMD-only mode; all
// unreachable.
llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
                                                  SourceLocation Loc,
                                                  ReductionCodeGen &RCG,
                                                  unsigned N) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
                                                  SourceLocation Loc,
                                                  llvm::Value *ReductionsPtr,
                                                  LValue SharedLVal) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
                                           SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitCancellationPointCall(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDirectiveKind CancelRegion) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF,
                                         SourceLocation Loc, const Expr *IfCond,
                                         OpenMPDirectiveKind CancelRegion) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9121
// Offloading stubs for SIMD-only mode: target regions are never outlined
// or called. The two non-trapping overrides report "nothing to do":
// emitTargetGlobal returns false (no global is consumed by this runtime)
// and emitRegistrationFunction returns nullptr (no offload registration).
void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetCall(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &D,
                                         llvm::Value *OutlinedFn,
                                         llvm::Value *OutlinedFnID,
                                         const Expr *IfCond, const Expr *Device) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) {
  // No target codegen in SIMD-only mode.
  return false;
}

llvm::Function *CGOpenMPSIMDRuntime::emitRegistrationFunction() {
  // No offload entries to register in SIMD-only mode.
  return nullptr;
}
9152
// Teams, target-data, doacross and parameter-translation stubs for
// SIMD-only mode; all require the full runtime and are unreachable.
void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &D,
                                        SourceLocation Loc,
                                        llvm::Value *OutlinedFn,
                                        ArrayRef<llvm::Value *> CapturedVars) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
                                             const Expr *NumTeams,
                                             const Expr *ThreadLimit,
                                             SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetDataCalls(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
                                           const OMPLoopDirective &D,
                                           ArrayRef<Expr *> NumIterations) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
                                              const OMPDependClause *C) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

const VarDecl *
CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
                                        const VarDecl *NativeParam) const {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address
CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
                                         const VarDecl *NativeParam,
                                         const VarDecl *TargetParam) const {
  llvm_unreachable("Not supported in SIMD-only mode");
}
9203