1 //===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This contains code to emit OpenMP nodes as LLVM code.
11 //===----------------------------------------------------------------------===//
13 #include "CGCleanup.h"
14 #include "CGOpenMPRuntime.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/AST/Attr.h"
20 #include "clang/AST/DeclOpenMP.h"
21 #include "clang/AST/Stmt.h"
22 #include "clang/AST/StmtOpenMP.h"
23 #include "clang/Basic/PrettyStackTrace.h"
24 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
25 using namespace clang;
26 using namespace CodeGen;
27 using namespace llvm::omp;
30 /// Lexical scope for OpenMP executable constructs that handles correct codegen
31 /// for captured expressions.
32 class OMPLexicalScope : public CodeGenFunction::LexicalScope {
33 void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
34 for (const auto *C : S.clauses()) {
35 if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
36 if (const auto *PreInit =
37 cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
38 for (const auto *I : PreInit->decls()) {
39 if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
40 CGF.EmitVarDecl(cast<VarDecl>(*I));
42 CodeGenFunction::AutoVarEmission Emission =
43 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
44 CGF.EmitAutoVarCleanups(Emission);
51 CodeGenFunction::OMPPrivateScope InlinedShareds;
53 static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
54 return CGF.LambdaCaptureFields.lookup(VD) ||
55 (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
56 (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
61 CodeGenFunction &CGF, const OMPExecutableDirective &S,
62 const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
63 const bool EmitPreInitStmt = true)
64 : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
67 emitPreInitStmt(CGF, S);
68 if (!CapturedRegion.hasValue())
70 assert(S.hasAssociatedStmt() &&
71 "Expected associated statement for inlined directive.");
72 const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
73 for (const auto &C : CS->captures()) {
74 if (C.capturesVariable() || C.capturesVariableByCopy()) {
75 auto *VD = C.getCapturedVar();
76 assert(VD == VD->getCanonicalDecl() &&
77 "Canonical decl must be captured.");
79 CGF.getContext(), const_cast<VarDecl *>(VD),
80 isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
81 InlinedShareds.isGlobalVarCaptured(VD)),
82 VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
83 InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
84 return CGF.EmitLValue(&DRE).getAddress(CGF);
88 (void)InlinedShareds.Privatize();
92 /// Lexical scope for OpenMP parallel construct that handles correct codegen
93 /// for captured expressions.
94 class OMPParallelScope final : public OMPLexicalScope {
95 bool EmitPreInitStmt(const OMPExecutableDirective &S) {
96 OpenMPDirectiveKind Kind = S.getDirectiveKind();
97 return !(isOpenMPTargetExecutionDirective(Kind) ||
98 isOpenMPLoopBoundSharingDirective(Kind)) &&
99 isOpenMPParallelDirective(Kind);
103 OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
104 : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
105 EmitPreInitStmt(S)) {}
108 /// Lexical scope for OpenMP teams construct that handles correct codegen
109 /// for captured expressions.
110 class OMPTeamsScope final : public OMPLexicalScope {
111 bool EmitPreInitStmt(const OMPExecutableDirective &S) {
112 OpenMPDirectiveKind Kind = S.getDirectiveKind();
113 return !isOpenMPTargetExecutionDirective(Kind) &&
114 isOpenMPTeamsDirective(Kind);
118 OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
119 : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
120 EmitPreInitStmt(S)) {}
123 /// Private scope for OpenMP loop-based directives that supports capturing
124 /// of used expressions from the loop statement.
125 class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
126 void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
127 CodeGenFunction::OMPMapVars PreCondVars;
128 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
129 for (const auto *E : S.counters()) {
130 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
131 EmittedAsPrivate.insert(VD->getCanonicalDecl());
132 (void)PreCondVars.setVarAddr(
133 CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
135 // Mark private vars as undefs.
136 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
137 for (const Expr *IRef : C->varlists()) {
138 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
139 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
140 (void)PreCondVars.setVarAddr(
142 Address(llvm::UndefValue::get(
143 CGF.ConvertTypeForMem(CGF.getContext().getPointerType(
144 OrigVD->getType().getNonReferenceType()))),
145 CGF.getContext().getDeclAlign(OrigVD)));
149 (void)PreCondVars.apply(CGF);
150 // Emit init, __range and __end variables for C++ range loops.
152 S.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
153 for (unsigned Cnt = 0; Cnt < S.getCollapsedNumber(); ++Cnt) {
154 Body = OMPLoopDirective::tryToFindNextInnerLoop(
155 Body, /*TryImperfectlyNestedLoops=*/true);
156 if (auto *For = dyn_cast<ForStmt>(Body)) {
157 Body = For->getBody();
159 assert(isa<CXXForRangeStmt>(Body) &&
160 "Expected canonical for loop or range-based for loop.");
161 auto *CXXFor = cast<CXXForRangeStmt>(Body);
162 if (const Stmt *Init = CXXFor->getInit())
164 CGF.EmitStmt(CXXFor->getRangeStmt());
165 CGF.EmitStmt(CXXFor->getEndStmt());
166 Body = CXXFor->getBody();
169 if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) {
170 for (const auto *I : PreInits->decls())
171 CGF.EmitVarDecl(cast<VarDecl>(*I));
173 PreCondVars.restore(CGF);
177 OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
178 : CodeGenFunction::RunCleanupsScope(CGF) {
179 emitPreInitStmt(CGF, S);
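/// Lexical scope for OpenMP directives that are emitted inline (e.g. simd);
/// emits the pre-init statements of the clauses and privatizes the variables
/// captured by the associated statement.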
183 class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
184 CodeGenFunction::OMPPrivateScope InlinedShareds;
186 static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
187 return CGF.LambdaCaptureFields.lookup(VD) ||
188 (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
189 (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
190 cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
194 OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
195 : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
196 InlinedShareds(CGF) {
197 for (const auto *C : S.clauses()) {
198 if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
199 if (const auto *PreInit =
200 cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
201 for (const auto *I : PreInit->decls()) {
202 if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
203 CGF.EmitVarDecl(cast<VarDecl>(*I));
205 CodeGenFunction::AutoVarEmission Emission =
206 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
207 CGF.EmitAutoVarCleanups(Emission);
211 } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
212 for (const Expr *E : UDP->varlists()) {
213 const Decl *D = cast<DeclRefExpr>(E)->getDecl();
214 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
215 CGF.EmitVarDecl(*OED);
219 if (!isOpenMPSimdDirective(S.getDirectiveKind()))
220 CGF.EmitOMPPrivateClause(S, InlinedShareds);
221 if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
222 if (const Expr *E = TG->getReductionRef())
223 CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
225 const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
227 for (auto &C : CS->captures()) {
228 if (C.capturesVariable() || C.capturesVariableByCopy()) {
229 auto *VD = C.getCapturedVar();
230 assert(VD == VD->getCanonicalDecl() &&
231 "Canonical decl must be captured.");
232 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
233 isCapturedVar(CGF, VD) ||
234 (CGF.CapturedStmtInfo &&
235 InlinedShareds.isGlobalVarCaptured(VD)),
236 VD->getType().getNonReferenceType(), VK_LValue,
238 InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
239 return CGF.EmitLValue(&DRE).getAddress(CGF);
243 CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
245 (void)InlinedShareds.Privatize();
251 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
252 const OMPExecutableDirective &S,
253 const RegionCodeGenTy &CodeGen);
255 LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
256 if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
257 if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
258 OrigVD = OrigVD->getCanonicalDecl();
260 LambdaCaptureFields.lookup(OrigVD) ||
261 (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
262 (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
263 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
264 OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
265 return EmitLValue(&DRE);
268 return EmitLValue(E);
271 llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
272 ASTContext &C = getContext();
273 llvm::Value *Size = nullptr;
274 auto SizeInChars = C.getTypeSizeInChars(Ty);
275 if (SizeInChars.isZero()) {
276 // getTypeSizeInChars() returns 0 for a VLA.
277 while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
278 VlaSizePair VlaSize = getVLASize(VAT);
280 Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
283 SizeInChars = C.getTypeSizeInChars(Ty);
284 if (SizeInChars.isZero())
285 return llvm::ConstantInt::get(SizeTy, /*V=*/0);
286 return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
288 return CGM.getSize(SizeInChars);
291 void CodeGenFunction::GenerateOpenMPCapturedVars(
292 const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
293 const RecordDecl *RD = S.getCapturedRecordDecl();
294 auto CurField = RD->field_begin();
295 auto CurCap = S.captures().begin();
296 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
297 E = S.capture_init_end();
298 I != E; ++I, ++CurField, ++CurCap) {
299 if (CurField->hasCapturedVLAType()) {
300 const VariableArrayType *VAT = CurField->getCapturedVLAType();
301 llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
302 CapturedVars.push_back(Val);
303 } else if (CurCap->capturesThis()) {
304 CapturedVars.push_back(CXXThisValue);
305 } else if (CurCap->capturesVariableByCopy()) {
306 llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());
308 // If the field is not a pointer, we need to save the actual value
309 // and load it as a void pointer.
310 if (!CurField->getType()->isAnyPointerType()) {
311 ASTContext &Ctx = getContext();
312 Address DstAddr = CreateMemTemp(
313 Ctx.getUIntPtrType(),
314 Twine(CurCap->getCapturedVar()->getName(), ".casted"));
315 LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
317 llvm::Value *SrcAddrVal = EmitScalarConversion(
318 DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
319 Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
321 MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());
323 // Store the value using the source type pointer.
324 EmitStoreThroughLValue(RValue::get(CV), SrcLV);
326 // Load the value using the destination type pointer.
327 CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
329 CapturedVars.push_back(CV);
331 assert(CurCap->capturesVariable() && "Expected capture by reference.");
332 CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
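/// Reinterpret the storage of a by-copy captured value, which was passed to
/// the outlined function as a uintptr_t, as an address of its original type
/// \p DstType.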
337 static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
338 QualType DstType, StringRef Name,
340 ASTContext &Ctx = CGF.getContext();
342 llvm::Value *CastedPtr = CGF.EmitScalarConversion(
343 AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
344 Ctx.getPointerType(DstType), Loc);
346 CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
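/// Compute the canonical parameter type for an outlined-function argument,
/// recursing through references, pointers and variably-modified array types.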
351 static QualType getCanonicalParamType(ASTContext &C, QualType T) {
352 if (T->isLValueReferenceType())
353 return C.getLValueReferenceType(
354 getCanonicalParamType(C, T.getNonReferenceType()),
355 /*SpelledAsLValue=*/false);
356 if (T->isPointerType())
357 return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
358 if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
359 if (const auto *VLA = dyn_cast<VariableArrayType>(A))
360 return getCanonicalParamType(C, VLA->getElementType());
361 if (!A->isVariablyModifiedType())
362 return C.getCanonicalType(T);
364 return C.getCanonicalParamType(T);
368 /// Contains required data for proper outlined function codegen.
369 struct FunctionOptions {
370 /// Captured statement for which the function is generated.
371 const CapturedStmt *S = nullptr;
372 /// true if cast to/from UIntPtr is required for variables captured by
374 const bool UIntPtrCastRequired = true;
375 /// true if only casted arguments must be registered as local args or VLA
377 const bool RegisterCastedArgsOnly = false;
378 /// Name of the generated function.
379 const StringRef FunctionName;
380 explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
381 bool RegisterCastedArgsOnly,
382 StringRef FunctionName)
383 : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
384 RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
385 FunctionName(FunctionName) {}
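/// Build the argument list for the outlined function, create the function
/// itself and emit its prologue. Local addresses of captured variables are
/// recorded in \p LocalAddrs, VLA size values in \p VLASizes, and
/// \p CXXThisValue is set if 'this' is captured.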
389 static llvm::Function *emitOutlinedFunctionPrologue(
390 CodeGenFunction &CGF, FunctionArgList &Args,
391 llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
393 llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
395 llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
396 const CapturedDecl *CD = FO.S->getCapturedDecl();
397 const RecordDecl *RD = FO.S->getCapturedRecordDecl();
398 assert(CD->hasBody() && "missing CapturedDecl body");
400 CXXThisValue = nullptr;
401 // Build the argument list.
402 CodeGenModule &CGM = CGF.CGM;
403 ASTContext &Ctx = CGM.getContext();
404 FunctionArgList TargetArgs;
405 Args.append(CD->param_begin(),
406 std::next(CD->param_begin(), CD->getContextParamPosition()));
409 std::next(CD->param_begin(), CD->getContextParamPosition()));
410 auto I = FO.S->captures().begin();
411 FunctionDecl *DebugFunctionDecl = nullptr;
412 if (!FO.UIntPtrCastRequired) {
413 FunctionProtoType::ExtProtoInfo EPI;
414 QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
415 DebugFunctionDecl = FunctionDecl::Create(
416 Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
417 SourceLocation(), DeclarationName(), FunctionTy,
418 Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
419 /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
421 for (const FieldDecl *FD : RD->fields()) {
422 QualType ArgType = FD->getType();
423 IdentifierInfo *II = nullptr;
424 VarDecl *CapVar = nullptr;
426 // If this is a capture by copy and the type is not a pointer, the outlined
427 // function argument type should be uintptr and the value properly cast to
428 // uintptr. This is necessary given that the runtime library is only able to
429 // deal with pointers. VLA type sizes are passed to the outlined function in
430 // the same way.
431 if (FO.UIntPtrCastRequired &&
432 ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
433 I->capturesVariableArrayType()))
434 ArgType = Ctx.getUIntPtrType();
436 if (I->capturesVariable() || I->capturesVariableByCopy()) {
437 CapVar = I->getCapturedVar();
438 II = CapVar->getIdentifier();
439 } else if (I->capturesThis()) {
440 II = &Ctx.Idents.get("this");
442 assert(I->capturesVariableArrayType());
443 II = &Ctx.Idents.get("vla");
445 if (ArgType->isVariablyModifiedType())
446 ArgType = getCanonicalParamType(Ctx, ArgType);
448 if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
449 Arg = ParmVarDecl::Create(
450 Ctx, DebugFunctionDecl,
451 CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
452 CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
453 /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
455 Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
456 II, ArgType, ImplicitParamDecl::Other);
458 Args.emplace_back(Arg);
459 // Do not cast arguments if we emit function with non-original types.
460 TargetArgs.emplace_back(
461 FO.UIntPtrCastRequired
463 : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
467 std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
470 std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
473 // Create the function declaration.
474 const CGFunctionInfo &FuncInfo =
475 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
476 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
479 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
480 FO.FunctionName, &CGM.getModule());
481 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
483 F->setDoesNotThrow();
484 F->setDoesNotRecurse();
486 // Generate the function.
487 CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
488 FO.S->getBeginLoc(), CD->getBody()->getBeginLoc());
489 unsigned Cnt = CD->getContextParamPosition();
490 I = FO.S->captures().begin();
491 for (const FieldDecl *FD : RD->fields()) {
492 // Do not map arguments if we emit function with non-original types.
493 Address LocalAddr(Address::invalid());
494 if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
495 LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
498 LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
500 // If we are capturing a pointer by copy we don't need to do anything, just
501 // use the value that we get from the arguments.
502 if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
503 const VarDecl *CurVD = I->getCapturedVar();
504 if (!FO.RegisterCastedArgsOnly)
505 LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
511 LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
512 AlignmentSource::Decl);
513 if (FD->hasCapturedVLAType()) {
514 if (FO.UIntPtrCastRequired) {
515 ArgLVal = CGF.MakeAddrLValue(
516 castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
517 Args[Cnt]->getName(), ArgLVal),
518 FD->getType(), AlignmentSource::Decl);
520 llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
521 const VariableArrayType *VAT = FD->getCapturedVLAType();
522 VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
523 } else if (I->capturesVariable()) {
524 const VarDecl *Var = I->getCapturedVar();
525 QualType VarTy = Var->getType();
526 Address ArgAddr = ArgLVal.getAddress(CGF);
527 if (ArgLVal.getType()->isLValueReferenceType()) {
528 ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
529 } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
530 assert(ArgLVal.getType()->isPointerType());
531 ArgAddr = CGF.EmitLoadOfPointer(
532 ArgAddr, ArgLVal.getType()->castAs<PointerType>());
534 if (!FO.RegisterCastedArgsOnly) {
537 {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
539 } else if (I->capturesVariableByCopy()) {
540 assert(!FD->getType()->isAnyPointerType() &&
541 "Not expecting a captured pointer.");
542 const VarDecl *Var = I->getCapturedVar();
543 LocalAddrs.insert({Args[Cnt],
544 {Var, FO.UIntPtrCastRequired
545 ? castValueFromUintptr(
546 CGF, I->getLocation(), FD->getType(),
547 Args[Cnt]->getName(), ArgLVal)
548 : ArgLVal.getAddress(CGF)}});
550 // If 'this' is captured, load it into CXXThisValue.
551 assert(I->capturesThis());
552 CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
553 LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
563 CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
566 "CapturedStmtInfo should be set when generating the captured function");
567 const CapturedDecl *CD = S.getCapturedDecl();
568 // Build the argument list.
569 bool NeedWrapperFunction =
570 getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
571 FunctionArgList Args;
572 llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
573 llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
574 SmallString<256> Buffer;
575 llvm::raw_svector_ostream Out(Buffer);
576 Out << CapturedStmtInfo->getHelperName();
577 if (NeedWrapperFunction)
579 FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
581 llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
582 VLASizes, CXXThisValue, FO);
583 CodeGenFunction::OMPPrivateScope LocalScope(*this);
584 for (const auto &LocalAddrPair : LocalAddrs) {
585 if (LocalAddrPair.second.first) {
586 LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
587 return LocalAddrPair.second.second;
591 (void)LocalScope.Privatize();
592 for (const auto &VLASizePair : VLASizes)
593 VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
594 PGO.assignRegionCounters(GlobalDecl(CD), F);
595 CapturedStmtInfo->EmitBody(*this, CD->getBody());
596 (void)LocalScope.ForceCleanup();
597 FinishFunction(CD->getBodyRBrace());
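// When a wrapper is needed (for debug info), the function emitted above used
// the original parameter types; emit a wrapper with the uintptr-based
// parameters that the runtime expects and forward the call to it.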
598 if (!NeedWrapperFunction)
601 FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
602 /*RegisterCastedArgsOnly=*/true,
603 CapturedStmtInfo->getHelperName());
604 CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
605 WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
609 llvm::Function *WrapperF =
610 emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
611 WrapperCGF.CXXThisValue, WrapperFO);
612 llvm::SmallVector<llvm::Value *, 4> CallArgs;
613 for (const auto *Arg : Args) {
614 llvm::Value *CallArg;
615 auto I = LocalAddrs.find(Arg);
616 if (I != LocalAddrs.end()) {
617 LValue LV = WrapperCGF.MakeAddrLValue(
619 I->second.first ? I->second.first->getType() : Arg->getType(),
620 AlignmentSource::Decl);
621 CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
623 auto EI = VLASizes.find(Arg);
624 if (EI != VLASizes.end()) {
625 CallArg = EI->second.second;
627 LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
629 AlignmentSource::Decl);
630 CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
633 CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
635 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, S.getBeginLoc(),
637 WrapperCGF.FinishFunction();
641 //===----------------------------------------------------------------------===//
642 // OpenMP Directive Emission
643 //===----------------------------------------------------------------------===//
644 void CodeGenFunction::EmitOMPAggregateAssign(
645 Address DestAddr, Address SrcAddr, QualType OriginalType,
646 const llvm::function_ref<void(Address, Address)> CopyGen) {
647 // Perform element-by-element initialization.
650 // Drill down to the base element type on both arrays.
651 const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
652 llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
653 SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
655 llvm::Value *SrcBegin = SrcAddr.getPointer();
656 llvm::Value *DestBegin = DestAddr.getPointer();
657 // Cast from pointer to array type to pointer to single element.
658 llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
659 // The basic structure here is a while-do loop.
660 llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
661 llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
662 llvm::Value *IsEmpty =
663 Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
664 Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
666 // Enter the loop body, making that address the current address.
667 llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
670 CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
672 llvm::PHINode *SrcElementPHI =
673 Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
674 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
675 Address SrcElementCurrent =
676 Address(SrcElementPHI,
677 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
679 llvm::PHINode *DestElementPHI =
680 Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
681 DestElementPHI->addIncoming(DestBegin, EntryBB);
682 Address DestElementCurrent =
683 Address(DestElementPHI,
684 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
687 CopyGen(DestElementCurrent, SrcElementCurrent);
689 // Shift the address forward by one element.
690 llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
691 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
692 llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
693 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
694 // Check whether we've reached the end.
696 Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
697 Builder.CreateCondBr(Done, DoneBB, BodyBB);
698 DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
699 SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());
702 EmitBlock(DoneBB, /*IsFinished=*/true);
705 void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
706 Address SrcAddr, const VarDecl *DestVD,
707 const VarDecl *SrcVD, const Expr *Copy) {
708 if (OriginalType->isArrayType()) {
709 const auto *BO = dyn_cast<BinaryOperator>(Copy);
710 if (BO && BO->getOpcode() == BO_Assign) {
711 // Perform simple memcpy for simple copying.
712 LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
713 LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
714 EmitAggregateAssign(Dest, Src, OriginalType);
716 // For arrays with complex element types, perform element-by-element copying.
718 EmitOMPAggregateAssign(
719 DestAddr, SrcAddr, OriginalType,
720 [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
721 // Working with a single array element, so we have to remap the
722 // destination and source variables to the corresponding array elements.
724 CodeGenFunction::OMPPrivateScope Remap(*this);
725 Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
726 Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
727 (void)Remap.Privatize();
728 EmitIgnoredExpr(Copy);
732 // Remap pseudo source variable to private copy.
733 CodeGenFunction::OMPPrivateScope Remap(*this);
734 Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
735 Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
736 (void)Remap.Privatize();
737 // Emit copying of the whole variable.
738 EmitIgnoredExpr(Copy);
742 bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
743 OMPPrivateScope &PrivateScope) {
744 if (!HaveInsertPoint())
746 bool DeviceConstTarget =
747 getLangOpts().OpenMPIsDevice &&
748 isOpenMPTargetExecutionDirective(D.getDirectiveKind());
749 bool FirstprivateIsLastprivate = false;
750 llvm::DenseSet<const VarDecl *> Lastprivates;
751 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
752 for (const auto *D : C->varlists())
754 cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
756 llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
757 llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
758 getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
759 // Force emission of the firstprivate copy if the directive does not emit an
760 // outlined function, e.g. omp for, omp simd, omp distribute.
761 bool MustEmitFirstprivateCopy =
762 CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
763 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
764 auto IRef = C->varlist_begin();
765 auto InitsRef = C->inits().begin();
766 for (const Expr *IInit : C->private_copies()) {
767 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
768 bool ThisFirstprivateIsLastprivate =
769 Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
770 const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
771 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
772 if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
773 !FD->getType()->isReferenceType() &&
774 (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
775 EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
780 // Do not emit copy for firstprivate constant variables in target regions,
781 // captured by reference.
782 if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
783 FD && FD->getType()->isReferenceType() &&
784 (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
785 (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
791 FirstprivateIsLastprivate =
792 FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
793 if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
795 cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
797 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
798 /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
799 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
802 // Check if the firstprivate variable is just a constant value.
803 ConstantEmission CE = tryEmitAsConstant(&DRE);
804 if (CE && !CE.isReference()) {
805 // Constant value, no need to create a copy.
810 if (CE && CE.isReference()) {
811 OriginalLVal = CE.getReferenceLValue(*this, &DRE);
813 assert(!CE && "Expected non-constant firstprivate.");
814 OriginalLVal = EmitLValue(&DRE);
817 OriginalLVal = EmitLValue(&DRE);
819 QualType Type = VD->getType();
820 if (Type->isArrayType()) {
821 // Emit VarDecl with copy init for arrays.
822 // Get the address of the original variable captured in the current captured region.
824 IsRegistered = PrivateScope.addPrivate(
825 OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
826 AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
827 const Expr *Init = VD->getInit();
828 if (!isa<CXXConstructExpr>(Init) ||
829 isTrivialInitializer(Init)) {
830 // Perform simple memcpy.
832 MakeAddrLValue(Emission.getAllocatedAddress(), Type);
833 EmitAggregateAssign(Dest, OriginalLVal, Type);
835 EmitOMPAggregateAssign(
836 Emission.getAllocatedAddress(),
837 OriginalLVal.getAddress(*this), Type,
838 [this, VDInit, Init](Address DestElement,
839 Address SrcElement) {
840 // Clean up any temporaries needed by the initialization.
842 RunCleanupsScope InitScope(*this);
843 // Emit initialization for single element.
844 setAddrOfLocalVar(VDInit, SrcElement);
845 EmitAnyExprToMem(Init, DestElement,
846 Init->getType().getQualifiers(),
847 /*IsInitializer*/ false);
848 LocalDeclMap.erase(VDInit);
851 EmitAutoVarCleanups(Emission);
852 return Emission.getAllocatedAddress();
855 Address OriginalAddr = OriginalLVal.getAddress(*this);
856 IsRegistered = PrivateScope.addPrivate(
857 OrigVD, [this, VDInit, OriginalAddr, VD]() {
858 // Emit private VarDecl with copy init.
859 // Remap temp VDInit variable to the address of the original
860 // variable (for proper handling of captured global variables).
861 setAddrOfLocalVar(VDInit, OriginalAddr);
863 LocalDeclMap.erase(VDInit);
864 return GetAddrOfLocalVar(VD);
867 assert(IsRegistered &&
868 "firstprivate var already registered as private");
869 // Silence the warning about unused variable.
876 return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
879 void CodeGenFunction::EmitOMPPrivateClause(
880 const OMPExecutableDirective &D,
881 CodeGenFunction::OMPPrivateScope &PrivateScope) {
882 if (!HaveInsertPoint())
884 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
885 for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
886 auto IRef = C->varlist_begin();
887 for (const Expr *IInit : C->private_copies()) {
888 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
889 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
890 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
891 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
892 // Emit private VarDecl with copy init.
894 return GetAddrOfLocalVar(VD);
896 assert(IsRegistered && "private var already registered as private");
897 // Silence the warning about unused variable.
905 bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
906 if (!HaveInsertPoint())
908 // threadprivate_var1 = master_threadprivate_var1;
909 // operator=(threadprivate_var2, master_threadprivate_var2);
911 // __kmpc_barrier(&loc, global_tid);
912 llvm::DenseSet<const VarDecl *> CopiedVars;
913 llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
914 for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
915 auto IRef = C->varlist_begin();
916 auto ISrcRef = C->source_exprs().begin();
917 auto IDestRef = C->destination_exprs().begin();
918 for (const Expr *AssignOp : C->assignment_ops()) {
919 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
920 QualType Type = VD->getType();
921 if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
922 // Get the address of the master variable. If we are emitting code with
923 // TLS support, the address is passed from the master as a field in the
924 // captured declaration.
925 Address MasterAddr = Address::invalid();
926 if (getLangOpts().OpenMPUseTLS &&
927 getContext().getTargetInfo().isTLSSupported()) {
928 assert(CapturedStmtInfo->lookup(VD) &&
929 "Copyin threadprivates should have been captured!");
930 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
931 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
932 MasterAddr = EmitLValue(&DRE).getAddress(*this);
933 LocalDeclMap.erase(VD);
936 Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
937 : CGM.GetAddrOfGlobal(VD),
938 getContext().getDeclAlign(VD));
940 // Get the address of the threadprivate variable.
941 Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
942 if (CopiedVars.size() == 1) {
943 // First, check whether the current thread is the master thread. If it is,
944 // there is no need to copy the data.
945 CopyBegin = createBasicBlock("copyin.not.master");
946 CopyEnd = createBasicBlock("copyin.not.master.end");
947 Builder.CreateCondBr(
948 Builder.CreateICmpNE(
949 Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
950 Builder.CreatePtrToInt(PrivateAddr.getPointer(),
953 EmitBlock(CopyBegin);
956 cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
958 cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
959 EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
967 // Exit out of copying procedure for non-master thread.
968 EmitBlock(CopyEnd, /*IsFinished=*/true);
974 bool CodeGenFunction::EmitOMPLastprivateClauseInit(
975 const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
976 if (!HaveInsertPoint())
978 bool HasAtLeastOneLastprivate = false;
979 llvm::DenseSet<const VarDecl *> SIMDLCVs;
980 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
981 const auto *LoopDirective = cast<OMPLoopDirective>(&D);
982 for (const Expr *C : LoopDirective->counters()) {
984 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
987 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
988 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
989 HasAtLeastOneLastprivate = true;
990 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
991 !getLangOpts().OpenMPSimd)
993 auto IRef = C->varlist_begin();
994 auto IDestRef = C->destination_exprs().begin();
995 for (const Expr *IInit : C->private_copies()) {
996 // Keep the address of the original variable for the future update at the end of the loop.
998 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
999 // Taskloops do not require additional initialization; it is done in the
1000 // runtime support library.
1001 if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
1002 const auto *DestVD =
1003 cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
1004 PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
1005 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
1006 /*RefersToEnclosingVariableOrCapture=*/
1007 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1008 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
1009 return EmitLValue(&DRE).getAddress(*this);
1011 // Check if the variable is also a firstprivate: in this case IInit is
1012 // not generated. Initialization of this variable will happen in the
1013 // codegen for the 'firstprivate' clause.
1014 if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
1015 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
1016 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
1017 // Emit private VarDecl with copy init.
1019 return GetAddrOfLocalVar(VD);
1021 assert(IsRegistered &&
1022 "lastprivate var already registered as private");
1030 return HasAtLeastOneLastprivate;
1033 void CodeGenFunction::EmitOMPLastprivateClauseFinal(
1034 const OMPExecutableDirective &D, bool NoFinals,
1035 llvm::Value *IsLastIterCond) {
1036 if (!HaveInsertPoint())
1038 // Emit the following code:
1039 // if (<IsLastIterCond>) {
1040 // orig_var1 = private_orig_var1;
1042 // orig_varn = private_orig_varn;
1044 llvm::BasicBlock *ThenBB = nullptr;
1045 llvm::BasicBlock *DoneBB = nullptr;
1046 if (IsLastIterCond) {
1047 // Emit implicit barrier if at least one lastprivate conditional is found
1048 // and this is not a simd mode.
1049 if (!getLangOpts().OpenMPSimd &&
1050 llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
1051 [](const OMPLastprivateClause *C) {
1052 return C->getKind() == OMPC_LASTPRIVATE_conditional;
1054 CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
1056 /*EmitChecks=*/false,
1057 /*ForceSimpleCall=*/true);
1059 ThenBB = createBasicBlock(".omp.lastprivate.then");
1060 DoneBB = createBasicBlock(".omp.lastprivate.done");
1061 Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
1064 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
1065 llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
1066 if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
1067 auto IC = LoopDirective->counters().begin();
1068 for (const Expr *F : LoopDirective->finals()) {
1070 cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
1072 AlreadyEmittedVars.insert(D);
1074 LoopCountersAndUpdates[D] = F;
1078 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
1079 auto IRef = C->varlist_begin();
1080 auto ISrcRef = C->source_exprs().begin();
1081 auto IDestRef = C->destination_exprs().begin();
1082 for (const Expr *AssignOp : C->assignment_ops()) {
1083 const auto *PrivateVD =
1084 cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
1085 QualType Type = PrivateVD->getType();
1086 const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
1087 if (AlreadyEmittedVars.insert(CanonicalVD).second) {
1088 // If the lastprivate variable is a loop control variable of a loop-based
1089 // directive, update its value before copying it back to the original variable.
1091 if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
1092 EmitIgnoredExpr(FinalExpr);
1094 cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
1095 const auto *DestVD =
1096 cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
1097 // Get the address of the private variable.
1098 Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
1099 if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
1101 Address(Builder.CreateLoad(PrivateAddr),
1102 getNaturalTypeAlignment(RefTy->getPointeeType()));
1103 // Store the last value to the private copy in the last iteration.
1104 if (C->getKind() == OMPC_LASTPRIVATE_conditional)
1105 CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
1106 *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
1107 (*IRef)->getExprLoc());
1108 // Get the address of the original variable.
1109 Address OriginalAddr = GetAddrOfLocalVar(DestVD);
1110 EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
1116 if (const Expr *PostUpdate = C->getPostUpdateExpr())
1117 EmitIgnoredExpr(PostUpdate);
1120 EmitBlock(DoneBB, /*IsFinished=*/true);
1123 void CodeGenFunction::EmitOMPReductionClauseInit(
1124 const OMPExecutableDirective &D,
1125 CodeGenFunction::OMPPrivateScope &PrivateScope) {
1126 if (!HaveInsertPoint())
1128 SmallVector<const Expr *, 4> Shareds;
1129 SmallVector<const Expr *, 4> Privates;
1130 SmallVector<const Expr *, 4> ReductionOps;
1131 SmallVector<const Expr *, 4> LHSs;
1132 SmallVector<const Expr *, 4> RHSs;
1133 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1134 auto IPriv = C->privates().begin();
1135 auto IRed = C->reduction_ops().begin();
1136 auto ILHS = C->lhs_exprs().begin();
1137 auto IRHS = C->rhs_exprs().begin();
1138 for (const Expr *Ref : C->varlists()) {
1139 Shareds.emplace_back(Ref);
1140 Privates.emplace_back(*IPriv);
1141 ReductionOps.emplace_back(*IRed);
1142 LHSs.emplace_back(*ILHS);
1143 RHSs.emplace_back(*IRHS);
1144 std::advance(IPriv, 1);
1145 std::advance(IRed, 1);
1146 std::advance(ILHS, 1);
1147 std::advance(IRHS, 1);
1150 ReductionCodeGen RedCG(Shareds, Privates, ReductionOps);
1152 auto ILHS = LHSs.begin();
1153 auto IRHS = RHSs.begin();
1154 auto IPriv = Privates.begin();
1155 for (const Expr *IRef : Shareds) {
1156 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
1157 // Emit private VarDecl with reduction init.
1158 RedCG.emitSharedLValue(*this, Count);
1159 RedCG.emitAggregateType(*this, Count);
1160 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1161 RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
1162 RedCG.getSharedLValue(Count),
1163 [&Emission](CodeGenFunction &CGF) {
1164 CGF.EmitAutoVarInit(Emission);
1167 EmitAutoVarCleanups(Emission);
1168 Address BaseAddr = RedCG.adjustPrivateAddress(
1169 *this, Count, Emission.getAllocatedAddress());
1170 bool IsRegistered = PrivateScope.addPrivate(
1171 RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
1172 assert(IsRegistered && "private var already registered as private");
1173 // Silence the warning about unused variable.
1176 const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
1177 const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
1178 QualType Type = PrivateVD->getType();
1179 bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
1180 if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
1181 // Store the address of the original variable associated with the LHS
1182 // implicit variable.
1183 PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
1184 return RedCG.getSharedLValue(Count).getAddress(*this);
1186 PrivateScope.addPrivate(
1187 RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
1188 } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
1189 isa<ArraySubscriptExpr>(IRef)) {
1190 // Store the address of the original variable associated with the LHS
1191 // implicit variable.
1192 PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
1193 return RedCG.getSharedLValue(Count).getAddress(*this);
1195 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
1196 return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
1197 ConvertTypeForMem(RHSVD->getType()),
1201 QualType Type = PrivateVD->getType();
1202 bool IsArray = getContext().getAsArrayType(Type) != nullptr;
1203 Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
1204 // Store the address of the original variable associated with the LHS
1205 // implicit variable.
1207 OriginalAddr = Builder.CreateElementBitCast(
1208 OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
1210 PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
1211 PrivateScope.addPrivate(
1212 RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
1214 ? Builder.CreateElementBitCast(
1215 GetAddrOfLocalVar(PrivateVD),
1216 ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
1217 : GetAddrOfLocalVar(PrivateVD);
1227 void CodeGenFunction::EmitOMPReductionClauseFinal(
1228 const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
1229 if (!HaveInsertPoint())
1231 llvm::SmallVector<const Expr *, 8> Privates;
1232 llvm::SmallVector<const Expr *, 8> LHSExprs;
1233 llvm::SmallVector<const Expr *, 8> RHSExprs;
1234 llvm::SmallVector<const Expr *, 8> ReductionOps;
1235 bool HasAtLeastOneReduction = false;
1236 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1237 HasAtLeastOneReduction = true;
1238 Privates.append(C->privates().begin(), C->privates().end());
1239 LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
1240 RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
1241 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
1243 if (HasAtLeastOneReduction) {
1244 bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
1245 isOpenMPParallelDirective(D.getDirectiveKind()) ||
1246 ReductionKind == OMPD_simd;
1247 bool SimpleReduction = ReductionKind == OMPD_simd;
1248 // Emit nowait reduction if nowait clause is present or directive is a
1249 // parallel directive (it always has implicit barrier).
1250 CGM.getOpenMPRuntime().emitReduction(
1251 *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
1252 {WithNowait, SimpleReduction, ReductionKind});
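/// Emit the post-update expressions attached to reduction clauses, if any,
/// optionally guarded by a condition produced by \p CondGen.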
1256 static void emitPostUpdateForReductionClause(
1257 CodeGenFunction &CGF, const OMPExecutableDirective &D,
1258 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
1259 if (!CGF.HaveInsertPoint())
1261 llvm::BasicBlock *DoneBB = nullptr;
1262 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1263 if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
1265 if (llvm::Value *Cond = CondGen(CGF)) {
1266 // If the first post-update expression is found, emit conditional
1267 // block if it was requested.
1268 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
1269 DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
1270 CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1271 CGF.EmitBlock(ThenBB);
1274 CGF.EmitIgnoredExpr(PostUpdate);
1278 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
1282 /// Codegen lambda for appending distribute lower and upper bounds to the
1283 /// outlined parallel function. This is necessary for combined constructs such
1284 /// as 'distribute parallel for'.
1285 typedef llvm::function_ref<void(CodeGenFunction &,
1286 const OMPExecutableDirective &,
1287 llvm::SmallVectorImpl<llvm::Value *> &)>
1288 CodeGenBoundParametersTy;
1289 } // anonymous namespace
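/// Emit the outlined function for a parallel region and the runtime call that
/// forks it, applying the num_threads, proc_bind and if clauses.
/// \p CodeGenBoundParameters appends the extra bound arguments needed by
/// combined 'distribute parallel for' constructs.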
1291 static void emitCommonOMPParallelDirective(
1292 CodeGenFunction &CGF, const OMPExecutableDirective &S,
1293 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1294 const CodeGenBoundParametersTy &CodeGenBoundParameters) {
1295 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1296 llvm::Function *OutlinedFn =
1297 CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
1298 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
1299 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
1300 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
1301 llvm::Value *NumThreads =
1302 CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
1303 /*IgnoreResultAssign=*/true);
1304 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
1305 CGF, NumThreads, NumThreadsClause->getBeginLoc());
1307 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
1308 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
1309 CGF.CGM.getOpenMPRuntime().emitProcBindClause(
1310 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
1312 const Expr *IfCond = nullptr;
1313 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
1314 if (C->getNameModifier() == OMPD_unknown ||
1315 C->getNameModifier() == OMPD_parallel) {
1316 IfCond = C->getCondition();
1321 OMPParallelScope Scope(CGF, S);
1322 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
1323 // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk's
1324 // lower and upper bounds with the pragma 'for' chunking mechanism.
1325 // The following lambda takes care of appending the lower and upper bound
1326 // parameters when necessary.
1327 CodeGenBoundParameters(CGF, S, CapturedVars);
1328 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
1329 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
1330 CapturedVars, IfCond);
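// No extra bound parameters are needed for a standalone 'parallel' region.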
1333 static void emitEmptyBoundParameters(CodeGenFunction &,
1334 const OMPExecutableDirective &,
1335 llvm::SmallVectorImpl<llvm::Value *> &) {}
1337 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
1339 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) {
1340 // Check if we have any if clause associated with the directive.
1341 llvm::Value *IfCond = nullptr;
1342 if (const auto *C = S.getSingleClause<OMPIfClause>())
1343 IfCond = EmitScalarExpr(C->getCondition(),
1344 /*IgnoreResultAssign=*/true);
1346 llvm::Value *NumThreads = nullptr;
1347 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
1348 NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
1349 /*IgnoreResultAssign=*/true);
1351 ProcBindKind ProcBind = OMP_PROC_BIND_default;
1352 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
1353 ProcBind = ProcBindClause->getProcBindKind();
1355 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1357 // The cleanup callback that finalizes all variables at the given location,
1358 // thus calls destructors etc.
1359 auto FiniCB = [this](InsertPointTy IP) {
1360 CGBuilderTy::InsertPointGuard IPG(Builder);
1361 assert(IP.getBlock()->end() != IP.getPoint() &&
1362 "OpenMP IR Builder should cause terminated block!");
1363 llvm::BasicBlock *IPBB = IP.getBlock();
1364 llvm::BasicBlock *DestBB = IPBB->splitBasicBlock(IP.getPoint());
1365 IPBB->getTerminator()->eraseFromParent();
1366 Builder.SetInsertPoint(IPBB);
1367 CodeGenFunction::JumpDest Dest = getJumpDestInCurrentScope(DestBB);
1368 EmitBranchThroughCleanup(Dest);
1371 // Privatization callback that performs appropriate action for
1372 // shared/private/firstprivate/lastprivate/copyin/... variables.
1374 // TODO: This defaults to shared right now.
1375 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1376 llvm::Value &Val, llvm::Value *&ReplVal) {
1377 // The next line is appropriate only for variables (Val) with the
1378 // data-sharing attribute "shared".
1384 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1385 const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();
1387 auto BodyGenCB = [ParallelRegionBodyStmt,
1388 this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1389 llvm::BasicBlock &ContinuationBB) {
1390 auto OldAllocaIP = AllocaInsertPt;
1391 AllocaInsertPt = &*AllocaIP.getPoint();
1393 auto OldReturnBlock = ReturnBlock;
1394 ReturnBlock = getJumpDestInCurrentScope(&ContinuationBB);
1396 llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
1397 CodeGenIPBB->splitBasicBlock(CodeGenIP.getPoint());
1398 llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator();
1399 CodeGenIPBBTI->removeFromParent();
1401 Builder.SetInsertPoint(CodeGenIPBB);
1403 EmitStmt(ParallelRegionBodyStmt);
1405 Builder.Insert(CodeGenIPBBTI);
1407 AllocaInsertPt = OldAllocaIP;
1408 ReturnBlock = OldReturnBlock;
1411 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
1412 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
1413 Builder.restoreIP(OMPBuilder->CreateParallel(Builder, BodyGenCB, PrivCB,
1414 FiniCB, IfCond, NumThreads,
1415 ProcBind, S.hasCancel()));
1419 // Emit parallel region as a standalone region.
1420 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
1422 OMPPrivateScope PrivateScope(CGF);
1423 bool Copyins = CGF.EmitOMPCopyinClause(S);
1424 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1426 // Emit an implicit barrier to synchronize threads and avoid data races on
1427 // propagation of the master thread's values of threadprivate variables to
1428 // the local instances of those variables in all other implicit threads.
1429 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1430 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
1431 /*ForceSimpleCall=*/true);
1433 CGF.EmitOMPPrivateClause(S, PrivateScope);
1434 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1435 (void)PrivateScope.Privatize();
1436 CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
1437 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
1439 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
1440 emitEmptyBoundParameters);
1441 emitPostUpdateForReductionClause(*this, S,
1442 [](CodeGenFunction &) { return nullptr; });
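/// Emit the body of a collapsed loop nest, descending through up to
/// \p MaxLevel nested loops (including imperfectly nested ones) and emitting
/// the loop-variable statements of range-based for loops along the way.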
1445 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
1446 int MaxLevel, int Level = 0) {
1447 assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
1448 const Stmt *SimplifiedS = S->IgnoreContainers();
1449 if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
1450 PrettyStackTraceLoc CrashInfo(
1451 CGF.getContext().getSourceManager(), CS->getLBracLoc(),
1452 "LLVM IR generation of compound statement ('{}')");
1454 // Keep track of the current cleanup stack depth, including debug scopes.
1455 CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange());
1456 for (const Stmt *CurStmt : CS->body())
1457 emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level);
1460 if (SimplifiedS == NextLoop) {
1461 if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
1464 assert(isa<CXXForRangeStmt>(SimplifiedS) &&
1465 "Expected canonical for loop or range-based for loop.");
1466 const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS);
1467 CGF.EmitStmt(CXXFor->getLoopVarStmt());
1468 S = CXXFor->getBody();
1470 if (Level + 1 < MaxLevel) {
1471 NextLoop = OMPLoopDirective::tryToFindNextInnerLoop(
1472 S, /*TryImperfectlyNestedLoops=*/true);
1473 emitBody(CGF, S, NextLoop, MaxLevel, Level + 1);
1480 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
1481 JumpDest LoopExit) {
1482 RunCleanupsScope BodyScope(*this);
1483 // Update counter values on the current iteration.
1484 for (const Expr *UE : D.updates())
1485 EmitIgnoredExpr(UE);
1486 // Update the linear variables.
1487 // In distribute directives only loop counters may be marked as linear, so
1488 // there is no need to generate the code for them.
1489 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
1490 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1491 for (const Expr *UE : C->updates())
1492 EmitIgnoredExpr(UE);
1496 // On a continue in the body, jump to the end.
1497 JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
1498 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1499 for (const Expr *E : D.finals_conditions()) {
1502 // Check that the loop counter in a non-rectangular nest fits into the iteration space.
1504 llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next");
1505 EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(),
1506 getProfileCount(D.getBody()));
1509 // Emit loop variables for C++ range loops.
1511 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
1513 emitBody(*this, Body,
1514 OMPLoopDirective::tryToFindNextInnerLoop(
1515 Body, /*TryImperfectlyNestedLoops=*/true),
1516 D.getCollapsedNumber());
1518 // The end (updates/cleanups).
1519 EmitBlock(Continue.getBlock());
1520 BreakContinueStack.pop_back();
1523 void CodeGenFunction::EmitOMPInnerLoop(
1524 const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
1525 const Expr *IncExpr,
1526 const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
1527 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
1528 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
1530 // Start the loop with a block that tests the condition.
1531 auto CondBlock = createBasicBlock("omp.inner.for.cond");
1532 EmitBlock(CondBlock);
1533 const SourceRange R = S.getSourceRange();
1534 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1535 SourceLocToDebugLoc(R.getEnd()));
1537 // If there are any cleanups between here and the loop-exit scope,
1538 // create a block to stage a loop exit along.
1539 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1540 if (RequiresCleanup)
1541 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
1543 llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body");
1546 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
1547 if (ExitBlock != LoopExit.getBlock()) {
1548 EmitBlock(ExitBlock);
1549 EmitBranchThroughCleanup(LoopExit);
1552 EmitBlock(LoopBody);
1553 incrementProfileCounter(&S);
1555 // Create a block for the increment.
1556 JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
1557 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1561 // Emit "IV = IV + 1" and a back-edge to the condition block.
1562 EmitBlock(Continue.getBlock());
1563 EmitIgnoredExpr(IncExpr);
1565 BreakContinueStack.pop_back();
1566 EmitBranch(CondBlock);
1568 // Emit the fall-through block.
1569 EmitBlock(LoopExit.getBlock());
1572 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
1573 if (!HaveInsertPoint())
1574 return false;
1575 // Emit inits for the linear variables.
1576 bool HasLinears = false;
1577 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1578 for (const Expr *Init : C->inits()) {
1579 HasLinears = true;
1580 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
1581 if (const auto *Ref =
1582 dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
1583 AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
1584 const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
1585 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
1586 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1587 VD->getInit()->getType(), VK_LValue,
1588 VD->getInit()->getExprLoc());
1589 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
1590 VD->getType()),
1591 /*capturedByInit=*/false);
1592 EmitAutoVarCleanups(Emission);
1597 // Emit the linear steps for the linear clauses.
1598 // If a step is not constant, it is pre-calculated before the loop.
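// E.g. for '#pragma omp simd linear(x : a * b)' the step 'a * b' is
// evaluated once into a helper variable before the loop and that helper is
// reused by every linear update (illustrative example).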
1599 if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
1600 if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
1601 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
1602 // Emit calculation of the linear step.
1603 EmitIgnoredExpr(CS);
1609 void CodeGenFunction::EmitOMPLinearClauseFinal(
1610 const OMPLoopDirective &D,
1611 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
1612 if (!HaveInsertPoint())
1614 llvm::BasicBlock *DoneBB = nullptr;
1615 // Emit the final values of the linear variables.
1616 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1617 auto IC = C->varlist_begin();
1618 for (const Expr *F : C->finals()) {
1619 if (!DoneBB) {
1620 if (llvm::Value *Cond = CondGen(*this)) {
1621 // If the first post-update expression is found, emit conditional
1622 // block if it was requested.
1623 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu");
1624 DoneBB = createBasicBlock(".omp.linear.pu.done");
1625 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1629 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
1630 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
1631 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1632 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
1633 Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
1634 CodeGenFunction::OMPPrivateScope VarScope(*this);
1635 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
1636 (void)VarScope.Privatize();
1640 if (const Expr *PostUpdate = C->getPostUpdateExpr())
1641 EmitIgnoredExpr(PostUpdate);
1643 if (DoneBB)
1644 EmitBlock(DoneBB, /*IsFinished=*/true);
1647 static void emitAlignedClause(CodeGenFunction &CGF,
1648 const OMPExecutableDirective &D) {
1649 if (!CGF.HaveInsertPoint())
1651 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
1652 llvm::APInt ClauseAlignment(64, 0);
1653 if (const Expr *AlignmentExpr = Clause->getAlignment()) {
1654 auto *AlignmentCI =
1655 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
1656 ClauseAlignment = AlignmentCI->getValue();
1658 for (const Expr *E : Clause->varlists()) {
1659 llvm::APInt Alignment(ClauseAlignment);
1660 if (Alignment == 0) {
1661 // OpenMP [2.8.1, Description]
1662 // If no optional parameter is specified, implementation-defined default
1663 // alignments for SIMD instructions on the target platforms are assumed.
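// E.g. '#pragma omp simd aligned(p : 64)' produces a 64-byte alignment
// assumption on 'p', while plain 'aligned(p)' falls back to the target's
// default SIMD alignment for the pointee type (illustrative example).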
1664 Alignment =
1665 CGF.getContext()
1666 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
1667 E->getType()->getPointeeType()))
1668 .getQuantity();
1670 assert((Alignment == 0 || Alignment.isPowerOf2()) &&
1671 "alignment is not power of 2");
1672 if (Alignment != 0) {
1673 llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
1674 CGF.EmitAlignmentAssumption(
1675 PtrValue, E, /*No second loc needed*/ SourceLocation(),
1676 llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment));
1682 void CodeGenFunction::EmitOMPPrivateLoopCounters(
1683 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
1684 if (!HaveInsertPoint())
1686 auto I = S.private_counters().begin();
1687 for (const Expr *E : S.counters()) {
1688 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1689 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
1690 // Emit var without initialization.
1691 AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
1692 EmitAutoVarCleanups(VarEmission);
1693 LocalDeclMap.erase(PrivateVD);
1694 (void)LoopScope.addPrivate(VD, [&VarEmission]() {
1695 return VarEmission.getAllocatedAddress();
1697 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
1698 VD->hasGlobalStorage()) {
1699 (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() {
1700 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
1701 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
1702 E->getType(), VK_LValue, E->getExprLoc());
1703 return EmitLValue(&DRE).getAddress(*this);
1706 (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() {
1707 return VarEmission.getAllocatedAddress();
1712 // Privatize extra loop counters used in loops for ordered(n) clauses.
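// E.g. for '#pragma omp for ordered(2)' with a single collapsed loop, the
// counter of the second loop is not a work-sharing counter but still needs a
// private copy so the doacross 'depend(sink/source)' bookkeeping can use it
// (illustrative example).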
1713 for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
1714 if (!C->getNumForLoops())
1715 continue;
1716 for (unsigned I = S.getCollapsedNumber(),
1717 E = C->getLoopNumIterations().size();
1718 I < E; ++I) {
1719 const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
1720 const auto *VD = cast<VarDecl>(DRE->getDecl());
1721 // Override only those variables that can be captured to avoid re-emission
1722 // of the variables declared within the loops.
1723 if (DRE->refersToEnclosingVariableOrCapture()) {
1724 (void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
1725 return CreateMemTemp(DRE->getType(), VD->getName());
1732 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
1733 const Expr *Cond, llvm::BasicBlock *TrueBlock,
1734 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
1735 if (!CGF.HaveInsertPoint())
1738 CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
1739 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
1740 (void)PreCondScope.Privatize();
1741 // Get initial values of real counters.
1742 for (const Expr *I : S.inits()) {
1743 CGF.EmitIgnoredExpr(I);
1746 // Create temp loop control variables with their init values to support
1747 // non-rectangular loops.
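// E.g. in 'for (i = 0; i < N; ++i) for (j = i; j < M; ++j)' the inner lower
// bound depends on 'i', so a temporary for 'i' seeded with its initial value
// is needed to evaluate the precondition (illustrative example).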
1748 CodeGenFunction::OMPMapVars PreCondVars;
1749 for (const Expr *E : S.dependent_counters()) {
1750 if (!E)
1751 continue;
1752 assert(!E->getType().getNonReferenceType()->isRecordType() &&
1753 "dependent counter must not be an iterator.");
1754 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1755 Address CounterAddr =
1756 CGF.CreateMemTemp(VD->getType().getNonReferenceType());
1757 (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr);
1759 (void)PreCondVars.apply(CGF);
1760 for (const Expr *E : S.dependent_inits()) {
1761 if (!E)
1762 continue;
1763 CGF.EmitIgnoredExpr(E);
1765 // Check that loop is executed at least one time.
1766 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
1767 PreCondVars.restore(CGF);
1770 void CodeGenFunction::EmitOMPLinearClause(
1771 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
1772 if (!HaveInsertPoint())
1774 llvm::DenseSet<const VarDecl *> SIMDLCVs;
1775 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
1776 const auto *LoopDirective = cast<OMPLoopDirective>(&D);
1777 for (const Expr *C : LoopDirective->counters()) {
1778 SIMDLCVs.insert(
1779 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
1782 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1783 auto CurPrivate = C->privates().begin();
1784 for (const Expr *E : C->varlists()) {
1785 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1786 const auto *PrivateVD =
1787 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
1788 if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
1789 bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() {
1790 // Emit private VarDecl with copy init.
1791 EmitVarDecl(*PrivateVD);
1792 return GetAddrOfLocalVar(PrivateVD);
1794 assert(IsRegistered && "linear var already registered as private");
1795 // Silence the warning about unused variable.
1796 (void)IsRegistered;
1797 } else {
1798 EmitVarDecl(*PrivateVD);
1805 static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
1806 const OMPExecutableDirective &D,
1807 bool IsMonotonic) {
1808 if (!CGF.HaveInsertPoint())
1809 return;
1810 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
1811 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
1812 /*ignoreResult=*/true);
1813 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1814 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1815 // In presence of finite 'safelen', it may be unsafe to mark all
1816 // the memory instructions parallel, because loop-carried
1817 // dependences of 'safelen' iterations are possible.
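// E.g. '#pragma omp simd simdlen(8) safelen(16)' requests a vector width of
// 8 but still allows loop-carried dependences up to 16 iterations apart, so
// the accesses must not be marked parallel (illustrative example).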
1818 if (!IsMonotonic)
1819 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
1820 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
1821 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
1822 /*ignoreResult=*/true);
1823 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1824 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1825 // In presence of finite 'safelen', it may be unsafe to mark all
1826 // the memory instructions parallel, because loop-carried
1827 // dependences of 'safelen' iterations are possible.
1828 CGF.LoopStack.setParallel(/*Enable=*/false);
1832 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
1833 bool IsMonotonic) {
1834 // Walk clauses and process safelen/lastprivate.
1835 LoopStack.setParallel(!IsMonotonic);
1836 LoopStack.setVectorizeEnable();
1837 emitSimdlenSafelenClause(*this, D, IsMonotonic);
1840 void CodeGenFunction::EmitOMPSimdFinal(
1841 const OMPLoopDirective &D,
1842 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
1843 if (!HaveInsertPoint())
1845 llvm::BasicBlock *DoneBB = nullptr;
1846 auto IC = D.counters().begin();
1847 auto IPC = D.private_counters().begin();
1848 for (const Expr *F : D.finals()) {
1849 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
1850 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
1851 const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
1852 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
1853 OrigVD->hasGlobalStorage() || CED) {
1854 if (!DoneBB) {
1855 if (llvm::Value *Cond = CondGen(*this)) {
1856 // If the first post-update expression is found, emit conditional
1857 // block if it was requested.
1858 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then");
1859 DoneBB = createBasicBlock(".omp.final.done");
1860 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1864 Address OrigAddr = Address::invalid();
1865 if (CED) {
1866 OrigAddr =
1867 EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this);
1868 } else {
1869 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
1870 /*RefersToEnclosingVariableOrCapture=*/false,
1871 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
1872 OrigAddr = EmitLValue(&DRE).getAddress(*this);
1874 OMPPrivateScope VarScope(*this);
1875 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
1876 (void)VarScope.Privatize();
1882 if (DoneBB)
1883 EmitBlock(DoneBB, /*IsFinished=*/true);
1886 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
1887 const OMPLoopDirective &S,
1888 CodeGenFunction::JumpDest LoopExit) {
1889 CGF.CGM.getOpenMPRuntime().initLastprivateConditionalCounter(CGF, S);
1890 CGF.EmitOMPLoopBody(S, LoopExit);
1891 CGF.EmitStopPoint(&S);
1894 /// Emit a helper variable and return corresponding lvalue.
1895 static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
1896 const DeclRefExpr *Helper) {
1897 auto VDecl = cast<VarDecl>(Helper->getDecl());
1898 CGF.EmitVarDecl(*VDecl);
1899 return CGF.EmitLValue(Helper);
1902 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S,
1903 const RegionCodeGenTy &SimdInitGen,
1904 const RegionCodeGenTy &BodyCodeGen) {
1905 auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF,
1906 PrePostActionTy &) {
1907 CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S);
1908 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
1913 auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
1914 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
1915 CGF.LoopStack.setVectorizeEnable(/*Enable=*/false);
1919 const Expr *IfCond = nullptr;
1920 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
1921 if (CGF.getLangOpts().OpenMP >= 50 &&
1922 (C->getNameModifier() == OMPD_unknown ||
1923 C->getNameModifier() == OMPD_simd)) {
1924 IfCond = C->getCondition();
1929 CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen);
1931 RegionCodeGenTy ThenRCG(ThenGen);
1936 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
1937 PrePostActionTy &Action) {
1939 assert(isOpenMPSimdDirective(S.getDirectiveKind()) &&
1940 "Expected simd directive");
1941 OMPLoopScope PreInitScope(CGF, S);
1942 // if (PreCond) {
1943 // for (IV in 0..LastIteration) BODY;
1944 // <Final counter/linear vars updates>;
1945 // }
1947 if (isOpenMPDistributeDirective(S.getDirectiveKind()) ||
1948 isOpenMPWorksharingDirective(S.getDirectiveKind()) ||
1949 isOpenMPTaskLoopDirective(S.getDirectiveKind())) {
1950 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()));
1951 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()));
1954 // Emit: if (PreCond) - begin.
1955 // If the condition constant folds and can be elided, avoid emitting the
1956 // whole loop.
1957 bool CondConstant;
1958 llvm::BasicBlock *ContBlock = nullptr;
1959 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
1960 if (!CondConstant)
1961 return;
1962 } else {
1963 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then");
1964 ContBlock = CGF.createBasicBlock("simd.if.end");
1965 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
1966 CGF.getProfileCount(&S));
1967 CGF.EmitBlock(ThenBlock);
1968 CGF.incrementProfileCounter(&S);
1971 // Emit the loop iteration variable.
1972 const Expr *IVExpr = S.getIterationVariable();
1973 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
1974 CGF.EmitVarDecl(*IVDecl);
1975 CGF.EmitIgnoredExpr(S.getInit());
1977 // Emit the iterations count variable.
1978 // If it is not a variable, Sema decided to calculate iterations count on
1979 // each iteration (e.g., it is foldable into a constant).
1980 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
1981 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
1982 // Emit calculation of the iterations count.
1983 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
1986 emitAlignedClause(CGF, S);
1987 (void)CGF.EmitOMPLinearClauseInit(S);
1989 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
1990 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
1991 CGF.EmitOMPLinearClause(S, LoopScope);
1992 CGF.EmitOMPPrivateClause(S, LoopScope);
1993 CGF.EmitOMPReductionClauseInit(S, LoopScope);
1994 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
1995 CGF, S, CGF.EmitLValue(S.getIterationVariable()));
1996 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
1997 (void)LoopScope.Privatize();
1998 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
1999 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
2001 emitCommonSimdLoop(
2002 CGF, S,
2003 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2004 CGF.EmitOMPSimdInit(S);
2006 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
2007 CGF.EmitOMPInnerLoop(
2008 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
2009 [&S](CodeGenFunction &CGF) {
2010 CGF.CGM.getOpenMPRuntime().initLastprivateConditionalCounter(
2012 CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest());
2013 CGF.EmitStopPoint(&S);
2015 [](CodeGenFunction &) {});
2017 CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; });
2018 // Emit final copy of the lastprivate variables at the end of loops.
2019 if (HasLastprivateClause)
2020 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
2021 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
2022 emitPostUpdateForReductionClause(CGF, S,
2023 [](CodeGenFunction &) { return nullptr; });
2025 CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
2026 // Emit: if (PreCond) - end.
2027 if (ContBlock) {
2028 CGF.EmitBranch(ContBlock);
2029 CGF.EmitBlock(ContBlock, true);
2033 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
2034 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2035 emitOMPSimdRegion(CGF, S, Action);
2037 OMPLexicalScope Scope(*this, S, OMPD_unknown);
2038 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2041 void CodeGenFunction::EmitOMPOuterLoop(
2042 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
2043 CodeGenFunction::OMPPrivateScope &LoopScope,
2044 const CodeGenFunction::OMPLoopArguments &LoopArgs,
2045 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
2046 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
2047 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2049 const Expr *IVExpr = S.getIterationVariable();
2050 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2051 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2053 JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
2055 // Start the loop with a block that tests the condition.
2056 llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond");
2057 EmitBlock(CondBlock);
2058 const SourceRange R = S.getSourceRange();
2059 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
2060 SourceLocToDebugLoc(R.getEnd()));
2062 llvm::Value *BoolCondVal = nullptr;
2063 if (!DynamicOrOrdered) {
2064 // UB = min(UB, GlobalUB) or
2065 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g.
2066 // 'distribute parallel for')
2067 EmitIgnoredExpr(LoopArgs.EUB);
2069 EmitIgnoredExpr(LoopArgs.Init);
2071 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
2074 RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL,
2075 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
2078 // If there are any cleanups between here and the loop-exit scope,
2079 // create a block to stage a loop exit along.
2080 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
2081 if (LoopScope.requiresCleanups())
2082 ExitBlock = createBasicBlock("omp.dispatch.cleanup");
2084 llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body");
2085 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
2086 if (ExitBlock != LoopExit.getBlock()) {
2087 EmitBlock(ExitBlock);
2088 EmitBranchThroughCleanup(LoopExit);
2090 EmitBlock(LoopBody);
2092 // Emit "IV = LB" (in case of static schedule, we have already calculated new
2093 // LB for loop condition and emitted it above).
2094 if (DynamicOrOrdered)
2095 EmitIgnoredExpr(LoopArgs.Init);
2097 // Create a block for the increment.
2098 JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
2099 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
2103 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
2104 // Generate !llvm.loop.parallel metadata for loads and stores for loops
2105 // with dynamic/guided scheduling and without ordered clause.
2106 if (!isOpenMPSimdDirective(S.getDirectiveKind()))
2107 CGF.LoopStack.setParallel(!IsMonotonic);
2109 CGF.EmitOMPSimdInit(S, IsMonotonic);
2111 [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered,
2112 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
2113 SourceLocation Loc = S.getBeginLoc();
2114 // when 'distribute' is not combined with a 'for':
2115 // while (idx <= UB) { BODY; ++idx; }
2116 // when 'distribute' is combined with a 'for'
2117 // (e.g. 'distribute parallel for')
2118 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
2119 CGF.EmitOMPInnerLoop(
2120 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
2121 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
2122 CodeGenLoop(CGF, S, LoopExit);
2124 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) {
2125 CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
2129 EmitBlock(Continue.getBlock());
2130 BreakContinueStack.pop_back();
2131 if (!DynamicOrOrdered) {
2132 // Emit "LB = LB + Stride", "UB = UB + Stride".
2133 EmitIgnoredExpr(LoopArgs.NextLB);
2134 EmitIgnoredExpr(LoopArgs.NextUB);
2137 EmitBranch(CondBlock);
2139 // Emit the fall-through block.
2140 EmitBlock(LoopExit.getBlock());
2142 // Tell the runtime we are done.
2143 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
2144 if (!DynamicOrOrdered)
2145 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
2146 S.getDirectiveKind());
2148 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
2151 void CodeGenFunction::EmitOMPForOuterLoop(
2152 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
2153 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
2154 const OMPLoopArguments &LoopArgs,
2155 const CodeGenDispatchBoundsTy &CGDispatchBounds) {
2156 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2158 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
2159 const bool DynamicOrOrdered =
2160 Ordered || RT.isDynamic(ScheduleKind.Schedule);
2163 !RT.isStaticNonchunked(ScheduleKind.Schedule,
2164 LoopArgs.Chunk != nullptr)) &&
2165 "static non-chunked schedule does not need outer loop");
2169 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2170 // When schedule(dynamic,chunk_size) is specified, the iterations are
2171 // distributed to threads in the team in chunks as the threads request them.
2172 // Each thread executes a chunk of iterations, then requests another chunk,
2173 // until no chunks remain to be distributed. Each chunk contains chunk_size
2174 // iterations, except for the last chunk to be distributed, which may have
2175 // fewer iterations. When no chunk_size is specified, it defaults to 1.
2177 // When schedule(guided,chunk_size) is specified, the iterations are assigned
2178 // to threads in the team in chunks as the executing threads request them.
2179 // Each thread executes a chunk of iterations, then requests another chunk,
2180 // until no chunks remain to be assigned. For a chunk_size of 1, the size of
2181 // each chunk is proportional to the number of unassigned iterations divided
2182 // by the number of threads in the team, decreasing to 1. For a chunk_size
2183 // with value k (greater than 1), the size of each chunk is determined in the
2184 // same way, with the restriction that the chunks do not contain fewer than k
2185 // iterations (except for the last chunk to be assigned, which may have fewer
2186 // than k iterations).
2188 // When schedule(auto) is specified, the decision regarding scheduling is
2189 // delegated to the compiler and/or runtime system. The programmer gives the
2190 // implementation the freedom to choose any possible mapping of iterations to
2191 // threads in the team.
2193 // When schedule(runtime) is specified, the decision regarding scheduling is
2194 // deferred until run time, and the schedule and chunk size are taken from the
2195 // run-sched-var ICV. If the ICV is set to auto, the schedule is
2196 // implementation defined
2198 // while(__kmpc_dispatch_next(&LB, &UB)) {
2199 // idx = LB;
2200 // while (idx <= UB) { BODY; ++idx;
2201 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
2202 // } // inner loop
2203 // }
2205 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2206 // When schedule(static, chunk_size) is specified, iterations are divided into
2207 // chunks of size chunk_size, and the chunks are assigned to the threads in
2208 // the team in a round-robin fashion in the order of the thread number.
2210 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
2211 // while (idx <= UB) { BODY; ++idx; } // inner loop
2212 // LB = LB + ST;
2213 // UB = UB + ST;
2214 // }
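// E.g. '#pragma omp for schedule(dynamic, 4)' follows the dispatch pattern
// above with chunks of 4 handed out via __kmpc_dispatch_next, while
// 'schedule(static, 4)' follows the chunked static pattern here, with LB/UB
// advanced by the stride each round (illustrative sketch).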
2217 const Expr *IVExpr = S.getIterationVariable();
2218 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2219 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2221 if (DynamicOrOrdered) {
2222 const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
2223 CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
2224 llvm::Value *LBVal = DispatchBounds.first;
2225 llvm::Value *UBVal = DispatchBounds.second;
2226 CGOpenMPRuntime::DispatchRTInput DipatchRTInputValues = {LBVal, UBVal,
2227 LoopArgs.Chunk};
2228 RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
2229 IVSigned, Ordered, DipatchRTInputValues);
2231 CGOpenMPRuntime::StaticRTInput StaticInit(
2232 IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
2233 LoopArgs.ST, LoopArgs.Chunk);
2234 RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
2235 ScheduleKind, StaticInit);
2238 auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
2239 const unsigned IVSize,
2240 const bool IVSigned) {
2242 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
2247 OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
2248 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
2249 OuterLoopArgs.IncExpr = S.getInc();
2250 OuterLoopArgs.Init = S.getInit();
2251 OuterLoopArgs.Cond = S.getCond();
2252 OuterLoopArgs.NextLB = S.getNextLowerBound();
2253 OuterLoopArgs.NextUB = S.getNextUpperBound();
2254 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
2255 emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
2258 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
2259 const unsigned IVSize, const bool IVSigned) {}
2261 void CodeGenFunction::EmitOMPDistributeOuterLoop(
2262 OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
2263 OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
2264 const CodeGenLoopTy &CodeGenLoopContent) {
2266 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2268 // Emit outer loop.
2269 // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
2270 // dynamic.
2273 const Expr *IVExpr = S.getIterationVariable();
2274 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2275 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2277 CGOpenMPRuntime::StaticRTInput StaticInit(
2278 IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
2279 LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
2280 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);
2282 // for combined 'distribute' and 'for' the increment expression of distribute
2283 // is stored in DistInc. For 'distribute' alone, it is in Inc.
2285 if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
2286 IncExpr = S.getDistInc();
2288 IncExpr = S.getInc();
2290 // this routine is shared by 'omp distribute parallel for' and
2291 // 'omp distribute': select the right EUB expression depending on the
2292 // directive.
2293 OMPLoopArguments OuterLoopArgs;
2294 OuterLoopArgs.LB = LoopArgs.LB;
2295 OuterLoopArgs.UB = LoopArgs.UB;
2296 OuterLoopArgs.ST = LoopArgs.ST;
2297 OuterLoopArgs.IL = LoopArgs.IL;
2298 OuterLoopArgs.Chunk = LoopArgs.Chunk;
2299 OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2300 ? S.getCombinedEnsureUpperBound()
2301 : S.getEnsureUpperBound();
2302 OuterLoopArgs.IncExpr = IncExpr;
2303 OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2304 ? S.getCombinedInit()
2306 OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2307 ? S.getCombinedCond()
2309 OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2310 ? S.getCombinedNextLowerBound()
2311 : S.getNextLowerBound();
2312 OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2313 ? S.getCombinedNextUpperBound()
2314 : S.getNextUpperBound();
2316 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
2317 LoopScope, OuterLoopArgs, CodeGenLoopContent,
2321 static std::pair<LValue, LValue>
2322 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
2323 const OMPExecutableDirective &S) {
2324 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2325 LValue LB =
2326 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
2327 LValue UB =
2328 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
2330 // When composing 'distribute' with 'for' (e.g. as in 'distribute
2331 // parallel for') we need to use the 'distribute'
2332 // chunk lower and upper bounds rather than the whole loop iteration
2333 // space. These are parameters to the outlined function for 'parallel'
2334 // and we copy the bounds of the previous schedule into the
2335 // current ones.
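// E.g. in 'distribute parallel for', the distribute chunk [PrevLB, PrevUB]
// computed for the team becomes the [LB, UB] range that the nested 'for'
// work-sharing then subdivides among the team's threads (illustrative
// example).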
2336 LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
2337 LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
2338 llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
2339 PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
2340 PrevLBVal = CGF.EmitScalarConversion(
2341 PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
2342 LS.getIterationVariable()->getType(),
2343 LS.getPrevLowerBoundVariable()->getExprLoc());
2344 llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
2345 PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
2346 PrevUBVal = CGF.EmitScalarConversion(
2347 PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
2348 LS.getIterationVariable()->getType(),
2349 LS.getPrevUpperBoundVariable()->getExprLoc());
2351 CGF.EmitStoreOfScalar(PrevLBVal, LB);
2352 CGF.EmitStoreOfScalar(PrevUBVal, UB);
2354 return {LB, UB};
2357 /// if the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then
2358 /// we need to use the LB and UB expressions generated by the worksharing
2359 /// code generation support, whereas in non combined situations we would
2360 /// just emit 0 and the LastIteration expression
2361 /// This function is necessary due to the difference of the LB and UB
2362 /// types for the RT emission routines for 'for_static_init' and
2363 /// 'for_dispatch_init'
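/// For example, the static-init path works on the addresses of the
/// iteration-variable-typed LB/UB helpers (LValues), whereas the dispatch
/// path consumes the bounds as plain values, hence the separate bound-emitting
/// helpers returning LValues vs. llvm::Values (illustrative summary).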
2364 static std::pair<llvm::Value *, llvm::Value *>
2365 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
2366 const OMPExecutableDirective &S,
2367 Address LB, Address UB) {
2368 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2369 const Expr *IVExpr = LS.getIterationVariable();
2370 // when implementing a dynamic schedule for a 'for' combined with a
2371 // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
2372 // is not normalized as each team only executes its own assigned
2373 // distribute chunk.
2374 QualType IteratorTy = IVExpr->getType();
2375 llvm::Value *LBVal =
2376 CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2377 llvm::Value *UBVal =
2378 CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2379 return {LBVal, UBVal};
2382 static void emitDistributeParallelForDistributeInnerBoundParams(
2383 CodeGenFunction &CGF, const OMPExecutableDirective &S,
2384 llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
2385 const auto &Dir = cast<OMPLoopDirective>(S);
2386 LValue LB =
2387 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
2388 llvm::Value *LBCast =
2389 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
2390 CGF.SizeTy, /*isSigned=*/false);
2391 CapturedVars.push_back(LBCast);
2392 LValue UB =
2393 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
2395 llvm::Value *UBCast =
2396 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
2397 CGF.SizeTy, /*isSigned=*/false);
2398 CapturedVars.push_back(UBCast);
2401 static void
2402 emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
2403 const OMPLoopDirective &S,
2404 CodeGenFunction::JumpDest LoopExit) {
2405 auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
2406 PrePostActionTy &Action) {
2408 bool HasCancel = false;
2409 if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
2410 if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
2411 HasCancel = D->hasCancel();
2412 else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
2413 HasCancel = D->hasCancel();
2414 else if (const auto *D =
2415 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S))
2416 HasCancel = D->hasCancel();
2418 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
2420 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
2421 emitDistributeParallelForInnerBounds,
2422 emitDistributeParallelForDispatchBounds);
2425 emitCommonOMPParallelDirective(
2427 isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for,
2428 CGInlinedWorksharingLoop,
2429 emitDistributeParallelForDistributeInnerBoundParams);
2432 void CodeGenFunction::EmitOMPDistributeParallelForDirective(
2433 const OMPDistributeParallelForDirective &S) {
2434 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2435 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
2436 S.getDistInc());
2438 OMPLexicalScope Scope(*this, S, OMPD_parallel);
2439 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
2442 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
2443 const OMPDistributeParallelForSimdDirective &S) {
2444 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2445 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
2446 S.getDistInc());
2448 OMPLexicalScope Scope(*this, S, OMPD_parallel);
2449 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
2452 void CodeGenFunction::EmitOMPDistributeSimdDirective(
2453 const OMPDistributeSimdDirective &S) {
2454 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2455 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
2457 OMPLexicalScope Scope(*this, S, OMPD_unknown);
2458 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2461 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
2462 CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) {
2463 // Emit SPMD target parallel for region as a standalone region.
2464 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2465 emitOMPSimdRegion(CGF, S, Action);
2467 llvm::Function *Fn;
2468 llvm::Constant *Addr;
2469 // Emit target region as a standalone region.
2470 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
2471 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
2472 assert(Fn && Addr && "Target device function emission failed.");
2475 void CodeGenFunction::EmitOMPTargetSimdDirective(
2476 const OMPTargetSimdDirective &S) {
2477 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2478 emitOMPSimdRegion(CGF, S, Action);
2480 emitCommonOMPTargetDirective(*this, S, CodeGen);
2484 struct ScheduleKindModifiersTy {
2485 OpenMPScheduleClauseKind Kind;
2486 OpenMPScheduleClauseModifier M1;
2487 OpenMPScheduleClauseModifier M2;
2488 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
2489 OpenMPScheduleClauseModifier M1,
2490 OpenMPScheduleClauseModifier M2)
2491 : Kind(Kind), M1(M1), M2(M2) {}
2495 bool CodeGenFunction::EmitOMPWorksharingLoop(
2496 const OMPLoopDirective &S, Expr *EUB,
2497 const CodeGenLoopBoundsTy &CodeGenLoopBounds,
2498 const CodeGenDispatchBoundsTy &CGDispatchBounds) {
2499 // Emit the loop iteration variable.
2500 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
2501 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
2502 EmitVarDecl(*IVDecl);
2504 // Emit the iterations count variable.
2505 // If it is not a variable, Sema decided to calculate iterations count on each
2506 // iteration (e.g., it is foldable into a constant).
2507 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2508 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2509 // Emit calculation of the iterations count.
2510 EmitIgnoredExpr(S.getCalcLastIteration());
2513 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2515 bool HasLastprivateClause;
2516 // Check pre-condition.
2518 OMPLoopScope PreInitScope(*this, S);
2519 // Skip the entire loop if we don't meet the precondition.
2520 // If the condition constant folds and can be elided, avoid emitting the
2521 // whole loop.
2522 bool CondConstant;
2523 llvm::BasicBlock *ContBlock = nullptr;
2524 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2525 if (!CondConstant)
2526 return false;
2527 } else {
2528 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
2529 ContBlock = createBasicBlock("omp.precond.end");
2530 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
2531 getProfileCount(&S));
2532 EmitBlock(ThenBlock);
2533 incrementProfileCounter(&S);
2536 RunCleanupsScope DoacrossCleanupScope(*this);
2537 bool Ordered = false;
2538 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
2539 if (OrderedClause->getNumForLoops())
2540 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations());
2545 llvm::DenseSet<const Expr *> EmittedFinals;
2546 emitAlignedClause(*this, S);
2547 bool HasLinears = EmitOMPLinearClauseInit(S);
2548 // Emit helper vars inits.
2550 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S);
2551 LValue LB = Bounds.first;
2552 LValue UB = Bounds.second;
2553 LValue ST =
2554 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
2555 LValue IL =
2556 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
2558 // Emit 'then' code.
2560 OMPPrivateScope LoopScope(*this);
2561 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) {
2562 // Emit implicit barrier to synchronize threads and avoid data races on
2563 // initialization of firstprivate variables and post-update of
2564 // lastprivate variables.
2565 CGM.getOpenMPRuntime().emitBarrierCall(
2566 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
2567 /*ForceSimpleCall=*/true);
2569 EmitOMPPrivateClause(S, LoopScope);
2570 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
2571 *this, S, EmitLValue(S.getIterationVariable()));
2572 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
2573 EmitOMPReductionClauseInit(S, LoopScope);
2574 EmitOMPPrivateLoopCounters(S, LoopScope);
2575 EmitOMPLinearClause(S, LoopScope);
2576 (void)LoopScope.Privatize();
2577 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
2578 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
2580 // Detect the loop schedule kind and chunk.
2581 const Expr *ChunkExpr = nullptr;
2582 OpenMPScheduleTy ScheduleKind;
2583 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
2584 ScheduleKind.Schedule = C->getScheduleKind();
2585 ScheduleKind.M1 = C->getFirstScheduleModifier();
2586 ScheduleKind.M2 = C->getSecondScheduleModifier();
2587 ChunkExpr = C->getChunkSize();
2589 // Default behaviour for schedule clause.
2590 CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
2591 *this, S, ScheduleKind.Schedule, ChunkExpr);
2593 bool HasChunkSizeOne = false;
2594 llvm::Value *Chunk = nullptr;
2595 if (ChunkExpr) {
2596 Chunk = EmitScalarExpr(ChunkExpr);
2597 Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
2598 S.getIterationVariable()->getType(),
2599 S.getBeginLoc());
2600 Expr::EvalResult Result;
2601 if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
2602 llvm::APSInt EvaluatedChunk = Result.Val.getInt();
2603 HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
2606 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2607 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2608 // OpenMP 4.5, 2.7.1 Loop Construct, Description.
2609 // If the static schedule kind is specified or if the ordered clause is
2610 // specified, and if no monotonic modifier is specified, the effect will
2611 // be as if the monotonic modifier was specified.
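// E.g. a bare 'schedule(static)' or an 'ordered' clause is handled as if
// 'schedule(monotonic: static)' had been written, which also suppresses the
// parallel-access loop metadata for the simd case below (illustrative
// example).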
2612 bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule,
2613 /* Chunked */ Chunk != nullptr) && HasChunkSizeOne &&
2614 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
2615 if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
2616 /* Chunked */ Chunk != nullptr) ||
2617 StaticChunkedOne) &&
2618 !Ordered) {
2619 JumpDest LoopExit =
2620 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2622 emitCommonSimdLoop(*this, S,
2623 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2624 if (isOpenMPSimdDirective(S.getDirectiveKind()))
2625 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true);
2627 [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk,
2628 &S, ScheduleKind, LoopExit,
2629 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
2630 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2631 // When no chunk_size is specified, the iteration space is divided
2632 // into chunks that are approximately equal in size, and at most
2633 // one chunk is distributed to each thread. Note that the size of
2634 // the chunks is unspecified in this case.
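// E.g. 96 iterations over 4 threads with plain 'schedule(static)' typically
// gives each thread one contiguous block of 24 iterations (illustrative
// example).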
2635 CGOpenMPRuntime::StaticRTInput StaticInit(
2636 IVSize, IVSigned, Ordered, IL.getAddress(CGF),
2637 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF),
2638 StaticChunkedOne ? Chunk : nullptr);
2639 CGF.CGM.getOpenMPRuntime().emitForStaticInit(
2640 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind,
2642 // UB = min(UB, GlobalUB);
2643 if (!StaticChunkedOne)
2644 CGF.EmitIgnoredExpr(S.getEnsureUpperBound());
2646 CGF.EmitIgnoredExpr(S.getInit());
2647 // For unchunked static schedule generate:
2649 // while (idx <= UB) { BODY; ++idx; }
2654 // For static schedule with chunk one:
2656 // while (IV <= PrevUB) { BODY; IV += ST; }
2660 CGF.EmitOMPInnerLoop(
2661 S, LoopScope.requiresCleanups(),
2662 StaticChunkedOne ? S.getCombinedParForInDistCond()
2664 StaticChunkedOne ? S.getDistInc() : S.getInc(),
2665 [&S, LoopExit](CodeGenFunction &CGF) {
2666 CGF.CGM.getOpenMPRuntime()
2667 .initLastprivateConditionalCounter(CGF, S);
2668 CGF.EmitOMPLoopBody(S, LoopExit);
2669 CGF.EmitStopPoint(&S);
2671 [](CodeGenFunction &) {});
2673 EmitBlock(LoopExit.getBlock());
2674 // Tell the runtime we are done.
2675 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2676 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
2677 S.getDirectiveKind());
2679 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
2681 const bool IsMonotonic =
2682 Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
2683 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown ||
2684 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
2685 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
2686 // Emit the outer loop, which requests its work chunk [LB..UB] from
2687 // runtime and runs the inner loop to process it.
2688 const OMPLoopArguments LoopArguments(
2689 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
2690 IL.getAddress(*this), Chunk, EUB);
2691 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
2692 LoopArguments, CGDispatchBounds);
2694 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
2695 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
2696 return CGF.Builder.CreateIsNotNull(
2697 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
2700 EmitOMPReductionClauseFinal(
2701 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind())
2702 ? /*Parallel and Simd*/ OMPD_parallel_for_simd
2703 : /*Parallel only*/ OMPD_parallel);
2704 // Emit post-update of the reduction variables if IsLastIter != 0.
2705 emitPostUpdateForReductionClause(
2706 *this, S, [IL, &S](CodeGenFunction &CGF) {
2707 return CGF.Builder.CreateIsNotNull(
2708 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
2710 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2711 if (HasLastprivateClause)
2712 EmitOMPLastprivateClauseFinal(
2713 S, isOpenMPSimdDirective(S.getDirectiveKind()),
2714 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
2716 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
2717 return CGF.Builder.CreateIsNotNull(
2718 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
2720 DoacrossCleanupScope.ForceCleanup();
2721 // We're now done with the loop, so jump to the continuation block.
2722 if (ContBlock) {
2723 EmitBranch(ContBlock);
2724 EmitBlock(ContBlock, /*IsFinished=*/true);
2727 return HasLastprivateClause;
2730 /// The following two functions generate expressions for the loop lower
2731 /// and upper bounds in case of static and dynamic (dispatch) schedule
2732 /// of the associated 'for' or 'distribute' loop.
2733 static std::pair<LValue, LValue>
2734 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
2735 const auto &LS = cast<OMPLoopDirective>(S);
2736 LValue LB =
2737 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
2738 LValue UB =
2739 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
2740 return {LB, UB};
2743 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
2744 /// consider the lower and upper bound expressions generated by the
2745 /// worksharing loop support, but we use 0 and the iteration space size as
2746 /// constants.
2747 static std::pair<llvm::Value *, llvm::Value *>
2748 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
2749 Address LB, Address UB) {
2750 const auto &LS = cast<OMPLoopDirective>(S);
2751 const Expr *IVExpr = LS.getIterationVariable();
2752 const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
2753 llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
2754 llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
2755 return {LBVal, UBVal};
2758 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
2759 bool HasLastprivates = false;
2760 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2761 PrePostActionTy &) {
2762 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
2763 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
2764 emitForLoopBounds,
2765 emitDispatchForLoopBounds);
2768 OMPLexicalScope Scope(*this, S, OMPD_unknown);
2769 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
2773 // Emit an implicit barrier at the end.
2774 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
2775 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
2778 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
2779 bool HasLastprivates = false;
2780 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2781 PrePostActionTy &) {
2782 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
2783 emitForLoopBounds,
2784 emitDispatchForLoopBounds);
2787 OMPLexicalScope Scope(*this, S, OMPD_unknown);
2788 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2791 // Emit an implicit barrier at the end.
2792 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
2793 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
2796 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
2798 llvm::Value *Init = nullptr) {
2799 LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
2801 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
2805 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
2806 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
2807 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
2808 bool HasLastprivates = false;
2809 auto &&CodeGen = [&S, CapturedStmt, CS,
2810 &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
2811 ASTContext &C = CGF.getContext();
2812 QualType KmpInt32Ty =
2813 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2814 // Emit helper vars inits.
2815 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
2816 CGF.Builder.getInt32(0));
2817 llvm::ConstantInt *GlobalUBVal = CS != nullptr
2818 ? CGF.Builder.getInt32(CS->size() - 1)
2819 : CGF.Builder.getInt32(0);
2821 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
2822 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
2823 CGF.Builder.getInt32(1));
2824 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
2825 CGF.Builder.getInt32(0));
2827 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
2828 OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
2829 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
2830 OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
2831 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
2832 // Generate condition for loop.
2833 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
2834 OK_Ordinary, S.getBeginLoc(), FPOptions());
2835 // Increment for loop counter.
2836 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
2837 S.getBeginLoc(), true);
2838 auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
2839 // Iterate through all sections and emit a switch construct:
2841 // switch (IV) {
2842 // case 0: <SectionStmt[0]>; break;
2843 // ...
2845 // case <NumSection> - 1: <SectionStmt[<NumSection> - 1]>; break;
2848 // }
2849 // .omp.sections.exit:
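// E.g. a '#pragma omp sections' region with three '#pragma omp section'
// blocks lowers to a loop over IV in [0, 2] whose body is the switch above
// (illustrative example).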
2850 CGF.CGM.getOpenMPRuntime().initLastprivateConditionalCounter(CGF, S);
2851 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
2852 llvm::SwitchInst *SwitchStmt =
2853 CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
2854 ExitBB, CS == nullptr ? 1 : CS->size());
2855 if (CS) {
2856 unsigned CaseNumber = 0;
2857 for (const Stmt *SubStmt : CS->children()) {
2858 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
2859 CGF.EmitBlock(CaseBB);
2860 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
2861 CGF.EmitStmt(SubStmt);
2862 CGF.EmitBranch(ExitBB);
2863 ++CaseNumber;
2865 } else {
2866 llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case");
2867 CGF.EmitBlock(CaseBB);
2868 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
2869 CGF.EmitStmt(CapturedStmt);
2870 CGF.EmitBranch(ExitBB);
2872 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2875 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
2876 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
2877 // Emit implicit barrier to synchronize threads and avoid data races on
2878 // initialization of firstprivate variables and post-update of lastprivate
2880 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
2881 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
2882 /*ForceSimpleCall=*/true);
2884 CGF.EmitOMPPrivateClause(S, LoopScope);
2885 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV);
2886 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
2887 CGF.EmitOMPReductionClauseInit(S, LoopScope);
2888 (void)LoopScope.Privatize();
2889 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
2890 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
2892 // Emit static non-chunked loop.
2893 OpenMPScheduleTy ScheduleKind;
2894 ScheduleKind.Schedule = OMPC_SCHEDULE_static;
2895 CGOpenMPRuntime::StaticRTInput StaticInit(
2896 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF),
2897 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF));
2898 CGF.CGM.getOpenMPRuntime().emitForStaticInit(
2899 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
2900 // UB = min(UB, GlobalUB);
2901 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc());
2902 llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
2903 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
2904 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
2906 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
2907 // while (idx <= UB) { BODY; ++idx; }
2908 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
2909 [](CodeGenFunction &) {});
2910 // Tell the runtime we are done.
2911 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2912 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
2913 S.getDirectiveKind());
2915 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
2916 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
2917 // Emit post-update of the reduction variables if IsLastIter != 0.
2918 emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) {
2919 return CGF.Builder.CreateIsNotNull(
2920 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
2923 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2924 if (HasLastprivates)
2925 CGF.EmitOMPLastprivateClauseFinal(
2926 S, /*NoFinals=*/false,
2927 CGF.Builder.CreateIsNotNull(
2928 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())));
2931 bool HasCancel = false;
2932 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
2933 HasCancel = OSD->hasCancel();
2934 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
2935 HasCancel = OPSD->hasCancel();
2936 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
2937 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
2939 // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
2940 // clause. Otherwise the barrier will be generated by the codegen for the
2941 // directive.
2942 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
2943 // Emit implicit barrier to synchronize threads and avoid data races on
2944 // initialization of firstprivate variables.
2945 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
2950 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
2952 OMPLexicalScope Scope(*this, S, OMPD_unknown);
2953 EmitSections(S);
2955 // Emit an implicit barrier at the end.
2956 if (!S.getSingleClause<OMPNowaitClause>()) {
2957 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
2962 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
2963 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2964 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
2966 OMPLexicalScope Scope(*this, S, OMPD_unknown);
2967 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
2971 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
2972 llvm::SmallVector<const Expr *, 8> CopyprivateVars;
2973 llvm::SmallVector<const Expr *, 8> DestExprs;
2974 llvm::SmallVector<const Expr *, 8> SrcExprs;
2975 llvm::SmallVector<const Expr *, 8> AssignmentOps;
2976 // Check if there are any 'copyprivate' clauses associated with this
2977 // 'single' construct.
2978 // Build a list of copyprivate variables along with helper expressions
2979 // (<source>, <destination>, <destination>=<source> expressions)
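// E.g. for '#pragma omp single copyprivate(x)', the thread that executed the
// single region broadcasts its 'x' through the generated
// <destination> = <source> assignments to every other thread (illustrative
// example).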
2980 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
2981 CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
2982 DestExprs.append(C->destination_exprs().begin(),
2983 C->destination_exprs().end());
2984 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
2985 AssignmentOps.append(C->assignment_ops().begin(),
2986 C->assignment_ops().end());
2988 // Emit code for 'single' region along with 'copyprivate' clauses
2989 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2991 OMPPrivateScope SingleScope(CGF);
2992 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
2993 CGF.EmitOMPPrivateClause(S, SingleScope);
2994 (void)SingleScope.Privatize();
2995 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
2998 OMPLexicalScope Scope(*this, S, OMPD_unknown);
2999 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
3000 CopyprivateVars, DestExprs,
3001 SrcExprs, AssignmentOps);
3003 // Emit an implicit barrier at the end (to avoid data race on firstprivate
3004 // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
3005 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
3006 CGM.getOpenMPRuntime().emitBarrierCall(
3007 *this, S.getBeginLoc(),
3008 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
3012 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
3013 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3015 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
3017 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
3020 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
3021 OMPLexicalScope Scope(*this, S, OMPD_unknown);
3022 emitMaster(*this, S);
3025 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
3026 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3028 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
3030 const Expr *Hint = nullptr;
3031 if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
3032 Hint = HintClause->getHint();
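// Illustrative source form (assumed for exposition; the hint constant name
// depends on the OpenMP version, e.g. omp_sync_hint_contended in OpenMP 5.0):
//
//   #pragma omp critical (name) hint(omp_sync_hint_contended)
//   { /* ... */ }
//
// The hint expression, if present, is passed through to the runtime so it can
// choose an appropriate lock implementation for the named critical section.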
3033 OMPLexicalScope Scope(*this, S, OMPD_unknown);
3034 CGM.getOpenMPRuntime().emitCriticalRegion(*this,
3035 S.getDirectiveName().getAsString(),
3036 CodeGen, S.getBeginLoc(), Hint);
3039 void CodeGenFunction::EmitOMPParallelForDirective(
3040 const OMPParallelForDirective &S) {
3041 // Emit the directive as a combined directive that consists of two implicit
3042 // directives: 'parallel' and 'for'.
3043 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3045 OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
3046 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
3047 emitDispatchForLoopBounds);
3049 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
3050 emitEmptyBoundParameters);
3053 void CodeGenFunction::EmitOMPParallelForSimdDirective(
3054 const OMPParallelForSimdDirective &S) {
3055 // Emit the directive as a combined directive that consists of two implicit
3056 // directives: 'parallel' and 'for simd'.
3057 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3059 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
3060 emitDispatchForLoopBounds);
3062 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen,
3063 emitEmptyBoundParameters);
3066 void CodeGenFunction::EmitOMPParallelMasterDirective(
3067 const OMPParallelMasterDirective &S) {
3068 // Emit the directive as a combined directive that consists of two implicit
3069 // directives: 'parallel' and 'master'.
3070 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3072 OMPPrivateScope PrivateScope(CGF);
3073 bool Copyins = CGF.EmitOMPCopyinClause(S);
3074 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3076 // Emit implicit barrier to synchronize threads and avoid data races on
3077 // propagation of the master thread's values of threadprivate variables to
3078 // the local instances of those variables in all other implicit threads.
3079 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
3080 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3081 /*ForceSimpleCall=*/true);
3083 CGF.EmitOMPPrivateClause(S, PrivateScope);
3084 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
3085 (void)PrivateScope.Privatize();
3087 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
3089 emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
3090 emitEmptyBoundParameters);
3091 emitPostUpdateForReductionClause(*this, S,
3092 [](CodeGenFunction &) { return nullptr; });
3095 void CodeGenFunction::EmitOMPParallelSectionsDirective(
3096 const OMPParallelSectionsDirective &S) {
3097 // Emit the directive as a combined directive that consists of two implicit
3098 // directives: 'parallel' and 'sections'.
3099 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3101 CGF.EmitSections(S);
3103 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
3104 emitEmptyBoundParameters);
3107 void CodeGenFunction::EmitOMPTaskBasedDirective(
3108 const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
3109 const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
3110 OMPTaskDataTy &Data) {
3111 // Emit outlined function for task construct.
3112 const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
3113 auto I = CS->getCapturedDecl()->param_begin();
3114 auto PartId = std::next(I);
3115 auto TaskT = std::next(I, 4);
3116 // Check if the task is final
3117 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
3118 // If the condition constant folds and can be elided, try to avoid emitting
3119 // the condition and the dead arm of the if/else.
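// Illustrative example (assumption, not from the surrounding code): for
//
//   #pragma omp task final(1)
//
// the condition folds to 'true', so Data.Final is set to a constant and no
// runtime evaluation of the 'final' expression is emitted.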
3120 const Expr *Cond = Clause->getCondition();
3122 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
3123 Data.Final.setInt(CondConstant);
3125 Data.Final.setPointer(EvaluateExprAsBool(Cond));
3127 // By default the task is not final.
3128 Data.Final.setInt(/*IntVal=*/false);
3130 // Check if the task has 'priority' clause.
3131 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
3132 const Expr *Prio = Clause->getPriority();
3133 Data.Priority.setInt(/*IntVal=*/true);
3134 Data.Priority.setPointer(EmitScalarConversion(
3135 EmitScalarExpr(Prio), Prio->getType(),
3136 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
3137 Prio->getExprLoc()));
3139 // The first function argument for tasks is a thread id, the second one is a
3140 // part id (0 for tied tasks, >=0 for untied task).
3141 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
3142 // Get list of private variables.
3143 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
3144 auto IRef = C->varlist_begin();
3145 for (const Expr *IInit : C->private_copies()) {
3146 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
3147 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
3148 Data.PrivateVars.push_back(*IRef);
3149 Data.PrivateCopies.push_back(IInit);
3154 EmittedAsPrivate.clear();
3155 // Get list of firstprivate variables.
3156 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
3157 auto IRef = C->varlist_begin();
3158 auto IElemInitRef = C->inits().begin();
3159 for (const Expr *IInit : C->private_copies()) {
3160 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
3161 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
3162 Data.FirstprivateVars.push_back(*IRef);
3163 Data.FirstprivateCopies.push_back(IInit);
3164 Data.FirstprivateInits.push_back(*IElemInitRef);
3170 // Get list of lastprivate variables (for taskloops).
3171 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
3172 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
3173 auto IRef = C->varlist_begin();
3174 auto ID = C->destination_exprs().begin();
3175 for (const Expr *IInit : C->private_copies()) {
3176 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
3177 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
3178 Data.LastprivateVars.push_back(*IRef);
3179 Data.LastprivateCopies.push_back(IInit);
3181 LastprivateDstsOrigs.insert(
3182 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
3183 cast<DeclRefExpr>(*IRef)});
3188 SmallVector<const Expr *, 4> LHSs;
3189 SmallVector<const Expr *, 4> RHSs;
3190 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3191 auto IPriv = C->privates().begin();
3192 auto IRed = C->reduction_ops().begin();
3193 auto ILHS = C->lhs_exprs().begin();
3194 auto IRHS = C->rhs_exprs().begin();
3195 for (const Expr *Ref : C->varlists()) {
3196 Data.ReductionVars.emplace_back(Ref);
3197 Data.ReductionCopies.emplace_back(*IPriv);
3198 Data.ReductionOps.emplace_back(*IRed);
3199 LHSs.emplace_back(*ILHS);
3200 RHSs.emplace_back(*IRHS);
3201 std::advance(IPriv, 1);
3202 std::advance(IRed, 1);
3203 std::advance(ILHS, 1);
3204 std::advance(IRHS, 1);
3207 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
3208 *this, S.getBeginLoc(), LHSs, RHSs, Data);
3209 // Build list of dependences.
3210 for (const auto *C : S.getClausesOfKind<OMPDependClause>())
3211 for (const Expr *IRef : C->varlists())
3212 Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
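// For illustration only, a hypothetical source pattern producing such
// dependences is:
//
//   #pragma omp task depend(in : a) depend(out : b)
//   b = f(a);
//
// Each variable listed in a 'depend' clause is recorded together with its
// dependency kind and later lowered into the runtime's dependence array.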
3213 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
3214 CapturedRegion](CodeGenFunction &CGF,
3215 PrePostActionTy &Action) {
3216 // Set proper addresses for generated private copies.
3217 OMPPrivateScope Scope(CGF);
3218 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
3219 !Data.LastprivateVars.empty()) {
3220 llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
3221 CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
3222 enum { PrivatesParam = 2, CopyFnParam = 3 };
3223 llvm::Value *CopyFn = CGF.Builder.CreateLoad(
3224 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
3225 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
3226 CS->getCapturedDecl()->getParam(PrivatesParam)));
3228 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
3229 llvm::SmallVector<llvm::Value *, 16> CallArgs;
3230 CallArgs.push_back(PrivatesPtr);
3231 for (const Expr *E : Data.PrivateVars) {
3232 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3233 Address PrivatePtr = CGF.CreateMemTemp(
3234 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
3235 PrivatePtrs.emplace_back(VD, PrivatePtr);
3236 CallArgs.push_back(PrivatePtr.getPointer());
3238 for (const Expr *E : Data.FirstprivateVars) {
3239 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3240 Address PrivatePtr =
3241 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
3242 ".firstpriv.ptr.addr");
3243 PrivatePtrs.emplace_back(VD, PrivatePtr);
3244 CallArgs.push_back(PrivatePtr.getPointer());
3246 for (const Expr *E : Data.LastprivateVars) {
3247 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3248 Address PrivatePtr =
3249 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
3250 ".lastpriv.ptr.addr");
3251 PrivatePtrs.emplace_back(VD, PrivatePtr);
3252 CallArgs.push_back(PrivatePtr.getPointer());
3254 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3255 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
3256 for (const auto &Pair : LastprivateDstsOrigs) {
3257 const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
3258 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
3259 /*RefersToEnclosingVariableOrCapture=*/
3260 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
3261 Pair.second->getType(), VK_LValue,
3262 Pair.second->getExprLoc());
3263 Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
3264 return CGF.EmitLValue(&DRE).getAddress(CGF);
3267 for (const auto &Pair : PrivatePtrs) {
3268 Address Replacement(CGF.Builder.CreateLoad(Pair.second),
3269 CGF.getContext().getDeclAlign(Pair.first));
3270 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
3273 if (Data.Reductions) {
3274 OMPLexicalScope LexScope(CGF, S, CapturedRegion);
3275 ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionCopies,
3277 llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
3278 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
3279 for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
3280 RedCG.emitSharedLValue(CGF, Cnt);
3281 RedCG.emitAggregateType(CGF, Cnt);
3282 // FIXME: This must be removed once the runtime library is fixed.
3283 // Emit required threadprivate variables for
3284 // initializer/combiner/finalizer.
3285 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
3287 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
3288 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
3290 Address(CGF.EmitScalarConversion(
3291 Replacement.getPointer(), CGF.getContext().VoidPtrTy,
3292 CGF.getContext().getPointerType(
3293 Data.ReductionCopies[Cnt]->getType()),
3294 Data.ReductionCopies[Cnt]->getExprLoc()),
3295 Replacement.getAlignment());
3296 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
3297 Scope.addPrivate(RedCG.getBaseDecl(Cnt),
3298 [Replacement]() { return Replacement; });
3301 // Privatize all private variables except for in_reduction items.
3302 (void)Scope.Privatize();
3303 SmallVector<const Expr *, 4> InRedVars;
3304 SmallVector<const Expr *, 4> InRedPrivs;
3305 SmallVector<const Expr *, 4> InRedOps;
3306 SmallVector<const Expr *, 4> TaskgroupDescriptors;
3307 for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
3308 auto IPriv = C->privates().begin();
3309 auto IRed = C->reduction_ops().begin();
3310 auto ITD = C->taskgroup_descriptors().begin();
3311 for (const Expr *Ref : C->varlists()) {
3312 InRedVars.emplace_back(Ref);
3313 InRedPrivs.emplace_back(*IPriv);
3314 InRedOps.emplace_back(*IRed);
3315 TaskgroupDescriptors.emplace_back(*ITD);
3316 std::advance(IPriv, 1);
3317 std::advance(IRed, 1);
3318 std::advance(ITD, 1);
3321 // Privatize in_reduction items here, because taskgroup descriptors must be
3322 // privatized earlier.
3323 OMPPrivateScope InRedScope(CGF);
3324 if (!InRedVars.empty()) {
3325 ReductionCodeGen RedCG(InRedVars, InRedPrivs, InRedOps);
3326 for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
3327 RedCG.emitSharedLValue(CGF, Cnt);
3328 RedCG.emitAggregateType(CGF, Cnt);
3329 // The taskgroup descriptor variable is always implicitly firstprivate and
3330 // was already privatized during processing of the firstprivates.
3331 // FIXME: This must be removed once the runtime library is fixed.
3332 // Emit required threadprivate variables for
3333 // initializer/combiner/finalizer.
3334 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
3336 llvm::Value *ReductionsPtr =
3337 CGF.EmitLoadOfScalar(CGF.EmitLValue(TaskgroupDescriptors[Cnt]),
3338 TaskgroupDescriptors[Cnt]->getExprLoc());
3339 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
3340 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
3341 Replacement = Address(
3342 CGF.EmitScalarConversion(
3343 Replacement.getPointer(), CGF.getContext().VoidPtrTy,
3344 CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
3345 InRedPrivs[Cnt]->getExprLoc()),
3346 Replacement.getAlignment());
3347 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
3348 InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
3349 [Replacement]() { return Replacement; });
3352 (void)InRedScope.Privatize();
3357 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
3358 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
3359 Data.NumberOfParts);
3360 OMPLexicalScope Scope(*this, S, llvm::None,
3361 !isOpenMPParallelDirective(S.getDirectiveKind()) &&
3362 !isOpenMPSimdDirective(S.getDirectiveKind()));
3363 TaskGen(*this, OutlinedFn, Data);
3366 static ImplicitParamDecl *
3367 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
3368 QualType Ty, CapturedDecl *CD,
3369 SourceLocation Loc) {
3370 auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
3371 ImplicitParamDecl::Other);
3372 auto *OrigRef = DeclRefExpr::Create(
3373 C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
3374 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
3375 auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
3376 ImplicitParamDecl::Other);
3377 auto *PrivateRef = DeclRefExpr::Create(
3378 C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
3379 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
3380 QualType ElemType = C.getBaseElementType(Ty);
3381 auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
3382 ImplicitParamDecl::Other);
3383 auto *InitRef = DeclRefExpr::Create(
3384 C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
3385 /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
3386 PrivateVD->setInitStyle(VarDecl::CInit);
3387 PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
3388 InitRef, /*BasePath=*/nullptr,
3390 Data.FirstprivateVars.emplace_back(OrigRef);
3391 Data.FirstprivateCopies.emplace_back(PrivateRef);
3392 Data.FirstprivateInits.emplace_back(InitRef);
3396 void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
3397 const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen,
3398 OMPTargetDataInfo &InputInfo) {
3399 // Emit outlined function for task construct.
3400 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
3401 Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
3402 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
3403 auto I = CS->getCapturedDecl()->param_begin();
3404 auto PartId = std::next(I);
3405 auto TaskT = std::next(I, 4);
3407 // The task is not final.
3408 Data.Final.setInt(/*IntVal=*/false);
3409 // Get list of firstprivate variables.
3410 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
3411 auto IRef = C->varlist_begin();
3412 auto IElemInitRef = C->inits().begin();
3413 for (auto *IInit : C->private_copies()) {
3414 Data.FirstprivateVars.push_back(*IRef);
3415 Data.FirstprivateCopies.push_back(IInit);
3416 Data.FirstprivateInits.push_back(*IElemInitRef);
3421 OMPPrivateScope TargetScope(*this);
3422 VarDecl *BPVD = nullptr;
3423 VarDecl *PVD = nullptr;
3424 VarDecl *SVD = nullptr;
3425 if (InputInfo.NumberOfTargetItems > 0) {
3426 auto *CD = CapturedDecl::Create(
3427 getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
3428 llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
3429 QualType BaseAndPointersType = getContext().getConstantArrayType(
3430 getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal,
3431 /*IndexTypeQuals=*/0);
3432 BPVD = createImplicitFirstprivateForType(
3433 getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
3434 PVD = createImplicitFirstprivateForType(
3435 getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
3436 QualType SizesType = getContext().getConstantArrayType(
3437 getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
3438 ArrSize, nullptr, ArrayType::Normal,
3439 /*IndexTypeQuals=*/0);
3440 SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
3442 TargetScope.addPrivate(
3443 BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; });
3444 TargetScope.addPrivate(PVD,
3445 [&InputInfo]() { return InputInfo.PointersArray; });
3446 TargetScope.addPrivate(SVD,
3447 [&InputInfo]() { return InputInfo.SizesArray; });
3449 (void)TargetScope.Privatize();
3450 // Build list of dependences.
3451 for (const auto *C : S.getClausesOfKind<OMPDependClause>())
3452 for (const Expr *IRef : C->varlists())
3453 Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
3454 auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD,
3455 &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
3456 // Set proper addresses for generated private copies.
3457 OMPPrivateScope Scope(CGF);
3458 if (!Data.FirstprivateVars.empty()) {
3459 llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
3460 CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
3461 enum { PrivatesParam = 2, CopyFnParam = 3 };
3462 llvm::Value *CopyFn = CGF.Builder.CreateLoad(
3463 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
3464 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
3465 CS->getCapturedDecl()->getParam(PrivatesParam)));
3467 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
3468 llvm::SmallVector<llvm::Value *, 16> CallArgs;
3469 CallArgs.push_back(PrivatesPtr);
3470 for (const Expr *E : Data.FirstprivateVars) {
3471 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3472 Address PrivatePtr =
3473 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
3474 ".firstpriv.ptr.addr");
3475 PrivatePtrs.emplace_back(VD, PrivatePtr);
3476 CallArgs.push_back(PrivatePtr.getPointer());
3478 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3479 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
3480 for (const auto &Pair : PrivatePtrs) {
3481 Address Replacement(CGF.Builder.CreateLoad(Pair.second),
3482 CGF.getContext().getDeclAlign(Pair.first));
3483 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
3486 // Privatize all private variables except for in_reduction items.
3487 (void)Scope.Privatize();
3488 if (InputInfo.NumberOfTargetItems > 0) {
3489 InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
3490 CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
3491 InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
3492 CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
3493 InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
3494 CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
3498 OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
3501 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
3502 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true,
3503 Data.NumberOfParts);
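// Note (interpretive): the literal below is 1 only when a 'nowait' clause is
// present. Without 'nowait' the task is emitted with an if(0) condition,
// i.e. as an undeferred task, so the encountering thread waits until the data
// transfers described by this target task have completed.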
3504 llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
3505 IntegerLiteral IfCond(getContext(), TrueOrFalse,
3506 getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3509 CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
3510 SharedsTy, CapturedStruct, &IfCond, Data);
3513 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
3514 // Emit outlined function for task construct.
3515 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
3516 Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
3517 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
3518 const Expr *IfCond = nullptr;
3519 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3520 if (C->getNameModifier() == OMPD_unknown ||
3521 C->getNameModifier() == OMPD_task) {
3522 IfCond = C->getCondition();
3528 // Check if we should emit tied or untied task.
3529 Data.Tied = !S.getSingleClause<OMPUntiedClause>();
3530 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
3531 CGF.EmitStmt(CS->getCapturedStmt());
3533 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
3534 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
3535 const OMPTaskDataTy &Data) {
3536 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
3537 SharedsTy, CapturedStruct, IfCond,
3540 EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
3543 void CodeGenFunction::EmitOMPTaskyieldDirective(
3544 const OMPTaskyieldDirective &S) {
3545 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
3548 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
3549 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
3552 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
3553 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc());
3556 void CodeGenFunction::EmitOMPTaskgroupDirective(
3557 const OMPTaskgroupDirective &S) {
3558 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
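// Illustrative source pattern (assumed, for exposition only):
//
//   #pragma omp taskgroup task_reduction(+ : sum)
//   {
//   #pragma omp task in_reduction(+ : sum)
//     sum += f();
//   }
//
// When a reduction reference is present, a taskgroup reduction descriptor is
// initialized below and stored into the implicit variable so that child tasks
// with 'in_reduction' clauses can locate their private copies.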
3560 if (const Expr *E = S.getReductionRef()) {
3561 SmallVector<const Expr *, 4> LHSs;
3562 SmallVector<const Expr *, 4> RHSs;
3564 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
3565 auto IPriv = C->privates().begin();
3566 auto IRed = C->reduction_ops().begin();
3567 auto ILHS = C->lhs_exprs().begin();
3568 auto IRHS = C->rhs_exprs().begin();
3569 for (const Expr *Ref : C->varlists()) {
3570 Data.ReductionVars.emplace_back(Ref);
3571 Data.ReductionCopies.emplace_back(*IPriv);
3572 Data.ReductionOps.emplace_back(*IRed);
3573 LHSs.emplace_back(*ILHS);
3574 RHSs.emplace_back(*IRHS);
3575 std::advance(IPriv, 1);
3576 std::advance(IRed, 1);
3577 std::advance(ILHS, 1);
3578 std::advance(IRHS, 1);
3581 llvm::Value *ReductionDesc =
3582 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
3584 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3585 CGF.EmitVarDecl(*VD);
3586 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD),
3587 /*Volatile=*/false, E->getType());
3589 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
3591 OMPLexicalScope Scope(*this, S, OMPD_unknown);
3592 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
3595 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
3596 CGM.getOpenMPRuntime().emitFlush(
3598 [&S]() -> ArrayRef<const Expr *> {
3599 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
3600 return llvm::makeArrayRef(FlushClause->varlist_begin(),
3601 FlushClause->varlist_end());
3607 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
3608 const CodeGenLoopTy &CodeGenLoop,
3610 // Emit the loop iteration variable.
3611 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
3612 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
3613 EmitVarDecl(*IVDecl);
3615 // Emit the iterations count variable.
3616 // If it is not a variable, Sema decided to calculate iterations count on each
3617 // iteration (e.g., it is foldable into a constant).
3618 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
3619 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
3620 // Emit calculation of the iterations count.
3621 EmitIgnoredExpr(S.getCalcLastIteration());
3624 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
3626 bool HasLastprivateClause = false;
3627 // Check pre-condition.
3629 OMPLoopScope PreInitScope(*this, S);
3630 // Skip the entire loop if we don't meet the precondition.
3631 // If the condition constant folds and can be elided, avoid emitting the whole loop.
3634 llvm::BasicBlock *ContBlock = nullptr;
3635 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
3639 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
3640 ContBlock = createBasicBlock("omp.precond.end");
3641 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
3642 getProfileCount(&S));
3643 EmitBlock(ThenBlock);
3644 incrementProfileCounter(&S);
3647 emitAlignedClause(*this, S);
3648 // Emit 'then' code.
3650 // Emit helper vars inits.
3652 LValue LB = EmitOMPHelperVar(
3653 *this, cast<DeclRefExpr>(
3654 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3655 ? S.getCombinedLowerBoundVariable()
3656 : S.getLowerBoundVariable())));
3657 LValue UB = EmitOMPHelperVar(
3658 *this, cast<DeclRefExpr>(
3659 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3660 ? S.getCombinedUpperBoundVariable()
3661 : S.getUpperBoundVariable())));
3663 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
3665 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
3667 OMPPrivateScope LoopScope(*this);
3668 if (EmitOMPFirstprivateClause(S, LoopScope)) {
3669 // Emit implicit barrier to synchronize threads and avoid data races
3670 // on initialization of firstprivate variables and post-update of
3671 // lastprivate variables.
3672 CGM.getOpenMPRuntime().emitBarrierCall(
3673 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3674 /*ForceSimpleCall=*/true);
3676 EmitOMPPrivateClause(S, LoopScope);
3677 if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
3678 !isOpenMPParallelDirective(S.getDirectiveKind()) &&
3679 !isOpenMPTeamsDirective(S.getDirectiveKind()))
3680 EmitOMPReductionClauseInit(S, LoopScope);
3681 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
3682 EmitOMPPrivateLoopCounters(S, LoopScope);
3683 (void)LoopScope.Privatize();
3684 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
3685 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
3687 // Detect the distribute schedule kind and chunk.
3688 llvm::Value *Chunk = nullptr;
3689 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
3690 if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
3691 ScheduleKind = C->getDistScheduleKind();
3692 if (const Expr *Ch = C->getChunkSize()) {
3693 Chunk = EmitScalarExpr(Ch);
3694 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
3695 S.getIterationVariable()->getType(),
3699 // Default behaviour for dist_schedule clause.
3700 CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk(
3701 *this, S, ScheduleKind, Chunk);
3703 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
3704 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
3706 // OpenMP [2.10.8, distribute Construct, Description]
3707 // If dist_schedule is specified, kind must be static. If specified,
3708 // iterations are divided into chunks of size chunk_size, chunks are
3709 // assigned to the teams of the league in a round-robin fashion in the
3710 // order of the team number. When no chunk_size is specified, the
3711 // iteration space is divided into chunks that are approximately equal
3712 // in size, and at most one chunk is distributed to each team of the
3713 // league. The size of the chunks is unspecified in this case.
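// For illustration only (hypothetical source, not from this file):
//
//   #pragma omp distribute dist_schedule(static, 4)
//   for (int i = 0; i < N; ++i) ...
//
// selects the static chunked path with a chunk size of 4, whereas omitting
// 'dist_schedule' (or giving it without a chunk size) divides the iteration
// space into approximately equal chunks, at most one per team.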
3714 bool StaticChunked = RT.isStaticChunked(
3715 ScheduleKind, /* Chunked */ Chunk != nullptr) &&
3716 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
3717 if (RT.isStaticNonchunked(ScheduleKind,
3718 /* Chunked */ Chunk != nullptr) ||
3720 CGOpenMPRuntime::StaticRTInput StaticInit(
3721 IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this),
3722 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
3723 StaticChunked ? Chunk : nullptr);
3724 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
3727 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
3728 // UB = min(UB, GlobalUB);
3729 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3730 ? S.getCombinedEnsureUpperBound()
3731 : S.getEnsureUpperBound());
3733 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3734 ? S.getCombinedInit()
3738 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3739 ? S.getCombinedCond()
3743 Cond = S.getCombinedDistCond();
// For static unchunked schedules generate:
//
//  1. For distribute alone, codegen
//       while (idx <= UB) { BODY; ++idx; }
//
//  2. When combined with 'for' (e.g. as in 'distribute parallel for')
//       while (idx <= UB) {
//         <CodeGen rest of pragma>(LB, UB);
//         idx += ST;
//       }
//
// For static chunk one schedule generate:
//
//   while (IV <= GlobalUB) {
//     <CodeGen rest of pragma>(LB, UB);
//     LB += ST;
//     UB += ST;
//     UB = min(UB, GlobalUB);
//     IV = LB;
//   }
3771 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3772 if (isOpenMPSimdDirective(S.getDirectiveKind()))
3773 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true);
3775 [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop,
3776 StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) {
3777 CGF.EmitOMPInnerLoop(
3778 S, LoopScope.requiresCleanups(), Cond, IncExpr,
3779 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
3780 CodeGenLoop(CGF, S, LoopExit);
3782 [&S, StaticChunked](CodeGenFunction &CGF) {
3783 if (StaticChunked) {
3784 CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
3785 CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
3786 CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
3787 CGF.EmitIgnoredExpr(S.getCombinedInit());
3791 EmitBlock(LoopExit.getBlock());
3792 // Tell the runtime we are done.
3793 RT.emitForStaticFinish(*this, S.getBeginLoc(), S.getDirectiveKind());
3795 // Emit the outer loop, which requests its work chunk [LB..UB] from the
3796 // runtime and runs the inner loop to process it.
3797 const OMPLoopArguments LoopArguments = {
3798 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
3799 IL.getAddress(*this), Chunk};
3800 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
3803 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
3804 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
3805 return CGF.Builder.CreateIsNotNull(
3806 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3809 if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
3810 !isOpenMPParallelDirective(S.getDirectiveKind()) &&
3811 !isOpenMPTeamsDirective(S.getDirectiveKind())) {
3812 EmitOMPReductionClauseFinal(S, OMPD_simd);
3813 // Emit post-update of the reduction variables if IsLastIter != 0.
3814 emitPostUpdateForReductionClause(
3815 *this, S, [IL, &S](CodeGenFunction &CGF) {
3816 return CGF.Builder.CreateIsNotNull(
3817 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3820 // Emit final copy of the lastprivate variables if IsLastIter != 0.
3821 if (HasLastprivateClause) {
3822 EmitOMPLastprivateClauseFinal(
3823 S, /*NoFinals=*/false,
3824 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
3828 // We're now done with the loop, so jump to the continuation block.
3830 EmitBranch(ContBlock);
3831 EmitBlock(ContBlock, true);
3836 void CodeGenFunction::EmitOMPDistributeDirective(
3837 const OMPDistributeDirective &S) {
3838 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3839 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
3841 OMPLexicalScope Scope(*this, S, OMPD_unknown);
3842 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
3845 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
3846 const CapturedStmt *S) {
3847 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
3848 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
3849 CGF.CapturedStmtInfo = &CapStmtInfo;
3850 llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
3851 Fn->setDoesNotRecurse();
3855 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
3856 if (S.hasClausesOfKind<OMPDependClause>()) {
3857 assert(!S.getAssociatedStmt() &&
3858 "No associated statement must be in ordered depend construct.");
3859 for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
3860 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
3863 const auto *C = S.getSingleClause<OMPSIMDClause>();
3864 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
3865 PrePostActionTy &Action) {
3866 const CapturedStmt *CS = S.getInnermostCapturedStmt();
3868 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3869 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
3870 llvm::Function *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
3871 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
3872 OutlinedFn, CapturedVars);
3875 CGF.EmitStmt(CS->getCapturedStmt());
3878 OMPLexicalScope Scope(*this, S, OMPD_unknown);
3879 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(), !C);
3882 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
3883 QualType SrcType, QualType DestType,
3884 SourceLocation Loc) {
3885 assert(CGF.hasScalarEvaluationKind(DestType) &&
3886 "DestType must have scalar evaluation kind.");
3887 assert(!Val.isAggregate() && "Must be a scalar or complex.");
3888 return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
3890 : CGF.EmitComplexToScalarConversion(
3891 Val.getComplexVal(), SrcType, DestType, Loc);
3894 static CodeGenFunction::ComplexPairTy
3895 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
3896 QualType DestType, SourceLocation Loc) {
3897 assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
3898 "DestType must have complex evaluation kind.");
3899 CodeGenFunction::ComplexPairTy ComplexVal;
3900 if (Val.isScalar()) {
3901 // Convert the input element to the element type of the complex.
3902 QualType DestElementType =
3903 DestType->castAs<ComplexType>()->getElementType();
3904 llvm::Value *ScalarVal = CGF.EmitScalarConversion(
3905 Val.getScalarVal(), SrcType, DestElementType, Loc);
3906 ComplexVal = CodeGenFunction::ComplexPairTy(
3907 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
3909 assert(Val.isComplex() && "Must be a scalar or complex.");
3910 QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
3911 QualType DestElementType =
3912 DestType->castAs<ComplexType>()->getElementType();
3913 ComplexVal.first = CGF.EmitScalarConversion(
3914 Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
3915 ComplexVal.second = CGF.EmitScalarConversion(
3916 Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
3921 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
3922 LValue LVal, RValue RVal) {
3923 if (LVal.isGlobalReg()) {
3924 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
3926 CGF.EmitAtomicStore(RVal, LVal,
3927 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3928 : llvm::AtomicOrdering::Monotonic,
3929 LVal.isVolatile(), /*isInit=*/false);
3933 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
3934 QualType RValTy, SourceLocation Loc) {
3935 switch (getEvaluationKind(LVal.getType())) {
3937 EmitStoreThroughLValue(RValue::get(convertToScalarValue(
3938 *this, RVal, RValTy, LVal.getType(), Loc)),
3943 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
3947 llvm_unreachable("Must be a scalar or complex.");
3951 static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
3952 const Expr *X, const Expr *V,
3953 SourceLocation Loc) {
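// Illustrative source form handled here (for exposition only):
//
//   #pragma omp atomic read
//   v = x;
//
// 'x' is loaded atomically (or with a plain load if it lives in a global
// register) and the result is stored to 'v' with the usual conversions.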
3955 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
3956 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
3957 LValue XLValue = CGF.EmitLValue(X);
3958 LValue VLValue = CGF.EmitLValue(V);
3959 RValue Res = XLValue.isGlobalReg()
3960 ? CGF.EmitLoadOfLValue(XLValue, Loc)
3961 : CGF.EmitAtomicLoad(
3963 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3964 : llvm::AtomicOrdering::Monotonic,
3965 XLValue.isVolatile());
3966 // OpenMP, 2.12.6, atomic Construct
3967 // Any atomic construct with a seq_cst clause forces the atomically
3968 // performed operation to include an implicit flush operation without a list.
3971 if (IsSeqCst) CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3972 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
3975 static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
3976 const Expr *X, const Expr *E,
3977 SourceLocation Loc) {
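// Illustrative source form handled here (for exposition only):
//
//   #pragma omp atomic write
//   x = expr;
//
// 'expr' is evaluated non-atomically and then stored to 'x' atomically.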
3979 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
3980 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
3981 // OpenMP, 2.12.6, atomic Construct
3982 // Any atomic construct with a seq_cst clause forces the atomically
3983 // performed operation to include an implicit flush operation without a list.
3986 if (IsSeqCst) CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3989 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
3991 BinaryOperatorKind BO,
3992 llvm::AtomicOrdering AO,
3993 bool IsXLHSInRHSPart) {
3994 ASTContext &Context = CGF.getContext();
3995 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
3996 // expression is simple and atomic is allowed for the given type for the target platform.
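// For example (illustrative only): for an 'int x', the update 'x += 1' can be
// lowered to a single 'atomicrmw add'; a floating-point or otherwise
// unsupported update makes this function return 'false' so the caller falls
// back to the generic compare-and-swap update path.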
3998 if (BO == BO_Comma || !Update.isScalar() ||
3999 !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() ||
4000 (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
4001 (Update.getScalarVal()->getType() !=
4002 X.getAddress(CGF).getElementType())) ||
4003 !X.getAddress(CGF).getElementType()->isIntegerTy() ||
4004 !Context.getTargetInfo().hasBuiltinAtomic(
4005 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
4006 return std::make_pair(false, RValue::get(nullptr));
4008 llvm::AtomicRMWInst::BinOp RMWOp;
4011 RMWOp = llvm::AtomicRMWInst::Add;
4014 if (!IsXLHSInRHSPart)
4015 return std::make_pair(false, RValue::get(nullptr));
4016 RMWOp = llvm::AtomicRMWInst::Sub;
4019 RMWOp = llvm::AtomicRMWInst::And;
4022 RMWOp = llvm::AtomicRMWInst::Or;
4025 RMWOp = llvm::AtomicRMWInst::Xor;
4028 RMWOp = X.getType()->hasSignedIntegerRepresentation()
4029 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
4030 : llvm::AtomicRMWInst::Max)
4031 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
4032 : llvm::AtomicRMWInst::UMax);
4035 RMWOp = X.getType()->hasSignedIntegerRepresentation()
4036 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
4037 : llvm::AtomicRMWInst::Min)
4038 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
4039 : llvm::AtomicRMWInst::UMin);
4042 RMWOp = llvm::AtomicRMWInst::Xchg;
4051 return std::make_pair(false, RValue::get(nullptr));
4070 llvm_unreachable("Unsupported atomic update operation");
4072 llvm::Value *UpdateVal = Update.getScalarVal();
4073 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
4074 UpdateVal = CGF.Builder.CreateIntCast(
4075 IC, X.getAddress(CGF).getElementType(),
4076 X.getType()->hasSignedIntegerRepresentation());
4079 CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO);
4080 return std::make_pair(true, RValue::get(Res));
4083 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
4084 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
4085 llvm::AtomicOrdering AO, SourceLocation Loc,
4086 const llvm::function_ref<RValue(RValue)> CommonGen) {
4087 // Update expressions are allowed to have the following forms:
4088 // x binop= expr; -> xrval binop expr;
4089 // x++, ++x -> xrval + 1;
4090 // x--, --x -> xrval - 1;
4091 // x = x binop expr; -> xrval binop expr;
4092 // x = expr Op x; -> expr binop xrval;
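// For instance (illustrative): in 'x = expr - x' the value of 'x' is the RHS
// operand of the subtraction, so IsXLHSInRHSPart is false; since subtraction
// is not commutative the 'atomicrmw' shortcut is rejected and the generic
// compare-and-swap update below is used instead.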
4093 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
4095 if (X.isGlobalReg()) {
4096 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
4098 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
4100 // Perform compare-and-swap procedure.
4101 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
4107 static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
4108 const Expr *X, const Expr *E,
4109 const Expr *UE, bool IsXLHSInRHSPart,
4110 SourceLocation Loc) {
4111 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
4112 "Update expr in 'atomic update' must be a binary operator.");
4113 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
4114 // Update expressions are allowed to have the following forms:
4115 // x binop= expr; -> xrval binop expr;
4116 // x++, ++x -> xrval + 1;
4117 // x--, --x -> xrval - 1;
4118 // x = x binop expr; -> xrval binop expr;
4119 // x = expr Op x; -> expr binop xrval;
4120 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
4121 LValue XLValue = CGF.EmitLValue(X);
4122 RValue ExprRValue = CGF.EmitAnyExpr(E);
4123 llvm::AtomicOrdering AO = IsSeqCst
4124 ? llvm::AtomicOrdering::SequentiallyConsistent
4125 : llvm::AtomicOrdering::Monotonic;
4126 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
4127 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
4128 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
4129 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
4130 auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) {
4131 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
4132 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
4133 return CGF.EmitAnyExpr(UE);
4135 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
4136 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
4137 // OpenMP, 2.12.6, atomic Construct
4138 // Any atomic construct with a seq_cst clause forces the atomically
4139 // performed operation to include an implicit flush operation without a list.
4142 if (IsSeqCst) CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
4145 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
4146 QualType SourceType, QualType ResType,
4147 SourceLocation Loc) {
4148 switch (CGF.getEvaluationKind(ResType)) {
4151 convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
4153 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
4154 return RValue::getComplex(Res.first, Res.second);
4159 llvm_unreachable("Must be a scalar or complex.");
4162 static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
4163 bool IsPostfixUpdate, const Expr *V,
4164 const Expr *X, const Expr *E,
4165 const Expr *UE, bool IsXLHSInRHSPart,
4166 SourceLocation Loc) {
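// Illustrative source forms handled here (for exposition only):
//
//   #pragma omp atomic capture
//   v = x++;               // postfix: 'v' receives the old value of 'x'
//
//   #pragma omp atomic capture
//   { x += expr; v = x; }  // 'v' receives the new value of 'x'
//
// 'IsPostfixUpdate' selects whether the old or the new value of 'x' is stored
// to 'v'.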
4167 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
4168 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
4170 LValue VLValue = CGF.EmitLValue(V);
4171 LValue XLValue = CGF.EmitLValue(X);
4172 RValue ExprRValue = CGF.EmitAnyExpr(E);
4173 llvm::AtomicOrdering AO = IsSeqCst
4174 ? llvm::AtomicOrdering::SequentiallyConsistent
4175 : llvm::AtomicOrdering::Monotonic;
4176 QualType NewVValType;
4178 // 'x' is updated with some additional value.
4179 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
4180 "Update expr in 'atomic capture' must be a binary operator.");
4181 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
4182 // Update expressions are allowed to have the following forms:
4183 // x binop= expr; -> xrval binop expr;
4184 // x++, ++x -> xrval + 1;
4185 // x--, --x -> xrval - 1;
4186 // x = x binop expr; -> xrval binop expr;
4187 // x = expr Op x; -> expr binop xrval;
4188 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
4189 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
4190 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
4191 NewVValType = XRValExpr->getType();
4192 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
4193 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
4194 IsPostfixUpdate](RValue XRValue) {
4195 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
4196 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
4197 RValue Res = CGF.EmitAnyExpr(UE);
4198 NewVVal = IsPostfixUpdate ? XRValue : Res;
4201 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
4202 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
4204 // 'atomicrmw' instruction was generated.
4205 if (IsPostfixUpdate) {
4206 // Use old value from 'atomicrmw'.
4207 NewVVal = Res.second;
4209 // 'atomicrmw' does not provide the new value, so evaluate it using the old value of 'x'.
4211 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
4212 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
4213 NewVVal = CGF.EmitAnyExpr(UE);
4217 // 'x' is simply rewritten with some 'expr'.
4218 NewVValType = X->getType().getNonReferenceType();
4219 ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
4220 X->getType().getNonReferenceType(), Loc);
4221 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) {
4225 // Try to perform atomicrmw xchg, otherwise simple exchange.
4226 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
4227 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
4230 // 'atomicrmw' instruction was generated.
4231 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
4234 // Emit post-update store to 'v' of old/new 'x' value.
4235 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
4236 // OpenMP, 2.12.6, atomic Construct
4237 // Any atomic construct with a seq_cst clause forces the atomically
4238 // performed operation to include an implicit flush operation without a list.
4241 if (IsSeqCst) CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
4244 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
4245 bool IsSeqCst, bool IsPostfixUpdate,
4246 const Expr *X, const Expr *V, const Expr *E,
4247 const Expr *UE, bool IsXLHSInRHSPart,
4248 SourceLocation Loc) {
4251 emitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
4254 emitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
4258 emitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
4261 emitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
4262 IsXLHSInRHSPart, Loc);
4266 case OMPC_num_threads:
4268 case OMPC_firstprivate:
4269 case OMPC_lastprivate:
4270 case OMPC_reduction:
4271 case OMPC_task_reduction:
4272 case OMPC_in_reduction:
4275 case OMPC_allocator:
4284 case OMPC_copyprivate:
4286 case OMPC_proc_bind:
4291 case OMPC_threadprivate:
4293 case OMPC_mergeable:
4298 case OMPC_num_teams:
4299 case OMPC_thread_limit:
4301 case OMPC_grainsize:
4303 case OMPC_num_tasks:
4305 case OMPC_dist_schedule:
4306 case OMPC_defaultmap:
4310 case OMPC_use_device_ptr:
4311 case OMPC_is_device_ptr:
4312 case OMPC_unified_address:
4313 case OMPC_unified_shared_memory:
4314 case OMPC_reverse_offload:
4315 case OMPC_dynamic_allocators:
4316 case OMPC_atomic_default_mem_order:
4317 case OMPC_device_type:
4319 case OMPC_nontemporal:
4320 llvm_unreachable("Clause is not allowed in 'omp atomic'.");
4324 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
4325 bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
4326 OpenMPClauseKind Kind = OMPC_unknown;
4327 for (const OMPClause *C : S.clauses()) {
4328 // Find first clause (skip seq_cst clause, if it is first).
4329 if (C->getClauseKind() != OMPC_seq_cst) {
4330 Kind = C->getClauseKind();
4335 const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
4336 if (const auto *FE = dyn_cast<FullExpr>(CS))
4337 enterFullExpression(FE);
4338 // Processing for statements under 'atomic capture'.
4339 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
4340 for (const Stmt *C : Compound->body()) {
4341 if (const auto *FE = dyn_cast<FullExpr>(C))
4342 enterFullExpression(FE);
4346 auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF,
4347 PrePostActionTy &) {
4348 CGF.EmitStopPoint(CS);
4349 emitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
4350 S.getV(), S.getExpr(), S.getUpdateExpr(),
4351 S.isXLHSInRHSPart(), S.getBeginLoc());
4353 OMPLexicalScope Scope(*this, S, OMPD_unknown);
4354 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
4357 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
4358 const OMPExecutableDirective &S,
4359 const RegionCodeGenTy &CodeGen) {
4360 assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
4361 CodeGenModule &CGM = CGF.CGM;
4363 // On device emit this construct as inlined code.
4364 if (CGM.getLangOpts().OpenMPIsDevice) {
4365 OMPLexicalScope Scope(CGF, S, OMPD_target);
4366 CGM.getOpenMPRuntime().emitInlinedDirective(
4367 CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4368 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
4373 llvm::Function *Fn = nullptr;
4374 llvm::Constant *FnID = nullptr;
4376 const Expr *IfCond = nullptr;
4377 // Check for at most one 'if' clause associated with the target region.
4378 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
4379 if (C->getNameModifier() == OMPD_unknown ||
4380 C->getNameModifier() == OMPD_target) {
4381 IfCond = C->getCondition();
4386 // Check if we have any device clause associated with the directive.
4387 const Expr *Device = nullptr;
4388 if (auto *C = S.getSingleClause<OMPDeviceClause>())
4389 Device = C->getDevice();
4391 // Check if we have an if clause whose conditional always evaluates to false
4392 // or if we do not have any targets specified. If so the target region is not
4393 // an offload entry point.
4394 bool IsOffloadEntry = true;
4397 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
4398 IsOffloadEntry = false;
4400 if (CGM.getLangOpts().OMPTargetTriples.empty())
4401 IsOffloadEntry = false;
4403 assert(CGF.CurFuncDecl && "No parent declaration for target region!");
4404 StringRef ParentName;
4405 // In case we have Ctors/Dtors we use the complete type variant to produce
4406 // the mangling of the device outlined kernel.
4407 if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
4408 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
4409 else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
4410 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
4413 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl)));
4415 // Emit target region as a standalone region.
4416 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
4417 IsOffloadEntry, CodeGen);
4418 OMPLexicalScope Scope(CGF, S, OMPD_task);
4419 auto &&SizeEmitter =
4420 [IsOffloadEntry](CodeGenFunction &CGF,
4421 const OMPLoopDirective &D) -> llvm::Value * {
4422 if (IsOffloadEntry) {
4423 OMPLoopScope(CGF, D);
4424 // Emit calculation of the iterations count.
4425 llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations());
4426 NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty,
4427 /*isSigned=*/false);
4428 return NumIterations;
4432 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device,
4436 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
4437 PrePostActionTy &Action) {
4439 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
4440 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4441 CGF.EmitOMPPrivateClause(S, PrivateScope);
4442 (void)PrivateScope.Privatize();
4443 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
4444 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
4446 CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
4449 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
4450 StringRef ParentName,
4451 const OMPTargetDirective &S) {
4452 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4453 emitTargetRegion(CGF, S, Action);
4456 llvm::Constant *Addr;
4457 // Emit target region as a standalone region.
4458 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
4459 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
4460 assert(Fn && Addr && "Target device function emission failed.");
4463 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
4464 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4465 emitTargetRegion(CGF, S, Action);
4467 emitCommonOMPTargetDirective(*this, S, CodeGen);
4470 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
4471 const OMPExecutableDirective &S,
4472 OpenMPDirectiveKind InnermostKind,
4473 const RegionCodeGenTy &CodeGen) {
4474 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
4475 llvm::Function *OutlinedFn =
4476 CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
4477 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
4479 const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
4480 const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
4482 const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr;
4483 const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr;
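// Illustrative clauses (hypothetical source, for exposition only):
//
//   #pragma omp teams num_teams(4) thread_limit(64)
//
// When either clause is present its expression is forwarded to the runtime
// before entering the teams region; otherwise the runtime defaults apply.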
4485 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
4489 OMPTeamsScope Scope(CGF, S);
4490 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
4491 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
4492 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn,
4496 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
4497 // Emit teams region as a standalone region.
4498 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4500 OMPPrivateScope PrivateScope(CGF);
4501 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4502 CGF.EmitOMPPrivateClause(S, PrivateScope);
4503 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4504 (void)PrivateScope.Privatize();
4505 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt());
4506 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
4508 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
4509 emitPostUpdateForReductionClause(*this, S,
4510 [](CodeGenFunction &) { return nullptr; });
4513 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
4514 const OMPTargetTeamsDirective &S) {
4515 auto *CS = S.getCapturedStmt(OMPD_teams);
4517 // Emit teams region as a standalone region.
4518 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
4520 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
4521 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4522 CGF.EmitOMPPrivateClause(S, PrivateScope);
4523 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4524 (void)PrivateScope.Privatize();
4525 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
4526 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
4527 CGF.EmitStmt(CS->getCapturedStmt());
4528 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
4530 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen);
4531 emitPostUpdateForReductionClause(CGF, S,
4532 [](CodeGenFunction &) { return nullptr; });
4535 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
4536 CodeGenModule &CGM, StringRef ParentName,
4537 const OMPTargetTeamsDirective &S) {
4538 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4539 emitTargetTeamsRegion(CGF, Action, S);
4542 llvm::Constant *Addr;
4543 // Emit target region as a standalone region.
4544 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
4545 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
4546 assert(Fn && Addr && "Target device function emission failed.");
void CodeGenFunction::EmitOMPTargetTeamsDirective(
    const OMPTargetTeamsDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsRegion(CGF, Action, S);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

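/// Emit the 'teams distribute' part of a 'target teams distribute' directive;
/// the 'distribute' loop is emitted as an inlined region inside the teams
/// region.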
static void
emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
                                const OMPTargetTeamsDistributeDirective &S) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeRegion(CGF, Action, S);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
    const OMPTargetTeamsDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeRegion(CGF, Action, S);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

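/// Emit the 'teams distribute simd' part of a 'target teams distribute simd'
/// directive, mirroring the non-simd variant above.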
static void emitTargetTeamsDistributeSimdRegion(
    CodeGenFunction &CGF, PrePostActionTy &Action,
    const OMPTargetTeamsDistributeSimdDirective &S) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective(
    const OMPTargetTeamsDistributeSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPTeamsDistributeDirective(
    const OMPTeamsDistributeDirective &S) {

  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
    const OMPTeamsDistributeSimdDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
    const OMPTeamsDistributeParallelForDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
    const OMPTeamsDistributeParallelForSimdDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd,
                              CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

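/// Emit the 'teams distribute parallel for' part of the corresponding combined
/// 'target' directive; the distribute loop dispatches to the inner 'parallel
/// for' codegen because the directive is combined.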
static void emitTargetTeamsDistributeParallelForRegion(
    CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S,
    PrePostActionTy &Action) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                                 PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };

  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
                              CodeGenTeams);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeParallelForDirective &S) {
  // Emit SPMD target teams distribute parallel for region as a standalone
  // region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
    const OMPTargetTeamsDistributeParallelForDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void emitTargetTeamsDistributeParallelForSimdRegion(
    CodeGenFunction &CGF,
    const OMPTargetTeamsDistributeParallelForSimdDirective &S,
    PrePostActionTy &Action) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                                 PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };

  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd,
                              CodeGenTeams);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
  // Emit SPMD target teams distribute parallel for simd region as a standalone
  // region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
    const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPCancellationPointDirective(
    const OMPCancellationPointDirective &S) {
  CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(),
                                                   S.getCancelRegion());
}

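// Generate the instructions for '#pragma omp cancel' directive. Cancellation
// of 'parallel' regions is routed through the OpenMPIRBuilder when it is
// enabled; all other regions use the OpenMP runtime cancel call.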
void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_cancel) {
      IfCond = C->getCondition();
      break;
    }
  }
  if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) {
    // TODO: This check is necessary as we only generate `omp parallel` through
    //       the OpenMPIRBuilder for now.
    if (S.getCancelRegion() == OMPD_parallel) {
      llvm::Value *IfCondition = nullptr;
      if (IfCond)
        IfCondition = EmitScalarExpr(IfCond,
                                     /*IgnoreResultAssign=*/true);
      return Builder.restoreIP(
          OMPBuilder->CreateCancel(Builder, IfCondition, S.getCancelRegion()));
    }
  }

  CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
                                        S.getCancelRegion());
}

CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
  if (Kind == OMPD_parallel || Kind == OMPD_task ||
      Kind == OMPD_target_parallel)
    return ReturnBlock;
  assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
         Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
         Kind == OMPD_distribute_parallel_for ||
         Kind == OMPD_target_parallel_for ||
         Kind == OMPD_teams_distribute_parallel_for ||
         Kind == OMPD_target_teams_distribute_parallel_for);
  return OMPCancelStack.getExitBlock();
}

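// Privatize the variables of a 'use_device_ptr' clause: each listed variable
// is remapped to the device address that the mapping runtime recorded in
// CaptureDeviceAddrMap.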
void CodeGenFunction::EmitOMPUseDevicePtrClause(
    const OMPClause &NC, OMPPrivateScope &PrivateScope,
    const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
  const auto &C = cast<OMPUseDevicePtrClause>(NC);
  auto OrigVarIt = C.varlist_begin();
  auto InitIt = C.inits().begin();
  for (const Expr *PvtVarIt : C.private_copies()) {
    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
    const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
    const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());

    // In order to identify the right initializer we need to match the
    // declaration used by the mapping logic. In some cases we may get an
    // OMPCapturedExprDecl that refers to the original declaration.
    const ValueDecl *MatchingVD = OrigVD;
    if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
      // OMPCapturedExprDecls are used to privatize fields of the current
      // structure.
      const auto *ME = cast<MemberExpr>(OED->getInit());
      assert(isa<CXXThisExpr>(ME->getBase()) &&
             "Base should be the current struct!");
      MatchingVD = ME->getMemberDecl();
    }

    // If we don't have information about the current list item, move on to
    // the next one.
    auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
    if (InitAddrIt == CaptureDeviceAddrMap.end())
      continue;

    bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD,
                                                         InitAddrIt, InitVD,
                                                         PvtVD]() {
      // Initialize the temporary initialization variable with the address we
      // get from the runtime library. We have to cast the source address
      // because it is always a void *. References are materialized in the
      // privatization scope, so the initialization here disregards the fact
      // the original variable is a reference.
      QualType AddrQTy =
          getContext().getPointerType(OrigVD->getType().getNonReferenceType());
      llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
      Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
      setAddrOfLocalVar(InitVD, InitAddr);

      // Emit the private declaration; it will be initialized from the
      // declaration we just added to the local declarations map.
      EmitDecl(*PvtVD);

      // The initialization variable has served its purpose in the emission
      // of the previous declaration, so we don't need it anymore.
      LocalDeclMap.erase(InitVD);

      // Return the address of the private variable.
      return GetAddrOfLocalVar(PvtVD);
    });
    assert(IsRegistered && "firstprivate var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    ++OrigVarIt;
    ++InitIt;
  }
}

// Generate the instructions for '#pragma omp target data' directive.
void CodeGenFunction::EmitOMPTargetDataDirective(
    const OMPTargetDataDirective &S) {
  CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);

  // Create a pre/post action to signal the privatization of the device pointer.
  // This action can be replaced by the OpenMP runtime code generation to
  // deactivate privatization.
  bool PrivatizeDevicePointers = false;
  class DevicePointerPrivActionTy : public PrePostActionTy {
    bool &PrivatizeDevicePointers;

  public:
    explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
        : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
    void Enter(CodeGenFunction &CGF) override {
      PrivatizeDevicePointers = true;
    }
  };
  DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);

  auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
      CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
    };

    // Codegen that selects whether to generate the privatization code or not.
    auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
                          &InnermostCodeGen](CodeGenFunction &CGF,
                                             PrePostActionTy &Action) {
      RegionCodeGenTy RCG(InnermostCodeGen);
      PrivatizeDevicePointers = false;

      // Call the pre-action to change the status of PrivatizeDevicePointers if
      // needed.
      Action.Enter(CGF);

      if (PrivatizeDevicePointers) {
        OMPPrivateScope PrivateScope(CGF);
        // Emit all instances of the use_device_ptr clause.
        for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
          CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
                                        Info.CaptureDeviceAddrMap);
        (void)PrivateScope.Privatize();
        RCG(CGF);
      } else {
        RCG(CGF);
      }
    };

    // Forward the provided action to the privatization codegen.
    RegionCodeGenTy PrivRCG(PrivCodeGen);
    PrivRCG.setAction(Action);

    // Although the body of the region is emitted as an inlined directive, we
    // don't use an inline scope, because changes in the references inside the
    // region are expected to be visible outside, so we do not privatize them.
    OMPLexicalScope Scope(CGF, S);
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
                                                    PrivRCG);
  };

  RegionCodeGenTy RCG(CodeGen);

  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty()) {
    RCG(*this);
    return;
  }

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  // Set the action to signal privatization of device pointers.
  RCG.setAction(PrivAction);

  // Emit region code.
  CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
                                             Info);
}

void CodeGenFunction::EmitOMPTargetEnterDataDirective(
    const OMPTargetEnterDataDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}

void CodeGenFunction::EmitOMPTargetExitDataDirective(
    const OMPTargetExitDataDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}

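/// Emit the 'parallel' region nested in a 'target parallel' directive,
/// privatizing the firstprivate/private/reduction variables before the body.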
static void emitTargetParallelRegion(CodeGenFunction &CGF,
                                     const OMPTargetParallelDirective &S,
                                     PrePostActionTy &Action) {
  // Get the captured statement associated with the 'parallel' region.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  Action.Enter(CGF);
  auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
    // TODO: Add support for clauses.
    CGF.EmitStmt(CS->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen,
                                 emitEmptyBoundParameters);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetParallelDirective(
    const OMPTargetParallelDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

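/// Emit the body of a 'target parallel for' directive as a combined 'parallel'
/// region with a worksharing loop, keeping track of the cancellation region.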
static void emitTargetParallelForRegion(CodeGenFunction &CGF,
                                        const OMPTargetParallelForDirective &S,
                                        PrePostActionTy &Action) {
  Action.Enter(CGF);
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPCancelStackRAII CancelRegion(
        CGF, OMPD_target_parallel_for, S.hasCancel());
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelForDirective &S) {
  // Emit SPMD target parallel for region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetParallelForDirective(
    const OMPTargetParallelForDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void
emitTargetParallelForSimdRegion(CodeGenFunction &CGF,
                                const OMPTargetParallelForSimdDirective &S,
                                PrePostActionTy &Action) {
  Action.Enter(CGF);
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelForSimdDirective &S) {
  // Emit SPMD target parallel for region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForSimdRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
    const OMPTargetParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

/// Map the given loop helper variable to the address of the corresponding
/// captured implicit parameter of the outlined function.
static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
                     const ImplicitParamDecl *PVD,
                     CodeGenFunction::OMPPrivateScope &Privates) {
  const auto *VDecl = cast<VarDecl>(Helper->getDecl());
  Privates.addPrivate(VDecl,
                      [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); });
}

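// Emit a taskloop-based directive ('taskloop', 'taskloop simd' and their
// master/parallel master variants): the loop body is outlined as a task and
// the runtime taskloop call partitions the iteration space.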
void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
  assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_taskloop) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
  // Check if taskloop must be emitted without taskgroup.
  Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
  // TODO: Check if we should emit tied or untied task.
  // Set scheduling for taskloop.
  if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
    // grainsize clause
    Data.Schedule.setInt(/*IntVal=*/false);
    Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
  } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
    // num_tasks clause
    Data.Schedule.setInt(/*IntVal=*/true);
    Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
  }

  auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }

    // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    OMPLoopScope PreInitScope(CGF, S);
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
      ContBlock = CGF.createBasicBlock("taskloop.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }

    (void)CGF.EmitOMPLinearClauseInit(S);

    OMPPrivateScope LoopScope(CGF);
    // Emit helper vars inits.
    enum { LowerBound = 5, UpperBound, Stride, LastIter };
    auto *I = CS->getCapturedDecl()->param_begin();
    auto *LBP = std::next(I, LowerBound);
    auto *UBP = std::next(I, UpperBound);
    auto *STP = std::next(I, Stride);
    auto *LIP = std::next(I, LastIter);
    mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
             LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
             LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
             LoopScope);
    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
    CGF.EmitOMPLinearClause(S, LoopScope);
    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());

    // Emit the iterations count variable.
    // If it is not a variable, Sema decided to calculate iterations count on
    // each iteration (e.g., it is foldable into a constant).
    if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    {
      OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
      emitCommonSimdLoop(
          CGF, S,
          [&S](CodeGenFunction &CGF, PrePostActionTy &) {
            if (isOpenMPSimdDirective(S.getDirectiveKind()))
              CGF.EmitOMPSimdInit(S);
          },
          [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
            CGF.EmitOMPInnerLoop(
                S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
                [&S](CodeGenFunction &CGF) {
                  CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest());
                  CGF.EmitStopPoint(&S);
                },
                [](CodeGenFunction &) {});
          });
    }
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
    // Emit final copy of the lastprivate variables if IsLastIter != 0.
    if (HasLastprivateClause) {
      CGF.EmitOMPLastprivateClauseFinal(
          S, isOpenMPSimdDirective(S.getDirectiveKind()),
          CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
              CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
              (*LIP)->getType(), S.getBeginLoc())));
    }
    CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) {
      return CGF.Builder.CreateIsNotNull(
          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
                               (*LIP)->getType(), S.getBeginLoc()));
    });
  };
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
                            const OMPTaskDataTy &Data) {
    auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond,
                      &Data](CodeGenFunction &CGF, PrePostActionTy &) {
      OMPLoopScope PreInitScope(CGF, S);
      CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S,
                                                  OutlinedFn, SharedsTy,
                                                  CapturedStruct, IfCond, Data);
    };
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
                                                    CodeGen);
  };
  if (Data.Nogroup) {
    EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data);
  } else {
    CGM.getOpenMPRuntime().emitTaskgroupRegion(
        *this,
        [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF,
                                        PrePostActionTy &Action) {
          Action.Enter(CGF);
          CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen,
                                        Data);
        },
        S.getBeginLoc());
  }
}

void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
  EmitOMPTaskLoopBasedDirective(S);
}

void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
    const OMPTaskLoopSimdDirective &S) {
  OMPLexicalScope Scope(*this, S);
  EmitOMPTaskLoopBasedDirective(S);
}

void CodeGenFunction::EmitOMPMasterTaskLoopDirective(
    const OMPMasterTaskLoopDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    EmitOMPTaskLoopBasedDirective(S);
  };
  OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false);
  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective(
    const OMPMasterTaskLoopSimdDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    EmitOMPTaskLoopBasedDirective(S);
  };
  OMPLexicalScope Scope(*this, S);
  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective(
    const OMPParallelMasterTaskLoopDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
                                  PrePostActionTy &Action) {
      Action.Enter(CGF);
      CGF.EmitOMPTaskLoopBasedDirective(S);
    };
    OMPLexicalScope Scope(CGF, S, llvm::None, /*EmitPreInitStmt=*/false);
    CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
                                            S.getBeginLoc());
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective(
    const OMPParallelMasterTaskLoopSimdDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
                                  PrePostActionTy &Action) {
      Action.Enter(CGF);
      CGF.EmitOMPTaskLoopBasedDirective(S);
    };
    OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
    CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
                                            S.getBeginLoc());
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen,
                                 emitEmptyBoundParameters);
}

// Generate the instructions for '#pragma omp target update' directive.
void CodeGenFunction::EmitOMPTargetUpdateDirective(
    const OMPTargetUpdateDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}

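// Emit only the associated statement of a directive that requires no special
// host codegen: simd directives go through the common simd region codegen,
// everything else just emits the innermost captured statement after
// materializing loop counters and captured expressions without local storage.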
void CodeGenFunction::EmitSimpleOMPExecutableDirective(
    const OMPExecutableDirective &D) {
  if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
    return;
  auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) {
    if (isOpenMPSimdDirective(D.getDirectiveKind())) {
      emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
    } else {
      OMPPrivateScope LoopGlobals(CGF);
      if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
        for (const Expr *E : LD->counters()) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
          if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
            LValue GlobLVal = CGF.EmitLValue(E);
            LoopGlobals.addPrivate(
                VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
          }
          if (isa<OMPCapturedExprDecl>(VD)) {
            // Emit only those that were not explicitly referenced in clauses.
            if (!CGF.LocalDeclMap.count(VD))
              CGF.EmitVarDecl(*VD);
          }
        }
        for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
          if (!C->getNumForLoops())
            continue;
          for (unsigned I = LD->getCollapsedNumber(),
                        E = C->getLoopNumIterations().size();
               I < E; ++I) {
            if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
                    cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
              // Emit only those that were not explicitly referenced in clauses.
              if (!CGF.LocalDeclMap.count(VD))
                CGF.EmitVarDecl(*VD);
            }
          }
        }
      }
      LoopGlobals.Privatize();
      CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
    }
  };
  OMPSimdLexicalScope Scope(*this, D);
  CGM.getOpenMPRuntime().emitInlinedDirective(
      *this,
      isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
                                                  : D.getDirectiveKind(),
      CodeGen);
}