//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace CodeGen;

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
          return CGF.EmitLValue(&DRE).getAddress();
        });
      }
    }
    (void)InlinedShareds.Privatize();
  }
};

/// Lexical scope for the OpenMP parallel construct that handles correct
/// codegen for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for the OpenMP teams construct that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives that supports capturing
/// of used expressions from the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
    CodeGenFunction::OMPMapVars PreCondVars;
    for (const auto *E : S.counters()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      (void)PreCondVars.setVarAddr(
          CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
    }
    (void)PreCondVars.apply(CGF);
    if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
            return CGF.EmitLValue(&DRE).getAddress();
          });
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};
} // namespace

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
                  : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
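      // For example, assuming a captured 'int x' on a 64-bit target, the
      // emitted IR is roughly (a sketch; names invented for illustration):
      //   %x.casted = alloca i64
      //   store i32 %x.val, i32* <bitcast of %x.casted>
      //   %arg = load i64, i64* %x.casted   ; value actually passed along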
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
    }
  }
}

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress();
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly,
                           StringRef FunctionName)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly casted to
    // uintptr. This is necessary given that the runtime library is only able to
    // deal with pointers. We can pass in the same way the VLA type sizes to the
    // outlined function.
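    // E.g., a scalar 'int x' captured by copy arrives at the outlined
    // function as a uintptr-typed argument and is converted back inside (a
    // sketch of the convention; see castValueFromUintptr above).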
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.S->getBeginLoc(), CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress();
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt],
             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress()}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress()}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
  bool NeedWrapperFunction =
      getDebugInfo() &&
      CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo;
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str());
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
        return LocalAddrPair.second.second;
      });
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName());
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                              Arg->getType(),
                                              AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, S.getBeginLoc(),
                                                  F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
            Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseSet<const VarDecl *> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.insert(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit
  // an outlined function, like omp for, omp simd, omp distribute etc.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
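  // E.g., '#pragma omp for firstprivate(x)' has the single implicit
  // OMPD_unknown capture region and no outlined function, so the private
  // copy of 'x' must be materialized right here (a sketch of the
  // distinction this flag captures).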
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit copy for firstprivate constant variables in target
      // regions, captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
                                                                    OrigVD);
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
                AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
                const Expr *Init = VD->getInit();
                if (!isa<CXXConstructExpr>(Init) ||
                    isTrivialInitializer(Init)) {
                  // Perform simple memcpy.
                  LValue Dest =
                      MakeAddrLValue(Emission.getAllocatedAddress(), Type);
                  EmitAggregateAssign(Dest, OriginalLVal, Type);
                } else {
                  EmitOMPAggregateAssign(
                      Emission.getAllocatedAddress(),
                      OriginalLVal.getAddress(), Type,
                      [this, VDInit, Init](Address DestElement,
                                           Address SrcElement) {
                        // Clean up any temporaries needed by the
                        // initialization.
                        RunCleanupsScope InitScope(*this);
                        // Emit initialization for single element.
                        setAddrOfLocalVar(VDInit, SrcElement);
                        EmitAnyExprToMem(Init, DestElement,
                                         Init->getType().getQualifiers(),
                                         /*IsInitializer*/ false);
                        LocalDeclMap.erase(VDInit);
                      });
                }
                EmitAutoVarCleanups(Emission);
                return Emission.getAllocatedAddress();
              });
        } else {
          Address OriginalAddr = OriginalLVal.getAddress();
          IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, VDInit, OriginalAddr, VD]() {
                // Emit private VarDecl with copy init.
                // Remap temp VDInit variable to the address of the original
                // variable (for proper handling of captured global variables).
                setAddrOfLocalVar(VDInit, OriginalAddr);
                EmitDecl(*VD);
                LocalDeclMap.erase(VDInit);
                return GetAddrOfLocalVar(VD);
              });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
          // Emit private VarDecl with copy init.
          EmitDecl(*VD);
          return GetAddrOfLocalVar(VD);
        });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
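  // For example (a sketch):
  //   int x;
  //   #pragma omp threadprivate(x)
  //   #pragma omp parallel copyin(x)
  // copies the master thread's 'x' into every other thread's threadprivate
  // copy on entry to the parallel region.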
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress();
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress();
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread.
          // If it is, there is no need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    auto IRef = C->varlist_begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the
      // end of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization; it is done by the
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                          /*RefersToEnclosingVariableOrCapture=*/
                          CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in
        // codegen for the 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            return GetAddrOfLocalVar(VD);
          });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
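  // For example (a sketch), in '#pragma omp for lastprivate(x)' the thread
  // that executes the sequentially last iteration runs this guarded block
  // and stores its private 'x' back into the original variable.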
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable for a
        // loop-based directive, update its value before copying back to the
        // original variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      getNaturalTypeAlignment(RefTy->getPointeeType()));
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    auto IPriv = C->privates().begin();
    auto IRed = C->reduction_ops().begin();
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (const Expr *Ref : C->varlists()) {
      Shareds.emplace_back(Ref);
      Privates.emplace_back(*IPriv);
      ReductionOps.emplace_back(*IRed);
      LHSs.emplace_back(*ILHS);
      RHSs.emplace_back(*IRHS);
      std::advance(IPriv, 1);
      std::advance(IRed, 1);
      std::advance(ILHS, 1);
      std::advance(IRHS, 1);
    }
  }
  ReductionCodeGen RedCG(Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto ILHS = LHSs.begin();
  auto IRHS = RHSs.begin();
  auto IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered = PrivateScope.addPrivate(
        RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count]() {
        return RedCG.getSharedLValue(Count).getAddress();
      });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count]() {
        return RedCG.getSharedLValue(Count).getAddress();
      });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
        return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
                                            ConvertTypeForMem(RHSVD->getType()),
                                            "rhs.begin");
      });
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress();
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
        return IsArray ? Builder.CreateElementBitCast(
                             GetAddrOfLocalVar(PrivateVD),
                             ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                       : GetAddrOfLocalVar(PrivateVD);
      });
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

namespace {
/// Codegen lambda for appending distribute lower and upper bounds to the
/// outlined parallel function. This is necessary for combined constructs
/// such as 'distribute parallel for'.
typedef llvm::function_ref<void(CodeGenFunction &,
                                const OMPExecutableDirective &,
                                llvm::SmallVectorImpl<llvm::Value *> &)>
    CodeGenBoundParametersTy;
} // anonymous namespace

static void emitCommonOMPParallelDirective(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    const CodeGenBoundParametersTy &CodeGenBoundParameters) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  llvm::Function *OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    llvm::Value *NumThreads =
        CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                           /*IgnoreResultAssign=*/true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getBeginLoc());
  }
  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
  }
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPParallelScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk
  // lower and upper bounds with the pragma 'for' chunking mechanism.
  // The following lambda takes care of appending the lower and upper bound
  // parameters when necessary.
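  // E.g., for '#pragma omp distribute parallel for' (a sketch), the current
  // 'distribute' chunk's lower and upper bounds are appended to CapturedVars
  // so the inner 'for' schedules iterations only within that chunk.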
  CodeGenBoundParameters(CGF, S, CapturedVars);
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
                                              CapturedVars, IfCond);
}

static void emitEmptyBoundParameters(CodeGenFunction &,
                                     const OMPExecutableDirective &,
                                     llvm::SmallVectorImpl<llvm::Value *> &) {}

void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins) {
      // Emit an implicit barrier to synchronize threads and avoid data races
      // on propagation of the master thread's values of threadprivate
      // variables to the local instances of those variables in all other
      // implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
                                 emitEmptyBoundParameters);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update counters values on current iteration.
  for (const Expr *UE : D.updates())
    EmitIgnoredExpr(UE);
  // Update the linear variables.
  // In distribute directives only loop counters may be marked as linear;
  // no need to generate the code for them.
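  // E.g., for '#pragma omp simd linear(l : 2)' (a sketch), the update
  // expression emitted here advances 'l' by the linear step (2) on every
  // iteration.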
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
      for (const Expr *UE : C->updates())
        EmitIgnoredExpr(UE);
    }
  }

  // On a continue in the body, jump to the end.
  JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  // Emit loop body.
  EmitStmt(D.getBody());
  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
}

void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  const SourceRange R = S.getSourceRange();
  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}

bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // Emit inits for the linear variables.
  bool HasLinears = false;
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (const Expr *Init : C->inits()) {
      HasLinears = true;
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
      if (const auto *Ref =
              dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
        AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
        const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        VD->getInit()->getType(), VK_LValue,
                        VD->getInit()->getExprLoc());
        EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
                                                VD->getType()),
                       /*capturedByInit=*/false);
        EmitAutoVarCleanups(Emission);
      } else {
        EmitVarDecl(*VD);
      }
    }
    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
  return HasLinears;
}

void CodeGenFunction::EmitOMPLinearClauseFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  // Emit the final values of the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto IC = C->varlist_begin();
    for (const Expr *F : C->finals()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(*this)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu");
          DoneBB = createBasicBlock(".omp.linear.pu.done");
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          EmitBlock(ThenBB);
        }
      }
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      Address OrigAddr = EmitLValue(&DRE).getAddress();
      CodeGenFunction::OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
      ++IC;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (DoneBB)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

static void emitAlignedClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;
  for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
    unsigned ClauseAlignment = 0;
    if (const Expr *AlignmentExpr = Clause->getAlignment()) {
      auto *AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
    }
    for (const Expr *E : Clause->varlists()) {
      unsigned Alignment = ClauseAlignment;
      if (Alignment == 0) {
        // OpenMP [2.8.1, Description]
        // If no optional parameter is specified, implementation-defined
        // default alignments for SIMD instructions on the target platforms
        // are assumed.
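        // E.g., '#pragma omp simd aligned(p : 64)' lets the vectorizer assume
        // 'p' points to 64-byte-aligned storage, while plain 'aligned(p)'
        // falls back to the target's default SIMD alignment computed below.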
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
      assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
             "alignment is not power of 2");
      if (Alignment != 0) {
        llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.EmitAlignmentAssumption(
            PtrValue, E, /*No second loc needed*/ SourceLocation(), Alignment);
      }
    }
  }
}

void CodeGenFunction::EmitOMPPrivateLoopCounters(
    const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
  if (!HaveInsertPoint())
    return;
  auto I = S.private_counters().begin();
  for (const Expr *E : S.counters()) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
    // Emit var without initialization.
    AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
    EmitAutoVarCleanups(VarEmission);
    LocalDeclMap.erase(PrivateVD);
    (void)LoopScope.addPrivate(VD, [&VarEmission]() {
      return VarEmission.getAllocatedAddress();
    });
    if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
        VD->hasGlobalStorage()) {
      (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() {
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
                        LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
                        E->getType(), VK_LValue, E->getExprLoc());
        return EmitLValue(&DRE).getAddress();
      });
    } else {
      (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() {
        return VarEmission.getAllocatedAddress();
      });
    }
    ++I;
  }
  // Privatize extra loop counters used in loops for ordered(n) clauses.
  for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
    if (!C->getNumForLoops())
      continue;
    for (unsigned I = S.getCollapsedNumber(),
                  E = C->getLoopNumIterations().size();
         I < E; ++I) {
      const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
      const auto *VD = cast<VarDecl>(DRE->getDecl());
      // Override only those variables that can be captured to avoid
      // re-emission of the variables declared within the loops.
      if (DRE->refersToEnclosingVariableOrCapture()) {
        (void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
          return CreateMemTemp(DRE->getType(), VD->getName());
        });
      }
    }
  }
}

static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  if (!CGF.HaveInsertPoint())
    return;
  {
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
    (void)PreCondScope.Privatize();
    // Get initial values of real counters.
    for (const Expr *I : S.inits()) {
      CGF.EmitIgnoredExpr(I);
    }
  }
  // Check that loop is executed at least one time.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
}
void CodeGenFunction::EmitOMPLinearClause(
    const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto CurPrivate = C->privates().begin();
    for (const Expr *E : C->varlists()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
      if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
        bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() {
          // Emit private VarDecl with copy init.
          EmitVarDecl(*PrivateVD);
          return GetAddrOfLocalVar(PrivateVD);
        });
        assert(IsRegistered && "linear var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      } else {
        EmitVarDecl(*PrivateVD);
      }
      ++CurPrivate;
    }
  }
}

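// Illustrative example: for
//   #pragma omp for linear(x : 2)
// the loop above emits a private copy of 'x' with copy initialization; SIMD
// loop control variables are skipped here because they are already privatized
// as loop counters.
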
static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     bool IsMonotonic) {
  if (!CGF.HaveInsertPoint())
    return;
  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In the presence of a finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    if (!IsMonotonic)
      CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In the presence of a finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(/*Enable=*/false);
  }
}

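// Illustrative example:
//   #pragma omp simd simdlen(8)
// sets the vectorization width to 8 and, if the loop is not monotonic, keeps
// the parallel-access annotation, whereas
//   #pragma omp simd safelen(8)
// sets the same width but drops the annotation, since loop-carried
// dependences at distances of 8 or more are still possible.
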
void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
                                      bool IsMonotonic) {
  // Walk clauses and process safelen/lastprivate.
  LoopStack.setParallel(!IsMonotonic);
  LoopStack.setVectorizeEnable();
  emitSimdlenSafelenClause(*this, D, IsMonotonic);
}

void CodeGenFunction::EmitOMPSimdFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  auto IC = D.counters().begin();
  auto IPC = D.private_counters().begin();
  for (const Expr *F : D.finals()) {
    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
    const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
    if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
        OrigVD->hasGlobalStorage() || CED) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(*this)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then");
          DoneBB = createBasicBlock(".omp.final.done");
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          EmitBlock(ThenBB);
        }
      }
      Address OrigAddr = Address::invalid();
      if (CED) {
        OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
      } else {
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
                        /*RefersToEnclosingVariableOrCapture=*/false,
                        (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
        OrigAddr = EmitLValue(&DRE).getAddress();
      }
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
    ++IPC;
  }
  if (DoneBB)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
                                         const OMPLoopDirective &S,
                                         CodeGenFunction::JumpDest LoopExit) {
  CGF.EmitOMPLoopBody(S, LoopExit);
  CGF.EmitStopPoint(&S);
}

/// Emit a helper variable and return the corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
                               const DeclRefExpr *Helper) {
  auto *VDecl = cast<VarDecl>(Helper->getDecl());
  CGF.EmitVarDecl(*VDecl);
  return CGF.EmitLValue(Helper);
}

static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
                              PrePostActionTy &Action) {
  Action.Enter(CGF);
  assert(isOpenMPSimdDirective(S.getDirectiveKind()) &&
         "Expected simd directive");
  OMPLoopScope PreInitScope(CGF, S);
  // if (PreCond) {
  //   for (IV in 0..LastIteration) BODY;
  //   <Final counter/linear vars updates>;
  // }
  //
  if (isOpenMPDistributeDirective(S.getDirectiveKind()) ||
      isOpenMPWorksharingDirective(S.getDirectiveKind()) ||
      isOpenMPTaskLoopDirective(S.getDirectiveKind())) {
    (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()));
    (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()));
  }

  // Emit: if (PreCond) - begin.
  // If the condition constant folds and can be elided, avoid emitting the
  // whole loop.
  bool CondConstant;
  llvm::BasicBlock *ContBlock = nullptr;
  if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
    if (!CondConstant)
      return;
  } else {
    llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then");
    ContBlock = CGF.createBasicBlock("simd.if.end");
    emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                CGF.getProfileCount(&S));
    CGF.EmitBlock(ThenBlock);
    CGF.incrementProfileCounter(&S);
  }

  // Emit the loop iteration variable.
  const Expr *IVExpr = S.getIterationVariable();
  const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
  CGF.EmitVarDecl(*IVDecl);
  CGF.EmitIgnoredExpr(S.getInit());

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate iterations count on
  // each iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    CGF.EmitIgnoredExpr(S.getCalcLastIteration());
  }

  CGF.EmitOMPSimdInit(S);

  emitAlignedClause(CGF, S);
  (void)CGF.EmitOMPLinearClauseInit(S);
  {
    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
    CGF.EmitOMPLinearClause(S, LoopScope);
    CGF.EmitOMPPrivateClause(S, LoopScope);
    CGF.EmitOMPReductionClauseInit(S, LoopScope);
    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
    CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                         S.getInc(),
                         [&S](CodeGenFunction &CGF) {
                           CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest());
                           CGF.EmitStopPoint(&S);
                         },
                         [](CodeGenFunction &) {});
    CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; });
    // Emit final copy of the lastprivate variables at the end of loops.
    if (HasLastprivateClause)
      CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
    emitPostUpdateForReductionClause(CGF, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
  // Emit: if (PreCond) - end.
  if (ContBlock) {
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
}

void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitOMPSimdRegion(CGF, S, Action);
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}

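// Illustrative note: a standalone
//   #pragma omp simd
//   for (int i = 0; i < n; ++i) ...
// needs no runtime support, so the directive is emitted inline: the result is
// a plain loop whose backedge carries the !llvm.loop vectorization metadata
// set up by EmitOMPSimdInit above.
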
void CodeGenFunction::EmitOMPOuterLoop(
    bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
    CodeGenFunction::OMPPrivateScope &LoopScope,
    const CodeGenFunction::OMPLoopArguments &LoopArgs,
    const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
    const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond");
  EmitBlock(CondBlock);
  const SourceRange R = S.getSourceRange();
  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    // UB = min(UB, GlobalUB) or
    // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g.
    // 'distribute parallel for')
    EmitIgnoredExpr(LoopArgs.EUB);
    // IV = LB
    EmitIgnoredExpr(LoopArgs.Init);
    // IV < UB
    BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
  } else {
    BoolCondVal =
        RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL,
                       LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
  }

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
    ExitBlock = createBasicBlock("omp.dispatch.cleanup");

  llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body");
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);

  // Emit "IV = LB" (in case of static schedule, we have already calculated new
  // LB for loop condition and emitted it above).
  if (DynamicOrOrdered)
    EmitIgnoredExpr(LoopArgs.Init);

  // Create a block for the increment.
  JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Generate !llvm.loop.parallel metadata for loads and stores for loops
  // with dynamic/guided scheduling and without ordered clause.
  if (!isOpenMPSimdDirective(S.getDirectiveKind()))
    LoopStack.setParallel(!IsMonotonic);
  else
    EmitOMPSimdInit(S, IsMonotonic);

  SourceLocation Loc = S.getBeginLoc();

  // when 'distribute' is not combined with a 'for':
  // while (idx <= UB) { BODY; ++idx; }
  // when 'distribute' is combined with a 'for'
  // (e.g. 'distribute parallel for')
  // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
  EmitOMPInnerLoop(
      S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
      [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
        CodeGenLoop(CGF, S, LoopExit);
      },
      [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) {
        CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
      });

  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
    // Emit "LB = LB + Stride", "UB = UB + Stride".
    EmitIgnoredExpr(LoopArgs.NextLB);
    EmitIgnoredExpr(LoopArgs.NextUB);
  }

  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());

  // Tell the runtime we are done.
  auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
    if (!DynamicOrOrdered)
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                     S.getDirectiveKind());
  };
  OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
}

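// Schematically, the control flow emitted above is:
//   omp.dispatch.cond:    compute/fetch the next chunk bounds and test them
//   omp.dispatch.body:    run the inner loop over the current chunk
//   omp.dispatch.inc:     LB += ST, UB += ST (static schedules only)
//   omp.dispatch.cleanup: staged exit when the loop scope needs cleanups
//   omp.dispatch.end:     notify the runtime that this thread is done
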
void CodeGenFunction::EmitOMPForOuterLoop(
    const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
    const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
    const OMPLoopArguments &LoopArgs,
    const CodeGenDispatchBoundsTy &CGDispatchBounds) {
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
  const bool DynamicOrOrdered =
      Ordered || RT.isDynamic(ScheduleKind.Schedule);

  assert((Ordered ||
          !RT.isStaticNonchunked(ScheduleKind.Schedule,
                                 LoopArgs.Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");

  // Emit outer loop.
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(dynamic,chunk_size) is specified, the iterations are
  // distributed to threads in the team in chunks as the threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be distributed. Each chunk contains chunk_size
  // iterations, except for the last chunk to be distributed, which may have
  // fewer iterations. When no chunk_size is specified, it defaults to 1.
  //
  // When schedule(guided,chunk_size) is specified, the iterations are assigned
  // to threads in the team in chunks as the executing threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
  // each chunk is proportional to the number of unassigned iterations divided
  // by the number of threads in the team, decreasing to 1. For a chunk_size
  // with value k (greater than 1), the size of each chunk is determined in the
  // same way, with the restriction that the chunks do not contain fewer than k
  // iterations (except for the last chunk to be assigned, which may have fewer
  // than k iterations).
  //
  // When schedule(auto) is specified, the decision regarding scheduling is
  // delegated to the compiler and/or runtime system. The programmer gives the
  // implementation the freedom to choose any possible mapping of iterations to
  // threads in the team.
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from the
  // run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined.
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) { BODY; ++idx;
  //     __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided into
  // chunks of size chunk_size, and the chunks are assigned to the threads in
  // the team in a round-robin fashion in the order of the thread number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  if (DynamicOrOrdered) {
    const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
        CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
    llvm::Value *LBVal = DispatchBounds.first;
    llvm::Value *UBVal = DispatchBounds.second;
    CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal,
                                                              LoopArgs.Chunk};
    RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
                           IVSigned, Ordered, DispatchRTInputValues);
  } else {
    CGOpenMPRuntime::StaticRTInput StaticInit(
        IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
        LoopArgs.ST, LoopArgs.Chunk);
    RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
                         ScheduleKind, StaticInit);
  }

  auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
                                    const unsigned IVSize,
                                    const bool IVSigned) {
    if (Ordered) {
      CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
                                                            IVSigned);
    }
  };

  OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
                                 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
  OuterLoopArgs.IncExpr = S.getInc();
  OuterLoopArgs.Init = S.getInit();
  OuterLoopArgs.Cond = S.getCond();
  OuterLoopArgs.NextLB = S.getNextLowerBound();
  OuterLoopArgs.NextUB = S.getNextUpperBound();
  EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
                   emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
}

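// Illustrative example: for
//   #pragma omp for schedule(dynamic, 4)
// DynamicOrOrdered is true, so the bounds come from emitForDispatchInit and
// each thread claims four-iteration chunks through the runtime dispatch calls
// sketched in the comment above until no chunks remain.
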
static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
                             const unsigned IVSize, const bool IVSigned) {}

void CodeGenFunction::EmitOMPDistributeOuterLoop(
    OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
    OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
    const CodeGenLoopTy &CodeGenLoopContent) {
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  // Emit outer loop.
  // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
  // dynamic.

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  CGOpenMPRuntime::StaticRTInput StaticInit(
      IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
      LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
  RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);

  // for combined 'distribute' and 'for' the increment expression of distribute
  // is stored in DistInc. For 'distribute' alone, it is in Inc.
  Expr *IncExpr;
  if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
    IncExpr = S.getDistInc();
  else
    IncExpr = S.getInc();

  // this routine is shared by 'omp distribute parallel for' and
  // 'omp distribute': select the right EUB expression depending on the
  // directive
  OMPLoopArguments OuterLoopArgs;
  OuterLoopArgs.LB = LoopArgs.LB;
  OuterLoopArgs.UB = LoopArgs.UB;
  OuterLoopArgs.ST = LoopArgs.ST;
  OuterLoopArgs.IL = LoopArgs.IL;
  OuterLoopArgs.Chunk = LoopArgs.Chunk;
  OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedEnsureUpperBound()
                          : S.getEnsureUpperBound();
  OuterLoopArgs.IncExpr = IncExpr;
  OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                           ? S.getCombinedInit()
                           : S.getInit();
  OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                           ? S.getCombinedCond()
                           : S.getCond();
  OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                             ? S.getCombinedNextLowerBound()
                             : S.getNextLowerBound();
  OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                             ? S.getCombinedNextUpperBound()
                             : S.getNextUpperBound();

  EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
                   LoopScope, OuterLoopArgs, CodeGenLoopContent,
                   emitEmptyOrdered);
}

static std::pair<LValue, LValue>
emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &S) {
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
  LValue LB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
  LValue UB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));

  // When composing 'distribute' with 'for' (e.g. as in 'distribute
  // parallel for') we need to use the 'distribute'
  // chunk lower and upper bounds rather than the whole loop iteration
  // space. These are parameters to the outlined function for 'parallel'
  // and we copy the bounds of the previous schedule into the current ones.
  LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
  LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
  llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
      PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
  PrevLBVal = CGF.EmitScalarConversion(
      PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
      LS.getIterationVariable()->getType(),
      LS.getPrevLowerBoundVariable()->getExprLoc());
  llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
      PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
  PrevUBVal = CGF.EmitScalarConversion(
      PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
      LS.getIterationVariable()->getType(),
      LS.getPrevUpperBoundVariable()->getExprLoc());

  CGF.EmitStoreOfScalar(PrevLBVal, LB);
  CGF.EmitStoreOfScalar(PrevUBVal, UB);

  return {LB, UB};
}

/// if the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then
/// we need to use the LB and UB expressions generated by the worksharing
/// code generation support, whereas in non combined situations we would
/// just emit 0 and the LastIteration expression.
/// This function is necessary due to the difference of the LB and UB
/// types for the RT emission routines for 'for_static_init' and
/// 'for_dispatch_init'.
static std::pair<llvm::Value *, llvm::Value *>
emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S,
                                        Address LB, Address UB) {
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
  const Expr *IVExpr = LS.getIterationVariable();
  // when implementing a dynamic schedule for a 'for' combined with a
  // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
  // is not normalized as each team only executes its own assigned
  // range of iterations.
  QualType IteratorTy = IVExpr->getType();
  llvm::Value *LBVal =
      CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
  llvm::Value *UBVal =
      CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
  return {LBVal, UBVal};
}

static void emitDistributeParallelForDistributeInnerBoundParams(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const auto &Dir = cast<OMPLoopDirective>(S);
  LValue LB =
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
  llvm::Value *LBCast = CGF.Builder.CreateIntCast(
      CGF.Builder.CreateLoad(LB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
  CapturedVars.push_back(LBCast);
  LValue UB =
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
  llvm::Value *UBCast = CGF.Builder.CreateIntCast(
      CGF.Builder.CreateLoad(UB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
  CapturedVars.push_back(UBCast);
}

static void
emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
                                 const OMPLoopDirective &S,
                                 CodeGenFunction::JumpDest LoopExit) {
  auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
                                         PrePostActionTy &Action) {
    Action.Enter(CGF);
    bool HasCancel = false;
    if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
      if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
        HasCancel = D->hasCancel();
      else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
        HasCancel = D->hasCancel();
      else if (const auto *D =
                   dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S))
        HasCancel = D->hasCancel();
    }
    CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
                                                     HasCancel);
    CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
                               emitDistributeParallelForInnerBounds,
                               emitDistributeParallelForDispatchBounds);
  };

  emitCommonOMPParallelDirective(
      CGF, S,
      isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for,
      CGInlinedWorksharingLoop,
      emitDistributeParallelForDistributeInnerBoundParams);
}

void CodeGenFunction::EmitOMPDistributeParallelForDirective(
    const OMPDistributeParallelForDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_parallel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
    const OMPDistributeParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_parallel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

void CodeGenFunction::EmitOMPDistributeSimdDirective(
    const OMPDistributeSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}

void CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) {
  // Emit SPMD target parallel for region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitOMPSimdRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetSimdDirective(
    const OMPTargetSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitOMPSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

namespace {
struct ScheduleKindModifiersTy {
  OpenMPScheduleClauseKind Kind;
  OpenMPScheduleClauseModifier M1;
  OpenMPScheduleClauseModifier M2;
  ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
                          OpenMPScheduleClauseModifier M1,
                          OpenMPScheduleClauseModifier M2)
      : Kind(Kind), M1(M1), M2(M2) {}
};
} // namespace

bool CodeGenFunction::EmitOMPWorksharingLoop(
    const OMPLoopDirective &S, Expr *EUB,
    const CodeGenLoopBoundsTy &CodeGenLoopBounds,
    const CodeGenDispatchBoundsTy &CGDispatchBounds) {
  // Emit the loop iteration variable.
  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate iterations count on each
  // iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause;
  // Check pre-condition.
  {
    OMPLoopScope PreInitScope(*this, S);
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return false;
    } else {
      llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    RunCleanupsScope DoacrossCleanupScope(*this);
    bool Ordered = false;
    if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
      if (OrderedClause->getNumForLoops())
        RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations());
      else
        Ordered = true;
    }

    llvm::DenseSet<const Expr *> EmittedFinals;
    emitAlignedClause(*this, S);
    bool HasLinears = EmitOMPLinearClauseInit(S);
    // Emit helper vars inits.
    std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S);
    LValue LB = Bounds.first;
    LValue UB = Bounds.second;
    LValue ST =
        EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
    LValue IL =
        EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

    // Emit 'then' code.
    {
      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) {
        // Emit implicit barrier to synchronize threads and avoid data races on
        // initialization of firstprivate variables and post-update of
        // lastprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(
            *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
            /*ForceSimpleCall=*/true);
      }
      EmitOMPPrivateClause(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPReductionClauseInit(S, LoopScope);
      EmitOMPPrivateLoopCounters(S, LoopScope);
      EmitOMPLinearClause(S, LoopScope);
      (void)LoopScope.Privatize();
      if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
        CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);

      // Detect the loop schedule kind and chunk.
      const Expr *ChunkExpr = nullptr;
      OpenMPScheduleTy ScheduleKind;
      if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
        ScheduleKind.Schedule = C->getScheduleKind();
        ScheduleKind.M1 = C->getFirstScheduleModifier();
        ScheduleKind.M2 = C->getSecondScheduleModifier();
        ChunkExpr = C->getChunkSize();
      } else {
        // Default behaviour for schedule clause.
        CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
            *this, S, ScheduleKind.Schedule, ChunkExpr);
      }
      bool HasChunkSizeOne = false;
      llvm::Value *Chunk = nullptr;
      if (ChunkExpr) {
        Chunk = EmitScalarExpr(ChunkExpr);
        Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
                                     S.getIterationVariable()->getType(),
                                     S.getBeginLoc());
        Expr::EvalResult Result;
        if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
          llvm::APSInt EvaluatedChunk = Result.Val.getInt();
          HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
        }
      }
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
      // OpenMP 4.5, 2.7.1 Loop Construct, Description.
      // If the static schedule kind is specified or if the ordered clause is
      // specified, and if no monotonic modifier is specified, the effect will
      // be as if the monotonic modifier was specified.
      bool StaticChunkedOne =
          RT.isStaticChunked(ScheduleKind.Schedule,
                             /* Chunked */ Chunk != nullptr) &&
          HasChunkSizeOne &&
          isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
      if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
                                 /* Chunked */ Chunk != nullptr) ||
           StaticChunkedOne) &&
          !Ordered) {
        if (isOpenMPSimdDirective(S.getDirectiveKind()))
          EmitOMPSimdInit(S, /*IsMonotonic=*/true);
        // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
        // When no chunk_size is specified, the iteration space is divided into
        // chunks that are approximately equal in size, and at most one chunk is
        // distributed to each thread. Note that the size of the chunks is
        // unspecified in this case.
        CGOpenMPRuntime::StaticRTInput StaticInit(
            IVSize, IVSigned, Ordered, IL.getAddress(), LB.getAddress(),
            UB.getAddress(), ST.getAddress(),
            StaticChunkedOne ? Chunk : nullptr);
        RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
                             ScheduleKind, StaticInit);
        JumpDest LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        // UB = min(UB, GlobalUB);
        if (!StaticChunkedOne)
          EmitIgnoredExpr(S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(S.getInit());
        // For unchunked static schedule generate:
        //
        // while (idx <= UB) {
        //   BODY;
        //   ++idx;
        // }
        //
        // For static schedule with chunk one:
        //
        // while (IV <= PrevUB) {
        //   BODY;
        //   IV += ST;
        // }
        EmitOMPInnerLoop(
            S, LoopScope.requiresCleanups(),
            StaticChunkedOne ? S.getCombinedParForInDistCond() : S.getCond(),
            StaticChunkedOne ? S.getDistInc() : S.getInc(),
            [&S, LoopExit](CodeGenFunction &CGF) {
              CGF.EmitOMPLoopBody(S, LoopExit);
              CGF.EmitStopPoint(&S);
            },
            [](CodeGenFunction &) {});
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
        auto &&CodeGen = [&S](CodeGenFunction &CGF) {
          CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                         S.getDirectiveKind());
        };
        OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
      } else {
        const bool IsMonotonic =
            Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
            ScheduleKind.Schedule == OMPC_SCHEDULE_unknown ||
            ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
            ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        const OMPLoopArguments LoopArguments(LB.getAddress(), UB.getAddress(),
                                             ST.getAddress(), IL.getAddress(),
                                             Chunk, EUB);
        EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
                            LoopArguments, CGDispatchBounds);
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
        EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
          return CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
        });
      }
      EmitOMPReductionClauseFinal(
          S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind())
                 ? /*Parallel and Simd*/ OMPD_parallel_for_simd
                 : /*Parallel only*/ OMPD_parallel);
      // Emit post-update of the reduction variables if IsLastIter != 0.
      emitPostUpdateForReductionClause(
          *this, S, [IL, &S](CodeGenFunction &CGF) {
            return CGF.Builder.CreateIsNotNull(
                CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
          });
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause)
        EmitOMPLastprivateClauseFinal(
            S, isOpenMPSimdDirective(S.getDirectiveKind()),
            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
    }
    EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
      return CGF.Builder.CreateIsNotNull(
          CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
    });
    DoacrossCleanupScope.ForceCleanup();
    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, /*IsFinished=*/true);
    }
  }
  return HasLastprivateClause;
}

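// Illustrative example: for
//   #pragma omp for schedule(static) lastprivate(x)
// the static non-chunked branch above brackets one inner loop per thread with
// a single emitForStaticInit/emitForStaticFinish pair, and the final value of
// 'x' is copied out only where the is-last-iteration flag (IL) is nonzero.
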
/// The following two functions generate expressions for the loop lower
/// and upper bounds in case of static and dynamic (dispatch) schedule
/// of the associated 'for' or 'distribute' loop.
static std::pair<LValue, LValue>
emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
  const auto &LS = cast<OMPLoopDirective>(S);
  LValue LB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
  LValue UB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
  return {LB, UB};
}

/// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
/// consider the lower and upper bound expressions generated by the
/// worksharing loop support, but we use 0 and the iteration space size as
/// constants.
static std::pair<llvm::Value *, llvm::Value *>
emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
                          Address LB, Address UB) {
  const auto &LS = cast<OMPLoopDirective>(S);
  const Expr *IVExpr = LS.getIterationVariable();
  const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
  llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
  llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
  return {LBVal, UBVal};
}

void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
                                          PrePostActionTy &) {
    OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
                                                 emitForLoopBounds,
                                                 emitDispatchForLoopBounds);
  };
  {
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
                                                S.hasCancel());
  }

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
}

void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
                                          PrePostActionTy &) {
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
                                                 emitForLoopBounds,
                                                 emitDispatchForLoopBounds);
  };
  {
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
  }

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
}

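// In both emitters above, a 'nowait' clause suppresses the trailing implicit
// barrier; for example
//   #pragma omp for nowait
// emits no barrier unless a lastprivate result still has to be published to
// the rest of the team before threads may proceed.
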
static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
                                const Twine &Name,
                                llvm::Value *Init = nullptr) {
  LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
  if (Init)
    CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
  return LVal;
}

void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
  const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
  const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, CapturedStmt, CS,
                    &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
    ASTContext &C = CGF.getContext();
    QualType KmpInt32Ty =
        C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
    // Emit helper vars inits.
    LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
                                  CGF.Builder.getInt32(0));
    llvm::ConstantInt *GlobalUBVal = CS != nullptr
                                         ? CGF.Builder.getInt32(CS->size() - 1)
                                         : CGF.Builder.getInt32(0);
    LValue UB =
        createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
    LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
                                  CGF.Builder.getInt32(1));
    LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
                                  CGF.Builder.getInt32(0));
    // Loop counter.
    LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
    OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
    CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
    OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
    CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
    // Generate condition for loop.
    BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
                        OK_Ordinary, S.getBeginLoc(), FPOptions());
    // Increment for loop counter.
    UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
                      S.getBeginLoc(), true);
    auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
      // Iterate through all sections and emit a switch construct:
      // switch (IV) {
      //   case 0:
      //     <SectionStmt[0]>;
      //     break;
      // ...
      //   case <NumSection> - 1:
      //     <SectionStmt[<NumSection> - 1]>;
      //     break;
      // }
      // .omp.sections.exit:
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
      llvm::SwitchInst *SwitchStmt =
          CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
                                   ExitBB, CS == nullptr ? 1 : CS->size());
      if (CS) {
        unsigned CaseNumber = 0;
        for (const Stmt *SubStmt : CS->children()) {
          auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
          CGF.EmitBlock(CaseBB);
          SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
          CGF.EmitStmt(SubStmt);
          CGF.EmitBranch(ExitBB);
          ++CaseNumber;
        }
      } else {
        llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case");
        CGF.EmitBlock(CaseBB);
        SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
        CGF.EmitStmt(CapturedStmt);
        CGF.EmitBranch(ExitBB);
      }
      CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
    };

    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
    if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables and post-update of lastprivate
      // variables.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, LoopScope);
    HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    CGF.EmitOMPReductionClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

    // Emit static non-chunked loop.
    OpenMPScheduleTy ScheduleKind;
    ScheduleKind.Schedule = OMPC_SCHEDULE_static;
    CGOpenMPRuntime::StaticRTInput StaticInit(
        /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
        LB.getAddress(), UB.getAddress(), ST.getAddress());
    CGF.CGM.getOpenMPRuntime().emitForStaticInit(
        CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
    // UB = min(UB, GlobalUB);
    llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc());
    llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
        CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
    CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
    // IV = LB;
    CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
    // while (idx <= UB) { BODY; ++idx; }
    CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
                         [](CodeGenFunction &) {});
    // Tell the runtime we are done.
    auto &&CodeGen = [&S](CodeGenFunction &CGF) {
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                     S.getDirectiveKind());
    };
    CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
    // Emit post-update of the reduction variables if IsLastIter != 0.
    emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) {
      return CGF.Builder.CreateIsNotNull(
          CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
    });

    // Emit final copy of the lastprivate variables if IsLastIter != 0.
    if (HasLastprivates)
      CGF.EmitOMPLastprivateClauseFinal(
          S, /*NoFinals=*/false,
          CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc())));
  };

  bool HasCancel = false;
  if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
    HasCancel = OSD->hasCancel();
  else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
    HasCancel = OPSD->hasCancel();
  OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
                                              HasCancel);
  // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
  // clause. Otherwise the barrier will be generated by the codegen for the
  // directive.
  if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
    // Emit implicit barrier to synchronize threads and avoid data races on
    // initialization of firstprivate variables.
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
                                           OMPD_unknown, /*EmitChecks=*/false,
                                           /*ForceSimpleCall=*/true);
  }
}

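// Illustrative example:
//   #pragma omp sections
//   {
//     #pragma omp section
//     foo();
//     #pragma omp section
//     bar();
//   }
// becomes a statically scheduled loop over the section indices 0 and 1 whose
// body is the switch emitted by BodyGen, dispatching to foo() and bar().
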
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
  {
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    EmitSections(S);
  }
  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>()) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
                                           OMPD_sections);
  }
}

void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
                                              S.hasCancel());
}

void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
  llvm::SmallVector<const Expr *, 8> DestExprs;
  llvm::SmallVector<const Expr *, 8> SrcExprs;
  llvm::SmallVector<const Expr *, 8> AssignmentOps;
  // Check if there are any 'copyprivate' clauses associated with this
  // 'single' construct.
  // Build a list of copyprivate variables along with helper expressions
  // (<source>, <destination>, <destination>=<source> expressions).
  for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
    CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
    DestExprs.append(C->destination_exprs().begin(),
                     C->destination_exprs().end());
    SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
    AssignmentOps.append(C->assignment_ops().begin(),
                         C->assignment_ops().end());
  }
  // Emit code for the 'single' region along with the 'copyprivate' clauses.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope SingleScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  {
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
                                            CopyprivateVars, DestExprs,
                                            SrcExprs, AssignmentOps);
  }
  // Emit an implicit barrier at the end (to avoid data race on firstprivate
  // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
  if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
    CGM.getOpenMPRuntime().emitBarrierCall(
        *this, S.getBeginLoc(),
        S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
  }
}

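// Illustrative example: for
//   #pragma omp single copyprivate(x)
// one thread runs the region and its value of 'x' is then broadcast to the
// team through the helper expressions collected above; since copyprivate
// itself synchronizes, the explicit trailing barrier is skipped in that case.
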
void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  const Expr *Hint = nullptr;
  if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
    Hint = HintClause->getHint();
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitCriticalRegion(*this,
                                            S.getDirectiveName().getAsString(),
                                            CodeGen, S.getBeginLoc(), Hint);
}

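// Illustrative example: for
//   #pragma omp critical(lock1) hint(omp_lock_hint_speculative)
// emitCriticalRegion receives the name "lock1" together with the hint
// expression, which lets the runtime choose a speculative lock implementation
// for this critical section.
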
void CodeGenFunction::EmitOMPParallelForDirective(
    const OMPParallelForDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPParallelForSimdDirective(
    const OMPParallelForSimdDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPParallelSectionsDirective(
    const OMPParallelSectionsDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'sections' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitSections(S);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPTaskBasedDirective(
    const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
    const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
    OMPTaskDataTy &Data) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
  auto I = CS->getCapturedDecl()->param_begin();
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  // Check if the task is final.
  if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
    // If the condition constant folds and can be elided, try to avoid emitting
    // the condition and the dead arm of the if/else.
    const Expr *Cond = Clause->getCondition();
    bool CondConstant;
    if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
      Data.Final.setInt(CondConstant);
    else
      Data.Final.setPointer(EvaluateExprAsBool(Cond));
  } else {
    // By default the task is not final.
    Data.Final.setInt(/*IntVal=*/false);
  }
  // Check if the task has 'priority' clause.
  if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
    const Expr *Prio = Clause->getPriority();
    Data.Priority.setInt(/*IntVal=*/true);
    Data.Priority.setPointer(EmitScalarConversion(
        EmitScalarExpr(Prio), Prio->getType(),
        getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
        Prio->getExprLoc()));
  }
  // The first function argument for tasks is a thread id, the second one is a
  // part id (0 for tied tasks, >=0 for untied task).
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  // Get list of private variables.
  for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.PrivateVars.push_back(*IRef);
        Data.PrivateCopies.push_back(IInit);
      }
      ++IRef;
    }
  }
  EmittedAsPrivate.clear();
  // Get list of firstprivate variables.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.FirstprivateVars.push_back(*IRef);
        Data.FirstprivateCopies.push_back(IInit);
        Data.FirstprivateInits.push_back(*IElemInitRef);
      }
      ++IRef;
      ++IElemInitRef;
    }
  }
  // Get list of lastprivate variables (for taskloops).
  llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ID = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.LastprivateVars.push_back(*IRef);
        Data.LastprivateCopies.push_back(IInit);
      }
      LastprivateDstsOrigs.insert(
          {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
           cast<DeclRefExpr>(*IRef)});
      ++IRef;
      ++ID;
    }
  }
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    auto IPriv = C->privates().begin();
    auto IRed = C->reduction_ops().begin();
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (const Expr *Ref : C->varlists()) {
      Data.ReductionVars.emplace_back(Ref);
      Data.ReductionCopies.emplace_back(*IPriv);
      Data.ReductionOps.emplace_back(*IRed);
      LHSs.emplace_back(*ILHS);
      RHSs.emplace_back(*IRHS);
      std::advance(IPriv, 1);
      std::advance(IRed, 1);
      std::advance(ILHS, 1);
      std::advance(IRHS, 1);
    }
  }
  Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
      *this, S.getBeginLoc(), LHSs, RHSs, Data);
  // Build list of dependences.
  for (const auto *C : S.getClausesOfKind<OMPDependClause>())
    for (const Expr *IRef : C->varlists())
      Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
                    CapturedRegion](CodeGenFunction &CGF,
                                    PrePostActionTy &Action) {
    // Set proper addresses for generated private copies.
    OMPPrivateScope Scope(CGF);
    if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
        !Data.LastprivateVars.empty()) {
      llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
          CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      CallArgs.push_back(PrivatesPtr);
      for (const Expr *E : Data.PrivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr = CGF.CreateMemTemp(
            CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      for (const Expr *E : Data.LastprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".lastpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      for (const auto &Pair : LastprivateDstsOrigs) {
        const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
        DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/
                        CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        Pair.second->getType(), VK_LValue,
                        Pair.second->getExprLoc());
        Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
          return CGF.EmitLValue(&DRE).getAddress();
        });
      }
      for (const auto &Pair : PrivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
    }
    if (Data.Reductions) {
      OMPLexicalScope LexScope(CGF, S, CapturedRegion);
      ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionCopies,
                             Data.ReductionOps);
      llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
      for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // FIXME: This must be removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                           RedCG, Cnt);
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement =
            Address(CGF.EmitScalarConversion(
                        Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                        CGF.getContext().getPointerType(
                            Data.ReductionCopies[Cnt]->getType()),
                        Data.ReductionCopies[Cnt]->getExprLoc()),
                    Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        Scope.addPrivate(RedCG.getBaseDecl(Cnt),
                         [Replacement]() { return Replacement; });
      }
    }
    // Privatize all private variables except for in_reduction items.
    (void)Scope.Privatize();
    SmallVector<const Expr *, 4> InRedVars;
    SmallVector<const Expr *, 4> InRedPrivs;
    SmallVector<const Expr *, 4> InRedOps;
    SmallVector<const Expr *, 4> TaskgroupDescriptors;
    for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
      auto IPriv = C->privates().begin();
      auto IRed = C->reduction_ops().begin();
      auto ITD = C->taskgroup_descriptors().begin();
      for (const Expr *Ref : C->varlists()) {
        InRedVars.emplace_back(Ref);
        InRedPrivs.emplace_back(*IPriv);
        InRedOps.emplace_back(*IRed);
        TaskgroupDescriptors.emplace_back(*ITD);
        std::advance(IPriv, 1);
        std::advance(IRed, 1);
        std::advance(ITD, 1);
      }
    }
    // Privatize in_reduction items here, because taskgroup descriptors must be
    // privatized earlier.
    OMPPrivateScope InRedScope(CGF);
    if (!InRedVars.empty()) {
      ReductionCodeGen RedCG(InRedVars, InRedPrivs, InRedOps);
      for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // The taskgroup descriptor variable is always implicit firstprivate
        // and privatized already during processing of the firstprivates.
        // FIXME: This must be removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                           RedCG, Cnt);
        llvm::Value *ReductionsPtr =
            CGF.EmitLoadOfScalar(CGF.EmitLValue(TaskgroupDescriptors[Cnt]),
                                 TaskgroupDescriptors[Cnt]->getExprLoc());
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement = Address(
            CGF.EmitScalarConversion(
                Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
                InRedPrivs[Cnt]->getExprLoc()),
            Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
                              [Replacement]() { return Replacement; });
      }
    }
    (void)InRedScope.Privatize();

    Action.Enter(CGF);
    BodyGen(CGF);
  };
  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
      Data.NumberOfParts);
  OMPLexicalScope Scope(*this, S);
  TaskGen(*this, OutlinedFn, Data);
}
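
// Illustrative sketch (not emitted code): for a directive such as
//   #pragma omp task private(a) firstprivate(b)
// the runtime materializes the privates inside the task descriptor, and the
// CodeGen lambda above recovers their addresses by calling the generated
// "copy function" with one ".priv.ptr.addr" out-parameter per variable,
// then privatizes the DeclRefExprs so the task body sees the private copies.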

static ImplicitParamDecl *
createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
                                  QualType Ty, CapturedDecl *CD,
                                  SourceLocation Loc) {
  auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
                                           ImplicitParamDecl::Other);
  auto *OrigRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
  auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
                                              ImplicitParamDecl::Other);
  auto *PrivateRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
  QualType ElemType = C.getBaseElementType(Ty);
  auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
                                           ImplicitParamDecl::Other);
  auto *InitRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
  PrivateVD->setInitStyle(VarDecl::CInit);
  PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
                                              InitRef, /*BasePath=*/nullptr,
                                              VK_RValue));
  Data.FirstprivateVars.emplace_back(OrigRef);
  Data.FirstprivateCopies.emplace_back(PrivateRef);
  Data.FirstprivateInits.emplace_back(InitRef);
  return OrigVD;
}
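
// Illustrative sketch (not emitted code): for a target task with N map
// entries, the helper above synthesizes three implicit firstprivate arrays
// that carry the offloading arguments into the task:
//   void *base_ptrs[N];   // BPVD
//   void *ptrs[N];        // PVD
//   int64_t sizes[N];     // SVD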

void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
    const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen,
    OMPTargetDataInfo &InputInfo) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  auto I = CS->getCapturedDecl()->param_begin();
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  OMPTaskDataTy Data;
  // The task is not final.
  Data.Final.setInt(/*IntVal=*/false);
  // Get list of firstprivate variables.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (auto *IInit : C->private_copies()) {
      Data.FirstprivateVars.push_back(*IRef);
      Data.FirstprivateCopies.push_back(IInit);
      Data.FirstprivateInits.push_back(*IElemInitRef);
      ++IRef;
      ++IElemInitRef;
    }
  }
  OMPPrivateScope TargetScope(*this);
  VarDecl *BPVD = nullptr;
  VarDecl *PVD = nullptr;
  VarDecl *SVD = nullptr;
  if (InputInfo.NumberOfTargetItems > 0) {
    auto *CD = CapturedDecl::Create(
        getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
    llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
    QualType BaseAndPointersType = getContext().getConstantArrayType(
        getContext().VoidPtrTy, ArrSize, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    BPVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
    PVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
    QualType SizesType = getContext().getConstantArrayType(
        getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
        ArrSize, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
                                            S.getBeginLoc());
    TargetScope.addPrivate(
        BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; });
    TargetScope.addPrivate(PVD,
                           [&InputInfo]() { return InputInfo.PointersArray; });
    TargetScope.addPrivate(SVD,
                           [&InputInfo]() { return InputInfo.SizesArray; });
  }
  (void)TargetScope.Privatize();
  // Build list of dependences.
  for (const auto *C : S.getClausesOfKind<OMPDependClause>())
    for (const Expr *IRef : C->varlists())
      Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD,
                    &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
    // Set proper addresses for generated private copies.
    OMPPrivateScope Scope(CGF);
    if (!Data.FirstprivateVars.empty()) {
      llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
          CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      CallArgs.push_back(PrivatesPtr);
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      for (const auto &Pair : PrivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
    }
    // Privatize all private variables except for in_reduction items.
    (void)Scope.Privatize();
    if (InputInfo.NumberOfTargetItems > 0) {
      InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
      InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
      InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
    }

    Action.Enter(CGF);
    OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
    BodyGen(CGF);
  };
  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true,
      Data.NumberOfParts);
  llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
  IntegerLiteral IfCond(getContext(), TrueOrFalse,
                        getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
                        SourceLocation());

  CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
                                      SharedsTy, CapturedStruct, &IfCond, Data);
}
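
// Note on the integer-literal if-condition above: a target task is emitted as
// an undeferred task (if(0)) unless the directive carries a 'nowait' clause,
// in which case it becomes a regular deferrable task (if(1)).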

void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_task) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
  // Check if we should emit tied or untied task.
  Data.Tied = !S.getSingleClause<OMPUntiedClause>();
  auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitStmt(CS->getCapturedStmt());
  };
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
                            const OMPTaskDataTy &Data) {
    CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
                                            SharedsTy, CapturedStruct, IfCond,
                                            Data);
  };
  EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
}
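
// Illustrative source form handled here (hypothetical user code):
//   #pragma omp task if(task: n > 16) untied
//   { work(n); }
// The if-clause condition (with a matching 'task' name modifier, or none)
// becomes IfCond, and the presence of 'untied' clears Data.Tied.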

void CodeGenFunction::EmitOMPTaskyieldDirective(
    const OMPTaskyieldDirective &S) {
  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
}

void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPTaskgroupDirective(
    const OMPTaskgroupDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    if (const Expr *E = S.getReductionRef()) {
      SmallVector<const Expr *, 4> LHSs;
      SmallVector<const Expr *, 4> RHSs;
      OMPTaskDataTy Data;
      for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
        auto IPriv = C->privates().begin();
        auto IRed = C->reduction_ops().begin();
        auto ILHS = C->lhs_exprs().begin();
        auto IRHS = C->rhs_exprs().begin();
        for (const Expr *Ref : C->varlists()) {
          Data.ReductionVars.emplace_back(Ref);
          Data.ReductionCopies.emplace_back(*IPriv);
          Data.ReductionOps.emplace_back(*IRed);
          LHSs.emplace_back(*ILHS);
          RHSs.emplace_back(*IRHS);
          std::advance(IPriv, 1);
          std::advance(IRed, 1);
          std::advance(ILHS, 1);
          std::advance(IRHS, 1);
        }
      }
      llvm::Value *ReductionDesc =
          CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
                                                           LHSs, RHSs, Data);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      CGF.EmitVarDecl(*VD);
      CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD),
                            /*Volatile=*/false, E->getType());
    }
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
}
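
// Illustrative source form handled here (hypothetical user code):
//   #pragma omp taskgroup task_reduction(+: sum)
//   { /* child tasks carrying in_reduction(+: sum) */ }
// The reduction descriptor returned by the runtime is stored into the
// implicit reduction-ref variable so nested in_reduction items can find it.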

void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
  CGM.getOpenMPRuntime().emitFlush(
      *this,
      [&S]() -> ArrayRef<const Expr *> {
        if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
          return llvm::makeArrayRef(FlushClause->varlist_begin(),
                                    FlushClause->varlist_end());
        return llvm::None;
      }(),
      S.getBeginLoc());
}

void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
                                            const CodeGenLoopTy &CodeGenLoop,
                                            Expr *IncExpr) {
  // Emit the loop iteration variable.
  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate the iterations count
  // on each iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause = false;
  // Check pre-condition.
  {
    OMPLoopScope PreInitScope(*this, S);
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    emitAlignedClause(*this, S);
    // Emit 'then' code.
    {
      // Emit helper vars inits.
      LValue LB = EmitOMPHelperVar(
          *this, cast<DeclRefExpr>(
                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedLowerBoundVariable()
                          : S.getLowerBoundVariable())));
      LValue UB = EmitOMPHelperVar(
          *this, cast<DeclRefExpr>(
                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedUpperBoundVariable()
                          : S.getUpperBoundVariable())));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races
        // on initialization of firstprivate variables and post-update of
        // lastprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(
            *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
            /*ForceSimpleCall=*/true);
      }
      EmitOMPPrivateClause(S, LoopScope);
      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
          !isOpenMPTeamsDirective(S.getDirectiveKind()))
        EmitOMPReductionClauseInit(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPPrivateLoopCounters(S, LoopScope);
      (void)LoopScope.Privatize();
      if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
        CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);

      // Detect the distribute schedule kind and chunk.
      llvm::Value *Chunk = nullptr;
      OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
      if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
        ScheduleKind = C->getDistScheduleKind();
        if (const Expr *Ch = C->getChunkSize()) {
          Chunk = EmitScalarExpr(Ch);
          Chunk = EmitScalarConversion(Chunk, Ch->getType(),
                                       S.getIterationVariable()->getType(),
                                       S.getBeginLoc());
        }
      } else {
        // Default behaviour for dist_schedule clause.
        CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk(
            *this, S, ScheduleKind, Chunk);
      }
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

      // OpenMP [2.10.8, distribute Construct, Description]
      // If dist_schedule is specified, kind must be static. If specified,
      // iterations are divided into chunks of size chunk_size, chunks are
      // assigned to the teams of the league in a round-robin fashion in the
      // order of the team number. When no chunk_size is specified, the
      // iteration space is divided into chunks that are approximately equal
      // in size, and at most one chunk is distributed to each team of the
      // league. The size of the chunks is unspecified in this case.
      bool StaticChunked =
          RT.isStaticChunked(ScheduleKind, /* Chunked */ Chunk != nullptr) &&
          isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
      if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr) ||
          StaticChunked) {
        if (isOpenMPSimdDirective(S.getDirectiveKind()))
          EmitOMPSimdInit(S, /*IsMonotonic=*/true);
        CGOpenMPRuntime::StaticRTInput StaticInit(
            IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(),
            LB.getAddress(), UB.getAddress(), ST.getAddress(),
            StaticChunked ? Chunk : nullptr);
        RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
                                    StaticInit);
        JumpDest LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                            ? S.getCombinedEnsureUpperBound()
                            : S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                            ? S.getCombinedInit()
                            : S.getInit());

        const Expr *Cond =
            isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                ? S.getCombinedCond()
                : S.getCond();

        if (StaticChunked)
          Cond = S.getCombinedDistCond();

        // For static unchunked schedules generate:
        //
        //  1. For distribute alone, codegen
        //    while (idx <= UB) {
        //      BODY;
        //      ++idx;
        //    }
        //
        //  2. When combined with 'for' (e.g. as in 'distribute parallel for')
        //    while (idx <= UB) {
        //      <CodeGen rest of pragma>(LB, UB);
        //      idx += ST;
        //    }
        //
        // For static chunked schedules generate:
        //
        // while (IV <= GlobalUB) {
        //   <CodeGen rest of pragma>(LB, UB);
        //   LB += ST;
        //   UB += ST;
        //   UB = min(UB, GlobalUB);
        //   IV = LB;
        // }
        EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), Cond, IncExpr,
                         [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
                           CodeGenLoop(CGF, S, LoopExit);
                         },
                         [&S, StaticChunked](CodeGenFunction &CGF) {
                           if (StaticChunked) {
                             CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
                             CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
                             CGF.EmitIgnoredExpr(
                                 S.getCombinedEnsureUpperBound());
                             CGF.EmitIgnoredExpr(S.getCombinedInit());
                           }
                         });
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getBeginLoc(), S.getDirectiveKind());
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        const OMPLoopArguments LoopArguments = {
            LB.getAddress(), UB.getAddress(), ST.getAddress(), IL.getAddress(),
            Chunk};
        EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
                                   CodeGenLoop);
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
        EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
          return CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
        });
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
          !isOpenMPTeamsDirective(S.getDirectiveKind())) {
        EmitOMPReductionClauseFinal(S, OMPD_simd);
        // Emit post-update of the reduction variables if IsLastIter != 0.
        emitPostUpdateForReductionClause(
            *this, S, [IL, &S](CodeGenFunction &CGF) {
              return CGF.Builder.CreateIsNotNull(
                  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
            });
      }
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause) {
        EmitOMPLastprivateClauseFinal(
            S, /*NoFinals=*/false,
            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
      }
    }

    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
}
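
// Illustrative source form handled here (hypothetical user code):
//   #pragma omp distribute dist_schedule(static, 8)
//   for (int i = 0; i < n; ++i) body(i);
// With a chunk, each team repeatedly takes an 8-iteration window [LB..UB]
// and bumps LB/UB by the stride; without a chunk, the iteration space is
// split once into roughly equal pieces, one per team.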

void CodeGenFunction::EmitOMPDistributeDirective(
    const OMPDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
                                                   const CapturedStmt *S) {
  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
  CGF.CapturedStmtInfo = &CapStmtInfo;
  llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
  Fn->setDoesNotRecurse();
  return Fn;
}

void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
  if (S.hasClausesOfKind<OMPDependClause>()) {
    assert(!S.getAssociatedStmt() &&
           "No associated statement must be in ordered depend construct.");
    for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
      CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
    return;
  }
  const auto *C = S.getSingleClause<OMPSIMDClause>();
  auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
                                 PrePostActionTy &Action) {
    const CapturedStmt *CS = S.getInnermostCapturedStmt();
    if (C) {
      llvm::SmallVector<llvm::Value *, 16> CapturedVars;
      CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
      llvm::Function *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
      CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
                                                      OutlinedFn, CapturedVars);
    } else {
      Action.Enter(CGF);
      CGF.EmitStmt(CS->getCapturedStmt());
    }
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(), !C);
}
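
// Three 'ordered' flavors reach this point (illustrative, hypothetical code):
//   #pragma omp ordered depend(source)  -> doacross runtime calls, no body
//   #pragma omp ordered simd            -> body outlined, no thread ordering
//   #pragma omp ordered                 -> body wrapped in an ordered region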

static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
                                         QualType SrcType, QualType DestType,
                                         SourceLocation Loc) {
  assert(CGF.hasScalarEvaluationKind(DestType) &&
         "DestType must have scalar evaluation kind.");
  assert(!Val.isAggregate() && "Must be a scalar or complex.");
  return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
                                                   DestType, Loc)
                        : CGF.EmitComplexToScalarConversion(
                              Val.getComplexVal(), SrcType, DestType, Loc);
}

static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
                      QualType DestType, SourceLocation Loc) {
  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
         "DestType must have complex evaluation kind.");
  CodeGenFunction::ComplexPairTy ComplexVal;
  if (Val.isScalar()) {
    // Convert the input element to the element type of the complex.
    QualType DestElementType =
        DestType->castAs<ComplexType>()->getElementType();
    llvm::Value *ScalarVal = CGF.EmitScalarConversion(
        Val.getScalarVal(), SrcType, DestElementType, Loc);
    ComplexVal = CodeGenFunction::ComplexPairTy(
        ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
  } else {
    assert(Val.isComplex() && "Must be a scalar or complex.");
    QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
    QualType DestElementType =
        DestType->castAs<ComplexType>()->getElementType();
    ComplexVal.first = CGF.EmitScalarConversion(
        Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
    ComplexVal.second = CGF.EmitScalarConversion(
        Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
  }
  return ComplexVal;
}

static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
                                  LValue LVal, RValue RVal) {
  if (LVal.isGlobalReg()) {
    CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
  } else {
    CGF.EmitAtomicStore(RVal, LVal,
                        IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
                                 : llvm::AtomicOrdering::Monotonic,
                        LVal.isVolatile(), /*isInit=*/false);
  }
}

void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
                                         QualType RValTy, SourceLocation Loc) {
  switch (getEvaluationKind(LVal.getType())) {
  case TEK_Scalar:
    EmitStoreThroughLValue(RValue::get(convertToScalarValue(
                               *this, RVal, RValTy, LVal.getType(), Loc)),
                           LVal);
    break;
  case TEK_Complex:
    EmitStoreOfComplex(
        convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
        /*isInit=*/false);
    break;
  case TEK_Aggregate:
    llvm_unreachable("Must be a scalar or complex.");
  }
}

static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                  const Expr *X, const Expr *V,
                                  SourceLocation Loc) {
  // v = x;
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  LValue VLValue = CGF.EmitLValue(V);
  RValue Res = XLValue.isGlobalReg()
                   ? CGF.EmitLoadOfLValue(XLValue, Loc)
                   : CGF.EmitAtomicLoad(
                         XLValue, Loc,
                         IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
                                  : llvm::AtomicOrdering::Monotonic,
                         XLValue.isVolatile());
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
  CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
}

static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                   const Expr *X, const Expr *E,
                                   SourceLocation Loc) {
  // x = expr;
  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
  emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}

static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
                                                RValue Update,
                                                BinaryOperatorKind BO,
                                                llvm::AtomicOrdering AO,
                                                bool IsXLHSInRHSPart) {
  ASTContext &Context = CGF.getContext();
  // Allow atomicrmw only if 'x' and 'update' are integer values, the lvalue
  // for the 'x' expression is simple, and atomic is allowed for the given
  // type on the target platform.
  if (BO == BO_Comma || !Update.isScalar() ||
      !Update.getScalarVal()->getType()->isIntegerTy() ||
      !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
                        (Update.getScalarVal()->getType() !=
                         X.getAddress().getElementType())) ||
      !X.getAddress().getElementType()->isIntegerTy() ||
      !Context.getTargetInfo().hasBuiltinAtomic(
          Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
    return std::make_pair(false, RValue::get(nullptr));

  llvm::AtomicRMWInst::BinOp RMWOp;
  switch (BO) {
  case BO_Add:
    RMWOp = llvm::AtomicRMWInst::Add;
    break;
  case BO_Sub:
    if (!IsXLHSInRHSPart)
      return std::make_pair(false, RValue::get(nullptr));
    RMWOp = llvm::AtomicRMWInst::Sub;
    break;
  case BO_And:
    RMWOp = llvm::AtomicRMWInst::And;
    break;
  case BO_Or:
    RMWOp = llvm::AtomicRMWInst::Or;
    break;
  case BO_Xor:
    RMWOp = llvm::AtomicRMWInst::Xor;
    break;
  case BO_LT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
                                   : llvm::AtomicRMWInst::Max)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
                                   : llvm::AtomicRMWInst::UMax);
    break;
  case BO_GT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
                                   : llvm::AtomicRMWInst::Min)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
                                   : llvm::AtomicRMWInst::UMin);
    break;
  case BO_Assign:
    RMWOp = llvm::AtomicRMWInst::Xchg;
    break;
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Shl:
  case BO_Shr:
  case BO_LAnd:
  case BO_LOr:
    return std::make_pair(false, RValue::get(nullptr));
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_AddAssign:
  case BO_SubAssign:
  case BO_AndAssign:
  case BO_OrAssign:
  case BO_XorAssign:
  case BO_MulAssign:
  case BO_DivAssign:
  case BO_RemAssign:
  case BO_ShlAssign:
  case BO_ShrAssign:
  case BO_Comma:
    llvm_unreachable("Unsupported atomic update operation");
  }
  llvm::Value *UpdateVal = Update.getScalarVal();
  if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
    UpdateVal = CGF.Builder.CreateIntCast(
        IC, X.getAddress().getElementType(),
        X.getType()->hasSignedIntegerRepresentation());
  }
  llvm::Value *Res =
      CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
  return std::make_pair(true, RValue::get(Res));
}
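
// Illustrative mapping (hypothetical user code):
//   #pragma omp atomic update
//   x += n;     // lowers to: atomicrmw add i32* @x, i32 %n
//   x = n - x;  // not RMW-able ('x' is on the RHS of '-'), falls back to the
//               // compare-and-exchange path in the caller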

std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
    LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
    llvm::AtomicOrdering AO, SourceLocation Loc,
    const llvm::function_ref<RValue(RValue)> CommonGen) {
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval + expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr
  // x = expr Op x; -> expr binop xrval;
  auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
  if (!Res.first) {
    if (X.isGlobalReg()) {
      // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
      // 'xrval'.
      EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
    } else {
      // Perform compare-and-swap procedure.
      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
    }
  }
  return Res;
}

static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                    const Expr *X, const Expr *E,
                                    const Expr *UE, bool IsXLHSInRHSPart,
                                    SourceLocation Loc) {
  assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
         "Update expr in 'atomic update' must be a binary operator.");
  const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval + expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr
  // x = expr Op x; -> expr binop xrval;
  assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  llvm::AtomicOrdering AO = IsSeqCst
                                ? llvm::AtomicOrdering::SequentiallyConsistent
                                : llvm::AtomicOrdering::Monotonic;
  const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
  const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
  const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
  const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
  auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) {
    CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
    CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
    return CGF.EmitAnyExpr(UE);
  };
  (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
      XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
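
// Illustrative CAS fallback shape for a non-RMW update such as 'x = x * 2'
// (rough sketch of the IR the generic EmitAtomicUpdate path produces):
//   entry: %old0 = load atomic i32, i32* @x monotonic
//   loop:  %old  = phi i32 [ %old0, %entry ], [ %prev, %loop ]
//          %new  = mul i32 %old, 2
//          %pair = cmpxchg i32* @x, i32 %old, i32 %new seq_cst seq_cst
//          %prev = extractvalue { i32, i1 } %pair, 0
//          %ok   = extractvalue { i32, i1 } %pair, 1
//          br i1 %ok, label %exit, label %loop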

static RValue convertToType(CodeGenFunction &CGF, RValue Value,
                            QualType SourceType, QualType ResType,
                            SourceLocation Loc) {
  switch (CGF.getEvaluationKind(ResType)) {
  case TEK_Scalar:
    return RValue::get(
        convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
  case TEK_Complex: {
    auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
    return RValue::getComplex(Res.first, Res.second);
  }
  case TEK_Aggregate:
    break;
  }
  llvm_unreachable("Must be a scalar or complex.");
}

static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                     bool IsPostfixUpdate, const Expr *V,
                                     const Expr *X, const Expr *E,
                                     const Expr *UE, bool IsXLHSInRHSPart,
                                     SourceLocation Loc) {
  assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
  assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
  RValue NewVVal;
  LValue VLValue = CGF.EmitLValue(V);
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  llvm::AtomicOrdering AO = IsSeqCst
                                ? llvm::AtomicOrdering::SequentiallyConsistent
                                : llvm::AtomicOrdering::Monotonic;
  QualType NewVValType;
  if (UE) {
    // 'x' is updated with some additional value.
    assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
           "Update expr in 'atomic capture' must be a binary operator.");
    const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
    // Update expressions are allowed to have the following forms:
    // x binop= expr; -> xrval + expr;
    // x++, ++x -> xrval + 1;
    // x--, --x -> xrval - 1;
    // x = x binop expr; -> xrval binop expr
    // x = expr Op x; -> expr binop xrval;
    const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
    const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
    const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
    NewVValType = XRValExpr->getType();
    const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
    auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
                  IsPostfixUpdate](RValue XRValue) {
      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
      RValue Res = CGF.EmitAnyExpr(UE);
      NewVVal = IsPostfixUpdate ? XRValue : Res;
      return Res;
    };
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      if (IsPostfixUpdate) {
        // Use old value from 'atomicrmw'.
        NewVVal = Res.second;
      } else {
        // 'atomicrmw' does not provide the new value, so evaluate it using
        // the old 'x' value.
        CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
        CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
        NewVVal = CGF.EmitAnyExpr(UE);
      }
    }
  } else {
    // 'x' is simply rewritten with some 'expr'.
    NewVValType = X->getType().getNonReferenceType();
    ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
                               X->getType().getNonReferenceType(), Loc);
    auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) {
      NewVVal = XRValue;
      return ExprRValue;
    };
    // Try to perform atomicrmw xchg, otherwise simple exchange.
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
        Loc, Gen);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
    }
  }
  // Emit post-update store to 'v' of old/new 'x' value.
  CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
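
// Illustrative capture forms handled above (hypothetical user code):
//   #pragma omp atomic capture
//   v = x++;              // postfix: 'v' receives the old value of 'x'
//   #pragma omp atomic capture
//   v = x += n;           // prefix-style: 'v' receives the new value
//   #pragma omp atomic capture
//   { v = x; x = expr; }  // exchange: 'v' gets the old 'x'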

static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
                              bool IsSeqCst, bool IsPostfixUpdate,
                              const Expr *X, const Expr *V, const Expr *E,
                              const Expr *UE, bool IsXLHSInRHSPart,
                              SourceLocation Loc) {
  switch (Kind) {
  case OMPC_read:
    emitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
    break;
  case OMPC_write:
    emitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
    break;
  case OMPC_unknown:
  case OMPC_update:
    emitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
    break;
  case OMPC_capture:
    emitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
                             IsXLHSInRHSPart, Loc);
    break;
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_reduction:
  case OMPC_task_reduction:
  case OMPC_in_reduction:
  case OMPC_safelen:
  case OMPC_simdlen:
  case OMPC_allocator:
  case OMPC_allocate:
  case OMPC_collapse:
  case OMPC_default:
  case OMPC_seq_cst:
  case OMPC_shared:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_flush:
  case OMPC_proc_bind:
  case OMPC_schedule:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_threadprivate:
  case OMPC_depend:
  case OMPC_mergeable:
  case OMPC_device:
  case OMPC_threads:
  case OMPC_simd:
  case OMPC_map:
  case OMPC_num_teams:
  case OMPC_thread_limit:
  case OMPC_priority:
  case OMPC_grainsize:
  case OMPC_nogroup:
  case OMPC_num_tasks:
  case OMPC_hint:
  case OMPC_dist_schedule:
  case OMPC_defaultmap:
  case OMPC_uniform:
  case OMPC_to:
  case OMPC_from:
  case OMPC_use_device_ptr:
  case OMPC_is_device_ptr:
  case OMPC_unified_address:
  case OMPC_unified_shared_memory:
  case OMPC_reverse_offload:
  case OMPC_dynamic_allocators:
  case OMPC_atomic_default_mem_order:
    llvm_unreachable("Clause is not allowed in 'omp atomic'.");
  }
}

void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
  bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
  OpenMPClauseKind Kind = OMPC_unknown;
  for (const OMPClause *C : S.clauses()) {
    // Find first clause (skip seq_cst clause, if it is first).
    if (C->getClauseKind() != OMPC_seq_cst) {
      Kind = C->getClauseKind();
      break;
    }
  }

  const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
  if (const auto *FE = dyn_cast<FullExpr>(CS))
    enterFullExpression(FE);
  // Processing for statements under 'atomic capture'.
  if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
    for (const Stmt *C : Compound->body()) {
      if (const auto *FE = dyn_cast<FullExpr>(C))
        enterFullExpression(FE);
    }
  }

  auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF,
                                            PrePostActionTy &) {
    CGF.EmitStopPoint(CS);
    emitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
                      S.getV(), S.getExpr(), S.getUpdateExpr(),
                      S.isXLHSInRHSPart(), S.getBeginLoc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
}

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen) {
  assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
  CodeGenModule &CGM = CGF.CGM;

  // On device emit this construct as inlined code.
  if (CGM.getLangOpts().OpenMPIsDevice) {
    OMPLexicalScope Scope(CGF, S, OMPD_target);
    CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
        });
    return;
  }

  llvm::Function *Fn = nullptr;
  llvm::Constant *FnID = nullptr;

  const Expr *IfCond = nullptr;
  // Check for the at most one if clause associated with the target region.
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_target) {
      IfCond = C->getCondition();
      break;
    }
  }

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  // Check if we have an if clause whose conditional always evaluates to false
  // or if we do not have any targets specified. If so the target region is not
  // an offload entry point.
  bool IsOffloadEntry = true;
  if (IfCond) {
    bool Val;
    if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
      IsOffloadEntry = false;
  }
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    IsOffloadEntry = false;

  assert(CGF.CurFuncDecl && "No parent declaration for target region!");
  StringRef ParentName;
  // In case we have Ctors/Dtors we use the complete type variant to produce
  // the mangling of the device outlined kernel.
  if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
    ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
  else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
    ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
  else
    ParentName =
        CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl)));

  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
                                                    IsOffloadEntry, CodeGen);
  OMPLexicalScope Scope(CGF, S, OMPD_task);
  auto &&SizeEmitter = [](CodeGenFunction &CGF, const OMPLoopDirective &D) {
    OMPLoopScope(CGF, D);
    // Emit calculation of the iterations count.
    llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations());
    NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty,
                                              /*isSigned=*/false);
    return NumIterations;
  };
  CGM.getOpenMPRuntime().emitTargetNumIterationsCall(CGF, S, Device,
                                                     SizeEmitter);
  CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device);
}
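
// Host-side flow, in outline: the region body is emitted into a standalone
// kernel function (plus an offload-entry ID when offloading is possible),
// the expected trip count is forwarded to the runtime for combined loop
// directives, and the final runtime call launches the kernel on the device,
// falling back to host execution when IfCond is false or no target triples
// are configured.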

static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
                             PrePostActionTy &Action) {
  Action.Enter(CGF);
  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
  CGF.EmitOMPPrivateClause(S, PrivateScope);
  (void)PrivateScope.Privatize();
  if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
    CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

  CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
}

void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
                                                  StringRef ParentName,
                                                  const OMPTargetDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S,
                                        OpenMPDirectiveKind InnermostKind,
                                        const RegionCodeGenTy &CodeGen) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
  llvm::Function *OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);

  const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
  const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
  if (NT || TL) {
    const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr;
    const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr;

    CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
                                                  S.getBeginLoc());
  }

  OMPTeamsScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn,
                                           CapturedVars);
}
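
// Illustrative source form handled here (hypothetical user code):
//   #pragma omp teams num_teams(4) thread_limit(64)
// The two clause expressions are pushed to the runtime before the outlined
// teams function is invoked through the fork-teams entry point.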

void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

static void emitTargetTeamsRegion(CodeGenFunction &CGF,
                                  PrePostActionTy &Action,
                                  const OMPTargetTeamsDirective &S) {
  auto *CS = S.getCapturedStmt(OMPD_teams);
  Action.Enter(CGF);
  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
    CGF.EmitStmt(CS->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsRegion(CGF, Action, S);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDirective(
    const OMPTargetTeamsDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsRegion(CGF, Action, S);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void
emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
                                const OMPTargetTeamsDistributeDirective &S) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeRegion(CGF, Action, S);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
    const OMPTargetTeamsDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeRegion(CGF, Action, S);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void emitTargetTeamsDistributeSimdRegion(
    CodeGenFunction &CGF, PrePostActionTy &Action,
    const OMPTargetTeamsDistributeSimdDirective &S) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective(
    const OMPTargetTeamsDistributeSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPTeamsDistributeDirective(
    const OMPTeamsDistributeDirective &S) {

  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
    const OMPTeamsDistributeSimdDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
    const OMPTeamsDistributeParallelForDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
    const OMPTeamsDistributeParallelForSimdDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

static void emitTargetTeamsDistributeParallelForRegion(
    CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S,
    PrePostActionTy &Action) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                                 PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };

  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
                              CodeGenTeams);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeParallelForDirective &S) {
  // Emit SPMD target teams distribute parallel for region as a standalone
  // region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
    const OMPTargetTeamsDistributeParallelForDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void emitTargetTeamsDistributeParallelForSimdRegion(
    CodeGenFunction &CGF,
    const OMPTargetTeamsDistributeParallelForSimdDirective &S,
    PrePostActionTy &Action) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                                 PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };

  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd,
                              CodeGenTeams);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
  // Emit SPMD target teams distribute parallel for simd region as a
  // standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
    const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPCancellationPointDirective(
    const OMPCancellationPointDirective &S) {
  CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(),
                                                   S.getCancelRegion());
}

void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_cancel) {
      IfCond = C->getCondition();
      break;
    }
  }
  CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
                                        S.getCancelRegion());
}
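
// Illustrative source form handled here (hypothetical user code):
//   #pragma omp cancel for if(err != 0)
// When the runtime reports an active cancellation, control branches to the
// destination computed by getOMPCancelDestination below.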
CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
  if (Kind == OMPD_parallel || Kind == OMPD_task ||
      Kind == OMPD_target_parallel)
    return ReturnBlock;
  assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
         Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
         Kind == OMPD_distribute_parallel_for ||
         Kind == OMPD_target_parallel_for ||
         Kind == OMPD_teams_distribute_parallel_for ||
         Kind == OMPD_target_teams_distribute_parallel_for);
  return OMPCancelStack.getExitBlock();
}
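
// Handles the use_device_ptr clause, e.g. (sketch):
//   #pragma omp target data map(tofrom: a[0:n]) use_device_ptr(p)
// Each list item gets a private copy initialized with the device address the
// runtime recorded in CaptureDeviceAddrMap.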
void CodeGenFunction::EmitOMPUseDevicePtrClause(
    const OMPClause &NC, OMPPrivateScope &PrivateScope,
    const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
  const auto &C = cast<OMPUseDevicePtrClause>(NC);
  auto OrigVarIt = C.varlist_begin();
  auto InitIt = C.inits().begin();
  for (const Expr *PvtVarIt : C.private_copies()) {
    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
    const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
    const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());

    // In order to identify the right initializer we need to match the
    // declaration used by the mapping logic. In some cases we may get
    // OMPCapturedExprDecl that refers to the original declaration.
    const ValueDecl *MatchingVD = OrigVD;
    if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
      // OMPCapturedExprDecl are used to privatize fields of the current
      // structure.
      const auto *ME = cast<MemberExpr>(OED->getInit());
      assert(isa<CXXThisExpr>(ME->getBase()) &&
             "Base should be the current struct!");
      MatchingVD = ME->getMemberDecl();
    }

    // If we don't have information about the current list item, move on to
    // the next one.
    auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
    if (InitAddrIt == CaptureDeviceAddrMap.end())
      continue;

    bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD,
                                                         InitAddrIt, InitVD,
                                                         PvtVD]() -> Address {
      // Initialize the temporary initialization variable with the address we
      // get from the runtime library. We have to cast the source address
      // because it is always a void *. References are materialized in the
      // privatization scope, so the initialization here disregards the fact
      // that the original variable is a reference.
      QualType AddrQTy =
          getContext().getPointerType(OrigVD->getType().getNonReferenceType());
      llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
      Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
      setAddrOfLocalVar(InitVD, InitAddr);

      // Emit the private declaration; it is initialized by the declaration we
      // just added to the local declarations map.
      EmitDecl(*PvtVD);

      // The initialization variable has served its purpose in the emission of
      // the previous declaration, so we don't need it anymore.
      LocalDeclMap.erase(InitVD);

      // Return the address of the private variable.
      return GetAddrOfLocalVar(PvtVD);
    });
    assert(IsRegistered && "firstprivate var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    ++OrigVarIt;
    ++InitIt;
  }
}
// Generate the instructions for '#pragma omp target data' directive.
void CodeGenFunction::EmitOMPTargetDataDirective(
    const OMPTargetDataDirective &S) {
  CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);

  // Create a pre/post action to signal the privatization of the device
  // pointer. This action can be replaced by the OpenMP runtime code
  // generation to deactivate privatization.
  bool PrivatizeDevicePointers = false;
  class DevicePointerPrivActionTy : public PrePostActionTy {
    bool &PrivatizeDevicePointers;

  public:
    explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
        : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
    void Enter(CodeGenFunction &CGF) override {
      PrivatizeDevicePointers = true;
    }
  };
  DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);

  auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
      CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
    };

    // Codegen that selects whether to generate the privatization code or not.
    auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
                          &InnermostCodeGen](CodeGenFunction &CGF,
                                             PrePostActionTy &Action) {
      RegionCodeGenTy RCG(InnermostCodeGen);
      PrivatizeDevicePointers = false;

      // Call the pre-action to change the status of PrivatizeDevicePointers
      // if needed.
      Action.Enter(CGF);

      if (PrivatizeDevicePointers) {
        OMPPrivateScope PrivateScope(CGF);
        // Emit all instances of the use_device_ptr clause.
        for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
          CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
                                        Info.CaptureDeviceAddrMap);
        (void)PrivateScope.Privatize();
        RCG(CGF);
      } else {
        RCG(CGF);
      }
    };

    // Forward the provided action to the privatization codegen.
    RegionCodeGenTy PrivRCG(PrivCodeGen);
    PrivRCG.setAction(Action);

    // Although the body of the region is emitted as an inlined directive, we
    // don't use an inline scope, because changes to references inside the
    // region are expected to be visible outside, so we do not privatize them.
    OMPLexicalScope Scope(CGF, S);
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
                                                    PrivRCG);
  };

  RegionCodeGenTy RCG(CodeGen);

  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty()) {
    RCG(*this);
    return;
  }

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  // Set the action to signal privatization of device pointers.
  RCG.setAction(PrivAction);

  // Emit region code.
  CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
                                             Info);
}
void CodeGenFunction::EmitOMPTargetEnterDataDirective(
    const OMPTargetEnterDataDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}
void CodeGenFunction::EmitOMPTargetExitDataDirective(
    const OMPTargetExitDataDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}
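
// Combined 'target parallel' constructs, e.g. (sketch):
//   #pragma omp target parallel [clauses]
// are emitted by outlining the 'parallel' region below; the callers wrap it
// in the common target machinery (offload entry on the device side, target
// call with host fallback on the host side).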
static void emitTargetParallelRegion(CodeGenFunction &CGF,
                                     const OMPTargetParallelDirective &S,
                                     PrePostActionTy &Action) {
  // Get the captured statement associated with the 'parallel' region.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  Action.Enter(CGF);
  auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
    // TODO: Add support for clauses.
    CGF.EmitStmt(CS->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen,
                                 emitEmptyBoundParameters);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}
void CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}
void CodeGenFunction::EmitOMPTargetParallelDirective(
    const OMPTargetParallelDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}
static void emitTargetParallelForRegion(CodeGenFunction &CGF,
                                        const OMPTargetParallelForDirective &S,
                                        PrePostActionTy &Action) {
  Action.Enter(CGF);
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPCancelStackRAII CancelRegion(
        CGF, OMPD_target_parallel_for, S.hasCancel());
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen,
                                 emitEmptyBoundParameters);
}
void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelForDirective &S) {
  // Emit SPMD target parallel for region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}
void CodeGenFunction::EmitOMPTargetParallelForDirective(
    const OMPTargetParallelForDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}
static void
emitTargetParallelForSimdRegion(CodeGenFunction &CGF,
                                const OMPTargetParallelForSimdDirective &S,
                                PrePostActionTy &Action) {
  Action.Enter(CGF);
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen,
                                 emitEmptyBoundParameters);
}
void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelForSimdDirective &S) {
  // Emit SPMD target parallel for simd region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForSimdRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}
void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
    const OMPTargetParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}
/// Map the helper variable to the address of the given captured implicit
/// parameter in the private scope.
static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
                     const ImplicitParamDecl *PVD,
                     CodeGenFunction::OMPPrivateScope &Privates) {
  const auto *VDecl = cast<VarDecl>(Helper->getDecl());
  Privates.addPrivate(VDecl,
                      [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); });
}
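
// Taskloop-based directives, e.g. (sketch):
//   #pragma omp taskloop [grainsize(g)|num_tasks(n)] [nogroup]
//   for (...) { ... }
// are emitted as a task whose body runs the loop; unless 'nogroup' is given,
// the task is wrapped in an implicit taskgroup region.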
void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
  assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_taskloop) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
  // Check if taskloop must be emitted without taskgroup.
  Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
  // TODO: Check if we should emit tied or untied task.
  Data.Tied = true;
  // Set scheduling for taskloop.
  if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
    // grainsize clause
    Data.Schedule.setInt(/*IntVal=*/false);
    Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
  } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
    // num_tasks clause
    Data.Schedule.setInt(/*IntVal=*/true);
    Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
  }

  auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }

    // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    OMPLoopScope PreInitScope(CGF, S);
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
      ContBlock = CGF.createBasicBlock("taskloop.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }

    if (isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPSimdInit(S);

    OMPPrivateScope LoopScope(CGF);
    // Emit helper vars inits.
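    // The captured declaration Sema builds for a taskloop carries the task
    // machinery (global thread id, part id, privates, copy function, task
    // descriptor) in its leading parameters; the loop-bound parameters
    // follow, hence LowerBound = 5 below.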
    enum { LowerBound = 5, UpperBound, Stride, LastIter };
    auto *I = CS->getCapturedDecl()->param_begin();
    auto *LBP = std::next(I, LowerBound);
    auto *UBP = std::next(I, UpperBound);
    auto *STP = std::next(I, Stride);
    auto *LIP = std::next(I, LastIter);
    mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
             LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
             LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
             LoopScope);
    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());

    // Emit the iterations count variable.
    // If it is not a variable, Sema decided to calculate iterations count on
    // each iteration (e.g., it is foldable into a constant).
    if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                         S.getInc(),
                         [&S](CodeGenFunction &CGF) {
                           CGF.EmitOMPLoopBody(S, JumpDest());
                           CGF.EmitStopPoint(&S);
                         },
                         [](CodeGenFunction &) {});
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
    // Emit final copy of the lastprivate variables if IsLastIter != 0.
    if (HasLastprivateClause) {
      CGF.EmitOMPLastprivateClauseFinal(
          S, isOpenMPSimdDirective(S.getDirectiveKind()),
          CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
              CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
              (*LIP)->getType(), S.getBeginLoc())));
    }
  };
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
                            const OMPTaskDataTy &Data) {
    auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond,
                      &Data](CodeGenFunction &CGF, PrePostActionTy &) {
      OMPLoopScope PreInitScope(CGF, S);
      CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S,
                                                  OutlinedFn, SharedsTy,
                                                  CapturedStruct, IfCond, Data);
    };
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
                                                    CodeGen);
  };
  if (Data.Nogroup) {
    EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data);
  } else {
    CGM.getOpenMPRuntime().emitTaskgroupRegion(
        *this,
        [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF,
                                        PrePostActionTy &Action) {
          Action.Enter(CGF);
          CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen,
                                        Data);
        },
        S.getBeginLoc());
  }
}
void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
  EmitOMPTaskLoopBasedDirective(S);
}
void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
    const OMPTaskLoopSimdDirective &S) {
  EmitOMPTaskLoopBasedDirective(S);
}
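
// Data-motion directive, e.g. (sketch):
//   #pragma omp target update to(a[0:n]) if(cond) device(d)
// Emitted as a standalone runtime call, like the enter/exit data directives
// above.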
// Generate the instructions for '#pragma omp target update' directive.
void CodeGenFunction::EmitOMPTargetUpdateDirective(
    const OMPTargetUpdateDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}
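
// Emit the directive in a simplified, inlined form: simd directives go
// through the common simd-region emission; for anything else only the
// innermost captured body is emitted, with loop counters that live in global
// storage temporarily privatized so the body can reference them locally.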
void CodeGenFunction::EmitSimpleOMPExecutableDirective(
    const OMPExecutableDirective &D) {
  if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
    return;
  auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) {
    if (isOpenMPSimdDirective(D.getDirectiveKind())) {
      emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
    } else {
      OMPPrivateScope LoopGlobals(CGF);
      if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
        for (const Expr *E : LD->counters()) {
          const auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
          if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
            LValue GlobLVal = CGF.EmitLValue(E);
            LoopGlobals.addPrivate(
                VD, [&GlobLVal]() { return GlobLVal.getAddress(); });
          }
          if (isa<OMPCapturedExprDecl>(VD)) {
            // Emit only those that were not explicitly referenced in clauses.
            if (!CGF.LocalDeclMap.count(VD))
              CGF.EmitVarDecl(*VD);
          }
        }
        for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
          if (!C->getNumForLoops())
            continue;
          for (unsigned I = LD->getCollapsedNumber(),
                        E = C->getLoopNumIterations().size();
               I < E; ++I) {
            if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
                    cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
              // Emit only those that were not explicitly referenced in
              // clauses.
              if (!CGF.LocalDeclMap.count(VD))
                CGF.EmitVarDecl(*VD);
            }
          }
        }
      }
      LoopGlobals.Privatize();
      CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
    }
  };
  OMPSimdLexicalScope Scope(*this, D);
  CGM.getOpenMPRuntime().emitInlinedDirective(
      *this,
      isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
                                                  : D.getDirectiveKind(),
      CodeGen);
}