//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace CodeGen;
//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
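// Helper for copying array-typed variables: walks the source and destination
// arrays element by element and invokes CopyGen on each pair of element
// addresses. The loop below is a plain pointer-bumping while-do loop built
// from PHI nodes.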
void CodeGenFunction::EmitOMPAggregateAssign(
    llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;
  auto SrcBegin = SrcAddr;
  auto DestBegin = DestAddr;
  auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin);
  // Cast from pointer to array type to pointer to single element.
  SrcBegin = Builder.CreatePointerBitCastOrAddrSpaceCast(SrcBegin,
                                                         DestBegin->getType());
  auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = createBasicBlock("omp.arraycpy.body");
  auto DoneBB = createBasicBlock("omp.arraycpy.done");
  auto IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);
  auto SrcElementCurrent =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementCurrent->addIncoming(SrcBegin, EntryBB);
  auto DestElementCurrent = Builder.CreatePHI(DestBegin->getType(), 2,
                                              "omp.arraycpy.destElementPast");
  DestElementCurrent->addIncoming(DestBegin, EntryBB);

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  auto DestElementNext = Builder.CreateConstGEP1_32(
      DestElementCurrent, /*Idx0=*/1, "omp.arraycpy.dest.element");
  auto SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementCurrent, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  auto Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementCurrent->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementCurrent->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}
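// Emits a copy of a variable of type OriginalType from SrcAddr into DestAddr
// using the helper expression Copy; DestVD and SrcVD are the pseudo variables
// referenced by Copy and are remapped to the actual addresses (or to single
// array elements) before Copy is emitted.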
void CodeGenFunction::EmitOMPCopy(CodeGenFunction &CGF,
                                  QualType OriginalType, llvm::Value *DestAddr,
                                  llvm::Value *SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      CGF.EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      CGF.EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [&CGF, Copy, SrcVD, DestVD](llvm::Value *DestElement,
                                      llvm::Value *SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(CGF);
            Remap.addPrivate(DestVD, [DestElement]() -> llvm::Value *{
              return DestElement;
            });
            Remap.addPrivate(
                SrcVD, [SrcElement]() -> llvm::Value *{ return SrcElement; });
            (void)Remap.Privatize();
            CGF.EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(CGF);
    Remap.addPrivate(SrcVD, [SrcAddr]() -> llvm::Value *{ return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() -> llvm::Value *{ return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    CGF.EmitIgnoredExpr(Copy);
  }
}
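// For example, for
//   #pragma omp parallel firstprivate(a)
// every implicit thread gets its own copy of 'a' that is copy-initialized
// from the value the original 'a' holds on entry to the construct.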
bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  for (auto &&I = D.getClausesOfKind(OMPC_firstprivate); I; ++I) {
    auto *C = cast<OMPFirstprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto InitsRef = C->inits().begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsFirstprivate.count(OrigVD) == 0) {
        EmittedAsFirstprivate.insert(OrigVD);
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(
            const_cast<VarDecl *>(OrigVD),
            /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                OrigVD) != nullptr,
            (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        auto *OriginalAddr = EmitLValue(&DRE).getAddress();
        QualType Type = OrigVD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
            auto Emission = EmitAutoVarAlloca(*VD);
            auto *Init = VD->getInit();
            if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
              // Perform simple memcpy.
              EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
                                  Type);
            } else {
              EmitOMPAggregateAssign(
                  Emission.getAllocatedAddress(), OriginalAddr, Type,
                  [this, VDInit, Init](llvm::Value *DestElement,
                                       llvm::Value *SrcElement) {
                    // Clean up any temporaries needed by the initialization.
                    RunCleanupsScope InitScope(*this);
                    // Emit initialization for single element.
                    LocalDeclMap[VDInit] = SrcElement;
                    EmitAnyExprToMem(Init, DestElement,
                                     Init->getType().getQualifiers(),
                                     /*IsInitializer*/ false);
                    LocalDeclMap.erase(VDInit);
                  });
            }
            EmitAutoVarCleanups(Emission);
            return Emission.getAllocatedAddress();
          });
        } else {
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
            // Emit private VarDecl with copy init.
            // Remap temp VDInit variable to the address of the original
            // variable (for proper handling of captured global variables).
            LocalDeclMap[VDInit] = OriginalAddr;
            EmitDecl(*VD);
            LocalDeclMap.erase(VDInit);
            return GetAddrOfLocalVar(VD);
          });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef, ++InitsRef;
    }
  }
  return !EmittedAsFirstprivate.empty();
}
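// Emits local copies for variables listed in 'private' clauses (initialized
// with their own default initializer, with no copy from the original) and
// registers them in PrivateScope.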
void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (auto &&I = D.getClausesOfKind(OMPC_private); I; ++I) {
    auto *C = cast<OMPPrivateClause>(*I);
    auto IRef = C->varlist_begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
              // Emit private VarDecl with copy init.
              EmitDecl(*VD);
              return GetAddrOfLocalVar(VD);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}
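// For example, for
//   #pragma omp parallel copyin(tp)
// where 'tp' is threadprivate, every non-master thread copies the master
// thread's value of 'tp' into its own instance before the region body runs.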
bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (auto &&I = D.getClausesOfKind(OMPC_copyin); I; ++I) {
    auto *C = cast<OMPCopyinClause>(*I);
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable.
        auto *MasterAddr = VD->isStaticLocal()
                               ? CGM.getStaticLocalDeclAddress(VD)
                               : CGM.GetAddrOfGlobal(VD);
        // Get the address of the threadprivate variable.
        auto *PrivateAddr = EmitLValue(*IRef).getAddress();
        if (CopiedVars.size() == 1) {
          // At first check if current thread is a master thread. If it is, no
          // need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr, CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr, CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(*this, Type, PrivateAddr, MasterAddr, DestVD, SrcVD,
                    AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}
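// Creates the private copies for 'lastprivate' variables and captures the
// addresses of the originals, so that EmitOMPLastprivateClauseFinal can copy
// the values from the last iteration (or last section) back after the loop.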
bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
    HasAtLeastOneLastprivate = true;
    auto *C = cast<OMPLastprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> llvm::Value *{
          DeclRefExpr DRE(
              const_cast<VarDecl *>(OrigVD),
              /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                  OrigVD) != nullptr,
              (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in codegen
        // for 'firstprivate' clause.
        if (IInit) {
          auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered =
              PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
                // Emit private VarDecl with copy init.
                EmitDecl(*VD);
                return GetAddrOfLocalVar(VD);
              });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          // Silence the warning about unused variable.
          (void)IsRegistered;
        }
      }
      ++IRef, ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}
void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) {
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates;
  const Expr *LastIterVal = nullptr;
  const Expr *IVExpr = nullptr;
  const Expr *IncExpr = nullptr;
  if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    if (isOpenMPWorksharingDirective(D.getDirectiveKind())) {
      LastIterVal = cast<VarDecl>(cast<DeclRefExpr>(
                                      LoopDirective->getUpperBoundVariable())
                                      ->getDecl())
                        ->getAnyInitializer();
      IVExpr = LoopDirective->getIterationVariable();
      IncExpr = LoopDirective->getInc();
      auto IUpdate = LoopDirective->updates().begin();
      for (auto *E : LoopDirective->counters()) {
        auto *D = cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
        LoopCountersAndUpdates[D] = *IUpdate;
        ++IUpdate;
      }
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  bool FirstLCV = true;
  for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
    auto *C = cast<OMPLastprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If lastprivate variable is a loop control variable for loop-based
        // directive, update its value before copyin back to original
        // variable.
        if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) {
          if (FirstLCV && LastIterVal) {
            EmitAnyExprToMem(LastIterVal, EmitLValue(IVExpr).getAddress(),
                             IVExpr->getType().getQualifiers(),
                             /*IsInitializer=*/false);
            EmitIgnoredExpr(IncExpr);
            FirstLCV = false;
          }
          EmitIgnoredExpr(UpExpr);
        }
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the original variable.
        auto *OriginalAddr = GetAddrOfLocalVar(DestVD);
        // Get the address of the private variable.
        auto *PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        EmitOMPCopy(*this, Type, OriginalAddr, PrivateAddr, DestVD, SrcVD,
                    AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (IsLastIterCond) {
    EmitBlock(DoneBB, /*IsFinished=*/true);
  }
}
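// For reduction(op : var) the LHS helper variable is mapped onto the original
// shared 'var' and the RHS helper onto a freshly emitted private copy whose
// initializer is the identity value of 'op'; the actual combining is emitted
// later by EmitOMPReductionClauseFinal through the runtime's reduction entry
// points.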
void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
    auto *C = cast<OMPReductionClause>(*I);
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (auto IRef : C->varlists()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
      auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> llvm::Value *{
        DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        IRef->getType(), VK_LValue, IRef->getExprLoc());
        return EmitLValue(&DRE).getAddress();
      });
      // Emit reduction copy.
      bool IsRegistered =
          PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> llvm::Value *{
            // Emit private VarDecl with reduction init.
            EmitDecl(*PrivateVD);
            return GetAddrOfLocalVar(PrivateVD);
          });
      assert(IsRegistered && "private var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
      ++ILHS, ++IRHS;
    }
  }
}
void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D) {
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
    HasAtLeastOneReduction = true;
    auto *C = cast<OMPReductionClause>(*I);
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getLocEnd(), LHSExprs, RHSExprs, ReductionOps,
        D.getSingleClause(OMPC_nowait) ||
            isOpenMPParallelDirective(D.getDirectiveKind()) ||
            D.getDirectiveKind() == OMPD_simd,
        D.getDirectiveKind() == OMPD_simd);
  }
}
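// Common lowering for 'parallel' and the combined 'parallel ...' directives:
// outlines the captured statement, emits num_threads/proc_bind hints when
// present, and calls the runtime to fork the team, honoring an 'if' clause
// condition when one is given.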
static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
                                           const OMPExecutableDirective &S,
                                           OpenMPDirectiveKind InnermostKind,
                                           const RegionCodeGenTy &CodeGen) {
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = CGF.GenerateCapturedStmtArgument(*CS);
  auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
      S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (auto C = S.getSingleClause(OMPC_num_threads)) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    auto NumThreadsClause = cast<OMPNumThreadsClause>(C);
    auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                         /*IgnoreResultAssign*/ true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getLocStart());
  }
  if (auto *C = S.getSingleClause(OMPC_proc_bind)) {
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
    auto *ProcBindClause = cast<OMPProcBindClause>(C);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
  }
  const Expr *IfCond = nullptr;
  if (auto C = S.getSingleClause(OMPC_if)) {
    IfCond = cast<OMPIfClause>(C)->getCondition();
  }
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
                                              CapturedStruct, IfCond);
}
void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins || Firstprivates) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables and on propagation of the
      // master thread's values of threadprivate variables to the local
      // instances in all other implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                 OMPD_unknown);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S);
    // Emit implicit barrier at the end of the 'parallel' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_unknown);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
}
void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update counters values on current iteration.
  for (auto I : D.updates()) {
    EmitIgnoredExpr(I);
  }
  // Update the linear variables.
  for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    for (auto U : C->updates()) {
      EmitIgnoredExpr(U);
    }
  }

  // On a continue in the body, jump to the end.
  auto Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  // Emit loop body.
  EmitStmt(D.getBody());
  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  // TODO: Update lastprivates if the SeparateIter flag is true.
  // This will be implemented in a follow-up OMPLastprivateClause patch, but
  // result should be still correct without it, as we do not make these
  // variables private yet.
}
void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  auto LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}
585 void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
586 // Emit inits for the linear variables.
587 for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
588 auto *C = cast<OMPLinearClause>(*I);
589 for (auto Init : C->inits()) {
590 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
591 auto *OrigVD = cast<VarDecl>(
592 cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())->getDecl());
593 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
594 CapturedStmtInfo->lookup(OrigVD) != nullptr,
595 VD->getInit()->getType(), VK_LValue,
596 VD->getInit()->getExprLoc());
597 AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
598 EmitExprAsInit(&DRE, VD,
599 MakeAddrLValue(Emission.getAllocatedAddress(),
600 VD->getType(), Emission.Alignment),
601 /*capturedByInit=*/false);
      EmitAutoVarCleanups(Emission);
    }
    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
}
615 static void emitLinearClauseFinal(CodeGenFunction &CGF,
616 const OMPLoopDirective &D) {
617 // Emit the final values of the linear variables.
618 for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
619 auto *C = cast<OMPLinearClause>(*I);
620 auto IC = C->varlist_begin();
621 for (auto F : C->finals()) {
622 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
623 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
624 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
625 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
626 auto *OrigAddr = CGF.EmitLValue(&DRE).getAddress();
627 CodeGenFunction::OMPPrivateScope VarScope(CGF);
628 VarScope.addPrivate(OrigVD,
629 [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
630 (void)VarScope.Privatize();
      CGF.EmitIgnoredExpr(F);
      ++IC;
    }
  }
}
637 static void emitAlignedClause(CodeGenFunction &CGF,
638 const OMPExecutableDirective &D) {
639 for (auto &&I = D.getClausesOfKind(OMPC_aligned); I; ++I) {
640 auto *Clause = cast<OMPAlignedClause>(*I);
641 unsigned ClauseAlignment = 0;
    if (auto AlignmentExpr = Clause->getAlignment()) {
      auto AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
    }
647 for (auto E : Clause->varlists()) {
648 unsigned Alignment = ClauseAlignment;
649 if (Alignment == 0) {
650 // OpenMP [2.8.1, Description]
651 // If no optional parameter is specified, implementation-defined default
        // alignments for SIMD instructions on the target platforms are assumed.
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
      assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
660 "alignment is not power of 2");
661 if (Alignment != 0) {
662 llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.EmitAlignmentAssumption(PtrValue, Alignment);
      }
    }
  }
}
669 static void emitPrivateLoopCounters(CodeGenFunction &CGF,
670 CodeGenFunction::OMPPrivateScope &LoopScope,
671 ArrayRef<Expr *> Counters) {
672 for (auto *E : Counters) {
673 auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
674 (void)LoopScope.addPrivate(VD, [&]() -> llvm::Value *{
675 // Emit var without initialization.
676 auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
677 CGF.EmitAutoVarCleanups(VarEmission);
      return VarEmission.getAllocatedAddress();
    });
  }
}
683 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
684 const Expr *Cond, llvm::BasicBlock *TrueBlock,
685 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
687 CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
688 emitPrivateLoopCounters(CGF, PreCondScope, S.counters());
689 const VarDecl *IVDecl =
690 cast<VarDecl>(cast<DeclRefExpr>(S.getIterationVariable())->getDecl());
691 bool IsRegistered = PreCondScope.addPrivate(IVDecl, [&]() -> llvm::Value *{
692 // Emit var without initialization.
693 auto VarEmission = CGF.EmitAutoVarAlloca(*IVDecl);
694 CGF.EmitAutoVarCleanups(VarEmission);
    return VarEmission.getAllocatedAddress();
  });
  assert(IsRegistered && "counter already registered as private");
  // Silence the warning about unused variable.
  (void)IsRegistered;
700 (void)PreCondScope.Privatize();
  // Initialize internal counter to 0 to calculate initial values of real
  // counters.
703 LValue IV = CGF.EmitLValue(S.getIterationVariable());
704 CGF.EmitStoreOfScalar(
705 llvm::ConstantInt::getNullValue(
706 IV.getAddress()->getType()->getPointerElementType()),
707 CGF.EmitLValue(S.getIterationVariable()), /*isInit=*/true);
708 // Get initial values of real counters.
709 for (auto I : S.updates()) {
    CGF.EmitIgnoredExpr(I);
  }
  // Check that loop is executed at least one time.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
}

static void
emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
719 CodeGenFunction::OMPPrivateScope &PrivateScope) {
720 for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
721 auto *C = cast<OMPLinearClause>(*I);
722 for (auto *E : C->varlists()) {
723 auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
724 bool IsRegistered = PrivateScope.addPrivate(VD, [&]()->llvm::Value * {
725 // Emit var without initialization.
726 auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
727 CGF.EmitAutoVarCleanups(VarEmission);
728 return VarEmission.getAllocatedAddress();
730 assert(IsRegistered && "linear var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
    }
  }
}
737 static void emitSafelenClause(CodeGenFunction &CGF,
738 const OMPExecutableDirective &D) {
  if (auto *C =
          cast_or_null<OMPSafelenClause>(D.getSingleClause(OMPC_safelen))) {
741 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
742 /*ignoreResult=*/true);
743 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
744 CGF.LoopStack.setVectorizerWidth(Val->getZExtValue());
745 // In presence of finite 'safelen', it may be unsafe to mark all
746 // the memory instructions parallel, because loop-carried
747 // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(false);
  }
}
752 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
753 // Walk clauses and process safelen/lastprivate.
754 LoopStack.setParallel();
755 LoopStack.setVectorizerEnable(true);
  emitSafelenClause(*this, D);
}
759 void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
760 auto IC = D.counters().begin();
761 for (auto F : D.finals()) {
762 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
763 if (LocalDeclMap.lookup(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
764 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
765 CapturedStmtInfo->lookup(OrigVD) != nullptr,
766 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
767 auto *OrigAddr = EmitLValue(&DRE).getAddress();
768 OMPPrivateScope VarScope(*this);
769 VarScope.addPrivate(OrigVD,
770 [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
  }
  emitLinearClauseFinal(*this, D);
}
779 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
780 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
782 // for (IV in 0..LastIteration) BODY;
783 // <Final counter/linear vars updates>;
787 // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // original condition.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
797 ContBlock = CGF.createBasicBlock("simd.if.end");
798 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
799 CGF.getProfileCount(&S));
800 CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }
804 // Emit the loop iteration variable.
805 const Expr *IVExpr = S.getIterationVariable();
806 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
807 CGF.EmitVarDecl(*IVDecl);
808 CGF.EmitIgnoredExpr(S.getInit());
810 // Emit the iterations count variable.
811 // If it is not a variable, Sema decided to calculate iterations count on
812 // each iteration (e.g., it is foldable into a constant).
813 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
814 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
815 // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }
819 CGF.EmitOMPSimdInit(S);
821 emitAlignedClause(CGF, S);
822 CGF.EmitOMPLinearClauseInit(S);
823 bool HasLastprivateClause;
    {
      OMPPrivateScope LoopScope(CGF);
826 emitPrivateLoopCounters(CGF, LoopScope, S.counters());
827 emitPrivateLinearVars(CGF, S, LoopScope);
828 CGF.EmitOMPPrivateClause(S, LoopScope);
829 CGF.EmitOMPReductionClauseInit(S, LoopScope);
830 HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
831 (void)LoopScope.Privatize();
      CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                           S.getInc(),
                           [&S](CodeGenFunction &CGF) {
                             CGF.EmitOMPLoopBody(S, JumpDest());
                             CGF.EmitStopPoint(&S);
                           },
                           [](CodeGenFunction &) {});
839 // Emit final copy of the lastprivate variables at the end of loops.
840 if (HasLastprivateClause) {
        CGF.EmitOMPLastprivateClauseFinal(S);
      }
      CGF.EmitOMPReductionClauseFinal(S);
    }
    CGF.EmitOMPSimdFinal(S);
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}
855 void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
856 const OMPLoopDirective &S,
857 OMPPrivateScope &LoopScope,
858 bool Ordered, llvm::Value *LB,
859 llvm::Value *UB, llvm::Value *ST,
860 llvm::Value *IL, llvm::Value *Chunk) {
861 auto &RT = CGM.getOpenMPRuntime();
863 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
864 const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);
  assert((Ordered ||
          !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");
872 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
873 // When schedule(dynamic,chunk_size) is specified, the iterations are
874 // distributed to threads in the team in chunks as the threads request them.
875 // Each thread executes a chunk of iterations, then requests another chunk,
876 // until no chunks remain to be distributed. Each chunk contains chunk_size
877 // iterations, except for the last chunk to be distributed, which may have
878 // fewer iterations. When no chunk_size is specified, it defaults to 1.
880 // When schedule(guided,chunk_size) is specified, the iterations are assigned
881 // to threads in the team in chunks as the executing threads request them.
882 // Each thread executes a chunk of iterations, then requests another chunk,
883 // until no chunks remain to be assigned. For a chunk_size of 1, the size of
884 // each chunk is proportional to the number of unassigned iterations divided
885 // by the number of threads in the team, decreasing to 1. For a chunk_size
886 // with value k (greater than 1), the size of each chunk is determined in the
887 // same way, with the restriction that the chunks do not contain fewer than k
888 // iterations (except for the last chunk to be assigned, which may have fewer
889 // than k iterations).
891 // When schedule(auto) is specified, the decision regarding scheduling is
892 // delegated to the compiler and/or runtime system. The programmer gives the
893 // implementation the freedom to choose any possible mapping of iterations to
894 // threads in the team.
896 // When schedule(runtime) is specified, the decision regarding scheduling is
897 // deferred until run time, and the schedule and chunk size are taken from the
898 // run-sched-var ICV. If the ICV is set to auto, the schedule is
899 // implementation defined
901 // while(__kmpc_dispatch_next(&LB, &UB)) {
903 // while (idx <= UB) { BODY; ++idx;
904 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
908 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
909 // When schedule(static, chunk_size) is specified, iterations are divided into
910 // chunks of size chunk_size, and the chunks are assigned to the threads in
911 // the team in a round-robin fashion in the order of the thread number.
913 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
914 // while (idx <= UB) { BODY; ++idx; } // inner loop
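  // For example, '#pragma omp for schedule(dynamic, 4)' with a 32-bit signed
  // induction variable is expected to lower to a __kmpc_dispatch_init_4 call
  // followed by the dispatch loop sketched above; the exact runtime entry
  // points are selected by CGOpenMPRuntime based on IVSize and IVSigned.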
920 const Expr *IVExpr = S.getIterationVariable();
921 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
922 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
  RT.emitForInit(
      *this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, Ordered, IL, LB,
      (DynamicOrOrdered ? EmitAnyExpr(S.getLastIteration()).getScalarVal()
                        : UB),
      ST, Chunk);
930 auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
932 // Start the loop with a block that tests the condition.
933 auto CondBlock = createBasicBlock("omp.dispatch.cond");
934 EmitBlock(CondBlock);
935 LoopStack.push(CondBlock);
937 llvm::Value *BoolCondVal = nullptr;
938 if (!DynamicOrOrdered) {
939 // UB = min(UB, GlobalUB)
940 EmitIgnoredExpr(S.getEnsureUpperBound());
942 EmitIgnoredExpr(S.getInit());
    BoolCondVal = EvaluateExprAsBool(S.getCond());
  } else {
    BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned,
                                 IL, LB, UB, ST);
  }
950 // If there are any cleanups between here and the loop-exit scope,
951 // create a block to stage a loop exit along.
952 auto ExitBlock = LoopExit.getBlock();
953 if (LoopScope.requiresCleanups())
954 ExitBlock = createBasicBlock("omp.dispatch.cleanup");
956 auto LoopBody = createBasicBlock("omp.dispatch.body");
957 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
958 if (ExitBlock != LoopExit.getBlock()) {
959 EmitBlock(ExitBlock);
960 EmitBranchThroughCleanup(LoopExit);
964 // Emit "IV = LB" (in case of static schedule, we have already calculated new
965 // LB for loop condition and emitted it above).
966 if (DynamicOrOrdered)
967 EmitIgnoredExpr(S.getInit());
969 // Create a block for the increment.
970 auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
971 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
973 // Generate !llvm.loop.parallel metadata for loads and stores for loops
974 // with dynamic/guided scheduling and without ordered clause.
975 if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
976 LoopStack.setParallel((ScheduleKind == OMPC_SCHEDULE_dynamic ||
977 ScheduleKind == OMPC_SCHEDULE_guided) &&
983 SourceLocation Loc = S.getLocStart();
984 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
985 [&S, LoopExit](CodeGenFunction &CGF) {
986 CGF.EmitOMPLoopBody(S, LoopExit);
987 CGF.EmitStopPoint(&S);
989 [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
991 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
992 CGF, Loc, IVSize, IVSigned);
996 EmitBlock(Continue.getBlock());
997 BreakContinueStack.pop_back();
998 if (!DynamicOrOrdered) {
999 // Emit "LB = LB + Stride", "UB = UB + Stride".
1000 EmitIgnoredExpr(S.getNextLowerBound());
1001 EmitIgnoredExpr(S.getNextUpperBound());
1004 EmitBranch(CondBlock);
1006 // Emit the fall-through block.
1007 EmitBlock(LoopExit.getBlock());
1009 // Tell the runtime we are done.
1010 if (!DynamicOrOrdered)
1011 RT.emitForStaticFinish(*this, S.getLocEnd());
1014 /// \brief Emit a helper variable and return corresponding lvalue.
1015 static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
1016 const DeclRefExpr *Helper) {
1017 auto VDecl = cast<VarDecl>(Helper->getDecl());
1018 CGF.EmitVarDecl(*VDecl);
1019 return CGF.EmitLValue(Helper);
1022 static std::pair<llvm::Value * /*Chunk*/, OpenMPScheduleClauseKind>
1023 emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
1025 // Detect the loop schedule kind and chunk.
1026 auto ScheduleKind = OMPC_SCHEDULE_unknown;
1027 llvm::Value *Chunk = nullptr;
1029 cast_or_null<OMPScheduleClause>(S.getSingleClause(OMPC_schedule))) {
1030 ScheduleKind = C->getScheduleKind();
1031 if (const auto *Ch = C->getChunkSize()) {
1032 if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
1034 const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl());
1035 CGF.EmitVarDecl(*ImpVar);
1036 CGF.EmitStoreThroughLValue(
1037 CGF.EmitAnyExpr(Ch),
1038 CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
1039 ImpVar->getType()));
1044 if (!C->getHelperChunkSize() || !OuterRegion) {
1045 Chunk = CGF.EmitScalarExpr(Ch);
1046 Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(),
1047 S.getIterationVariable()->getType());
1051 return std::make_pair(Chunk, ScheduleKind);
1054 bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
1055 // Emit the loop iteration variable.
1056 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
1057 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
1058 EmitVarDecl(*IVDecl);
1060 // Emit the iterations count variable.
1061 // If it is not a variable, Sema decided to calculate iterations count on each
1062 // iteration (e.g., it is foldable into a constant).
1063 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
1064 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
1065 // Emit calculation of the iterations count.
1066 EmitIgnoredExpr(S.getCalcLastIteration());
1069 auto &RT = CGM.getOpenMPRuntime();
1071 bool HasLastprivateClause;
1072 // Check pre-condition.
1074 // Skip the entire loop if we don't meet the precondition.
1075 // If the condition constant folds and can be elided, avoid emitting the
1078 llvm::BasicBlock *ContBlock = nullptr;
1079 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
1083 auto *ThenBlock = createBasicBlock("omp.precond.then");
1084 ContBlock = createBasicBlock("omp.precond.end");
1085 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
1086 getProfileCount(&S));
1087 EmitBlock(ThenBlock);
1088 incrementProfileCounter(&S);
1091 emitAlignedClause(*this, S);
1092 EmitOMPLinearClauseInit(S);
1093 // Emit 'then' code.
1095 // Emit helper vars inits.
1097 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
1099 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
1101 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
1103 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
1105 OMPPrivateScope LoopScope(*this);
1106 if (EmitOMPFirstprivateClause(S, LoopScope)) {
1107 // Emit implicit barrier to synchronize threads and avoid data races on
1108 // initialization of firstprivate variables.
1109 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
1112 EmitOMPPrivateClause(S, LoopScope);
1113 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
1114 EmitOMPReductionClauseInit(S, LoopScope);
1115 emitPrivateLoopCounters(*this, LoopScope, S.counters());
1116 emitPrivateLinearVars(*this, S, LoopScope);
1117 (void)LoopScope.Privatize();
1119 // Detect the loop schedule kind and chunk.
1121 OpenMPScheduleClauseKind ScheduleKind;
1123 emitScheduleClause(*this, S, /*OuterRegion=*/false);
1124 Chunk = ScheduleInfo.first;
1125 ScheduleKind = ScheduleInfo.second;
1126 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1127 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1128 const bool Ordered = S.getSingleClause(OMPC_ordered) != nullptr;
1129 if (RT.isStaticNonchunked(ScheduleKind,
1130 /* Chunked */ Chunk != nullptr) &&
1132 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
1135 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1136 // When no chunk_size is specified, the iteration space is divided into
1137 // chunks that are approximately equal in size, and at most one chunk is
1138 // distributed to each thread. Note that the size of the chunks is
1139 // unspecified in this case.
1140 RT.emitForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
1141 Ordered, IL.getAddress(), LB.getAddress(),
1142 UB.getAddress(), ST.getAddress());
1143 auto LoopExit = getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
1144 // UB = min(UB, GlobalUB);
1145 EmitIgnoredExpr(S.getEnsureUpperBound());
1147 EmitIgnoredExpr(S.getInit());
1148 // while (idx <= UB) { BODY; ++idx; }
1149 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
1151 [&S, LoopExit](CodeGenFunction &CGF) {
1152 CGF.EmitOMPLoopBody(S, LoopExit);
1153 CGF.EmitStopPoint(&S);
1155 [](CodeGenFunction &) {});
1156 EmitBlock(LoopExit.getBlock());
1157 // Tell the runtime we are done.
1158 RT.emitForStaticFinish(*this, S.getLocStart());
1160 // Emit the outer loop, which requests its work chunk [LB..UB] from
1161 // runtime and runs the inner loop to process it.
1162 EmitOMPForOuterLoop(ScheduleKind, S, LoopScope, Ordered,
1163 LB.getAddress(), UB.getAddress(), ST.getAddress(),
1164 IL.getAddress(), Chunk);
1166 EmitOMPReductionClauseFinal(S);
1167 // Emit final copy of the lastprivate variables if IsLastIter != 0.
1168 if (HasLastprivateClause)
1169 EmitOMPLastprivateClauseFinal(
1170 S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
1172 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
1173 EmitOMPSimdFinal(S);
1175 // We're now done with the loop, so jump to the continuation block.
1177 EmitBranch(ContBlock);
1178 EmitBlock(ContBlock, true);
1181 return HasLastprivateClause;
1184 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
1185 LexicalScope Scope(*this, S.getSourceRange());
1186 bool HasLastprivates = false;
1187 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
1188 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
1190 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen);
1192 // Emit an implicit barrier at the end.
1193 if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
1194 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
1198 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
1199 LexicalScope Scope(*this, S.getSourceRange());
1200 bool HasLastprivates = false;
1201 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
1202 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
1204 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
1206 // Emit an implicit barrier at the end.
1207 if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
1208 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
1212 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
1214 llvm::Value *Init = nullptr) {
1215 auto LVal = CGF.MakeNaturalAlignAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
1217 CGF.EmitScalarInit(Init, LVal);
1222 CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
1223 auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
1224 auto *CS = dyn_cast<CompoundStmt>(Stmt);
1225 if (CS && CS->size() > 1) {
1226 bool HasLastprivates = false;
1227 auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) {
1228 auto &C = CGF.CGM.getContext();
1229 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
1230 // Emit helper vars inits.
1231 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
1232 CGF.Builder.getInt32(0));
1233 auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1);
1235 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
1236 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
1237 CGF.Builder.getInt32(1));
1238 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
1239 CGF.Builder.getInt32(0));
1241 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
1242 OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
1243 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
1244 OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
1245 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
1246 // Generate condition for loop.
1247 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
1248 OK_Ordinary, S.getLocStart(),
1249 /*fpContractable=*/false);
1250 // Increment for loop counter.
1251 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue,
1252 OK_Ordinary, S.getLocStart());
1253 auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) {
1254 // Iterate through all sections and emit a switch construct:
1257 // <SectionStmt[0]>;
1260 // case <NumSection> - 1:
1261 // <SectionStmt[<NumSection> - 1]>;
1264 // .omp.sections.exit:
1265 auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
1266 auto *SwitchStmt = CGF.Builder.CreateSwitch(
1267 CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
1269 unsigned CaseNumber = 0;
1270 for (auto *SubStmt : CS->children()) {
1271 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
1272 CGF.EmitBlock(CaseBB);
1273 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
1274 CGF.EmitStmt(SubStmt);
1275 CGF.EmitBranch(ExitBB);
1278 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1281 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
1282 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
1283 // Emit implicit barrier to synchronize threads and avoid data races on
1284 // initialization of firstprivate variables.
1285 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
1288 CGF.EmitOMPPrivateClause(S, LoopScope);
1289 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
1290 CGF.EmitOMPReductionClauseInit(S, LoopScope);
1291 (void)LoopScope.Privatize();
1293 // Emit static non-chunked loop.
1294 CGF.CGM.getOpenMPRuntime().emitForInit(
1295 CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
1296 /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
1297 LB.getAddress(), UB.getAddress(), ST.getAddress());
1298 // UB = min(UB, GlobalUB);
1299 auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
1300 auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
1301 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
1302 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
1304 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
1305 // while (idx <= UB) { BODY; ++idx; }
1306 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
1307 [](CodeGenFunction &) {});
1308 // Tell the runtime we are done.
1309 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
1310 CGF.EmitOMPReductionClauseFinal(S);
1312 // Emit final copy of the lastprivate variables if IsLastIter != 0.
1313 if (HasLastprivates)
1314 CGF.EmitOMPLastprivateClauseFinal(
1315 S, CGF.Builder.CreateIsNotNull(
1316 CGF.EmitLoadOfScalar(IL, S.getLocStart())));
1319 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen);
1320 // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
1321 // clause. Otherwise the barrier will be generated by the codegen for the
1323 if (HasLastprivates && S.getSingleClause(OMPC_nowait)) {
1324 // Emit implicit barrier to synchronize threads and avoid data races on
1325 // initialization of firstprivate variables.
1326 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
1329 return OMPD_sections;
1331 // If only one section is found - no need to generate loop, emit as a single
1333 bool HasFirstprivates;
1334 // No need to generate reductions for sections with single section region, we
1335 // can use original shared variables for all operations.
1336 bool HasReductions = !S.getClausesOfKind(OMPC_reduction).empty();
1337 // No need to generate lastprivates for sections with single section region,
1338 // we can use original shared variable for all calculations with barrier at
1339 // the end of the sections.
1340 bool HasLastprivates = !S.getClausesOfKind(OMPC_lastprivate).empty();
1341 auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) {
1342 CodeGenFunction::OMPPrivateScope SingleScope(CGF);
1343 HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
1344 CGF.EmitOMPPrivateClause(S, SingleScope);
1345 (void)SingleScope.Privatize();
1349 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
1350 llvm::None, llvm::None, llvm::None,
1352 // Emit barrier for firstprivates, lastprivates or reductions only if
1353 // 'sections' directive has 'nowait' clause. Otherwise the barrier will be
1354 // generated by the codegen for the directive.
1355 if ((HasFirstprivates || HasLastprivates || HasReductions) &&
1356 S.getSingleClause(OMPC_nowait)) {
1357 // Emit implicit barrier to synchronize threads and avoid data races on
1358 // initialization of firstprivate variables.
1359 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_unknown);
1364 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
1365 LexicalScope Scope(*this, S.getSourceRange());
1366 OpenMPDirectiveKind EmittedAs = EmitSections(S);
1367 // Emit an implicit barrier at the end.
1368 if (!S.getSingleClause(OMPC_nowait)) {
1369 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs);
1373 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
1374 LexicalScope Scope(*this, S.getSourceRange());
1375 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1376 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1377 CGF.EnsureInsertPoint();
1379 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen);
1382 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
1383 llvm::SmallVector<const Expr *, 8> CopyprivateVars;
1384 llvm::SmallVector<const Expr *, 8> DestExprs;
1385 llvm::SmallVector<const Expr *, 8> SrcExprs;
1386 llvm::SmallVector<const Expr *, 8> AssignmentOps;
1387 // Check if there are any 'copyprivate' clauses associated with this
1390 // Build a list of copyprivate variables along with helper expressions
1391 // (<source>, <destination>, <destination>=<source> expressions)
1392 for (auto &&I = S.getClausesOfKind(OMPC_copyprivate); I; ++I) {
1393 auto *C = cast<OMPCopyprivateClause>(*I);
1394 CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
1395 DestExprs.append(C->destination_exprs().begin(),
1396 C->destination_exprs().end());
1397 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
1398 AssignmentOps.append(C->assignment_ops().begin(),
1399 C->assignment_ops().end());
1401 LexicalScope Scope(*this, S.getSourceRange());
1402 // Emit code for 'single' region along with 'copyprivate' clauses
1403 bool HasFirstprivates;
1404 auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) {
1405 CodeGenFunction::OMPPrivateScope SingleScope(CGF);
1406 HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
1407 CGF.EmitOMPPrivateClause(S, SingleScope);
1408 (void)SingleScope.Privatize();
1410 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1411 CGF.EnsureInsertPoint();
1413 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
1414 CopyprivateVars, DestExprs, SrcExprs,
1416 // Emit an implicit barrier at the end (to avoid data race on firstprivate
1417 // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
1418 if ((!S.getSingleClause(OMPC_nowait) || HasFirstprivates) &&
1419 CopyprivateVars.empty()) {
1420 CGM.getOpenMPRuntime().emitBarrierCall(
1421 *this, S.getLocStart(),
1422 S.getSingleClause(OMPC_nowait) ? OMPD_unknown : OMPD_single);
1426 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
1427 LexicalScope Scope(*this, S.getSourceRange());
1428 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1429 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1430 CGF.EnsureInsertPoint();
1432 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
1435 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
1436 LexicalScope Scope(*this, S.getSourceRange());
1437 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1438 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1439 CGF.EnsureInsertPoint();
1441 CGM.getOpenMPRuntime().emitCriticalRegion(
1442 *this, S.getDirectiveName().getAsString(), CodeGen, S.getLocStart());
1445 void CodeGenFunction::EmitOMPParallelForDirective(
1446 const OMPParallelForDirective &S) {
1447 // Emit directive as a combined directive that consists of two implicit
1448 // directives: 'parallel' with 'for' directive.
1449 LexicalScope Scope(*this, S.getSourceRange());
1450 (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
1451 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1452 CGF.EmitOMPWorksharingLoop(S);
1453 // Emit implicit barrier at the end of parallel region, but this barrier
1454 // is at the end of 'for' directive, so emit it as the implicit barrier for
1455 // this 'for' directive.
1456 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
1459 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
1462 void CodeGenFunction::EmitOMPParallelForSimdDirective(
1463 const OMPParallelForSimdDirective &S) {
1464 // Emit directive as a combined directive that consists of two implicit
1465 // directives: 'parallel' with 'for' directive.
1466 LexicalScope Scope(*this, S.getSourceRange());
1467 (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
1468 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1469 CGF.EmitOMPWorksharingLoop(S);
1470 // Emit implicit barrier at the end of parallel region, but this barrier
1471 // is at the end of 'for' directive, so emit it as the implicit barrier for
1472 // this 'for' directive.
1473 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
1476 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
1479 void CodeGenFunction::EmitOMPParallelSectionsDirective(
1480 const OMPParallelSectionsDirective &S) {
1481 // Emit directive as a combined directive that consists of two implicit
1482 // directives: 'parallel' with 'sections' directive.
1483 LexicalScope Scope(*this, S.getSourceRange());
1484 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1485 (void)CGF.EmitSections(S);
1486 // Emit implicit barrier at the end of parallel region.
1487 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
1490 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
1493 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
1494 // Emit outlined function for task construct.
1495 LexicalScope Scope(*this, S.getSourceRange());
1496 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
1497 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
1498 auto *I = CS->getCapturedDecl()->param_begin();
1499 auto *PartId = std::next(I);
1500 // The first function argument for tasks is a thread id, the second one is a
1501 // part id (0 for tied tasks, >=0 for untied task).
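  // The private/firstprivate/depend lists collected below describe which
  // captured variables need their own storage in the task object and which
  // dependences have to be registered with the runtime when the task is
  // created.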
1502 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
1503 // Get list of private variables.
1504 llvm::SmallVector<const Expr *, 8> PrivateVars;
1505 llvm::SmallVector<const Expr *, 8> PrivateCopies;
1506 for (auto &&I = S.getClausesOfKind(OMPC_private); I; ++I) {
1507 auto *C = cast<OMPPrivateClause>(*I);
1508 auto IRef = C->varlist_begin();
1509 for (auto *IInit : C->private_copies()) {
1510 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
1511 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
1512 PrivateVars.push_back(*IRef);
1513 PrivateCopies.push_back(IInit);
1514 }
1515 ++IRef;
1516 }
1517 }
1518 EmittedAsPrivate.clear();
1519 // Get list of firstprivate variables.
1520 llvm::SmallVector<const Expr *, 8> FirstprivateVars;
1521 llvm::SmallVector<const Expr *, 8> FirstprivateCopies;
1522 llvm::SmallVector<const Expr *, 8> FirstprivateInits;
1523 for (auto &&I = S.getClausesOfKind(OMPC_firstprivate); I; ++I) {
1524 auto *C = cast<OMPFirstprivateClause>(*I);
1525 auto IRef = C->varlist_begin();
1526 auto IElemInitRef = C->inits().begin();
1527 for (auto *IInit : C->private_copies()) {
1528 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
1529 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
1530 FirstprivateVars.push_back(*IRef);
1531 FirstprivateCopies.push_back(IInit);
1532 FirstprivateInits.push_back(*IElemInitRef);
1533 }
1534 ++IRef, ++IElemInitRef;
1535 }
1536 }
1537 // Build list of dependences.
1538 llvm::SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 8>
1539 Dependences;
1540 for (auto &&I = S.getClausesOfKind(OMPC_depend); I; ++I) {
1541 auto *C = cast<OMPDependClause>(*I);
1542 for (auto *IRef : C->varlists()) {
1543 Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
1544 }
1545 }
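// E.g. 'depend(in : a) depend(out : b)' yields the pairs (OMPC_DEPEND_in, a)
// and (OMPC_DEPEND_out, b), which emitTaskCall is expected to lower into the
// runtime's dependence information.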
1546 auto &&CodeGen = [PartId, &S, &PrivateVars, &FirstprivateVars](
1547 CodeGenFunction &CGF) {
1548 // Set proper addresses for generated private copies.
1549 auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
1550 OMPPrivateScope Scope(CGF);
1551 if (!PrivateVars.empty() || !FirstprivateVars.empty()) {
1552 auto *CopyFn = CGF.Builder.CreateAlignedLoad(
1553 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)),
1554 CGF.PointerAlignInBytes);
1555 auto *PrivatesPtr = CGF.Builder.CreateAlignedLoad(
1556 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)),
1557 CGF.PointerAlignInBytes);
1559 llvm::SmallVector<std::pair<const VarDecl *, llvm::Value *>, 16>
1560 PrivatePtrs;
1561 llvm::SmallVector<llvm::Value *, 16> CallArgs;
1562 CallArgs.push_back(PrivatesPtr);
1563 for (auto *E : PrivateVars) {
1564 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1565 auto *PrivatePtr =
1566 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
1567 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
1568 CallArgs.push_back(PrivatePtr);
1569 }
1570 for (auto *E : FirstprivateVars) {
1571 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1572 auto *PrivatePtr =
1573 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
1574 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
1575 CallArgs.push_back(PrivatePtr);
1576 }
1577 CGF.EmitRuntimeCall(CopyFn, CallArgs);
1578 for (auto &&Pair : PrivatePtrs) {
1579 auto *Replacement =
1580 CGF.Builder.CreateAlignedLoad(Pair.second, CGF.PointerAlignInBytes);
1581 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
1582 }
1583 }
1584 (void)Scope.Privatize();
1585 if (*PartId) {
1586 // TODO: emit code for untied tasks.
1587 }
1588 CGF.EmitStmt(CS->getCapturedStmt());
1589 };
1590 auto OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
1591 S, *I, OMPD_task, CodeGen);
1592 // Check if we should emit tied or untied task.
1593 bool Tied = !S.getSingleClause(OMPC_untied);
1594 // Check if the task is final
1595 llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
1596 if (auto *Clause = S.getSingleClause(OMPC_final)) {
1597 // If the condition constant folds and can be elided, try to avoid emitting
1598 // the condition and the dead arm of the if/else.
1599 auto *Cond = cast<OMPFinalClause>(Clause)->getCondition();
1600 bool CondConstant;
1601 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
1602 Final.setInt(CondConstant);
1603 else
1604 Final.setPointer(EvaluateExprAsBool(Cond));
1605 } else {
1606 // By default the task is not final.
1607 Final.setInt(/*IntVal=*/false);
1608 }
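// E.g. 'final(1)' constant-folds, so only Final.setInt(true) is recorded,
// while 'final(n > 10)' emits the comparison and keeps the resulting i1 in the
// pointer half of the PointerIntPair.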
1609 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
1610 const Expr *IfCond = nullptr;
1611 if (auto C = S.getSingleClause(OMPC_if)) {
1612 IfCond = cast<OMPIfClause>(C)->getCondition();
1613 }
1614 CGM.getOpenMPRuntime().emitTaskCall(
1615 *this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy,
1616 CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars,
1617 FirstprivateCopies, FirstprivateInits, Dependences);
1618 }
1620 void CodeGenFunction::EmitOMPTaskyieldDirective(
1621 const OMPTaskyieldDirective &S) {
1622 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
1623 }
1625 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
1626 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
1627 }
1629 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
1630 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
1631 }
1633 void CodeGenFunction::EmitOMPTaskgroupDirective(
1634 const OMPTaskgroupDirective &S) {
1635 LexicalScope Scope(*this, S.getSourceRange());
1636 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1637 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1638 CGF.EnsureInsertPoint();
1639 };
1640 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
1641 }
1643 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
1644 CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
1645 if (auto C = S.getSingleClause(/*K*/ OMPC_flush)) {
1646 auto FlushClause = cast<OMPFlushClause>(C);
1647 return llvm::makeArrayRef(FlushClause->varlist_begin(),
1648 FlushClause->varlist_end());
1649 }
1650 return llvm::None;
1651 }(), S.getLocStart());
1652 }
1654 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
1655 LexicalScope Scope(*this, S.getSourceRange());
1656 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1657 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1658 CGF.EnsureInsertPoint();
1659 };
1660 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart());
1661 }
1663 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
1664 QualType SrcType, QualType DestType) {
1665 assert(CGF.hasScalarEvaluationKind(DestType) &&
1666 "DestType must have scalar evaluation kind.");
1667 assert(!Val.isAggregate() && "Must be a scalar or complex.");
1668 return Val.isScalar()
1669 ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType)
1670 : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
1671 DestType);
1672 }
1674 static CodeGenFunction::ComplexPairTy
1675 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
1676 QualType DestType) {
1677 assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
1678 "DestType must have complex evaluation kind.");
1679 CodeGenFunction::ComplexPairTy ComplexVal;
1680 if (Val.isScalar()) {
1681 // Convert the input element to the element type of the complex.
1682 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
1683 auto ScalarVal =
1684 CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestElementType);
1685 ComplexVal = CodeGenFunction::ComplexPairTy(
1686 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
1687 } else {
1688 assert(Val.isComplex() && "Must be a scalar or complex.");
1689 auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
1690 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
1691 ComplexVal.first = CGF.EmitScalarConversion(
1692 Val.getComplexVal().first, SrcElementType, DestElementType);
1693 ComplexVal.second = CGF.EmitScalarConversion(
1694 Val.getComplexVal().second, SrcElementType, DestElementType);
1695 }
1696 return ComplexVal;
1697 }
1699 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
1700 LValue LVal, RValue RVal) {
1701 if (LVal.isGlobalReg()) {
1702 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
1703 } else {
1704 CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent
1705 : llvm::Monotonic,
1706 LVal.isVolatile(), /*IsInit=*/false);
1707 }
1708 }
1710 static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal,
1711 QualType RValTy) {
1712 switch (CGF.getEvaluationKind(LVal.getType())) {
1713 case TEK_Scalar:
1714 CGF.EmitStoreThroughLValue(
1715 RValue::get(convertToScalarValue(CGF, RVal, RValTy, LVal.getType())),
1716 LVal);
1717 break;
1718 case TEK_Complex:
1719 CGF.EmitStoreOfComplex(
1720 convertToComplexValue(CGF, RVal, RValTy, LVal.getType()), LVal,
1721 /*isInit=*/false);
1722 break;
1723 case TEK_Aggregate:
1724 llvm_unreachable("Must be a scalar or complex.");
1725 }
1726 }
1728 static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
1729 const Expr *X, const Expr *V,
1730 SourceLocation Loc) {
1732 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
1733 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
1734 LValue XLValue = CGF.EmitLValue(X);
1735 LValue VLValue = CGF.EmitLValue(V);
1736 RValue Res = XLValue.isGlobalReg()
1737 ? CGF.EmitLoadOfLValue(XLValue, Loc)
1738 : CGF.EmitAtomicLoad(XLValue, Loc,
1739 IsSeqCst ? llvm::SequentiallyConsistent
1740 : llvm::Monotonic,
1741 XLValue.isVolatile());
1742 // OpenMP, 2.12.6, atomic Construct
1743 // Any atomic construct with a seq_cst clause forces the atomically
1744 // performed operation to include an implicit flush operation without a
1745 // list.
1746 if (IsSeqCst)
1747 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
1748 emitSimpleStore(CGF, VLValue, Res, X->getType().getNonReferenceType());
1749 }
1751 static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
1752 const Expr *X, const Expr *E,
1753 SourceLocation Loc) {
1755 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
1756 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
1757 // OpenMP, 2.12.6, atomic Construct
1758 // Any atomic construct with a seq_cst clause forces the atomically
1759 // performed operation to include an implicit flush operation without a
1760 // list.
1761 if (IsSeqCst)
1762 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
1763 }
1765 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
1766 RValue Update,
1767 BinaryOperatorKind BO,
1768 llvm::AtomicOrdering AO,
1769 bool IsXLHSInRHSPart) {
1770 auto &Context = CGF.CGM.getContext();
1771 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
1772 // expression is simple and atomic is allowed for the given type for the
1773 // target platform.
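// E.g. '#pragma omp atomic' on 'x += 1;' with an 'int x' normally passes all
// of these checks and becomes a single 'atomicrmw add'; a floating-point or
// bitfield 'x' fails them, so the caller falls back to the compare-and-swap
// update path instead.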
1774 if (BO == BO_Comma || !Update.isScalar() ||
1775 !Update.getScalarVal()->getType()->isIntegerTy() ||
1776 !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
1777 (Update.getScalarVal()->getType() !=
1778 X.getAddress()->getType()->getPointerElementType())) ||
1779 !X.getAddress()->getType()->getPointerElementType()->isIntegerTy() ||
1780 !Context.getTargetInfo().hasBuiltinAtomic(
1781 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
1782 return std::make_pair(false, RValue::get(nullptr));
1784 llvm::AtomicRMWInst::BinOp RMWOp;
1785 switch (BO) {
1786 case BO_Add:
1787 RMWOp = llvm::AtomicRMWInst::Add;
1788 break;
1789 case BO_Sub:
1790 if (!IsXLHSInRHSPart)
1791 return std::make_pair(false, RValue::get(nullptr));
1792 RMWOp = llvm::AtomicRMWInst::Sub;
1793 break;
1794 case BO_And:
1795 RMWOp = llvm::AtomicRMWInst::And;
1796 break;
1797 case BO_Or:
1798 RMWOp = llvm::AtomicRMWInst::Or;
1799 break;
1800 case BO_Xor:
1801 RMWOp = llvm::AtomicRMWInst::Xor;
1802 break;
1803 case BO_LT:
1804 RMWOp = X.getType()->hasSignedIntegerRepresentation()
1805 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
1806 : llvm::AtomicRMWInst::Max)
1807 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
1808 : llvm::AtomicRMWInst::UMax);
1809 break;
1810 case BO_GT:
1811 RMWOp = X.getType()->hasSignedIntegerRepresentation()
1812 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
1813 : llvm::AtomicRMWInst::Min)
1814 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
1815 : llvm::AtomicRMWInst::UMin);
1816 break;
1817 case BO_Assign:
1818 RMWOp = llvm::AtomicRMWInst::Xchg;
1819 break;
1820 case BO_Mul: case BO_Div: case BO_Rem: case BO_Shl: case BO_Shr:
1825 case BO_LAnd: case BO_LOr:
1827 return std::make_pair(false, RValue::get(nullptr));
1828 case BO_PtrMemD: case BO_PtrMemI: case BO_LE: case BO_GE: case BO_EQ:
1833 case BO_NE: case BO_AddAssign: case BO_SubAssign: case BO_AndAssign:
1837 case BO_OrAssign: case BO_XorAssign: case BO_MulAssign: case BO_DivAssign:
1841 case BO_RemAssign: case BO_ShlAssign: case BO_ShrAssign: case BO_Comma:
1845 llvm_unreachable("Unsupported atomic update operation");
1846 }
1847 auto *UpdateVal = Update.getScalarVal();
1848 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
1849 UpdateVal = CGF.Builder.CreateIntCast(
1850 IC, X.getAddress()->getType()->getPointerElementType(),
1851 X.getType()->hasSignedIntegerRepresentation());
1852 }
1853 auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
1854 return std::make_pair(true, RValue::get(Res));
1855 }
1857 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
1858 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
1859 llvm::AtomicOrdering AO, SourceLocation Loc,
1860 const llvm::function_ref<RValue(RValue)> &CommonGen) {
1861 // Update expressions are allowed to have the following forms:
1862 // x binop= expr; -> xrval binop expr;
1863 // x++, ++x -> xrval + 1;
1864 // x--, --x -> xrval - 1;
1865 // x = x binop expr; -> xrval binop expr
1866 // x = expr Op x; -> expr binop xrval;
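// E.g. for 'x = expr - x;' the captured 'x' sits on the right-hand side of the
// binop (IsXLHSInRHSPart is false), so the non-commutative 'atomicrmw sub'
// shortcut is rejected in emitOMPAtomicRMW and the CommonGen-based
// compare-and-swap path below is used instead.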
1867 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
1868 if (!Res.first) {
1869 if (X.isGlobalReg()) {
1870 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
1871 // 'xrval'.
1872 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
1873 } else {
1874 // Perform compare-and-swap procedure.
1875 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
1876 }
1877 }
1878 return Res;
1879 }
1881 static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
1882 const Expr *X, const Expr *E,
1883 const Expr *UE, bool IsXLHSInRHSPart,
1884 SourceLocation Loc) {
1885 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
1886 "Update expr in 'atomic update' must be a binary operator.");
1887 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
1888 // Update expressions are allowed to have the following forms:
1889 // x binop= expr; -> xrval binop expr;
1890 // x++, ++x -> xrval + 1;
1891 // x--, --x -> xrval - 1;
1892 // x = x binop expr; -> xrval binop expr
1893 // x = expr Op x; -> expr binop xrval;
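// E.g. 'x++;' under '#pragma omp atomic update' is modeled as roughly
// 'x = x + 1' with IsXLHSInRHSPart = true, so the constant 1 reaches
// emitOMPAtomicRMW and a plain 'atomicrmw add' can be emitted.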
1894 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
1895 LValue XLValue = CGF.EmitLValue(X);
1896 RValue ExprRValue = CGF.EmitAnyExpr(E);
1897 auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
1898 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
1899 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
1900 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
1901 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
1902 auto &&Gen =
1903 [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
1904 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
1905 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
1906 return CGF.EmitAnyExpr(UE);
1907 };
1908 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
1909 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
1910 // OpenMP, 2.12.6, atomic Construct
1911 // Any atomic construct with a seq_cst clause forces the atomically
1912 // performed operation to include an implicit flush operation without a
1913 // list.
1914 if (IsSeqCst)
1915 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
1916 }
1918 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
1919 QualType SourceType, QualType ResType) {
1920 switch (CGF.getEvaluationKind(ResType)) {
1921 case TEK_Scalar:
1922 return RValue::get(convertToScalarValue(CGF, Value, SourceType, ResType));
1923 case TEK_Complex: {
1924 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType);
1925 return RValue::getComplex(Res.first, Res.second);
1926 }
1927 case TEK_Aggregate:
1928 break;
1929 }
1930 llvm_unreachable("Must be a scalar or complex.");
1931 }
1933 static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
1934 bool IsPostfixUpdate, const Expr *V,
1935 const Expr *X, const Expr *E,
1936 const Expr *UE, bool IsXLHSInRHSPart,
1937 SourceLocation Loc) {
1938 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
1939 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
1940 RValue NewVVal;
1941 LValue VLValue = CGF.EmitLValue(V);
1942 LValue XLValue = CGF.EmitLValue(X);
1943 RValue ExprRValue = CGF.EmitAnyExpr(E);
1944 auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
1945 QualType NewVValType;
1946 if (UE) {
1947 // 'x' is updated with some additional value.
1948 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
1949 "Update expr in 'atomic capture' must be a binary operator.");
1950 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
1951 // Update expressions are allowed to have the following forms:
1952 // x binop= expr; -> xrval binop expr;
1953 // x++, ++x -> xrval + 1;
1954 // x--, --x -> xrval - 1;
1955 // x = x binop expr; -> xrval binop expr
1956 // x = expr Op x; -> expr binop xrval;
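// E.g. for '#pragma omp atomic capture' on '{ v = x; x += expr; }'
// IsPostfixUpdate is true and 'v' receives the pre-update value of 'x',
// whereas for 'v = x += expr;' it is false and 'v' receives the updated value.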
1957 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
1958 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
1959 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
1960 NewVValType = XRValExpr->getType();
1961 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
1962 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
1963 IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
1964 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
1965 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
1966 RValue Res = CGF.EmitAnyExpr(UE);
1967 NewVVal = IsPostfixUpdate ? XRValue : Res;
1968 return Res;
1969 };
1970 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
1971 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
1972 if (Res.first) {
1973 // 'atomicrmw' instruction was generated.
1974 if (IsPostfixUpdate) {
1975 // Use old value from 'atomicrmw'.
1976 NewVVal = Res.second;
1977 } else {
1978 // 'atomicrmw' does not provide new value, so evaluate it using old
1979 // value of 'x'.
1980 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
1981 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
1982 NewVVal = CGF.EmitAnyExpr(UE);
1983 }
1984 }
1985 } else {
1986 // 'x' is simply rewritten with some 'expr'.
1987 NewVValType = X->getType().getNonReferenceType();
1988 ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
1989 X->getType().getNonReferenceType());
1990 auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
1991 NewVVal = XRValue;
1992 return ExprRValue;
1993 };
1994 // Try to perform atomicrmw xchg, otherwise simple exchange.
1995 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
1996 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
1997 Loc, Gen);
1998 if (Res.first) {
1999 // 'atomicrmw' instruction was generated.
2000 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
2001 }
2002 }
2003 // Emit post-update store to 'v' of old/new 'x' value.
2004 emitSimpleStore(CGF, VLValue, NewVVal, NewVValType);
2005 // OpenMP, 2.12.6, atomic Construct
2006 // Any atomic construct with a seq_cst clause forces the atomically
2007 // performed operation to include an implicit flush operation without a
2008 // list.
2009 if (IsSeqCst)
2010 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
2011 }
2013 static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
2014 bool IsSeqCst, bool IsPostfixUpdate,
2015 const Expr *X, const Expr *V, const Expr *E,
2016 const Expr *UE, bool IsXLHSInRHSPart,
2017 SourceLocation Loc) {
2018 switch (Kind) {
2019 case OMPC_read:
2020 EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
2021 break;
2022 case OMPC_write:
2023 EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
2024 break;
2025 case OMPC_unknown:
2026 case OMPC_update:
2027 EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
2028 break;
2029 case OMPC_capture:
2030 EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
2031 IsXLHSInRHSPart, Loc);
2032 break;
2033 case OMPC_if: case OMPC_final: case OMPC_num_threads: case OMPC_private:
2037 case OMPC_firstprivate: case OMPC_lastprivate: case OMPC_reduction:
2040 case OMPC_safelen: case OMPC_collapse: case OMPC_default: case OMPC_seq_cst:
2044 case OMPC_shared: case OMPC_linear: case OMPC_aligned: case OMPC_copyin:
2048 case OMPC_copyprivate: case OMPC_flush: case OMPC_proc_bind: case OMPC_schedule:
2052 case OMPC_ordered: case OMPC_nowait: case OMPC_untied: case OMPC_threadprivate:
2056 case OMPC_depend: case OMPC_mergeable:
2058 llvm_unreachable("Clause is not allowed in 'omp atomic'.");
2059 }
2060 }
2062 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
2063 bool IsSeqCst = S.getSingleClause(/*K=*/OMPC_seq_cst);
2064 OpenMPClauseKind Kind = OMPC_unknown;
2065 for (auto *C : S.clauses()) {
2066 // Find first clause (skip seq_cst clause, if it is first).
2067 if (C->getClauseKind() != OMPC_seq_cst) {
2068 Kind = C->getClauseKind();
2069 break;
2070 }
2071 }
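// E.g. '#pragma omp atomic capture seq_cst' reaches the dispatch below with
// Kind == OMPC_capture and IsSeqCst == true; a bare '#pragma omp atomic'
// leaves Kind as OMPC_unknown, which is handled like an update.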
2073 const auto *CS =
2074 S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
2075 if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
2076 enterFullExpression(EWC);
2077 }
2078 // Processing for statements under 'atomic capture'.
2079 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
2080 for (const auto *C : Compound->body()) {
2081 if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
2082 enterFullExpression(EWC);
2083 }
2084 }
2085 }
2087 LexicalScope Scope(*this, S.getSourceRange());
2088 auto &&CodeGen = [&S, Kind, IsSeqCst](CodeGenFunction &CGF) {
2089 EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
2090 S.getV(), S.getExpr(), S.getUpdateExpr(),
2091 S.isXLHSInRHSPart(), S.getLocStart());
2092 };
2093 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
2094 }
2096 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) {
2097 llvm_unreachable("CodeGen for 'omp target' is not supported yet.");
2098 }
2100 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
2101 llvm_unreachable("CodeGen for 'omp teams' is not supported yet.");
2102 }
2104 void CodeGenFunction::EmitOMPCancellationPointDirective(
2105 const OMPCancellationPointDirective &S) {
2106 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
2107 S.getCancelRegion());
2108 }
2110 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
2111 llvm_unreachable("CodeGen for 'omp cancel' is not supported yet.");
2112 }
2114 CodeGenFunction::JumpDest
2115 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
2116 if (Kind == OMPD_parallel || Kind == OMPD_task)
2117 return ReturnBlock;
2118 else if (Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections)
2119 return BreakContinueStack.empty() ? JumpDest()
2120 : BreakContinueStack.back().BreakBlock;
2121 return JumpDest();
2122 }