//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;
  auto SrcBegin = SrcAddr;
  auto DestBegin = DestAddr;
  auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin);
  // Cast from pointer to array type to pointer to single element.
  SrcBegin = Builder.CreatePointerBitCastOrAddrSpaceCast(SrcBegin,
                                                         DestBegin->getType());
  auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = createBasicBlock("omp.arraycpy.body");
  auto DoneBB = createBasicBlock("omp.arraycpy.done");
  auto IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);
  auto SrcElementCurrent =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementCurrent->addIncoming(SrcBegin, EntryBB);
  auto DestElementCurrent = Builder.CreatePHI(DestBegin->getType(), 2,
                                              "omp.arraycpy.destElementPast");
  DestElementCurrent->addIncoming(DestBegin, EntryBB);

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  auto DestElementNext = Builder.CreateConstGEP1_32(
      DestElementCurrent, /*Idx0=*/1, "omp.arraycpy.dest.element");
  auto SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementCurrent, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  auto Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementCurrent->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementCurrent->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}
void CodeGenFunction::EmitOMPCopy(CodeGenFunction &CGF,
                                  QualType OriginalType, llvm::Value *DestAddr,
                                  llvm::Value *SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      CGF.EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
    } else {
      // For arrays with complex element types perform element-by-element
      // copying.
      CGF.EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [&CGF, Copy, SrcVD, DestVD](llvm::Value *DestElement,
                                      llvm::Value *SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(CGF);
            Remap.addPrivate(DestVD, [DestElement]() -> llvm::Value *{
              return DestElement;
            });
            Remap.addPrivate(
                SrcVD, [SrcElement]() -> llvm::Value *{ return SrcElement; });
            (void)Remap.Privatize();
            CGF.EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(CGF);
    Remap.addPrivate(SrcVD, [SrcAddr]() -> llvm::Value *{ return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() -> llvm::Value *{ return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    CGF.EmitIgnoredExpr(Copy);
  }
}
bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  for (auto &&I = D.getClausesOfKind(OMPC_firstprivate); I; ++I) {
    auto *C = cast<OMPFirstprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto InitsRef = C->inits().begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsFirstprivate.count(OrigVD) == 0) {
        EmittedAsFirstprivate.insert(OrigVD);
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(
            const_cast<VarDecl *>(OrigVD),
            /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                OrigVD) != nullptr,
            (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        auto *OriginalAddr = EmitLValue(&DRE).getAddress();
        QualType Type = OrigVD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
            auto Emission = EmitAutoVarAlloca(*VD);
            auto *Init = VD->getInit();
            if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
              // Perform simple memcpy.
              EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
                                  Type);
            } else {
              EmitOMPAggregateAssign(
                  Emission.getAllocatedAddress(), OriginalAddr, Type,
                  [this, VDInit, Init](llvm::Value *DestElement,
                                       llvm::Value *SrcElement) {
                    // Clean up any temporaries needed by the initialization.
                    RunCleanupsScope InitScope(*this);
                    // Emit initialization for single element.
                    LocalDeclMap[VDInit] = SrcElement;
                    EmitAnyExprToMem(Init, DestElement,
                                     Init->getType().getQualifiers(),
                                     /*IsInitializer*/ false);
                    LocalDeclMap.erase(VDInit);
                  });
            }
            EmitAutoVarCleanups(Emission);
            return Emission.getAllocatedAddress();
          });
        } else {
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
            // Emit private VarDecl with copy init.
            // Remap temp VDInit variable to the address of the original
            // variable (for proper handling of captured global variables).
            LocalDeclMap[VDInit] = OriginalAddr;
            EmitDecl(*VD);
            LocalDeclMap.erase(VDInit);
            return GetAddrOfLocalVar(VD);
          });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef, ++InitsRef;
    }
  }
  return !EmittedAsFirstprivate.empty();
}
void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (auto &&I = D.getClausesOfKind(OMPC_private); I; ++I) {
    auto *C = cast<OMPPrivateClause>(*I);
    auto IRef = C->varlist_begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
              // Emit private VarDecl with copy init.
              EmitDecl(*VD);
              return GetAddrOfLocalVar(VD);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}
bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
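  //
  // Illustrative example (added commentary, not from the original source):
  //   int gv;
  //   #pragma omp threadprivate(gv)
  //   #pragma omp parallel copyin(gv)
  // Every thread whose threadprivate 'gv' is not already the master's copy
  // receives the master thread's value before the region body runs; the
  // caller is expected to emit the synchronizing barrier afterwards.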
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (auto &&I = D.getClausesOfKind(OMPC_copyin); I; ++I) {
    auto *C = cast<OMPCopyinClause>(*I);
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable.
        auto *MasterAddr = VD->isStaticLocal()
                               ? CGM.getStaticLocalDeclAddress(VD)
                               : CGM.GetAddrOfGlobal(VD);
        // Get the address of the threadprivate variable.
        auto *PrivateAddr = EmitLValue(*IRef).getAddress();
        if (CopiedVars.size() == 1) {
          // At first check if current thread is a master thread. If it is, no
          // need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr, CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr, CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(*this, Type, PrivateAddr, MasterAddr, DestVD, SrcVD,
                    AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}
bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
    HasAtLeastOneLastprivate = true;
    auto *C = cast<OMPLastprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> llvm::Value *{
          DeclRefExpr DRE(
              const_cast<VarDecl *>(OrigVD),
              /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                  OrigVD) != nullptr,
              (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in codegen
        // for 'firstprivate' clause.
        if (IInit) {
          auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered =
              PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
                // Emit private VarDecl with copy init.
                EmitDecl(*VD);
                return GetAddrOfLocalVar(VD);
              });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef, ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}
void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) {
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
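  //
  // Illustrative example (added commentary, not from the original source):
  //   #pragma omp for lastprivate(x)
  //   for (int i = 0; i < n; ++i) x = f(i);
  // Only the thread that executed the sequentially last iteration copies its
  // private 'x' back into the original variable, guarded by IsLastIterCond.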
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates;
  const Expr *LastIterVal = nullptr;
  const Expr *IVExpr = nullptr;
  const Expr *IncExpr = nullptr;
  if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    if (isOpenMPWorksharingDirective(D.getDirectiveKind())) {
      LastIterVal = cast<VarDecl>(cast<DeclRefExpr>(
                                      LoopDirective->getUpperBoundVariable())
                                      ->getDecl())
                        ->getAnyInitializer();
      IVExpr = LoopDirective->getIterationVariable();
      IncExpr = LoopDirective->getInc();
      auto IUpdate = LoopDirective->updates().begin();
      for (auto *E : LoopDirective->counters()) {
        auto *D = cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
        LoopCountersAndUpdates[D] = *IUpdate;
        ++IUpdate;
      }
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  bool FirstLCV = true;
  for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
    auto *C = cast<OMPLastprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If lastprivate variable is a loop control variable for a loop-based
        // directive, update its value before copying it back to the original
        // variable.
        if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) {
          if (FirstLCV && LastIterVal) {
            EmitAnyExprToMem(LastIterVal, EmitLValue(IVExpr).getAddress(),
                             IVExpr->getType().getQualifiers(),
                             /*IsInitializer=*/false);
            EmitIgnoredExpr(IncExpr);
            FirstLCV = false;
          }
          EmitIgnoredExpr(UpExpr);
        }
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the original variable.
        auto *OriginalAddr = GetAddrOfLocalVar(DestVD);
        // Get the address of the private variable.
        auto *PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        EmitOMPCopy(*this, Type, OriginalAddr, PrivateAddr, DestVD, SrcVD,
                    AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (IsLastIterCond) {
    EmitBlock(DoneBB, /*IsFinished=*/true);
  }
}
void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
    auto *C = cast<OMPReductionClause>(*I);
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (auto IRef : C->varlists()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
      auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> llvm::Value *{
        DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        IRef->getType(), VK_LValue, IRef->getExprLoc());
        return EmitLValue(&DRE).getAddress();
      });
      // Emit reduction copy.
      bool IsRegistered =
          PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> llvm::Value *{
            // Emit private VarDecl with reduction init.
            EmitDecl(*PrivateVD);
            return GetAddrOfLocalVar(PrivateVD);
          });
      assert(IsRegistered && "private var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
      ++ILHS, ++IRHS;
    }
  }
}
void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D) {
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
    HasAtLeastOneReduction = true;
    auto *C = cast<OMPReductionClause>(*I);
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has an implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getLocEnd(), LHSExprs, RHSExprs, ReductionOps,
        D.getSingleClause(OMPC_nowait) ||
            isOpenMPParallelDirective(D.getDirectiveKind()) ||
            D.getDirectiveKind() == OMPD_simd,
        D.getDirectiveKind() == OMPD_simd);
  }
}
static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
                                           const OMPExecutableDirective &S,
                                           const RegionCodeGenTy &CodeGen) {
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = CGF.GenerateCapturedStmtArgument(*CS);
  auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
      S, *CS->getCapturedDecl()->param_begin(), CodeGen);
  if (auto C = S.getSingleClause(OMPC_num_threads)) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    auto NumThreadsClause = cast<OMPNumThreadsClause>(C);
    auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                         /*IgnoreResultAssign*/ true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getLocStart());
  }
  if (auto *C = S.getSingleClause(OMPC_proc_bind)) {
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
    auto *ProcBindClause = cast<OMPProcBindClause>(C);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
  }
  const Expr *IfCond = nullptr;
  if (auto C = S.getSingleClause(OMPC_if)) {
    IfCond = cast<OMPIfClause>(C)->getCondition();
  }
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
                                              CapturedStruct, IfCond);
}
void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins || Firstprivates) {
      // Emit an implicit barrier to synchronize threads and avoid data races
      // on initialization of firstprivate variables or on propagation of the
      // master thread's values of threadprivate variables to the local
      // instances of those variables in all other implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                 OMPD_unknown);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S);
    // Emit implicit barrier at the end of the 'parallel' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_parallel);
  };
  emitCommonOMPParallelDirective(*this, S, CodeGen);
}
void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D) {
  RunCleanupsScope BodyScope(*this);
  // Update counters values on current iteration.
  for (auto I : D.updates()) {
    EmitIgnoredExpr(I);
  }
  // Update the linear variables.
  for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    for (auto U : C->updates()) {
      EmitIgnoredExpr(U);
    }
  }

  // On a continue in the body, jump to the end.
  auto Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(JumpDest(), Continue));
  // Emit loop body.
  EmitStmt(D.getBody());
  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  // TODO: Update lastprivates if the SeparateIter flag is true.
  // This will be implemented in a follow-up OMPLastprivateClause patch, but
  // result should be still correct without it, as we do not make these
  // variables private yet.
}
void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  auto LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}
void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
  // Emit inits for the linear variables.
  for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    for (auto Init : C->inits()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
      auto *OrigVD = cast<VarDecl>(
          cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())->getDecl());
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      VD->getInit()->getType(), VK_LValue,
                      VD->getInit()->getExprLoc());
      AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
      EmitExprAsInit(&DRE, VD,
                     MakeAddrLValue(Emission.getAllocatedAddress(),
                                    VD->getType(), Emission.Alignment),
                     /*capturedByInit=*/false);
      EmitAutoVarCleanups(Emission);
    }
    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
}
static void emitLinearClauseFinal(CodeGenFunction &CGF,
                                  const OMPLoopDirective &D) {
  // Emit the final values of the linear variables.
  for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    auto IC = C->varlist_begin();
    for (auto F : C->finals()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      auto *OrigAddr = CGF.EmitLValue(&DRE).getAddress();
      CodeGenFunction::OMPPrivateScope VarScope(CGF);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
      (void)VarScope.Privatize();
      CGF.EmitIgnoredExpr(F);
      ++IC;
    }
  }
}
static void emitAlignedClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  for (auto &&I = D.getClausesOfKind(OMPC_aligned); I; ++I) {
    auto *Clause = cast<OMPAlignedClause>(*I);
    unsigned ClauseAlignment = 0;
    if (auto AlignmentExpr = Clause->getAlignment()) {
      auto AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
    }
    for (auto E : Clause->varlists()) {
      unsigned Alignment = ClauseAlignment;
      if (Alignment == 0) {
        // OpenMP [2.8.1, Description]
        // If no optional parameter is specified, implementation-defined
        // default alignments for SIMD instructions on the target platforms
        // are assumed.
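        // Illustrative example (added commentary, not from the original
        // source):
        //   #pragma omp simd aligned(p : 32) // assume 'p' is 32-byte aligned
        //   #pragma omp simd aligned(q)      // use the target's default
        // In the second form the value computed below from the target hook is
        // used as the assumed alignment.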
        Alignment =
            CGF.CGM.getTargetCodeGenInfo().getOpenMPSimdDefaultAlignment(
                E->getType());
      }
      assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
             "alignment is not power of 2");
      if (Alignment != 0) {
        llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.EmitAlignmentAssumption(PtrValue, Alignment);
      }
    }
  }
}
static void emitPrivateLoopCounters(CodeGenFunction &CGF,
                                    CodeGenFunction::OMPPrivateScope &LoopScope,
                                    ArrayRef<Expr *> Counters) {
  for (auto *E : Counters) {
    auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    (void)LoopScope.addPrivate(VD, [&]() -> llvm::Value *{
      // Emit var without initialization.
      auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
      CGF.EmitAutoVarCleanups(VarEmission);
      return VarEmission.getAllocatedAddress();
    });
  }
}
static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  {
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    emitPrivateLoopCounters(CGF, PreCondScope, S.counters());
    const VarDecl *IVDecl =
        cast<VarDecl>(cast<DeclRefExpr>(S.getIterationVariable())->getDecl());
    bool IsRegistered = PreCondScope.addPrivate(IVDecl, [&]() -> llvm::Value *{
      // Emit var without initialization.
      auto VarEmission = CGF.EmitAutoVarAlloca(*IVDecl);
      CGF.EmitAutoVarCleanups(VarEmission);
      return VarEmission.getAllocatedAddress();
    });
    assert(IsRegistered && "counter already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;
    (void)PreCondScope.Privatize();
    // Initialize internal counter to 0 to calculate initial values of real
    // counters.
    LValue IV = CGF.EmitLValue(S.getIterationVariable());
    CGF.EmitStoreOfScalar(
        llvm::ConstantInt::getNullValue(
            IV.getAddress()->getType()->getPointerElementType()),
        CGF.EmitLValue(S.getIterationVariable()), /*isInit=*/true);
    // Get initial values of real counters.
    for (auto I : S.updates()) {
      CGF.EmitIgnoredExpr(I);
    }
  }
  // Check that loop is executed at least one time.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
}
static void
emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
                      CodeGenFunction::OMPPrivateScope &PrivateScope) {
  for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    for (auto *E : C->varlists()) {
      auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> llvm::Value *{
        // Emit var without initialization.
        auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
        CGF.EmitAutoVarCleanups(VarEmission);
        return VarEmission.getAllocatedAddress();
      });
      assert(IsRegistered && "linear var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
    }
  }
}
static void emitSafelenClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  if (auto *C =
          cast_or_null<OMPSafelenClause>(D.getSingleClause(OMPC_safelen))) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizerWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(false);
  }
}
void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
  // Walk clauses and process safelen/lastprivate.
  LoopStack.setParallel();
  LoopStack.setVectorizerEnable(true);
  emitSafelenClause(*this, D);
}
void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
  auto IC = D.counters().begin();
  for (auto F : D.finals()) {
    auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    if (LocalDeclMap.lookup(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      auto *OrigAddr = EmitLValue(&DRE).getAddress();
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
  }
  emitLinearClauseFinal(*this, D);
}
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }
    //
    // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
      ContBlock = CGF.createBasicBlock("simd.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }

    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());

    // Emit the iterations count variable.
    // If it is not a variable, Sema decided to calculate the iteration count
    // on each iteration (e.g., it is foldable into a constant).
    if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    CGF.EmitOMPSimdInit(S);

    emitAlignedClause(CGF, S);
    CGF.EmitOMPLinearClauseInit(S);
    bool HasLastprivateClause;
    {
      OMPPrivateScope LoopScope(CGF);
      emitPrivateLoopCounters(CGF, LoopScope, S.counters());
      emitPrivateLinearVars(CGF, S, LoopScope);
      CGF.EmitOMPPrivateClause(S, LoopScope);
      CGF.EmitOMPReductionClauseInit(S, LoopScope);
      HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
      (void)LoopScope.Privatize();
      CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(),
                           S.getCond(), S.getInc(),
                           [&S](CodeGenFunction &CGF) {
                             CGF.EmitOMPLoopBody(S);
                             CGF.EmitStopPoint(&S);
                           },
                           [](CodeGenFunction &) {});
      // Emit final copy of the lastprivate variables at the end of loops.
      if (HasLastprivateClause) {
        CGF.EmitOMPLastprivateClauseFinal(S);
      }
      CGF.EmitOMPReductionClauseFinal(S);
    }
    CGF.EmitOMPSimdFinal(S);
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);
}
void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
                                          const OMPLoopDirective &S,
                                          OMPPrivateScope &LoopScope,
                                          bool Ordered, llvm::Value *LB,
                                          llvm::Value *UB, llvm::Value *ST,
                                          llvm::Value *IL, llvm::Value *Chunk) {
  auto &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
  const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);

  assert((Ordered ||
          !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");

  // Emit the outer loop.
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(dynamic,chunk_size) is specified, the iterations are
  // distributed to threads in the team in chunks as the threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be distributed. Each chunk contains chunk_size
  // iterations, except for the last chunk to be distributed, which may have
  // fewer iterations. When no chunk_size is specified, it defaults to 1.
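  //
  // Illustrative example (added commentary, not from the original source):
  // with schedule(dynamic, 4) and 20 iterations, each thread repeatedly
  // requests the next unassigned chunk of 4 iterations (0-3, 4-7, ...) until
  // all 20 iterations have been handed out.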
  //
  // When schedule(guided,chunk_size) is specified, the iterations are assigned
  // to threads in the team in chunks as the executing threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
  // each chunk is proportional to the number of unassigned iterations divided
  // by the number of threads in the team, decreasing to 1. For a chunk_size
  // with value k (greater than 1), the size of each chunk is determined in the
  // same way, with the restriction that the chunks do not contain fewer than k
  // iterations (except for the last chunk to be assigned, which may have fewer
  // than k iterations).
  //
  // When schedule(auto) is specified, the decision regarding scheduling is
  // delegated to the compiler and/or runtime system. The programmer gives the
  // implementation the freedom to choose any possible mapping of iterations to
  // threads in the team.
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from
  // the run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined.
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) { BODY; ++idx;
  //     __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided
  // into chunks of size chunk_size, and the chunks are assigned to the threads
  // in the team in a round-robin fashion in the order of the thread number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //
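  // Illustrative example (added commentary, not from the original source):
  // with schedule(static, 2), 4 threads and 16 iterations, thread 0 executes
  // iterations 0-1 and 8-9, thread 1 executes 2-3 and 10-11, and so on in a
  // round-robin fashion.
  //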
  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  RT.emitForInit(
      *this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, Ordered, IL, LB,
      (DynamicOrOrdered ? EmitAnyExpr(S.getLastIteration()).getScalarVal()
                        : UB),
      ST, Chunk);

  auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.dispatch.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    // UB = min(UB, GlobalUB)
    EmitIgnoredExpr(S.getEnsureUpperBound());
    // IV = LB
    EmitIgnoredExpr(S.getInit());
    // IV < UB
    BoolCondVal = EvaluateExprAsBool(S.getCond());
  } else {
    BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, IL,
                                 LB, UB, ST);
  }

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
    ExitBlock = createBasicBlock("omp.dispatch.cleanup");

  auto LoopBody = createBasicBlock("omp.dispatch.body");
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);

  // Emit "IV = LB" (in case of static schedule, we have already calculated new
  // LB for loop condition and emitted it above).
  if (DynamicOrOrdered)
    EmitIgnoredExpr(S.getInit());

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Generate !llvm.loop.parallel metadata for loads and stores for loops
  // with dynamic/guided scheduling and without ordered clause.
  if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
    LoopStack.setParallel((ScheduleKind == OMPC_SCHEDULE_dynamic ||
                           ScheduleKind == OMPC_SCHEDULE_guided) &&
                          !Ordered);
  } else {
    EmitOMPSimdInit(S);
  }

  SourceLocation Loc = S.getLocStart();
  EmitOMPInnerLoop(
      S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
      [&S](CodeGenFunction &CGF) {
        CGF.EmitOMPLoopBody(S);
        CGF.EmitStopPoint(&S);
      },
      [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
        if (Ordered) {
          CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
              CGF, Loc, IVSize, IVSigned);
        }
      });

  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
    // Emit "LB = LB + Stride", "UB = UB + Stride".
    EmitIgnoredExpr(S.getNextLowerBound());
    EmitIgnoredExpr(S.getNextUpperBound());
  }

  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());

  // Tell the runtime we are done.
  if (!DynamicOrOrdered)
    RT.emitForStaticFinish(*this, S.getLocEnd());
}
/// \brief Emit a helper variable and return corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
                               const DeclRefExpr *Helper) {
  auto VDecl = cast<VarDecl>(Helper->getDecl());
  CGF.EmitVarDecl(*VDecl);
  return CGF.EmitLValue(Helper);
}
static std::pair<llvm::Value * /*Chunk*/, OpenMPScheduleClauseKind>
emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
                   bool OuterRegion) {
  // Detect the loop schedule kind and chunk.
  auto ScheduleKind = OMPC_SCHEDULE_unknown;
  llvm::Value *Chunk = nullptr;
  if (auto *C =
          cast_or_null<OMPScheduleClause>(S.getSingleClause(OMPC_schedule))) {
    ScheduleKind = C->getScheduleKind();
    if (const auto *Ch = C->getChunkSize()) {
      if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
        if (OuterRegion) {
          const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl());
          CGF.EmitVarDecl(*ImpVar);
          CGF.EmitStoreThroughLValue(
              CGF.EmitAnyExpr(Ch),
              CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
                                             ImpVar->getType()));
        } else {
          Ch = ImpRef;
        }
      }
      if (!C->getHelperChunkSize() || !OuterRegion) {
        Chunk = CGF.EmitScalarExpr(Ch);
        Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(),
                                         S.getIterationVariable()->getType());
      }
    }
  }
  return std::make_pair(Chunk, ScheduleKind);
}
bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
  // Emit the loop iteration variable.
  auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate the iteration count on
  // each iteration (e.g., it is foldable into a constant).
  if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  auto &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause;
  // Check pre-condition.
  {
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return false;
    } else {
      auto *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    emitAlignedClause(*this, S);
    EmitOMPLinearClauseInit(S);
    // Emit 'then' code.
    {
      // Emit helper vars inits.
      LValue LB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
      LValue UB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races on
        // initialization of firstprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
                                               OMPD_unknown);
      }
      EmitOMPPrivateClause(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPReductionClauseInit(S, LoopScope);
      emitPrivateLoopCounters(*this, LoopScope, S.counters());
      emitPrivateLinearVars(*this, S, LoopScope);
      (void)LoopScope.Privatize();

      // Detect the loop schedule kind and chunk.
      llvm::Value *Chunk;
      OpenMPScheduleClauseKind ScheduleKind;
      auto ScheduleInfo = emitScheduleClause(*this, S, /*OuterRegion=*/false);
      Chunk = ScheduleInfo.first;
      ScheduleKind = ScheduleInfo.second;
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
      const bool Ordered = S.getSingleClause(OMPC_ordered) != nullptr;
      if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr) &&
          !Ordered) {
        if (isOpenMPSimdDirective(S.getDirectiveKind())) {
          EmitOMPSimdInit(S);
        }
        // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
        // When no chunk_size is specified, the iteration space is divided into
        // chunks that are approximately equal in size, and at most one chunk is
        // distributed to each thread. Note that the size of the chunks is
        // unspecified in this case.
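        // Illustrative example (added commentary, not from the original
        // source): with schedule(static), 100 iterations and 4 threads, each
        // thread receives a single contiguous block of roughly 25 iterations.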
        RT.emitForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
                       Ordered, IL.getAddress(), LB.getAddress(),
                       UB.getAddress(), ST.getAddress());
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(S.getInit());
        // while (idx <= UB) { BODY; ++idx; }
        EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                         S.getInc(),
                         [&S](CodeGenFunction &CGF) {
                           CGF.EmitOMPLoopBody(S);
                           CGF.EmitStopPoint(&S);
                         },
                         [](CodeGenFunction &) {});
        // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getLocStart());
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        EmitOMPForOuterLoop(ScheduleKind, S, LoopScope, Ordered,
                            LB.getAddress(), UB.getAddress(), ST.getAddress(),
                            IL.getAddress(), Chunk);
      }
      EmitOMPReductionClauseFinal(S);
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause)
        EmitOMPLastprivateClauseFinal(
            S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
    }
    if (isOpenMPSimdDirective(S.getDirectiveKind())) {
      EmitOMPSimdFinal(S);
    }
    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
  return HasLastprivateClause;
}
void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
  }
}
void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
  }
}
static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
                                const Twine &Name,
                                llvm::Value *Init = nullptr) {
  auto LVal = CGF.MakeNaturalAlignAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
  if (Init)
    CGF.EmitScalarInit(Init, LVal);
  return LVal;
}
static OpenMPDirectiveKind emitSections(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S) {
  auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
  auto *CS = dyn_cast<CompoundStmt>(Stmt);
  if (CS && CS->size() > 1) {
    bool HasLastprivates = false;
    auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) {
      auto &C = CGF.CGM.getContext();
      auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
      // Emit helper vars inits.
      LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
                                    CGF.Builder.getInt32(0));
      auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1);
      LValue UB =
          createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
      LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
                                    CGF.Builder.getInt32(1));
      LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
                                    CGF.Builder.getInt32(0));
      // Loop counter.
      LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
      OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
      OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
      // Generate condition for loop.
      BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
                          OK_Ordinary, S.getLocStart(),
                          /*fpContractable=*/false);
      // Increment for loop counter.
      UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue,
                        OK_Ordinary, S.getLocStart());
      auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) {
        // Iterate through all sections and emit a switch construct:
        // switch (IV) {
        //   case 0:
        //     <SectionStmt[0]>;
        //     break;
        // ...
        //   case <NumSection> - 1:
        //     <SectionStmt[<NumSection> - 1]>;
        //     break;
        // }
        // .omp.sections.exit:
        auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
        auto *SwitchStmt = CGF.Builder.CreateSwitch(
            CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
            CS->size());
        unsigned CaseNumber = 0;
        for (auto C = CS->children(); C; ++C, ++CaseNumber) {
          auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
          CGF.EmitBlock(CaseBB);
          SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
          CGF.EmitStmt(*C);
          CGF.EmitBranch(ExitBB);
        }
        CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
      };
      CodeGenFunction::OMPPrivateScope LoopScope(CGF);
      if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races on
        // initialization of firstprivate variables.
        CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                   OMPD_unknown);
      }
      CGF.EmitOMPPrivateClause(S, LoopScope);
      HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
      CGF.EmitOMPReductionClauseInit(S, LoopScope);
      (void)LoopScope.Privatize();

      // Emit static non-chunked loop.
      CGF.CGM.getOpenMPRuntime().emitForInit(
          CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
          /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
          LB.getAddress(), UB.getAddress(), ST.getAddress());
      // UB = min(UB, GlobalUB);
      auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
      auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
          CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
      CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
      // IV = LB;
      CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
      // while (idx <= UB) { BODY; ++idx; }
      CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
                           [](CodeGenFunction &) {});
      // Tell the runtime we are done.
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
      CGF.EmitOMPReductionClauseFinal(S);

      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivates)
        CGF.EmitOMPLastprivateClauseFinal(
            S, CGF.Builder.CreateIsNotNull(
                   CGF.EmitLoadOfScalar(IL, S.getLocStart())));
    };

    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, CodeGen);
    // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
    // clause. Otherwise the barrier will be generated by the codegen for the
    // directive.
    if (HasLastprivates && S.getSingleClause(OMPC_nowait)) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                 OMPD_unknown);
    }
    return OMPD_sections;
  }
  // If only one section is found - no need to generate a loop, emit as a
  // single region.
  bool HasFirstprivates;
  // No need to generate reductions for sections with single section region, we
  // can use original shared variables for all operations.
  bool HasReductions = !S.getClausesOfKind(OMPC_reduction).empty();
  // No need to generate lastprivates for sections with single section region,
  // we can use original shared variable for all calculations with barrier at
  // the end of the sections.
  bool HasLastprivates = !S.getClausesOfKind(OMPC_lastprivate).empty();
  auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) {
    CodeGenFunction::OMPPrivateScope SingleScope(CGF);
    HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();
    CGF.EmitStmt(Stmt);
    CGF.EnsureInsertPoint();
  };
  CGF.CGM.getOpenMPRuntime().emitSingleRegion(CGF, CodeGen, S.getLocStart(),
                                              llvm::None, llvm::None,
                                              llvm::None, llvm::None);
  // Emit barrier for firstprivates, lastprivates or reductions only if
  // 'sections' directive has 'nowait' clause. Otherwise the barrier will be
  // generated by the codegen for the directive.
  if ((HasFirstprivates || HasLastprivates || HasReductions) &&
      S.getSingleClause(OMPC_nowait)) {
    // Emit implicit barrier to synchronize threads and avoid data races on
    // initialization of firstprivate variables.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_unknown);
  }
  return OMPD_single;
}
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  OpenMPDirectiveKind EmittedAs = emitSections(*this, S);
  // Emit an implicit barrier at the end.
  if (!S.getSingleClause(OMPC_nowait)) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs);
  }
}
void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);
}
void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
  llvm::SmallVector<const Expr *, 8> DestExprs;
  llvm::SmallVector<const Expr *, 8> SrcExprs;
  llvm::SmallVector<const Expr *, 8> AssignmentOps;
  // Check if there are any 'copyprivate' clauses associated with this
  // 'single' construct.
  // Build a list of copyprivate variables along with helper expressions
  // (<source>, <destination>, <destination> = <source> expressions).
  for (auto &&I = S.getClausesOfKind(OMPC_copyprivate); I; ++I) {
    auto *C = cast<OMPCopyprivateClause>(*I);
    CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
    DestExprs.append(C->destination_exprs().begin(),
                     C->destination_exprs().end());
    SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
    AssignmentOps.append(C->assignment_ops().begin(),
                         C->assignment_ops().end());
  }
  LexicalScope Scope(*this, S.getSourceRange());
  // Emit code for the 'single' region along with the 'copyprivate' clauses.
  bool HasFirstprivates;
  auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) {
    CodeGenFunction::OMPPrivateScope SingleScope(CGF);
    HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
                                          CopyprivateVars, DestExprs, SrcExprs,
                                          AssignmentOps);
  // Emit an implicit barrier at the end (to avoid data races on firstprivate
  // init, or if no 'nowait' clause was specified and no 'copyprivate' clause).
  if ((!S.getSingleClause(OMPC_nowait) || HasFirstprivates) &&
      CopyprivateVars.empty()) {
    CGM.getOpenMPRuntime().emitBarrierCall(
        *this, S.getLocStart(),
        S.getSingleClause(OMPC_nowait) ? OMPD_unknown : OMPD_single);
  }
}
void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
}
void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitCriticalRegion(
      *this, S.getDirectiveName().getAsString(), CodeGen, S.getLocStart());
}
void CodeGenFunction::EmitOMPParallelForDirective(
    const OMPParallelForDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  LexicalScope Scope(*this, S.getSourceRange());
  (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitOMPWorksharingLoop(S);
    // Emit implicit barrier at the end of parallel region, but this barrier
    // is at the end of 'for' directive, so emit it as the implicit barrier for
    // this 'for' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_for);
  };
  emitCommonOMPParallelDirective(*this, S, CodeGen);
}
void CodeGenFunction::EmitOMPParallelForSimdDirective(
    const OMPParallelForSimdDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  LexicalScope Scope(*this, S.getSourceRange());
  (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitOMPWorksharingLoop(S);
    // Emit implicit barrier at the end of parallel region, but this barrier
    // is at the end of 'for' directive, so emit it as the implicit barrier for
    // this 'for' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_for);
  };
  emitCommonOMPParallelDirective(*this, S, CodeGen);
}
void CodeGenFunction::EmitOMPParallelSectionsDirective(
    const OMPParallelSectionsDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'sections' directive.
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    (void)emitSections(CGF, S);
    // Emit implicit barrier at the end of parallel region.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_parallel);
  };
  emitCommonOMPParallelDirective(*this, S, CodeGen);
}
void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
  // Emit outlined function for task construct.
  LexicalScope Scope(*this, S.getSourceRange());
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
  auto *I = CS->getCapturedDecl()->param_begin();
  auto *PartId = std::next(I);
  // The first function argument for tasks is a thread id, the second one is a
  // part id (0 for tied tasks, >=0 for untied task).
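  // Illustrative example (added commentary, not from the original source):
  //   #pragma omp task firstprivate(x)
  //   { use(x); }
  // The outlined task function receives the thread id and part id described
  // above; private and firstprivate copies are wired up through the
  // PrivatesPtr/CopyFn arguments handled in the CodeGen lambda below.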
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  // Get list of private variables.
  llvm::SmallVector<const Expr *, 8> PrivateVars;
  llvm::SmallVector<const Expr *, 8> PrivateCopies;
  for (auto &&I = S.getClausesOfKind(OMPC_private); I; ++I) {
    auto *C = cast<OMPPrivateClause>(*I);
    auto IRef = C->varlist_begin();
    for (auto *IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        PrivateVars.push_back(*IRef);
        PrivateCopies.push_back(IInit);
      }
      ++IRef;
    }
  }
  EmittedAsPrivate.clear();
  // Get list of firstprivate variables.
  llvm::SmallVector<const Expr *, 8> FirstprivateVars;
  llvm::SmallVector<const Expr *, 8> FirstprivateCopies;
  llvm::SmallVector<const Expr *, 8> FirstprivateInits;
  for (auto &&I = S.getClausesOfKind(OMPC_firstprivate); I; ++I) {
    auto *C = cast<OMPFirstprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (auto *IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        FirstprivateVars.push_back(*IRef);
        FirstprivateCopies.push_back(IInit);
        FirstprivateInits.push_back(*IElemInitRef);
      }
      ++IRef, ++IElemInitRef;
    }
  }
1534 auto &&CodeGen = [PartId, &S, &PrivateVars, &FirstprivateVars](
1535 CodeGenFunction &CGF) {
1536 // Set proper addresses for generated private copies.
1537 auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
1538 OMPPrivateScope Scope(CGF);
1539 if (!PrivateVars.empty() || !FirstprivateVars.empty()) {
1540 auto *CopyFn = CGF.Builder.CreateAlignedLoad(
1541 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)),
1542 CGF.PointerAlignInBytes);
1543 auto *PrivatesPtr = CGF.Builder.CreateAlignedLoad(
1544 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)),
1545 CGF.PointerAlignInBytes);
1547 llvm::SmallVector<std::pair<const VarDecl *, llvm::Value *>, 16>
1549 llvm::SmallVector<llvm::Value *, 16> CallArgs;
1550 CallArgs.push_back(PrivatesPtr);
1551 for (auto *E : PrivateVars) {
1552 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1554 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
1555 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
1556 CallArgs.push_back(PrivatePtr);
1558 for (auto *E : FirstprivateVars) {
1559 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1561 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
1562 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
1563 CallArgs.push_back(PrivatePtr);
1565 CGF.EmitRuntimeCall(CopyFn, CallArgs);
1566 for (auto &&Pair : PrivatePtrs) {
1568 CGF.Builder.CreateAlignedLoad(Pair.second, CGF.PointerAlignInBytes);
1569 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
1572 (void)Scope.Privatize();
1574 // TODO: emit code for untied tasks.
1576 CGF.EmitStmt(CS->getCapturedStmt());
1579 CGM.getOpenMPRuntime().emitTaskOutlinedFunction(S, *I, CodeGen);
1580 // Check if we should emit tied or untied task.
1581 bool Tied = !S.getSingleClause(OMPC_untied);
1582 // Check if the task is final
1583 llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
1584 if (auto *Clause = S.getSingleClause(OMPC_final)) {
1585 // If the condition constant folds and can be elided, try to avoid emitting
1586 // the condition and the dead arm of the if/else.
1587 auto *Cond = cast<OMPFinalClause>(Clause)->getCondition();
1589 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
1590 Final.setInt(CondConstant);
1592 Final.setPointer(EvaluateExprAsBool(Cond));
1594 // By default the task is not final.
1595 Final.setInt(/*IntVal=*/false);
1597 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
1598 const Expr *IfCond = nullptr;
1599 if (auto C = S.getSingleClause(OMPC_if)) {
1600 IfCond = cast<OMPIfClause>(C)->getCondition();
1602 CGM.getOpenMPRuntime().emitTaskCall(
1603 *this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy,
1604 CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars,
1605 FirstprivateCopies, FirstprivateInits);
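
// Illustrative example (not from the original source): for
//
//   int x = 0;
//   #pragma omp task firstprivate(x)
//   use(x);
//
// the captured statement is emitted into an outlined task function; 'x' is
// recorded in FirstprivateVars/FirstprivateCopies/FirstprivateInits so the
// runtime-allocated private copy can be initialized from the captured value
// before the task body runs.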

void CodeGenFunction::EmitOMPTaskyieldDirective(
    const OMPTaskyieldDirective &S) {
  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
}

void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
}

void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
}

void CodeGenFunction::EmitOMPTaskgroupDirective(
    const OMPTaskgroupDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
}

void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
  CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
    if (auto C = S.getSingleClause(/*K*/ OMPC_flush)) {
      auto FlushClause = cast<OMPFlushClause>(C);
      return llvm::makeArrayRef(FlushClause->varlist_begin(),
                                FlushClause->varlist_end());
    }
    return llvm::None;
  }(), S.getLocStart());
}
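
// Illustrative example (not from the original source): '#pragma omp flush'
// passes an empty variable list to emitFlush, while '#pragma omp flush(a, b)'
// passes the clause's variable list; in both cases the runtime is asked to
// make the listed (or, with an empty list, all) variables consistent.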

void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart());
}

static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
                                         QualType SrcType, QualType DestType) {
  assert(CGF.hasScalarEvaluationKind(DestType) &&
         "DestType must have scalar evaluation kind.");
  assert(!Val.isAggregate() && "Must be a scalar or complex.");
  return Val.isScalar()
             ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType)
             : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
                                                 DestType);
}

static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
                      QualType DestType) {
  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
         "DestType must have complex evaluation kind.");
  CodeGenFunction::ComplexPairTy ComplexVal;
  if (Val.isScalar()) {
    // Convert the input element to the element type of the complex.
    auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
    auto ScalarVal =
        CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestElementType);
    ComplexVal = CodeGenFunction::ComplexPairTy(
        ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
  } else {
    assert(Val.isComplex() && "Must be a scalar or complex.");
    auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
    auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
    ComplexVal.first = CGF.EmitScalarConversion(
        Val.getComplexVal().first, SrcElementType, DestElementType);
    ComplexVal.second = CGF.EmitScalarConversion(
        Val.getComplexVal().second, SrcElementType, DestElementType);
  }
  return ComplexVal;
}

static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
                                  LValue LVal, RValue RVal) {
  if (LVal.isGlobalReg()) {
    CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
  } else {
    CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent
                                             : llvm::Monotonic,
                        LVal.isVolatile(), /*IsInit=*/false);
  }
}

static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal,
                            QualType RValTy) {
  switch (CGF.getEvaluationKind(LVal.getType())) {
  case TEK_Scalar:
    CGF.EmitStoreThroughLValue(
        RValue::get(convertToScalarValue(CGF, RVal, RValTy, LVal.getType())),
        LVal);
    break;
  case TEK_Complex:
    CGF.EmitStoreOfComplex(
        convertToComplexValue(CGF, RVal, RValTy, LVal.getType()), LVal,
        /*isInit=*/false);
    break;
  case TEK_Aggregate:
    llvm_unreachable("Must be a scalar or complex.");
  }
}

static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                  const Expr *X, const Expr *V,
                                  SourceLocation Loc) {
  // v = x;
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  LValue VLValue = CGF.EmitLValue(V);
  RValue Res = XLValue.isGlobalReg()
                   ? CGF.EmitLoadOfLValue(XLValue, Loc)
                   : CGF.EmitAtomicLoad(XLValue, Loc,
                                        IsSeqCst ? llvm::SequentiallyConsistent
                                                 : llvm::Monotonic,
                                        XLValue.isVolatile());
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
  emitSimpleStore(CGF, VLValue, Res, X->getType().getNonReferenceType());
}
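
// Illustrative example (not from the original source):
//
//   #pragma omp atomic read seq_cst
//   v = x;
//
// 'x' is loaded atomically (sequentially consistent because of 'seq_cst',
// monotonic otherwise), an implicit flush is emitted, and the result is then
// stored to 'v' with a plain store.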

static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                   const Expr *X, const Expr *E,
                                   SourceLocation Loc) {
  // x = expr;
  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
  emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
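
// Illustrative example (not from the original source):
//
//   #pragma omp atomic write
//   x = expr;
//
// 'expr' is evaluated non-atomically and the result is stored to 'x' with an
// atomic store (monotonic unless 'seq_cst' is present).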

static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
                                                RValue Update,
                                                BinaryOperatorKind BO,
                                                llvm::AtomicOrdering AO,
                                                bool IsXLHSInRHSPart) {
  auto &Context = CGF.CGM.getContext();
  // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for
  // 'x' expression is simple and atomic is allowed for the given type for the
  // target platform.
  if (BO == BO_Comma || !Update.isScalar() ||
      !Update.getScalarVal()->getType()->isIntegerTy() ||
      !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
                        (Update.getScalarVal()->getType() !=
                         X.getAddress()->getType()->getPointerElementType())) ||
      !X.getAddress()->getType()->getPointerElementType()->isIntegerTy() ||
      !Context.getTargetInfo().hasBuiltinAtomic(
          Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
    return std::make_pair(false, RValue::get(nullptr));

  llvm::AtomicRMWInst::BinOp RMWOp;
  switch (BO) {
  case BO_Add:
    RMWOp = llvm::AtomicRMWInst::Add;
    break;
  case BO_Sub:
    if (!IsXLHSInRHSPart)
      return std::make_pair(false, RValue::get(nullptr));
    RMWOp = llvm::AtomicRMWInst::Sub;
    break;
  case BO_And:
    RMWOp = llvm::AtomicRMWInst::And;
    break;
  case BO_Or:
    RMWOp = llvm::AtomicRMWInst::Or;
    break;
  case BO_Xor:
    RMWOp = llvm::AtomicRMWInst::Xor;
    break;
  case BO_LT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
                                   : llvm::AtomicRMWInst::Max)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
                                   : llvm::AtomicRMWInst::UMax);
    break;
  case BO_GT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
                                   : llvm::AtomicRMWInst::Min)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
                                   : llvm::AtomicRMWInst::UMin);
    break;
  case BO_Assign:
    RMWOp = llvm::AtomicRMWInst::Xchg;
    break;
  // These operations have no direct atomicrmw equivalent; fall back to the
  // generic update path.
  case BO_Mul: case BO_Div: case BO_Rem: case BO_Shl: case BO_Shr:
  case BO_LAnd: case BO_LOr:
    return std::make_pair(false, RValue::get(nullptr));
  // The remaining opcodes cannot appear in an 'atomic update' expression.
  case BO_PtrMemD: case BO_PtrMemI: case BO_LE: case BO_GE: case BO_EQ:
  case BO_NE: case BO_AddAssign: case BO_SubAssign: case BO_MulAssign:
  case BO_DivAssign: case BO_RemAssign: case BO_ShlAssign: case BO_ShrAssign:
  case BO_AndAssign: case BO_OrAssign: case BO_XorAssign: case BO_Comma:
    llvm_unreachable("Unsupported atomic update operation");
  }
  auto *UpdateVal = Update.getScalarVal();
  if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
    UpdateVal = CGF.Builder.CreateIntCast(
        IC, X.getAddress()->getType()->getPointerElementType(),
        X.getType()->hasSignedIntegerRepresentation());
  }
  auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
  return std::make_pair(true, RValue::get(Res));
}
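
// Illustrative example (not from the original source): for 'int x;' the update
//
//   #pragma omp atomic
//   x += 1;
//
// can be emitted as a single 'atomicrmw add' instruction, whereas 'x = 1 - x'
// (old value on the right-hand side of a non-commutative operator) or a
// floating-point 'x' falls back to the generic compare-and-swap update path.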

std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
    LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
    llvm::AtomicOrdering AO, SourceLocation Loc,
    const llvm::function_ref<RValue(RValue)> &CommonGen) {
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr;
  // x = expr binop x; -> expr binop xrval;
  auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
  if (!Res.first) {
    if (X.isGlobalReg()) {
      // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
      // 'xrval'.
      EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
    } else {
      // Perform compare-and-swap procedure.
      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
    }
  }
  return Res;
}

static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                    const Expr *X, const Expr *E,
                                    const Expr *UE, bool IsXLHSInRHSPart,
                                    SourceLocation Loc) {
  assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
         "Update expr in 'atomic update' must be a binary operator.");
  auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr;
  // x = expr binop x; -> expr binop xrval;
  assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
  auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
  auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
  auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
  auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
  auto &&Gen =
      [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
    CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
    CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
    return CGF.EmitAnyExpr(UE);
  };
  (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
      XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
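
// Illustrative example (not from the original source):
//
//   #pragma omp atomic update
//   x = expr * x;
//
// Sema provides 'UE' as 'expr * xrval' built from opaque values for 'expr' and
// the old value of 'x'; the Gen callback above maps those opaque values and
// re-emits UE inside the atomic update sequence when no single atomicrmw fits.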

static RValue convertToType(CodeGenFunction &CGF, RValue Value,
                            QualType SourceType, QualType ResType) {
  switch (CGF.getEvaluationKind(ResType)) {
  case TEK_Scalar:
    return RValue::get(convertToScalarValue(CGF, Value, SourceType, ResType));
  case TEK_Complex: {
    auto Res = convertToComplexValue(CGF, Value, SourceType, ResType);
    return RValue::getComplex(Res.first, Res.second);
  }
  case TEK_Aggregate:
    break;
  }
  llvm_unreachable("Must be a scalar or complex.");
}

static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                     bool IsPostfixUpdate, const Expr *V,
                                     const Expr *X, const Expr *E,
                                     const Expr *UE, bool IsXLHSInRHSPart,
                                     SourceLocation Loc) {
  assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
  assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
  RValue NewVVal;
  LValue VLValue = CGF.EmitLValue(V);
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
  QualType NewVValType;
  if (UE) {
    // 'x' is updated with some additional value.
    assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
           "Update expr in 'atomic capture' must be a binary operator.");
    auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
    // Update expressions are allowed to have the following forms:
    // x binop= expr; -> xrval binop expr;
    // x++, ++x -> xrval + 1;
    // x--, --x -> xrval - 1;
    // x = x binop expr; -> xrval binop expr;
    // x = expr binop x; -> expr binop xrval;
    auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
    auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
    auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
    NewVValType = XRValExpr->getType();
    auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
    auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
                  IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
      RValue Res = CGF.EmitAnyExpr(UE);
      NewVVal = IsPostfixUpdate ? XRValue : Res;
      return Res;
    };
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      if (IsPostfixUpdate) {
        // Use old value from 'atomicrmw'.
        NewVVal = Res.second;
      } else {
        // 'atomicrmw' does not provide new value, so evaluate it using old
        // value of 'x'.
        CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
        CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
        NewVVal = CGF.EmitAnyExpr(UE);
      }
    }
  } else {
    // 'x' is simply rewritten with some 'expr'.
    NewVValType = X->getType().getNonReferenceType();
    ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
                               X->getType().getNonReferenceType());
    auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
      NewVVal = XRValue;
      return ExprRValue;
    };
    // Try to perform atomicrmw xchg, otherwise simple exchange.
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
        Loc, Gen);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
    }
  }
  // Emit post-update store to 'v' of old/new 'x' value.
  emitSimpleStore(CGF, VLValue, NewVVal, NewVValType);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
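
// Illustrative example (not from the original source):
//
//   #pragma omp atomic capture
//   { v = x; x += expr; }
//
// is a postfix-update capture: the old value of 'x' is stored to 'v' while 'x'
// is updated atomically. With '{ x += expr; v = x; }' the new value is
// captured instead (IsPostfixUpdate is false).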

static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
                              bool IsSeqCst, bool IsPostfixUpdate,
                              const Expr *X, const Expr *V, const Expr *E,
                              const Expr *UE, bool IsXLHSInRHSPart,
                              SourceLocation Loc) {
  switch (Kind) {
  case OMPC_read:
    EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
    break;
  case OMPC_write:
    EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
    break;
  case OMPC_unknown:
  case OMPC_update:
    EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
    break;
  case OMPC_capture:
    EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
                             IsXLHSInRHSPart, Loc);
    break;
  case OMPC_if: case OMPC_final: case OMPC_num_threads: case OMPC_private:
  case OMPC_firstprivate: case OMPC_lastprivate: case OMPC_reduction:
  case OMPC_safelen: case OMPC_collapse: case OMPC_default: case OMPC_seq_cst:
  case OMPC_shared: case OMPC_linear: case OMPC_aligned: case OMPC_copyin:
  case OMPC_copyprivate: case OMPC_flush: case OMPC_proc_bind:
  case OMPC_schedule: case OMPC_ordered: case OMPC_nowait: case OMPC_untied:
  case OMPC_threadprivate: case OMPC_mergeable:
    llvm_unreachable("Clause is not allowed in 'omp atomic'.");
  }
}

void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
  bool IsSeqCst = S.getSingleClause(/*K=*/OMPC_seq_cst);
  OpenMPClauseKind Kind = OMPC_unknown;
  for (auto *C : S.clauses()) {
    // Find first clause (skip seq_cst clause, if it is first).
    if (C->getClauseKind() != OMPC_seq_cst) {
      Kind = C->getClauseKind();
      break;
    }
  }

  const auto *CS =
      S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
    enterFullExpression(EWC);
  }
  // Processing for statements under 'atomic capture'.
  if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
    for (const auto *C : Compound->body()) {
      if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
        enterFullExpression(EWC);
      }
    }
  }

  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S, Kind, IsSeqCst](CodeGenFunction &CGF) {
    EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
                      S.getV(), S.getExpr(), S.getUpdateExpr(),
                      S.isXLHSInRHSPart(), S.getLocStart());
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);
}

void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) {
  llvm_unreachable("CodeGen for 'omp target' is not supported yet.");
}

void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
  llvm_unreachable("CodeGen for 'omp teams' is not supported yet.");
}