//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                      OpenMP Directive Emission
//===----------------------------------------------------------------------===//
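// EmitOMPAggregateAssign emits an element-by-element copy of an array-typed
// OpenMP variable: it computes the number of elements and builds a small copy
// loop in which CopyGen is invoked once per element with the destination and
// source element addresses. Roughly, the emitted control flow is:
//   if (dest.begin == dest.end) goto done;
//   body: CopyGen(dest.elem, src.elem); advance both; if (!at end) goto body;
//   done: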
void CodeGenFunction::EmitOMPAggregateAssign(
    llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;
  auto SrcBegin = SrcAddr;
  auto DestBegin = DestAddr;
  auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin);
  // Cast from pointer to array type to pointer to single element.
  SrcBegin = Builder.CreatePointerBitCastOrAddrSpaceCast(SrcBegin,
                                                         DestBegin->getType());
  auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = createBasicBlock("omp.arraycpy.body");
  auto DoneBB = createBasicBlock("omp.arraycpy.done");
  auto IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);
  auto SrcElementCurrent =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementCurrent->addIncoming(SrcBegin, EntryBB);
  auto DestElementCurrent = Builder.CreatePHI(DestBegin->getType(), 2,
                                              "omp.arraycpy.destElementPast");
  DestElementCurrent->addIncoming(DestBegin, EntryBB);

  // Emit the copy of a single element.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  auto DestElementNext = Builder.CreateConstGEP1_32(
      DestElementCurrent, /*Idx0=*/1, "omp.arraycpy.dest.element");
  auto SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementCurrent, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  auto Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementCurrent->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementCurrent->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}
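// EmitOMPCopy emits the copy <DestVD> = <SrcVD> described by the helper
// expression 'Copy' that Sema built for the clause. Plain array assignments
// are lowered as an aggregate copy; arrays whose copy requires user code
// (e.g. a copy assignment operator) are copied element by element through
// EmitOMPAggregateAssign, remapping DestVD/SrcVD to the current element.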
void CodeGenFunction::EmitOMPCopy(CodeGenFunction &CGF,
                                  QualType OriginalType, llvm::Value *DestAddr,
                                  llvm::Value *SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      CGF.EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      CGF.EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [&CGF, Copy, SrcVD, DestVD](llvm::Value *DestElement,
                                      llvm::Value *SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(CGF);
            Remap.addPrivate(DestVD, [DestElement]() -> llvm::Value *{
              return DestElement;
            });
            Remap.addPrivate(
                SrcVD, [SrcElement]() -> llvm::Value *{ return SrcElement; });
            (void)Remap.Privatize();
            CGF.EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(CGF);
    Remap.addPrivate(SrcVD, [SrcAddr]() -> llvm::Value *{ return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() -> llvm::Value *{ return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    CGF.EmitIgnoredExpr(Copy);
  }
}
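// Emits private copies with copy-initialization for all variables in
// 'firstprivate' clauses of the directive. Returns true if at least one such
// copy was emitted; callers use this to decide whether an implicit barrier is
// needed before the region body starts executing.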
bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  for (auto &&I = D.getClausesOfKind(OMPC_firstprivate); I; ++I) {
    auto *C = cast<OMPFirstprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto InitsRef = C->inits().begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsFirstprivate.count(OrigVD) == 0) {
        EmittedAsFirstprivate.insert(OrigVD);
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(
            const_cast<VarDecl *>(OrigVD),
            /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                OrigVD) != nullptr,
            (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        auto *OriginalAddr = EmitLValue(&DRE).getAddress();
        QualType Type = OrigVD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
            auto Emission = EmitAutoVarAlloca(*VD);
            auto *Init = VD->getInit();
            if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
              // Perform simple memcpy.
              EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
                                  Type);
            } else {
              EmitOMPAggregateAssign(
                  Emission.getAllocatedAddress(), OriginalAddr, Type,
                  [this, VDInit, Init](llvm::Value *DestElement,
                                       llvm::Value *SrcElement) {
                    // Clean up any temporaries needed by the initialization.
                    RunCleanupsScope InitScope(*this);
                    // Emit initialization for single element.
                    LocalDeclMap[VDInit] = SrcElement;
                    EmitAnyExprToMem(Init, DestElement,
                                     Init->getType().getQualifiers(),
                                     /*IsInitializer*/ false);
                    LocalDeclMap.erase(VDInit);
                  });
            }
            EmitAutoVarCleanups(Emission);
            return Emission.getAllocatedAddress();
          });
        } else {
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
            // Emit private VarDecl with copy init.
            // Remap temp VDInit variable to the address of the original
            // variable (for proper handling of captured global variables).
            LocalDeclMap[VDInit] = OriginalAddr;
            EmitDecl(*VD);
            LocalDeclMap.erase(VDInit);
            return GetAddrOfLocalVar(VD);
          });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef, ++InitsRef;
    }
  }
  return !EmittedAsFirstprivate.empty();
}
void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (auto &&I = D.getClausesOfKind(OMPC_private); I; ++I) {
    auto *C = cast<OMPPrivateClause>(*I);
    auto IRef = C->varlist_begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
              // Emit private VarDecl with copy init.
              EmitDecl(*VD);
              return GetAddrOfLocalVar(VD);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}
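// Emits the 'copyin' sequence sketched in the comments below: every thread
// except the master copies the master's threadprivate values into its own
// copies, and the caller emits the synchronizing barrier afterwards. Returns
// true if any copying code was emitted.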
bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (auto &&I = D.getClausesOfKind(OMPC_copyin); I; ++I) {
    auto *C = cast<OMPCopyinClause>(*I);
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        llvm::Value *MasterAddr;
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
                          VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress();
        } else {
          MasterAddr = VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                           : CGM.GetAddrOfGlobal(VD);
        }
        // Get the address of the threadprivate variable.
        auto *PrivateAddr = EmitLValue(*IRef).getAddress();
        if (CopiedVars.size() == 1) {
          // At first check if current thread is a master thread. If it is, no
          // need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr, CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr, CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(*this, Type, PrivateAddr, MasterAddr, DestVD, SrcVD,
                    AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}
bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
    HasAtLeastOneLastprivate = true;
    auto *C = cast<OMPLastprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> llvm::Value *{
          DeclRefExpr DRE(
              const_cast<VarDecl *>(OrigVD),
              /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                  OrigVD) != nullptr,
              (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in codegen
        // for 'firstprivate' clause.
        if (IInit) {
          auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered =
              PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
                // Emit private VarDecl with copy init.
                EmitDecl(*VD);
                return GetAddrOfLocalVar(VD);
              });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}
323 void CodeGenFunction::EmitOMPLastprivateClauseFinal(
324 const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) {
325 // Emit following code:
326 // if (<IsLastIterCond>) {
327 // orig_var1 = private_orig_var1;
329 // orig_varn = private_orig_varn;
331 llvm::BasicBlock *ThenBB = nullptr;
332 llvm::BasicBlock *DoneBB = nullptr;
333 if (IsLastIterCond) {
334 ThenBB = createBasicBlock(".omp.lastprivate.then");
335 DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
339 llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates;
340 const Expr *LastIterVal = nullptr;
341 const Expr *IVExpr = nullptr;
342 const Expr *IncExpr = nullptr;
343 if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
344 if (isOpenMPWorksharingDirective(D.getDirectiveKind())) {
      LastIterVal = cast<VarDecl>(cast<DeclRefExpr>(
                                      LoopDirective->getUpperBoundVariable())
                                      ->getDecl())
                        ->getAnyInitializer();
349 IVExpr = LoopDirective->getIterationVariable();
350 IncExpr = LoopDirective->getInc();
351 auto IUpdate = LoopDirective->updates().begin();
352 for (auto *E : LoopDirective->counters()) {
353 auto *D = cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
        LoopCountersAndUpdates[D] = *IUpdate;
        ++IUpdate;
      }
    }
  }
360 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
361 bool FirstLCV = true;
362 for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
363 auto *C = cast<OMPLastprivateClause>(*I);
364 auto IRef = C->varlist_begin();
365 auto ISrcRef = C->source_exprs().begin();
366 auto IDestRef = C->destination_exprs().begin();
367 for (auto *AssignOp : C->assignment_ops()) {
368 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
369 QualType Type = PrivateVD->getType();
370 auto *CanonicalVD = PrivateVD->getCanonicalDecl();
371 if (AlreadyEmittedVars.insert(CanonicalVD).second) {
372 // If lastprivate variable is a loop control variable for loop-based
373 // directive, update its value before copyin back to original
375 if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) {
376 if (FirstLCV && LastIterVal) {
377 EmitAnyExprToMem(LastIterVal, EmitLValue(IVExpr).getAddress(),
378 IVExpr->getType().getQualifiers(),
379 /*IsInitializer=*/false);
            EmitIgnoredExpr(IncExpr);
            FirstLCV = false;
          }
          EmitIgnoredExpr(UpExpr);
        }
385 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
386 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
387 // Get the address of the original variable.
388 auto *OriginalAddr = GetAddrOfLocalVar(DestVD);
389 // Get the address of the private variable.
390 auto *PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        EmitOMPCopy(*this, Type, OriginalAddr, PrivateAddr, DestVD, SrcVD,
                    AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (IsLastIterCond) {
    EmitBlock(DoneBB, /*IsFinished=*/true);
  }
}
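// Sets up 'reduction' clauses: for every reduction variable the LHS helper is
// mapped to the address of the original (shared) variable, while the original
// variable itself is remapped to a freshly emitted private copy carrying the
// reduction initializer, so the region body updates the private copy until
// EmitOMPReductionClauseFinal combines the results.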
405 void CodeGenFunction::EmitOMPReductionClauseInit(
406 const OMPExecutableDirective &D,
407 CodeGenFunction::OMPPrivateScope &PrivateScope) {
408 for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
409 auto *C = cast<OMPReductionClause>(*I);
410 auto ILHS = C->lhs_exprs().begin();
411 auto IRHS = C->rhs_exprs().begin();
412 for (auto IRef : C->varlists()) {
413 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
414 auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
415 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
416 // Store the address of the original variable associated with the LHS
417 // implicit variable.
418 PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> llvm::Value *{
419 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
420 CapturedStmtInfo->lookup(OrigVD) != nullptr,
421 IRef->getType(), VK_LValue, IRef->getExprLoc());
        return EmitLValue(&DRE).getAddress();
      });
      // Emit reduction copy.
      bool IsRegistered =
          PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> llvm::Value *{
427 // Emit private VarDecl with reduction init.
428 EmitDecl(*PrivateVD);
        return GetAddrOfLocalVar(PrivateVD);
      });
      assert(IsRegistered && "private var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
      ++ILHS;
      ++IRHS;
    }
  }
}
439 void CodeGenFunction::EmitOMPReductionClauseFinal(
440 const OMPExecutableDirective &D) {
441 llvm::SmallVector<const Expr *, 8> LHSExprs;
442 llvm::SmallVector<const Expr *, 8> RHSExprs;
443 llvm::SmallVector<const Expr *, 8> ReductionOps;
444 bool HasAtLeastOneReduction = false;
445 for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
446 HasAtLeastOneReduction = true;
447 auto *C = cast<OMPReductionClause>(*I);
448 LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
449 RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
450 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
452 if (HasAtLeastOneReduction) {
453 // Emit nowait reduction if nowait clause is present or directive is a
454 // parallel directive (it always has implicit barrier).
455 CGM.getOpenMPRuntime().emitReduction(
456 *this, D.getLocEnd(), LHSExprs, RHSExprs, ReductionOps,
457 D.getSingleClause(OMPC_nowait) ||
458 isOpenMPParallelDirective(D.getDirectiveKind()) ||
459 D.getDirectiveKind() == OMPD_simd,
        D.getDirectiveKind() == OMPD_simd);
  }
}
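// Common helper for 'parallel' and the combined parallel worksharing
// directives: it outlines the captured statement, emits the num_threads and
// proc_bind runtime calls when those clauses are present, and finally emits
// the runtime call that forks the parallel region, guarded by the 'if' clause
// condition when one is given.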
464 static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
465 const OMPExecutableDirective &S,
466 OpenMPDirectiveKind InnermostKind,
467 const RegionCodeGenTy &CodeGen) {
468 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
469 auto CapturedStruct = CGF.GenerateCapturedStmtArgument(*CS);
470 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
471 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
472 if (auto C = S.getSingleClause(OMPC_num_threads)) {
473 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
474 auto NumThreadsClause = cast<OMPNumThreadsClause>(C);
475 auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
476 /*IgnoreResultAssign*/ true);
477 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
478 CGF, NumThreads, NumThreadsClause->getLocStart());
480 if (auto *C = S.getSingleClause(OMPC_proc_bind)) {
481 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
482 auto *ProcBindClause = cast<OMPProcBindClause>(C);
483 CGF.CGM.getOpenMPRuntime().emitProcBindClause(
484 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
486 const Expr *IfCond = nullptr;
487 if (auto C = S.getSingleClause(OMPC_if)) {
488 IfCond = cast<OMPIfClause>(C)->getCondition();
490 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
491 CapturedStruct, IfCond);
494 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
495 LexicalScope Scope(*this, S.getSourceRange());
496 // Emit parallel region as a standalone region.
497 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
498 OMPPrivateScope PrivateScope(CGF);
499 bool Copyins = CGF.EmitOMPCopyinClause(S);
500 bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
501 if (Copyins || Firstprivates) {
502 // Emit implicit barrier to synchronize threads and avoid data races on
503 // initialization of firstprivate variables or propagation master's thread
504 // values of threadprivate variables to local instances of that variables
505 // of all other implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                 OMPD_unknown);
    }
509 CGF.EmitOMPPrivateClause(S, PrivateScope);
510 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
511 (void)PrivateScope.Privatize();
512 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
513 CGF.EmitOMPReductionClauseFinal(S);
514 // Emit implicit barrier at the end of the 'parallel' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_parallel);
  };
518 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update counters values on current iteration.
  for (auto I : D.updates()) {
    EmitIgnoredExpr(I);
  }
  // Update the linear variables.
  for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    for (auto U : C->updates()) {
      EmitIgnoredExpr(U);
    }
  }

  // On a continue in the body, jump to the end.
  auto Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  // Emit the loop body.
  EmitStmt(D.getBody());
  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  // TODO: Update lastprivates if the SeparateIter flag is true.
  // This will be implemented in a follow-up OMPLastprivateClause patch, but
  // result should be still correct without it, as we do not make these
  // variables private yet.
}
void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  auto LoopBody = createBasicBlock("omp.inner.for.body");

  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Emit the loop body.
  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}
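// Emits the privatized copies of 'linear' variables, initializing each one
// from its original variable, and pre-computes the linear step into a helper
// variable when the step expression is not a compile-time constant.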
597 void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
598 // Emit inits for the linear variables.
599 for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
600 auto *C = cast<OMPLinearClause>(*I);
601 for (auto Init : C->inits()) {
602 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
603 auto *OrigVD = cast<VarDecl>(
604 cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())->getDecl());
605 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
606 CapturedStmtInfo->lookup(OrigVD) != nullptr,
607 VD->getInit()->getType(), VK_LValue,
608 VD->getInit()->getExprLoc());
609 AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
610 EmitExprAsInit(&DRE, VD,
611 MakeAddrLValue(Emission.getAllocatedAddress(),
612 VD->getType(), Emission.Alignment),
613 /*capturedByInit=*/false);
      EmitAutoVarCleanups(Emission);
    }
    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
}
627 static void emitLinearClauseFinal(CodeGenFunction &CGF,
628 const OMPLoopDirective &D) {
629 // Emit the final values of the linear variables.
630 for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
631 auto *C = cast<OMPLinearClause>(*I);
632 auto IC = C->varlist_begin();
633 for (auto F : C->finals()) {
634 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
635 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
636 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
637 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
638 auto *OrigAddr = CGF.EmitLValue(&DRE).getAddress();
639 CodeGenFunction::OMPPrivateScope VarScope(CGF);
640 VarScope.addPrivate(OrigVD,
641 [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
642 (void)VarScope.Privatize();
643 CGF.EmitIgnoredExpr(F);
649 static void emitAlignedClause(CodeGenFunction &CGF,
650 const OMPExecutableDirective &D) {
651 for (auto &&I = D.getClausesOfKind(OMPC_aligned); I; ++I) {
652 auto *Clause = cast<OMPAlignedClause>(*I);
653 unsigned ClauseAlignment = 0;
    if (auto AlignmentExpr = Clause->getAlignment()) {
      auto *AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
    }
659 for (auto E : Clause->varlists()) {
660 unsigned Alignment = ClauseAlignment;
661 if (Alignment == 0) {
662 // OpenMP [2.8.1, Description]
663 // If no optional parameter is specified, implementation-defined default
        // alignments for SIMD instructions on the target platforms are assumed.
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
671 assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
672 "alignment is not power of 2");
673 if (Alignment != 0) {
674 llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
675 CGF.EmitAlignmentAssumption(PtrValue, Alignment);
681 static void emitPrivateLoopCounters(CodeGenFunction &CGF,
682 CodeGenFunction::OMPPrivateScope &LoopScope,
683 ArrayRef<Expr *> Counters) {
684 for (auto *E : Counters) {
685 auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
686 (void)LoopScope.addPrivate(VD, [&]() -> llvm::Value *{
687 // Emit var without initialization.
688 auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
689 CGF.EmitAutoVarCleanups(VarEmission);
690 return VarEmission.getAllocatedAddress();
695 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
696 const Expr *Cond, llvm::BasicBlock *TrueBlock,
697 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
699 CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
700 emitPrivateLoopCounters(CGF, PreCondScope, S.counters());
701 (void)PreCondScope.Privatize();
702 // Get initial values of real counters.
703 for (auto I : S.inits()) {
704 CGF.EmitIgnoredExpr(I);
707 // Check that loop is executed at least one time.
708 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
}

static void
emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
713 CodeGenFunction::OMPPrivateScope &PrivateScope) {
714 for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
715 auto *C = cast<OMPLinearClause>(*I);
716 for (auto *E : C->varlists()) {
717 auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
718 bool IsRegistered = PrivateScope.addPrivate(VD, [&]()->llvm::Value * {
719 // Emit var without initialization.
720 auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
721 CGF.EmitAutoVarCleanups(VarEmission);
722 return VarEmission.getAllocatedAddress();
724 assert(IsRegistered && "linear var already registered as private");
725 // Silence the warning about unused variable.
731 static void emitSafelenClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  if (auto *C =
          cast_or_null<OMPSafelenClause>(D.getSingleClause(OMPC_safelen))) {
735 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
736 /*ignoreResult=*/true);
737 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
738 CGF.LoopStack.setVectorizerWidth(Val->getZExtValue());
739 // In presence of finite 'safelen', it may be unsafe to mark all
740 // the memory instructions parallel, because loop-carried
741 // dependences of 'safelen' iterations are possible.
742 CGF.LoopStack.setParallel(false);
746 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
747 // Walk clauses and process safelen/lastprivate.
748 LoopStack.setParallel();
749 LoopStack.setVectorizerEnable(true);
750 emitSafelenClause(*this, D);
753 void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
754 auto IC = D.counters().begin();
755 for (auto F : D.finals()) {
756 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
757 if (LocalDeclMap.lookup(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
758 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
759 CapturedStmtInfo->lookup(OrigVD) != nullptr,
760 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
761 auto *OrigAddr = EmitLValue(&DRE).getAddress();
762 OMPPrivateScope VarScope(*this);
763 VarScope.addPrivate(OrigVD,
764 [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
  }
  emitLinearClauseFinal(*this, D);
}
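// Lowering of '#pragma omp simd': the CodeGen lambda below emits the
// precondition check, the iteration variable, the privatized counters and
// linear/private/reduction variables, the inner loop carrying the
// vectorization metadata set up by EmitOMPSimdInit, and finally the counter
// and linear updates via EmitOMPSimdFinal. The construct is emitted inline as
// an OMPD_simd region.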
773 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
774 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
776 // for (IV in 0..LastIteration) BODY;
777 // <Final counter/linear vars updates>;
781 // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
791 ContBlock = CGF.createBasicBlock("simd.if.end");
792 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
793 CGF.getProfileCount(&S));
794 CGF.EmitBlock(ThenBlock);
795 CGF.incrementProfileCounter(&S);
798 // Emit the loop iteration variable.
799 const Expr *IVExpr = S.getIterationVariable();
800 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
801 CGF.EmitVarDecl(*IVDecl);
802 CGF.EmitIgnoredExpr(S.getInit());
804 // Emit the iterations count variable.
805 // If it is not a variable, Sema decided to calculate iterations count on
806 // each iteration (e.g., it is foldable into a constant).
807 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
808 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
809 // Emit calculation of the iterations count.
810 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
813 CGF.EmitOMPSimdInit(S);
815 emitAlignedClause(CGF, S);
816 CGF.EmitOMPLinearClauseInit(S);
    bool HasLastprivateClause;
    {
      OMPPrivateScope LoopScope(CGF);
820 emitPrivateLoopCounters(CGF, LoopScope, S.counters());
821 emitPrivateLinearVars(CGF, S, LoopScope);
822 CGF.EmitOMPPrivateClause(S, LoopScope);
823 CGF.EmitOMPReductionClauseInit(S, LoopScope);
824 HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
825 (void)LoopScope.Privatize();
      CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                           S.getInc(),
                           [&S](CodeGenFunction &CGF) {
                             CGF.EmitOMPLoopBody(S, JumpDest());
                             CGF.EmitStopPoint(&S);
                           },
                           [](CodeGenFunction &) {});
833 // Emit final copy of the lastprivate variables at the end of loops.
      if (HasLastprivateClause) {
        CGF.EmitOMPLastprivateClauseFinal(S);
      }
      CGF.EmitOMPReductionClauseFinal(S);
    }
    CGF.EmitOMPSimdFinal(S);
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}
849 void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
850 const OMPLoopDirective &S,
851 OMPPrivateScope &LoopScope,
852 bool Ordered, llvm::Value *LB,
853 llvm::Value *UB, llvm::Value *ST,
854 llvm::Value *IL, llvm::Value *Chunk) {
855 auto &RT = CGM.getOpenMPRuntime();
857 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
858 const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);
  assert((Ordered ||
          !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");
866 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
867 // When schedule(dynamic,chunk_size) is specified, the iterations are
868 // distributed to threads in the team in chunks as the threads request them.
869 // Each thread executes a chunk of iterations, then requests another chunk,
870 // until no chunks remain to be distributed. Each chunk contains chunk_size
871 // iterations, except for the last chunk to be distributed, which may have
872 // fewer iterations. When no chunk_size is specified, it defaults to 1.
874 // When schedule(guided,chunk_size) is specified, the iterations are assigned
875 // to threads in the team in chunks as the executing threads request them.
876 // Each thread executes a chunk of iterations, then requests another chunk,
877 // until no chunks remain to be assigned. For a chunk_size of 1, the size of
878 // each chunk is proportional to the number of unassigned iterations divided
879 // by the number of threads in the team, decreasing to 1. For a chunk_size
880 // with value k (greater than 1), the size of each chunk is determined in the
881 // same way, with the restriction that the chunks do not contain fewer than k
882 // iterations (except for the last chunk to be assigned, which may have fewer
883 // than k iterations).
885 // When schedule(auto) is specified, the decision regarding scheduling is
886 // delegated to the compiler and/or runtime system. The programmer gives the
887 // implementation the freedom to choose any possible mapping of iterations to
888 // threads in the team.
890 // When schedule(runtime) is specified, the decision regarding scheduling is
891 // deferred until run time, and the schedule and chunk size are taken from the
892 // run-sched-var ICV. If the ICV is set to auto, the schedule is
893 // implementation defined
895 // while(__kmpc_dispatch_next(&LB, &UB)) {
897 // while (idx <= UB) { BODY; ++idx;
898 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
902 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
903 // When schedule(static, chunk_size) is specified, iterations are divided into
904 // chunks of size chunk_size, and the chunks are assigned to the threads in
905 // the team in a round-robin fashion in the order of the thread number.
907 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
908 // while (idx <= UB) { BODY; ++idx; } // inner loop
914 const Expr *IVExpr = S.getIterationVariable();
915 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
916 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
  RT.emitForInit(
      *this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, Ordered, IL, LB,
      (DynamicOrOrdered ? EmitAnyExpr(S.getLastIteration()).getScalarVal()
                        : UB),
      ST, Chunk);
924 auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
926 // Start the loop with a block that tests the condition.
927 auto CondBlock = createBasicBlock("omp.dispatch.cond");
928 EmitBlock(CondBlock);
929 LoopStack.push(CondBlock);
931 llvm::Value *BoolCondVal = nullptr;
932 if (!DynamicOrOrdered) {
933 // UB = min(UB, GlobalUB)
934 EmitIgnoredExpr(S.getEnsureUpperBound());
936 EmitIgnoredExpr(S.getInit());
    BoolCondVal = EvaluateExprAsBool(S.getCond());
  } else {
    BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned,
                                 IL, LB, UB, ST);
  }
944 // If there are any cleanups between here and the loop-exit scope,
945 // create a block to stage a loop exit along.
946 auto ExitBlock = LoopExit.getBlock();
947 if (LoopScope.requiresCleanups())
948 ExitBlock = createBasicBlock("omp.dispatch.cleanup");
950 auto LoopBody = createBasicBlock("omp.dispatch.body");
951 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
952 if (ExitBlock != LoopExit.getBlock()) {
953 EmitBlock(ExitBlock);
954 EmitBranchThroughCleanup(LoopExit);
958 // Emit "IV = LB" (in case of static schedule, we have already calculated new
959 // LB for loop condition and emitted it above).
960 if (DynamicOrOrdered)
961 EmitIgnoredExpr(S.getInit());
963 // Create a block for the increment.
964 auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
965 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
967 // Generate !llvm.loop.parallel metadata for loads and stores for loops
968 // with dynamic/guided scheduling and without ordered clause.
969 if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
970 LoopStack.setParallel((ScheduleKind == OMPC_SCHEDULE_dynamic ||
                           ScheduleKind == OMPC_SCHEDULE_guided) &&
                          !Ordered);
  }
977 SourceLocation Loc = S.getLocStart();
978 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
979 [&S, LoopExit](CodeGenFunction &CGF) {
980 CGF.EmitOMPLoopBody(S, LoopExit);
981 CGF.EmitStopPoint(&S);
                   [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
                     if (Ordered) {
                       CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
                           CGF, Loc, IVSize, IVSigned);
                     }
                   });
990 EmitBlock(Continue.getBlock());
991 BreakContinueStack.pop_back();
992 if (!DynamicOrOrdered) {
993 // Emit "LB = LB + Stride", "UB = UB + Stride".
994 EmitIgnoredExpr(S.getNextLowerBound());
995 EmitIgnoredExpr(S.getNextUpperBound());
998 EmitBranch(CondBlock);
1000 // Emit the fall-through block.
1001 EmitBlock(LoopExit.getBlock());
1003 // Tell the runtime we are done.
1004 if (!DynamicOrOrdered)
1005 RT.emitForStaticFinish(*this, S.getLocEnd());
1008 /// \brief Emit a helper variable and return corresponding lvalue.
1009 static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
1010 const DeclRefExpr *Helper) {
1011 auto VDecl = cast<VarDecl>(Helper->getDecl());
1012 CGF.EmitVarDecl(*VDecl);
  return CGF.EmitLValue(Helper);
}
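// Reads the 'schedule' clause of a loop directive. When a helper chunk
// variable exists and the clause is emitted for the enclosing (outer) region,
// the chunk expression is evaluated into that helper; otherwise the chunk is
// emitted here and converted to the iteration variable's type. Returns the
// chunk value (or null) together with the schedule kind.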
1016 static std::pair<llvm::Value * /*Chunk*/, OpenMPScheduleClauseKind>
1017 emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
1019 // Detect the loop schedule kind and chunk.
1020 auto ScheduleKind = OMPC_SCHEDULE_unknown;
1021 llvm::Value *Chunk = nullptr;
1023 cast_or_null<OMPScheduleClause>(S.getSingleClause(OMPC_schedule))) {
1024 ScheduleKind = C->getScheduleKind();
1025 if (const auto *Ch = C->getChunkSize()) {
1026 if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
1028 const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl());
1029 CGF.EmitVarDecl(*ImpVar);
1030 CGF.EmitStoreThroughLValue(
1031 CGF.EmitAnyExpr(Ch),
1032 CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
1033 ImpVar->getType()));
1038 if (!C->getHelperChunkSize() || !OuterRegion) {
1039 Chunk = CGF.EmitScalarExpr(Ch);
1040 Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(),
1041 S.getIterationVariable()->getType());
  return std::make_pair(Chunk, ScheduleKind);
}
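// Shared worksharing-loop codegen for 'for' and 'for simd': emit the
// iteration variable and trip count, check the precondition, emit the
// LB/UB/ST/IL helper variables and the privatized clauses, then either a
// single statically scheduled loop (emitForInit, inner loop,
// emitForStaticFinish) or the dispatching outer loop in EmitOMPForOuterLoop,
// followed by reduction, lastprivate and simd finalization. Returns whether a
// 'lastprivate' clause is present so callers can emit the implicit barrier
// even under 'nowait'.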
1048 bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
1049 // Emit the loop iteration variable.
1050 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
1051 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
1052 EmitVarDecl(*IVDecl);
1054 // Emit the iterations count variable.
1055 // If it is not a variable, Sema decided to calculate iterations count on each
1056 // iteration (e.g., it is foldable into a constant).
1057 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
1058 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
1059 // Emit calculation of the iterations count.
1060 EmitIgnoredExpr(S.getCalcLastIteration());
1063 auto &RT = CGM.getOpenMPRuntime();
1065 bool HasLastprivateClause;
  // Check pre-condition.
  {
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return false;
    } else {
      auto *ThenBlock = createBasicBlock("omp.precond.then");
1078 ContBlock = createBasicBlock("omp.precond.end");
1079 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
1080 getProfileCount(&S));
1081 EmitBlock(ThenBlock);
1082 incrementProfileCounter(&S);
1085 emitAlignedClause(*this, S);
1086 EmitOMPLinearClauseInit(S);
    // Emit 'then' code.
    {
      // Emit helper vars inits.
      LValue LB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
      LValue UB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
1099 OMPPrivateScope LoopScope(*this);
1100 if (EmitOMPFirstprivateClause(S, LoopScope)) {
1101 // Emit implicit barrier to synchronize threads and avoid data races on
1102 // initialization of firstprivate variables.
1103 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
1106 EmitOMPPrivateClause(S, LoopScope);
1107 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
1108 EmitOMPReductionClauseInit(S, LoopScope);
1109 emitPrivateLoopCounters(*this, LoopScope, S.counters());
1110 emitPrivateLinearVars(*this, S, LoopScope);
1111 (void)LoopScope.Privatize();
      // Detect the loop schedule kind and chunk.
      llvm::Value *Chunk = nullptr;
      OpenMPScheduleClauseKind ScheduleKind;
      auto ScheduleInfo = emitScheduleClause(*this, S, /*OuterRegion=*/false);
1118 Chunk = ScheduleInfo.first;
1119 ScheduleKind = ScheduleInfo.second;
1120 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1121 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1122 const bool Ordered = S.getSingleClause(OMPC_ordered) != nullptr;
      if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr) &&
          !Ordered) {
        if (isOpenMPSimdDirective(S.getDirectiveKind())) {
          EmitOMPSimdInit(S);
        }
        // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1130 // When no chunk_size is specified, the iteration space is divided into
1131 // chunks that are approximately equal in size, and at most one chunk is
1132 // distributed to each thread. Note that the size of the chunks is
1133 // unspecified in this case.
1134 RT.emitForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
1135 Ordered, IL.getAddress(), LB.getAddress(),
1136 UB.getAddress(), ST.getAddress());
1137 auto LoopExit = getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
1138 // UB = min(UB, GlobalUB);
1139 EmitIgnoredExpr(S.getEnsureUpperBound());
1141 EmitIgnoredExpr(S.getInit());
1142 // while (idx <= UB) { BODY; ++idx; }
        EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                         S.getInc(),
                         [&S, LoopExit](CodeGenFunction &CGF) {
                           CGF.EmitOMPLoopBody(S, LoopExit);
                           CGF.EmitStopPoint(&S);
                         },
                         [](CodeGenFunction &) {});
1150 EmitBlock(LoopExit.getBlock());
1151 // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getLocStart());
      } else {
1154 // Emit the outer loop, which requests its work chunk [LB..UB] from
1155 // runtime and runs the inner loop to process it.
1156 EmitOMPForOuterLoop(ScheduleKind, S, LoopScope, Ordered,
1157 LB.getAddress(), UB.getAddress(), ST.getAddress(),
                            IL.getAddress(), Chunk);
      }
1160 EmitOMPReductionClauseFinal(S);
1161 // Emit final copy of the lastprivate variables if IsLastIter != 0.
1162 if (HasLastprivateClause)
1163 EmitOMPLastprivateClauseFinal(
1164 S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
    }
    if (isOpenMPSimdDirective(S.getDirectiveKind())) {
      EmitOMPSimdFinal(S);
    }
    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
  return HasLastprivateClause;
}
1178 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
1179 LexicalScope Scope(*this, S.getSourceRange());
1180 bool HasLastprivates = false;
1181 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
1182 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
1184 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen);
1186 // Emit an implicit barrier at the end.
1187 if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
1188 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
1192 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
1193 LexicalScope Scope(*this, S.getSourceRange());
1194 bool HasLastprivates = false;
1195 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
1196 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
1198 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
1200 // Emit an implicit barrier at the end.
1201 if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
1202 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
                                const Twine &Name,
                                llvm::Value *Init = nullptr) {
  auto LVal = CGF.MakeNaturalAlignAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
  if (Init)
    CGF.EmitScalarInit(Init, LVal);
  return LVal;
}
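// 'sections' is lowered as a statically scheduled loop over the section
// index: each iteration switches on the iteration variable to execute one
// section statement (see the switch sketch below). When only one section is
// present the construct degenerates into a 'single'-like region instead.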
OpenMPDirectiveKind
CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
1217 auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
1218 auto *CS = dyn_cast<CompoundStmt>(Stmt);
1219 if (CS && CS->size() > 1) {
1220 bool HasLastprivates = false;
1221 auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) {
1222 auto &C = CGF.CGM.getContext();
1223 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
1224 // Emit helper vars inits.
1225 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
1226 CGF.Builder.getInt32(0));
1227 auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1);
      LValue UB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.",
                                    GlobalUBVal);
1230 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
1231 CGF.Builder.getInt32(1));
1232 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
1233 CGF.Builder.getInt32(0));
1235 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
1236 OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
1237 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
1238 OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
1239 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
1240 // Generate condition for loop.
1241 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
1242 OK_Ordinary, S.getLocStart(),
1243 /*fpContractable=*/false);
1244 // Increment for loop counter.
1245 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue,
1246 OK_Ordinary, S.getLocStart());
1247 auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) {
1248 // Iterate through all sections and emit a switch construct:
1251 // <SectionStmt[0]>;
1254 // case <NumSection> - 1:
1255 // <SectionStmt[<NumSection> - 1]>;
1258 // .omp.sections.exit:
1259 auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
1260 auto *SwitchStmt = CGF.Builder.CreateSwitch(
            CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
            CS->size());
1263 unsigned CaseNumber = 0;
1264 for (auto *SubStmt : CS->children()) {
1265 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
1266 CGF.EmitBlock(CaseBB);
1267 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
1268 CGF.EmitStmt(SubStmt);
          CGF.EmitBranch(ExitBB);
          ++CaseNumber;
        }
        CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
      };
1275 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
1276 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
1277 // Emit implicit barrier to synchronize threads and avoid data races on
1278 // initialization of firstprivate variables.
        CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                   OMPD_unknown);
      }
1282 CGF.EmitOMPPrivateClause(S, LoopScope);
1283 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
1284 CGF.EmitOMPReductionClauseInit(S, LoopScope);
1285 (void)LoopScope.Privatize();
1287 // Emit static non-chunked loop.
1288 CGF.CGM.getOpenMPRuntime().emitForInit(
1289 CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
1290 /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
1291 LB.getAddress(), UB.getAddress(), ST.getAddress());
1292 // UB = min(UB, GlobalUB);
1293 auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
1294 auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
1295 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
1296 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
1298 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
1299 // while (idx <= UB) { BODY; ++idx; }
1300 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
1301 [](CodeGenFunction &) {});
1302 // Tell the runtime we are done.
1303 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
1304 CGF.EmitOMPReductionClauseFinal(S);
1306 // Emit final copy of the lastprivate variables if IsLastIter != 0.
1307 if (HasLastprivates)
1308 CGF.EmitOMPLastprivateClauseFinal(
1309 S, CGF.Builder.CreateIsNotNull(
1310 CGF.EmitLoadOfScalar(IL, S.getLocStart())));
1313 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen);
1314 // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
1315 // clause. Otherwise the barrier will be generated by the codegen for the
1317 if (HasLastprivates && S.getSingleClause(OMPC_nowait)) {
1318 // Emit implicit barrier to synchronize threads and avoid data races on
1319 // initialization of firstprivate variables.
      CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
                                             OMPD_unknown);
    }
    return OMPD_sections;
  }
1325 // If only one section is found - no need to generate loop, emit as a single
1327 bool HasFirstprivates;
1328 // No need to generate reductions for sections with single section region, we
1329 // can use original shared variables for all operations.
1330 bool HasReductions = !S.getClausesOfKind(OMPC_reduction).empty();
1331 // No need to generate lastprivates for sections with single section region,
1332 // we can use original shared variable for all calculations with barrier at
1333 // the end of the sections.
1334 bool HasLastprivates = !S.getClausesOfKind(OMPC_lastprivate).empty();
1335 auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) {
1336 CodeGenFunction::OMPPrivateScope SingleScope(CGF);
1337 HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
1338 CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();
    CGF.EmitStmt(Stmt);
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
                                          llvm::None, llvm::None, llvm::None,
                                          llvm::None);
1346 // Emit barrier for firstprivates, lastprivates or reductions only if
1347 // 'sections' directive has 'nowait' clause. Otherwise the barrier will be
1348 // generated by the codegen for the directive.
1349 if ((HasFirstprivates || HasLastprivates || HasReductions) &&
1350 S.getSingleClause(OMPC_nowait)) {
1351 // Emit implicit barrier to synchronize threads and avoid data races on
1352 // initialization of firstprivate variables.
1353 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_unknown);
1358 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
1359 LexicalScope Scope(*this, S.getSourceRange());
1360 OpenMPDirectiveKind EmittedAs = EmitSections(S);
1361 // Emit an implicit barrier at the end.
1362 if (!S.getSingleClause(OMPC_nowait)) {
1363 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs);
1367 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
1368 LexicalScope Scope(*this, S.getSourceRange());
1369 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1370 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1371 CGF.EnsureInsertPoint();
1373 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen);
1376 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
1377 llvm::SmallVector<const Expr *, 8> CopyprivateVars;
1378 llvm::SmallVector<const Expr *, 8> DestExprs;
1379 llvm::SmallVector<const Expr *, 8> SrcExprs;
1380 llvm::SmallVector<const Expr *, 8> AssignmentOps;
1381 // Check if there are any 'copyprivate' clauses associated with this
1384 // Build a list of copyprivate variables along with helper expressions
1385 // (<source>, <destination>, <destination>=<source> expressions)
1386 for (auto &&I = S.getClausesOfKind(OMPC_copyprivate); I; ++I) {
1387 auto *C = cast<OMPCopyprivateClause>(*I);
1388 CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
1389 DestExprs.append(C->destination_exprs().begin(),
1390 C->destination_exprs().end());
1391 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
1392 AssignmentOps.append(C->assignment_ops().begin(),
1393 C->assignment_ops().end());
1395 LexicalScope Scope(*this, S.getSourceRange());
1396 // Emit code for 'single' region along with 'copyprivate' clauses
1397 bool HasFirstprivates;
1398 auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) {
1399 CodeGenFunction::OMPPrivateScope SingleScope(CGF);
1400 HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
1401 CGF.EmitOMPPrivateClause(S, SingleScope);
1402 (void)SingleScope.Privatize();
1404 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1405 CGF.EnsureInsertPoint();
1407 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
                                          CopyprivateVars, DestExprs, SrcExprs,
                                          AssignmentOps);
1410 // Emit an implicit barrier at the end (to avoid data race on firstprivate
1411 // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
1412 if ((!S.getSingleClause(OMPC_nowait) || HasFirstprivates) &&
1413 CopyprivateVars.empty()) {
1414 CGM.getOpenMPRuntime().emitBarrierCall(
1415 *this, S.getLocStart(),
1416 S.getSingleClause(OMPC_nowait) ? OMPD_unknown : OMPD_single);
1420 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
1421 LexicalScope Scope(*this, S.getSourceRange());
1422 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1423 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1424 CGF.EnsureInsertPoint();
1426 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
1429 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
1430 LexicalScope Scope(*this, S.getSourceRange());
1431 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1432 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1433 CGF.EnsureInsertPoint();
1435 CGM.getOpenMPRuntime().emitCriticalRegion(
1436 *this, S.getDirectiveName().getAsString(), CodeGen, S.getLocStart());
1439 void CodeGenFunction::EmitOMPParallelForDirective(
1440 const OMPParallelForDirective &S) {
1441 // Emit directive as a combined directive that consists of two implicit
1442 // directives: 'parallel' with 'for' directive.
1443 LexicalScope Scope(*this, S.getSourceRange());
1444 (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
1445 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1446 CGF.EmitOMPWorksharingLoop(S);
1447 // Emit implicit barrier at the end of parallel region, but this barrier
1448 // is at the end of 'for' directive, so emit it as the implicit barrier for
1449 // this 'for' directive.
1450 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
1453 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
1456 void CodeGenFunction::EmitOMPParallelForSimdDirective(
1457 const OMPParallelForSimdDirective &S) {
1458 // Emit directive as a combined directive that consists of two implicit
1459 // directives: 'parallel' with 'for' directive.
1460 LexicalScope Scope(*this, S.getSourceRange());
1461 (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
1462 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1463 CGF.EmitOMPWorksharingLoop(S);
1464 // Emit implicit barrier at the end of parallel region, but this barrier
1465 // is at the end of 'for' directive, so emit it as the implicit barrier for
1466 // this 'for' directive.
1467 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
1470 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
1473 void CodeGenFunction::EmitOMPParallelSectionsDirective(
1474 const OMPParallelSectionsDirective &S) {
1475 // Emit directive as a combined directive that consists of two implicit
1476 // directives: 'parallel' with 'sections' directive.
1477 LexicalScope Scope(*this, S.getSourceRange());
1478 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1479 (void)CGF.EmitSections(S);
1480 // Emit implicit barrier at the end of parallel region.
1481 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
  emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
}
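// Lowering of '#pragma omp task': collect the private and firstprivate
// variables and the 'depend' items, outline the task body via
// emitTaskOutlinedFunction (the outlined body reloads its private copies
// through the runtime-provided copy function), and evaluate the 'final' and
// 'if' clauses that parametrize the task emission.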
1487 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
1488 // Emit outlined function for task construct.
1489 LexicalScope Scope(*this, S.getSourceRange());
1490 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
1491 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
1492 auto *I = CS->getCapturedDecl()->param_begin();
1493 auto *PartId = std::next(I);
1494 // The first function argument for tasks is a thread id, the second one is a
1495 // part id (0 for tied tasks, >=0 for untied task).
1496 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
1497 // Get list of private variables.
1498 llvm::SmallVector<const Expr *, 8> PrivateVars;
1499 llvm::SmallVector<const Expr *, 8> PrivateCopies;
1500 for (auto &&I = S.getClausesOfKind(OMPC_private); I; ++I) {
1501 auto *C = cast<OMPPrivateClause>(*I);
1502 auto IRef = C->varlist_begin();
1503 for (auto *IInit : C->private_copies()) {
1504 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
1505 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
1506 PrivateVars.push_back(*IRef);
1507 PrivateCopies.push_back(IInit);
1512 EmittedAsPrivate.clear();
1513 // Get list of firstprivate variables.
1514 llvm::SmallVector<const Expr *, 8> FirstprivateVars;
1515 llvm::SmallVector<const Expr *, 8> FirstprivateCopies;
1516 llvm::SmallVector<const Expr *, 8> FirstprivateInits;
1517 for (auto &&I = S.getClausesOfKind(OMPC_firstprivate); I; ++I) {
1518 auto *C = cast<OMPFirstprivateClause>(*I);
1519 auto IRef = C->varlist_begin();
1520 auto IElemInitRef = C->inits().begin();
1521 for (auto *IInit : C->private_copies()) {
1522 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
1523 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
1524 FirstprivateVars.push_back(*IRef);
1525 FirstprivateCopies.push_back(IInit);
1526 FirstprivateInits.push_back(*IElemInitRef);
1527 }
1528 ++IRef, ++IElemInitRef;
1529 }
1530 }
1531 // Build list of dependences.
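// Each 'depend' clause item is recorded as a (kind, expression) pair,
// e.g. depend(in: x) -> (OMPC_DEPEND_in, x) and depend(out: y) ->
// (OMPC_DEPEND_out, y) (illustrative); the list is passed to the runtime
// via emitTaskCall below.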
1532 llvm::SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 8>
1533 Dependences;
1534 for (auto &&I = S.getClausesOfKind(OMPC_depend); I; ++I) {
1535 auto *C = cast<OMPDependClause>(*I);
1536 for (auto *IRef : C->varlists()) {
1537 Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
1538 }
1539 }
1540 auto &&CodeGen = [PartId, &S, &PrivateVars, &FirstprivateVars](
1541 CodeGenFunction &CGF) {
1542 // Set proper addresses for generated private copies.
1543 auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
1544 OMPPrivateScope Scope(CGF);
1545 if (!PrivateVars.empty() || !FirstprivateVars.empty()) {
1546 auto *CopyFn = CGF.Builder.CreateAlignedLoad(
1547 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)),
1548 CGF.PointerAlignInBytes);
1549 auto *PrivatesPtr = CGF.Builder.CreateAlignedLoad(
1550 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)),
1551 CGF.PointerAlignInBytes);
1553 llvm::SmallVector<std::pair<const VarDecl *, llvm::Value *>, 16>
1554 PrivatePtrs;
1555 llvm::SmallVector<llvm::Value *, 16> CallArgs;
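// CallArgs is laid out as: the privates block pointer loaded above,
// followed by one pointer-to-pointer temporary per private and
// firstprivate variable; the copy function loaded from param(3) fills
// those temporaries with the addresses of the corresponding copies.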
1556 CallArgs.push_back(PrivatesPtr);
1557 for (auto *E : PrivateVars) {
1558 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1559 auto *PrivatePtr =
1560 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
1561 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
1562 CallArgs.push_back(PrivatePtr);
1563 }
1564 for (auto *E : FirstprivateVars) {
1565 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1566 auto *PrivatePtr =
1567 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
1568 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
1569 CallArgs.push_back(PrivatePtr);
1570 }
1571 CGF.EmitRuntimeCall(CopyFn, CallArgs);
1572 for (auto &&Pair : PrivatePtrs) {
1573 auto *Replacement =
1574 CGF.Builder.CreateAlignedLoad(Pair.second, CGF.PointerAlignInBytes);
1575 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
1576 }
1577 }
1578 (void)Scope.Privatize();
1580 // TODO: emit code for untied tasks.
1582 CGF.EmitStmt(CS->getCapturedStmt());
1583 };
1584 auto OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
1585 S, *I, OMPD_task, CodeGen);
1586 // Check if we should emit tied or untied task.
1587 bool Tied = !S.getSingleClause(OMPC_untied);
1588 // Check if the task is final
1589 llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
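// The int part of 'Final' holds a compile-time final value when the
// 'final' clause condition folds to a constant; otherwise the pointer
// part holds the emitted boolean condition.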
1590 if (auto *Clause = S.getSingleClause(OMPC_final)) {
1591 // If the condition constant folds and can be elided, try to avoid emitting
1592 // the condition and the dead arm of the if/else.
1593 auto *Cond = cast<OMPFinalClause>(Clause)->getCondition();
1594 bool CondConstant;
1595 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
1596 Final.setInt(CondConstant);
1597 else
1598 Final.setPointer(EvaluateExprAsBool(Cond));
1599 } else {
1600 // By default the task is not final.
1601 Final.setInt(/*IntVal=*/false);
1602 }
1603 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
1604 const Expr *IfCond = nullptr;
1605 if (auto C = S.getSingleClause(OMPC_if)) {
1606 IfCond = cast<OMPIfClause>(C)->getCondition();
1607 }
1608 CGM.getOpenMPRuntime().emitTaskCall(
1609 *this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy,
1610 CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars,
1611 FirstprivateCopies, FirstprivateInits, Dependences);
1612 }
1614 void CodeGenFunction::EmitOMPTaskyieldDirective(
1615 const OMPTaskyieldDirective &S) {
1616 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
1619 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
1620 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
1623 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
1624 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
1627 void CodeGenFunction::EmitOMPTaskgroupDirective(
1628 const OMPTaskgroupDirective &S) {
1629 LexicalScope Scope(*this, S.getSourceRange());
1630 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1631 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1632 CGF.EnsureInsertPoint();
1633 };
1634 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
1637 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
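// Collect the optional flush list, e.g. '#pragma omp flush(a, b)' passes
// {a, b} to the runtime, while a bare '#pragma omp flush' passes an empty
// list (illustrative).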
1638 CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
1639 if (auto C = S.getSingleClause(/*K*/ OMPC_flush)) {
1640 auto FlushClause = cast<OMPFlushClause>(C);
1641 return llvm::makeArrayRef(FlushClause->varlist_begin(),
1642 FlushClause->varlist_end());
1643 }
1644 return llvm::None;
1645 }(), S.getLocStart());
1646 }
1648 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
1649 LexicalScope Scope(*this, S.getSourceRange());
1650 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1651 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1652 CGF.EnsureInsertPoint();
1653 };
1654 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart());
1657 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
1658 QualType SrcType, QualType DestType) {
1659 assert(CGF.hasScalarEvaluationKind(DestType) &&
1660 "DestType must have scalar evaluation kind.");
1661 assert(!Val.isAggregate() && "Must be a scalar or complex.");
1662 return Val.isScalar()
1663 ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType)
1664 : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
1665 DestType);
1666 }
1668 static CodeGenFunction::ComplexPairTy
1669 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
1670 QualType DestType) {
1671 assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
1672 "DestType must have complex evaluation kind.");
1673 CodeGenFunction::ComplexPairTy ComplexVal;
1674 if (Val.isScalar()) {
1675 // Convert the input element to the element type of the complex.
1676 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
1677 auto ScalarVal =
1678 CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestElementType);
1679 ComplexVal = CodeGenFunction::ComplexPairTy(
1680 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
1681 } else {
1682 assert(Val.isComplex() && "Must be a scalar or complex.");
1683 auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
1684 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
1685 ComplexVal.first = CGF.EmitScalarConversion(
1686 Val.getComplexVal().first, SrcElementType, DestElementType);
1687 ComplexVal.second = CGF.EmitScalarConversion(
1688 Val.getComplexVal().second, SrcElementType, DestElementType);
1689 }
1690 return ComplexVal;
1691 }
1693 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
1694 LValue LVal, RValue RVal) {
1695 if (LVal.isGlobalReg()) {
1696 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
1697 } else {
1698 CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent
1699 : llvm::Monotonic,
1700 LVal.isVolatile(), /*IsInit=*/false);
1701 }
1702 }
1704 static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal,
1705 QualType RValTy) {
1706 switch (CGF.getEvaluationKind(LVal.getType())) {
1707 case TEK_Scalar:
1708 CGF.EmitStoreThroughLValue(
1709 RValue::get(convertToScalarValue(CGF, RVal, RValTy, LVal.getType())),
1710 LVal);
1711 break;
1712 case TEK_Complex:
1713 CGF.EmitStoreOfComplex(
1714 convertToComplexValue(CGF, RVal, RValTy, LVal.getType()), LVal,
1715 /*isInit=*/false);
1716 break;
1717 case TEK_Aggregate:
1718 llvm_unreachable("Must be a scalar or complex.");
1719 }
1720 }
1722 static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
1723 const Expr *X, const Expr *V,
1724 SourceLocation Loc) {
1725 // v = x;
1726 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
1727 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
1728 LValue XLValue = CGF.EmitLValue(X);
1729 LValue VLValue = CGF.EmitLValue(V);
1730 RValue Res = XLValue.isGlobalReg()
1731 ? CGF.EmitLoadOfLValue(XLValue, Loc)
1732 : CGF.EmitAtomicLoad(XLValue, Loc,
1733 IsSeqCst ? llvm::SequentiallyConsistent
1734 : llvm::Monotonic,
1735 XLValue.isVolatile());
1736 // OpenMP, 2.12.6, atomic Construct
1737 // Any atomic construct with a seq_cst clause forces the atomically
1738 // performed operation to include an implicit flush operation without a
1739 // list.
1740 if (IsSeqCst)
1741 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
1742 emitSimpleStore(CGF, VLValue, Res, X->getType().getNonReferenceType());
1743 }
1745 static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
1746 const Expr *X, const Expr *E,
1747 SourceLocation Loc) {
1748 // x = expr;
1749 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
1750 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
1751 // OpenMP, 2.12.6, atomic Construct
1752 // Any atomic construct with a seq_cst clause forces the atomically
1753 // performed operation to include an implicit flush operation without a
1754 // list.
1755 if (IsSeqCst)
1756 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
1757 }
1759 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
1760 RValue Update,
1761 BinaryOperatorKind BO,
1762 llvm::AtomicOrdering AO,
1763 bool IsXLHSInRHSPart) {
1764 auto &Context = CGF.CGM.getContext();
1765 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
1766 // expression is simple and atomic is allowed for the given type for the
1767 // target platform.
1768 if (BO == BO_Comma || !Update.isScalar() ||
1769 !Update.getScalarVal()->getType()->isIntegerTy() ||
1770 !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
1771 (Update.getScalarVal()->getType() !=
1772 X.getAddress()->getType()->getPointerElementType())) ||
1773 !X.getAddress()->getType()->getPointerElementType()->isIntegerTy() ||
1774 !Context.getTargetInfo().hasBuiltinAtomic(
1775 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
1776 return std::make_pair(false, RValue::get(nullptr));
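// When the checks above pass, e.g. '#pragma omp atomic update' on
// 'x += 1;' with integer 'x' (illustrative), the update below maps to a
// single 'atomicrmw' instruction instead of a compare-and-swap loop.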
1778 llvm::AtomicRMWInst::BinOp RMWOp;
1779 switch (BO) {
1780 case BO_Add:
1781 RMWOp = llvm::AtomicRMWInst::Add;
1782 break;
1783 case BO_Sub:
1784 if (!IsXLHSInRHSPart)
1785 return std::make_pair(false, RValue::get(nullptr));
1786 RMWOp = llvm::AtomicRMWInst::Sub;
1787 break;
1788 case BO_And:
1789 RMWOp = llvm::AtomicRMWInst::And;
1790 break;
1791 case BO_Or:
1792 RMWOp = llvm::AtomicRMWInst::Or;
1793 break;
1794 case BO_Xor:
1795 RMWOp = llvm::AtomicRMWInst::Xor;
1796 break;
1797 case BO_LT:
1798 RMWOp = X.getType()->hasSignedIntegerRepresentation()
1799 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
1800 : llvm::AtomicRMWInst::Max)
1801 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
1802 : llvm::AtomicRMWInst::UMax);
1803 break;
1804 case BO_GT:
1805 RMWOp = X.getType()->hasSignedIntegerRepresentation()
1806 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
1807 : llvm::AtomicRMWInst::Min)
1808 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
1809 : llvm::AtomicRMWInst::UMin);
1810 break;
1811 case BO_Assign:
1812 RMWOp = llvm::AtomicRMWInst::Xchg;
1813 break;
1821 return std::make_pair(false, RValue::get(nullptr));
1839 llvm_unreachable("Unsupported atomic update operation");
1840 }
1841 auto *UpdateVal = Update.getScalarVal();
1842 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
1843 UpdateVal = CGF.Builder.CreateIntCast(
1844 IC, X.getAddress()->getType()->getPointerElementType(),
1845 X.getType()->hasSignedIntegerRepresentation());
1846 }
1847 auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
1848 return std::make_pair(true, RValue::get(Res));
1849 }
1851 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
1852 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
1853 llvm::AtomicOrdering AO, SourceLocation Loc,
1854 const llvm::function_ref<RValue(RValue)> &CommonGen) {
1855 // Update expressions are allowed to have the following forms:
1856 // x binop= expr; -> xrval + expr;
1857 // x++, ++x -> xrval + 1;
1858 // x--, --x -> xrval - 1;
1859 // x = x binop expr; -> xrval binop expr
1860 // x = expr Op x; - > expr binop xrval;
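// Strategy: first try to lower the update to a single 'atomicrmw'; if
// that is not possible, fall back to a plain store for global register
// lvalues or to an atomic compare-and-exchange update otherwise.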
1861 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
1862 if (!Res.first) {
1863 if (X.isGlobalReg()) {
1864 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
1865 // 'xrval'.
1866 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
1867 } else {
1868 // Perform compare-and-swap procedure.
1869 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
1870 }
1871 }
1872 return Res;
1873 }
1875 static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
1876 const Expr *X, const Expr *E,
1877 const Expr *UE, bool IsXLHSInRHSPart,
1878 SourceLocation Loc) {
1879 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
1880 "Update expr in 'atomic update' must be a binary operator.");
1881 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
1882 // Update expressions are allowed to have the following forms:
1883 // x binop= expr; -> xrval + expr;
1884 // x++, ++x -> xrval + 1;
1885 // x--, --x -> xrval - 1;
1886 // x = x binop expr; -> xrval binop expr
1887 // x = expr Op x; - > expr binop xrval;
1888 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
1889 LValue XLValue = CGF.EmitLValue(X);
1890 RValue ExprRValue = CGF.EmitAnyExpr(E);
1891 auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
1892 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
1893 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
1894 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
1895 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
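// 'UE' refers to 'x' and 'expr' through OpaqueValueExprs; binding them in
// 'Gen' below lets EmitAnyExpr(UE) recompute the new value from whatever
// old value of 'x' the atomic update loop provides.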
1896 auto &&Gen =
1897 [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
1898 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
1899 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
1900 return CGF.EmitAnyExpr(UE);
1901 };
1902 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
1903 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
1904 // OpenMP, 2.12.6, atomic Construct
1905 // Any atomic construct with a seq_cst clause forces the atomically
1906 // performed operation to include an implicit flush operation without a
1907 // list.
1908 if (IsSeqCst)
1909 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
1910 }
1912 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
1913 QualType SourceType, QualType ResType) {
1914 switch (CGF.getEvaluationKind(ResType)) {
1915 case TEK_Scalar:
1916 return RValue::get(convertToScalarValue(CGF, Value, SourceType, ResType));
1917 case TEK_Complex: {
1918 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType);
1919 return RValue::getComplex(Res.first, Res.second);
1920 }
1921 case TEK_Aggregate:
1922 break;
1923 }
1924 llvm_unreachable("Must be a scalar or complex.");
1925 }
1927 static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
1928 bool IsPostfixUpdate, const Expr *V,
1929 const Expr *X, const Expr *E,
1930 const Expr *UE, bool IsXLHSInRHSPart,
1931 SourceLocation Loc) {
1932 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
1933 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
1934 RValue NewVVal;
1935 LValue VLValue = CGF.EmitLValue(V);
1936 LValue XLValue = CGF.EmitLValue(X);
1937 RValue ExprRValue = CGF.EmitAnyExpr(E);
1938 auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
1939 QualType NewVValType;
1940 if (UE) {
1941 // 'x' is updated with some additional value.
1942 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
1943 "Update expr in 'atomic capture' must be a binary operator.");
1944 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
1945 // Update expressions are allowed to have the following forms:
1946 // x binop= expr; -> xrval + expr;
1947 // x++, ++x -> xrval + 1;
1948 // x--, --x -> xrval - 1;
1949 // x = x binop expr; -> xrval binop expr
1950 // x = expr Op x; - > expr binop xrval;
1951 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
1952 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
1953 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
1954 NewVValType = XRValExpr->getType();
1955 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
1956 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
1957 IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
1958 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
1959 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
1960 RValue Res = CGF.EmitAnyExpr(UE);
1961 NewVVal = IsPostfixUpdate ? XRValue : Res;
1962 return Res;
1963 };
1964 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
1965 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
1966 if (Res.first) {
1967 // 'atomicrmw' instruction was generated.
1968 if (IsPostfixUpdate) {
1969 // Use old value from 'atomicrmw'.
1970 NewVVal = Res.second;
1971 } else {
1972 // 'atomicrmw' does not provide new value, so evaluate it using old
1973 // 'x' value.
1974 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
1975 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
1976 NewVVal = CGF.EmitAnyExpr(UE);
1977 }
1978 }
1979 } else {
1980 // 'x' is simply rewritten with some 'expr'.
1981 NewVValType = X->getType().getNonReferenceType();
1981 NewVValType = X->getType().getNonReferenceType();
1982 ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
1983 X->getType().getNonReferenceType());
1984 auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
1985 NewVVal = XRValue;
1986 return ExprRValue;
1987 };
1988 // Try to perform atomicrmw xchg, otherwise simple exchange.
1989 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
1990 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
1991 Loc, Gen);
1992 if (Res.first) {
1993 // 'atomicrmw' instruction was generated.
1994 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
1995 }
1996 }
1997 // Emit post-update store to 'v' of old/new 'x' value.
1998 emitSimpleStore(CGF, VLValue, NewVVal, NewVValType);
1999 // OpenMP, 2.12.6, atomic Construct
2000 // Any atomic construct with a seq_cst clause forces the atomically
2001 // performed operation to include an implicit flush operation without a
2002 // list.
2003 if (IsSeqCst)
2004 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
2005 }
2007 static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
2008 bool IsSeqCst, bool IsPostfixUpdate,
2009 const Expr *X, const Expr *V, const Expr *E,
2010 const Expr *UE, bool IsXLHSInRHSPart,
2011 SourceLocation Loc) {
2012 switch (Kind) {
2013 case OMPC_read:
2014 EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
2015 break;
2016 case OMPC_write:
2017 EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
2018 break;
2019 case OMPC_unknown:
2020 case OMPC_update:
2021 EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
2022 break;
2023 case OMPC_capture:
2024 EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
2025 IsXLHSInRHSPart, Loc);
2026 break;
2029 case OMPC_num_threads:
2031 case OMPC_firstprivate:
2032 case OMPC_lastprivate:
2033 case OMPC_reduction:
2042 case OMPC_copyprivate:
2044 case OMPC_proc_bind:
2049 case OMPC_threadprivate:
2051 case OMPC_mergeable:
2052 llvm_unreachable("Clause is not allowed in 'omp atomic'.");
2053 }
2054 }
2056 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
2057 bool IsSeqCst = S.getSingleClause(/*K=*/OMPC_seq_cst);
2058 OpenMPClauseKind Kind = OMPC_unknown;
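// E.g. '#pragma omp atomic capture seq_cst' (illustrative) yields
// Kind == OMPC_capture from the scan below, with IsSeqCst already set.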
2059 for (auto *C : S.clauses()) {
2060 // Find first clause (skip seq_cst clause, if it is first).
2061 if (C->getClauseKind() != OMPC_seq_cst) {
2062 Kind = C->getClauseKind();
2063 break;
2064 }
2065 }
2067 const auto *CS =
2068 S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
2069 if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
2070 enterFullExpression(EWC);
2072 // Processing for statements under 'atomic capture'.
2073 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
2074 for (const auto *C : Compound->body()) {
2075 if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
2076 enterFullExpression(EWC);
2081 LexicalScope Scope(*this, S.getSourceRange());
2082 auto &&CodeGen = [&S, Kind, IsSeqCst](CodeGenFunction &CGF) {
2083 EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
2084 S.getV(), S.getExpr(), S.getUpdateExpr(),
2085 S.isXLHSInRHSPart(), S.getLocStart());
2086 };
2087 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
2088 }
2090 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) {
2091 llvm_unreachable("CodeGen for 'omp target' is not supported yet.");
2094 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
2095 llvm_unreachable("CodeGen for 'omp teams' is not supported yet.");
2098 void CodeGenFunction::EmitOMPCancellationPointDirective(
2099 const OMPCancellationPointDirective &S) {
2100 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
2101 S.getCancelRegion());
2104 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
2105 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(),
2106 S.getCancelRegion());
2109 CodeGenFunction::JumpDest
2110 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
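// Cancellation in a 'parallel' or 'task' region exits the outlined
// function, while cancellation in a worksharing region branches to the
// break block of the innermost loop/sections construct, if any.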
2111 if (Kind == OMPD_parallel || Kind == OMPD_task)
2112 return ReturnBlock;
2113 else if (Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections)
2114 return BreakContinueStack.empty() ? JumpDest()
2115 : BreakContinueStack.back().BreakBlock;