//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
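/// \brief Emit a loop that copies an array element by element, invoking
/// \a CopyGen for each pair of destination and source element addresses.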
void CodeGenFunction::EmitOMPAggregateAssign(
    llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;
  auto SrcBegin = SrcAddr;
  auto DestBegin = DestAddr;
  auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin);
  // Cast from pointer to array type to pointer to single element.
  SrcBegin = Builder.CreatePointerBitCastOrAddrSpaceCast(SrcBegin,
                                                         DestBegin->getType());
  auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = createBasicBlock("omp.arraycpy.body");
  auto DoneBB = createBasicBlock("omp.arraycpy.done");
  auto IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);
  auto SrcElementCurrent =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementCurrent->addIncoming(SrcBegin, EntryBB);
  auto DestElementCurrent = Builder.CreatePHI(DestBegin->getType(), 2,
                                              "omp.arraycpy.destElementPast");
  DestElementCurrent->addIncoming(DestBegin, EntryBB);

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  auto DestElementNext = Builder.CreateConstGEP1_32(
      DestElementCurrent, /*Idx0=*/1, "omp.arraycpy.dest.element");
  auto SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementCurrent, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  auto Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementCurrent->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementCurrent->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

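/// \brief Copy \a SrcAddr to \a DestAddr using the copy expression \a Copy:
/// a plain aggregate assignment for trivially copyable arrays, an
/// element-by-element copy loop for other arrays, and a single copy
/// expression otherwise.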
void CodeGenFunction::EmitOMPCopy(CodeGenFunction &CGF,
                                  QualType OriginalType, llvm::Value *DestAddr,
                                  llvm::Value *SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      CGF.EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      CGF.EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [&CGF, Copy, SrcVD, DestVD](llvm::Value *DestElement,
                                      llvm::Value *SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(CGF);
            Remap.addPrivate(DestVD, [DestElement]() -> llvm::Value *{
              return DestElement;
            });
            Remap.addPrivate(
                SrcVD, [SrcElement]() -> llvm::Value *{ return SrcElement; });
            (void)Remap.Privatize();
            CGF.EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(CGF);
    Remap.addPrivate(SrcVD, [SrcAddr]() -> llvm::Value *{ return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() -> llvm::Value *{ return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    CGF.EmitIgnoredExpr(Copy);
  }
}

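/// \brief Emit initialization of the private copies for the 'firstprivate'
/// clause variables; returns true if at least one firstprivate copy was
/// emitted.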
bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  for (auto &&I = D.getClausesOfKind(OMPC_firstprivate); I; ++I) {
    auto *C = cast<OMPFirstprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto InitsRef = C->inits().begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsFirstprivate.count(OrigVD) == 0) {
        EmittedAsFirstprivate.insert(OrigVD);
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(
            const_cast<VarDecl *>(OrigVD),
            /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                OrigVD) != nullptr,
            (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        auto *OriginalAddr = EmitLValue(&DRE).getAddress();
        QualType Type = OrigVD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
            auto Emission = EmitAutoVarAlloca(*VD);
            auto *Init = VD->getInit();
            if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
              // Perform simple memcpy.
              EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
                                  Type);
            } else {
              EmitOMPAggregateAssign(
                  Emission.getAllocatedAddress(), OriginalAddr, Type,
                  [this, VDInit, Init](llvm::Value *DestElement,
                                       llvm::Value *SrcElement) {
                    // Clean up any temporaries needed by the initialization.
                    RunCleanupsScope InitScope(*this);
                    // Emit initialization for single element.
                    LocalDeclMap[VDInit] = SrcElement;
                    EmitAnyExprToMem(Init, DestElement,
                                     Init->getType().getQualifiers(),
                                     /*IsInitializer*/ false);
                    LocalDeclMap.erase(VDInit);
                  });
            }
            EmitAutoVarCleanups(Emission);
            return Emission.getAllocatedAddress();
          });
        } else {
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
            // Emit private VarDecl with copy init.
            // Remap temp VDInit variable to the address of the original
            // variable
            // (for proper handling of captured global variables).
            LocalDeclMap[VDInit] = OriginalAddr;
            EmitDecl(*VD);
            LocalDeclMap.erase(VDInit);
            return GetAddrOfLocalVar(VD);
          });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef, ++InitsRef;
    }
  }
  return !EmittedAsFirstprivate.empty();
}

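/// \brief Emit the private copies for the 'private' clause variables and
/// register them in \a PrivateScope.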
void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (auto &&I = D.getClausesOfKind(OMPC_private); I; ++I) {
    auto *C = cast<OMPPrivateClause>(*I);
    auto IRef = C->varlist_begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
              // Emit private VarDecl with copy init.
              EmitDecl(*VD);
              return GetAddrOfLocalVar(VD);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

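/// \brief Emit propagation of the master thread's threadprivate values to the
/// threadprivate copies of the other threads ('copyin' clause); returns true
/// if any copyin code was emitted.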
bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (auto &&I = D.getClausesOfKind(OMPC_copyin); I; ++I) {
    auto *C = cast<OMPCopyinClause>(*I);
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable.
        auto *MasterAddr = VD->isStaticLocal()
                               ? CGM.getStaticLocalDeclAddress(VD)
                               : CGM.GetAddrOfGlobal(VD);
        // Get the address of the threadprivate variable.
        auto *PrivateAddr = EmitLValue(*IRef).getAddress();
        if (CopiedVars.size() == 1) {
          // At first check if current thread is a master thread. If it is, no
          // need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr, CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr, CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(*this, Type, PrivateAddr, MasterAddr, DestVD, SrcVD,
                    AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

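/// \brief Emit initial code for the 'lastprivate' clause: remember the
/// addresses of the original variables and emit the private copies; returns
/// true if at least one 'lastprivate' clause is present.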
bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
    HasAtLeastOneLastprivate = true;
    auto *C = cast<OMPLastprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> llvm::Value *{
          DeclRefExpr DRE(
              const_cast<VarDecl *>(OrigVD),
              /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                  OrigVD) != nullptr,
              (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in codegen
        // for 'firstprivate' clause.
        if (IInit) {
          auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered =
              PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
                // Emit private VarDecl with copy init.
                EmitDecl(*VD);
                return GetAddrOfLocalVar(VD);
              });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef, ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

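/// \brief Emit the final copy of the 'lastprivate' variables back to the
/// original variables, guarded by \a IsLastIterCond.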
void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) {
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  auto *ThenBB = createBasicBlock(".omp.lastprivate.then");
  auto *DoneBB = createBasicBlock(".omp.lastprivate.done");
  Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
  EmitBlock(ThenBB);
  llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates;
  const Expr *LastIterVal = nullptr;
  const Expr *IVExpr = nullptr;
  const Expr *IncExpr = nullptr;
  if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    LastIterVal =
        cast<VarDecl>(cast<DeclRefExpr>(LoopDirective->getUpperBoundVariable())
                          ->getDecl())
            ->getAnyInitializer();
    IVExpr = LoopDirective->getIterationVariable();
    IncExpr = LoopDirective->getInc();
    auto IUpdate = LoopDirective->updates().begin();
    for (auto *E : LoopDirective->counters()) {
      auto *D = cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
      LoopCountersAndUpdates[D] = *IUpdate;
      ++IUpdate;
    }
  }
  {
    llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
    bool FirstLCV = true;
    for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
      auto *C = cast<OMPLastprivateClause>(*I);
      auto IRef = C->varlist_begin();
      auto ISrcRef = C->source_exprs().begin();
      auto IDestRef = C->destination_exprs().begin();
      for (auto *AssignOp : C->assignment_ops()) {
        auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
        QualType Type = PrivateVD->getType();
        auto *CanonicalVD = PrivateVD->getCanonicalDecl();
        if (AlreadyEmittedVars.insert(CanonicalVD).second) {
          // If lastprivate variable is a loop control variable for loop-based
          // directive, update its value before copyin back to original
          // variable.
          if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) {
            if (FirstLCV) {
              EmitAnyExprToMem(LastIterVal, EmitLValue(IVExpr).getAddress(),
                               IVExpr->getType().getQualifiers(),
                               /*IsInitializer=*/false);
              EmitIgnoredExpr(IncExpr);
              FirstLCV = false;
            }
            EmitIgnoredExpr(UpExpr);
          }
          auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
          auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
          // Get the address of the original variable.
          auto *OriginalAddr = GetAddrOfLocalVar(DestVD);
          // Get the address of the private variable.
          auto *PrivateAddr = GetAddrOfLocalVar(PrivateVD);
          EmitOMPCopy(*this, Type, OriginalAddr, PrivateAddr, DestVD, SrcVD,
                      AssignOp);
        }
        ++IRef;
        ++ISrcRef;
        ++IDestRef;
      }
    }
  }
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

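/// \brief Emit initialization of the private copies and of the LHS/RHS helper
/// variables for the 'reduction' clause.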
void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
    auto *C = cast<OMPReductionClause>(*I);
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (auto IRef : C->varlists()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
      auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> llvm::Value *{
        DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        IRef->getType(), VK_LValue, IRef->getExprLoc());
        return EmitLValue(&DRE).getAddress();
      });
      // Emit reduction copy.
      bool IsRegistered =
          PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> llvm::Value *{
            // Emit private VarDecl with reduction init.
            EmitDecl(*PrivateVD);
            return GetAddrOfLocalVar(PrivateVD);
          });
      assert(IsRegistered && "private var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
      ++ILHS, ++IRHS;
    }
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D) {
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
    HasAtLeastOneReduction = true;
    auto *C = cast<OMPReductionClause>(*I);
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getLocEnd(), LHSExprs, RHSExprs, ReductionOps,
        D.getSingleClause(OMPC_nowait) ||
            isOpenMPParallelDirective(D.getDirectiveKind()));
  }
}

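/// \brief Emit the outlined function for a parallel region and the runtime
/// call that spawns it, handling the 'num_threads' and 'if' clauses.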
static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
                                           const OMPExecutableDirective &S,
                                           const RegionCodeGenTy &CodeGen) {
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = CGF.GenerateCapturedStmtArgument(*CS);
  auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
      S, *CS->getCapturedDecl()->param_begin(), CodeGen);
  if (auto C = S.getSingleClause(OMPC_num_threads)) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    auto NumThreadsClause = cast<OMPNumThreadsClause>(C);
    auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                         /*IgnoreResultAssign*/ true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getLocStart());
  }
  const Expr *IfCond = nullptr;
  if (auto C = S.getSingleClause(OMPC_if)) {
    IfCond = cast<OMPIfClause>(C)->getCondition();
  }
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
                                              CapturedStruct, IfCond);
}

void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins || Firstprivates) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables or propagation of the
      // master's values of threadprivate variables to the local instances in
      // all other implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                 OMPD_unknown);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S);
    // Emit implicit barrier at the end of the 'parallel' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_unknown);
  };
  emitCommonOMPParallelDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &S,
                                      bool SeparateIter) {
  RunCleanupsScope BodyScope(*this);
  // Update counters values on current iteration.
  for (auto I : S.updates()) {
    EmitIgnoredExpr(I);
  }
  // Update the linear variables.
  for (auto &&I = S.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    for (auto U : C->updates()) {
      EmitIgnoredExpr(U);
    }
  }

  // On a continue in the body, jump to the end.
  auto Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(JumpDest(), Continue));
  // Emit loop body.
  EmitStmt(S.getBody());
  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (SeparateIter) {
    // TODO: Update lastprivates if the SeparateIter flag is true.
    // This will be implemented in a follow-up OMPLastprivateClause patch, but
    // result should be still correct without it, as we do not make these
    // variables private yet.
  }
}

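/// \brief Emit the inner loop of a worksharing or simd construct: test
/// \a LoopCond, run \a BodyGen for the body, then emit the increment and
/// \a PostIncGen on the back edge.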
void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  auto LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}

void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &S) {
  auto IC = S.counters().begin();
  for (auto F : S.finals()) {
    auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    if (LocalDeclMap.lookup(OrigVD)) {
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      auto *OrigAddr = EmitLValue(&DRE).getAddress();
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
  }
  // Emit the final values of the linear variables.
  for (auto &&I = S.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    auto IC = C->varlist_begin();
    for (auto F : C->finals()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      auto *OrigAddr = EmitLValue(&DRE).getAddress();
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
      ++IC;
    }
  }
}

static void EmitOMPAlignedClause(CodeGenFunction &CGF, CodeGenModule &CGM,
                                 const OMPAlignedClause &Clause) {
  unsigned ClauseAlignment = 0;
  if (auto AlignmentExpr = Clause.getAlignment()) {
    auto AlignmentCI =
        cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
    ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
  }
  for (auto E : Clause.varlists()) {
    unsigned Alignment = ClauseAlignment;
    if (Alignment == 0) {
      // OpenMP [2.8.1, Description]
      // If no optional parameter is specified, implementation-defined default
      // alignments for SIMD instructions on the target platforms are assumed.
      Alignment = CGM.getTargetCodeGenInfo().getOpenMPSimdDefaultAlignment(
          E->getType());
    }
    assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
           "alignment is not power of 2");
    if (Alignment != 0) {
      llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
      CGF.EmitAlignmentAssumption(PtrValue, Alignment);
    }
  }
}

static void EmitPrivateLoopCounters(CodeGenFunction &CGF,
                                    CodeGenFunction::OMPPrivateScope &LoopScope,
                                    ArrayRef<Expr *> Counters) {
  for (auto *E : Counters) {
    auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    (void)LoopScope.addPrivate(VD, [&]() -> llvm::Value *{
      // Emit var without initialization.
      auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
      CGF.EmitAutoVarCleanups(VarEmission);
      return VarEmission.getAllocatedAddress();
    });
  }
}

static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
  EmitPrivateLoopCounters(CGF, PreCondScope, S.counters());
  const VarDecl *IVDecl =
      cast<VarDecl>(cast<DeclRefExpr>(S.getIterationVariable())->getDecl());
  bool IsRegistered = PreCondScope.addPrivate(IVDecl, [&]() -> llvm::Value *{
    // Emit var without initialization.
    auto VarEmission = CGF.EmitAutoVarAlloca(*IVDecl);
    CGF.EmitAutoVarCleanups(VarEmission);
    return VarEmission.getAllocatedAddress();
  });
  assert(IsRegistered && "counter already registered as private");
  // Silence the warning about unused variable.
  (void)IsRegistered;
  (void)PreCondScope.Privatize();
  // Initialize internal counter to 0 to calculate initial values of real
  // counters.
  LValue IV = CGF.EmitLValue(S.getIterationVariable());
  CGF.EmitStoreOfScalar(
      llvm::ConstantInt::getNullValue(
          IV.getAddress()->getType()->getPointerElementType()),
      CGF.EmitLValue(S.getIterationVariable()), /*isInit=*/true);
  // Get initial values of real counters.
  for (auto I : S.updates()) {
    CGF.EmitIgnoredExpr(I);
  }
  // Check that loop is executed at least one time.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
}

static void
EmitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
                      CodeGenFunction::OMPPrivateScope &PrivateScope) {
  for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    for (auto *E : C->varlists()) {
      auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      bool IsRegistered = PrivateScope.addPrivate(VD, [&]()->llvm::Value * {
        // Emit var without initialization.
        auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
        CGF.EmitAutoVarCleanups(VarEmission);
        return VarEmission.getAllocatedAddress();
      });
      assert(IsRegistered && "linear var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
    }
  }
}

void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    // Pragma 'simd' code depends on presence of 'lastprivate'.
    // If present, we have to separate last iteration of the loop:
    //
    // if (PreCond) {
    //   for (IV in 0..LastIteration-1) BODY;
    //   BODY with updates of lastprivate vars;
    //   <Final counter/linear vars updates>;
    // }
    //
    // otherwise (when there's no lastprivate):
    //
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }
    //

    // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
      ContBlock = CGF.createBasicBlock("simd.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }
    // Walk clauses and process safelen/lastprivate.
    bool SeparateIter = false;
    CGF.LoopStack.setParallel();
    CGF.LoopStack.setVectorizerEnable(true);
    for (auto C : S.clauses()) {
      switch (C->getClauseKind()) {
      case OMPC_safelen: {
        RValue Len = CGF.EmitAnyExpr(cast<OMPSafelenClause>(C)->getSafelen(),
                                     AggValueSlot::ignored(), true);
        llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
        CGF.LoopStack.setVectorizerWidth(Val->getZExtValue());
        // In presence of finite 'safelen', it may be unsafe to mark all
        // the memory instructions parallel, because loop-carried
        // dependences of 'safelen' iterations are possible.
        CGF.LoopStack.setParallel(false);
        break;
      }
      case OMPC_aligned:
        EmitOMPAlignedClause(CGF, CGF.CGM, cast<OMPAlignedClause>(*C));
        break;
      case OMPC_lastprivate:
        SeparateIter = true;
        break;
      default:
        // Not handled yet.
        break;
      }
    }

    // Emit inits for the linear variables.
    for (auto &&I = S.getClausesOfKind(OMPC_linear); I; ++I) {
      auto *C = cast<OMPLinearClause>(*I);
      for (auto Init : C->inits()) {
        auto *D = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
        CGF.EmitVarDecl(*D);
      }
    }

    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());

    // Emit the iterations count variable.
    // If it is not a variable, Sema decided to calculate iterations count on
    // each iteration (e.g., it is foldable into a constant).
    if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    for (auto &&I = S.getClausesOfKind(OMPC_linear); I; ++I) {
      auto *C = cast<OMPLinearClause>(*I);
      if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
        if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
          CGF.EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
          // Emit calculation of the linear step.
          CGF.EmitIgnoredExpr(CS);
        }
    }

    {
      OMPPrivateScope LoopScope(CGF);
      EmitPrivateLoopCounters(CGF, LoopScope, S.counters());
      EmitPrivateLinearVars(CGF, S, LoopScope);
      CGF.EmitOMPPrivateClause(S, LoopScope);
      (void)LoopScope.Privatize();
      CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(),
                           S.getCond(SeparateIter), S.getInc(),
                           [&S](CodeGenFunction &CGF) {
                             CGF.EmitOMPLoopBody(S);
                             CGF.EmitStopPoint(&S);
                           },
                           [](CodeGenFunction &) {});
      if (SeparateIter) {
        CGF.EmitOMPLoopBody(S, /*SeparateIter=*/true);
      }
    }
    CGF.EmitOMPSimdFinal(S);
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);
}

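/// \brief Emit the outer dispatch loop for worksharing loops that use a
/// dynamic, guided, auto, runtime, ordered or static chunked schedule: each
/// outer iteration requests a chunk [LB..UB] from the runtime and runs the
/// inner loop over it.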
void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
                                          const OMPLoopDirective &S,
                                          OMPPrivateScope &LoopScope,
                                          bool Ordered, llvm::Value *LB,
                                          llvm::Value *UB, llvm::Value *ST,
                                          llvm::Value *IL, llvm::Value *Chunk) {
  auto &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
  const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);

  assert((Ordered ||
          !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");

  // Emit outer loop.
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(dynamic,chunk_size) is specified, the iterations are
  // distributed to threads in the team in chunks as the threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be distributed. Each chunk contains chunk_size
  // iterations, except for the last chunk to be distributed, which may have
  // fewer iterations. When no chunk_size is specified, it defaults to 1.
  //
  // When schedule(guided,chunk_size) is specified, the iterations are assigned
  // to threads in the team in chunks as the executing threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
  // each chunk is proportional to the number of unassigned iterations divided
  // by the number of threads in the team, decreasing to 1. For a chunk_size
  // with value k (greater than 1), the size of each chunk is determined in the
  // same way, with the restriction that the chunks do not contain fewer than k
  // iterations (except for the last chunk to be assigned, which may have fewer
  // than k iterations).
  //
  // When schedule(auto) is specified, the decision regarding scheduling is
  // delegated to the compiler and/or runtime system. The programmer gives the
  // implementation the freedom to choose any possible mapping of iterations to
  // threads in the team.
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from the
  // run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined.
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) { BODY; ++idx;
  //     __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided into
  // chunks of size chunk_size, and the chunks are assigned to the threads in
  // the team in a round-robin fashion in the order of the thread number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  RT.emitForInit(
      *this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, Ordered, IL, LB,
      (DynamicOrOrdered ? EmitAnyExpr(S.getLastIteration()).getScalarVal()
                        : UB),
      ST, Chunk);

  auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.dispatch.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    // UB = min(UB, GlobalUB)
    EmitIgnoredExpr(S.getEnsureUpperBound());
    // IV = LB
    EmitIgnoredExpr(S.getInit());
    // IV < UB
    BoolCondVal = EvaluateExprAsBool(S.getCond(false));
  } else {
    BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned,
                                 IL, LB, UB, ST);
  }

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
    ExitBlock = createBasicBlock("omp.dispatch.cleanup");

  auto LoopBody = createBasicBlock("omp.dispatch.body");
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);

  // Emit "IV = LB" (in case of static schedule, we have already calculated new
  // LB for loop condition and emitted it above).
  if (DynamicOrOrdered)
    EmitIgnoredExpr(S.getInit());

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  SourceLocation Loc = S.getLocStart();
  // Generate !llvm.loop.parallel metadata for loads and stores for loops with
  // dynamic/guided scheduling and without ordered clause.
  LoopStack.setParallel((ScheduleKind == OMPC_SCHEDULE_dynamic ||
                         ScheduleKind == OMPC_SCHEDULE_guided) &&
                        !Ordered);
  EmitOMPInnerLoop(
      S, LoopScope.requiresCleanups(), S.getCond(/*SeparateIter=*/false),
      S.getInc(),
      [&S](CodeGenFunction &CGF) {
        CGF.EmitOMPLoopBody(S);
        CGF.EmitStopPoint(&S);
      },
      [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
        if (Ordered) {
          CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
              CGF, Loc, IVSize, IVSigned);
        }
      });

  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
    // Emit "LB = LB + Stride", "UB = UB + Stride".
    EmitIgnoredExpr(S.getNextLowerBound());
    EmitIgnoredExpr(S.getNextUpperBound());
  }

  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());

  // Tell the runtime we are done.
  if (!DynamicOrOrdered)
    RT.emitForStaticFinish(*this, S.getLocEnd());
}

/// \brief Emit a helper variable and return corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
                               const DeclRefExpr *Helper) {
  auto VDecl = cast<VarDecl>(Helper->getDecl());
  CGF.EmitVarDecl(*VDecl);
  return CGF.EmitLValue(Helper);
}

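/// \brief Detect the schedule kind and chunk size of a loop directive,
/// emitting the helper chunk variable when the clause is processed for the
/// enclosing (outer) region.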
static std::pair<llvm::Value * /*Chunk*/, OpenMPScheduleClauseKind>
emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
                   bool OuterRegion) {
  // Detect the loop schedule kind and chunk.
  auto ScheduleKind = OMPC_SCHEDULE_unknown;
  llvm::Value *Chunk = nullptr;
  if (auto *C =
          cast_or_null<OMPScheduleClause>(S.getSingleClause(OMPC_schedule))) {
    ScheduleKind = C->getScheduleKind();
    if (const auto *Ch = C->getChunkSize()) {
      if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
        if (OuterRegion) {
          const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl());
          CGF.EmitVarDecl(*ImpVar);
          CGF.EmitStoreThroughLValue(
              CGF.EmitAnyExpr(Ch),
              CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
                                             ImpVar->getType()));
        } else {
          Ch = ImpRef;
        }
      }
      if (!C->getHelperChunkSize() || !OuterRegion) {
        Chunk = CGF.EmitScalarExpr(Ch);
        Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(),
                                         S.getIterationVariable()->getType());
      }
    }
  }
  return std::make_pair(Chunk, ScheduleKind);
}

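/// \brief Emit a worksharing loop (used for the 'for' and 'parallel for'
/// directives); returns true if the directive has a 'lastprivate' clause.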
bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
  // Emit the loop iteration variable.
  auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate iterations count on each
  // iteration (e.g., it is foldable into a constant).
  if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  auto &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause;
  // Check pre-condition.
  {
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return false;
    } else {
      auto *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }
    // Emit 'then' code.
    {
      // Emit helper vars inits.
      LValue LB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
      LValue UB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races on
        // initialization of firstprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
                                               OMPD_unknown);
      }
      EmitOMPPrivateClause(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPReductionClauseInit(S, LoopScope);
      EmitPrivateLoopCounters(*this, LoopScope, S.counters());
      (void)LoopScope.Privatize();

      // Detect the loop schedule kind and chunk.
      llvm::Value *Chunk;
      OpenMPScheduleClauseKind ScheduleKind;
      auto ScheduleInfo =
          emitScheduleClause(*this, S, /*OuterRegion=*/false);
      Chunk = ScheduleInfo.first;
      ScheduleKind = ScheduleInfo.second;
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
      const bool Ordered = S.getSingleClause(OMPC_ordered) != nullptr;
      if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr) &&
          !Ordered) {
        // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
        // When no chunk_size is specified, the iteration space is divided into
        // chunks that are approximately equal in size, and at most one chunk is
        // distributed to each thread. Note that the size of the chunks is
        // unspecified in this case.
        RT.emitForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
                       Ordered, IL.getAddress(), LB.getAddress(),
                       UB.getAddress(), ST.getAddress());
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(S.getInit());
        // while (idx <= UB) { BODY; ++idx; }
        EmitOMPInnerLoop(S, LoopScope.requiresCleanups(),
                         S.getCond(/*SeparateIter=*/false), S.getInc(),
                         [&S](CodeGenFunction &CGF) {
                           CGF.EmitOMPLoopBody(S);
                           CGF.EmitStopPoint(&S);
                         },
                         [](CodeGenFunction &) {});
        // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getLocStart());
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        EmitOMPForOuterLoop(ScheduleKind, S, LoopScope, Ordered,
                            LB.getAddress(), UB.getAddress(), ST.getAddress(),
                            IL.getAddress(), Chunk);
      }
      EmitOMPReductionClauseFinal(S);
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause)
        EmitOMPLastprivateClauseFinal(
            S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
    }
    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
  return HasLastprivateClause;
}

void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
  }
}

void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &) {
  llvm_unreachable("CodeGen for 'omp for simd' is not supported yet.");
}

static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
                                const Twine &Name,
                                llvm::Value *Init = nullptr) {
  auto LVal = CGF.MakeNaturalAlignAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
  if (Init)
    CGF.EmitScalarInit(Init, LVal);
  return LVal;
}

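/// \brief Emit code for 'sections'-based directives: a static non-chunked
/// loop whose body switches over the section statements, or a single region
/// when there is only one section; returns the kind of directive that was
/// actually emitted.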
static OpenMPDirectiveKind emitSections(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S) {
  auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
  auto *CS = dyn_cast<CompoundStmt>(Stmt);
  if (CS && CS->size() > 1) {
    bool HasLastprivates = false;
    auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) {
      auto &C = CGF.CGM.getContext();
      auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
      // Emit helper vars inits.
      LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
                                    CGF.Builder.getInt32(0));
      auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1);
      LValue UB =
          createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
      LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
                                    CGF.Builder.getInt32(1));
      LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
                                    CGF.Builder.getInt32(0));
      // Loop counter.
      LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
      OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
      OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
      // Generate condition for loop.
      BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
                          OK_Ordinary, S.getLocStart(),
                          /*fpContractable=*/false);
      // Increment for loop counter.
      UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue,
                        OK_Ordinary, S.getLocStart());
      auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) {
        // Iterate through all sections and emit a switch construct:
        // switch (IV) {
        //   case 0:
        //     <SectionStmt[0]>;
        //     break;
        // ...
        //   case <NumSection> - 1:
        //     <SectionStmt[<NumSection> - 1]>;
        //     break;
        // }
        // .omp.sections.exit:
        auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
        auto *SwitchStmt = CGF.Builder.CreateSwitch(
            CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
            CS->size());
        unsigned CaseNumber = 0;
        for (auto C = CS->children(); C; ++C, ++CaseNumber) {
          auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
          CGF.EmitBlock(CaseBB);
          SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
          CGF.EmitStmt(*C);
          CGF.EmitBranch(ExitBB);
        }
        CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
      };

      CodeGenFunction::OMPPrivateScope LoopScope(CGF);
      if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races on
        // initialization of firstprivate variables.
        CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                   OMPD_unknown);
      }
      CGF.EmitOMPPrivateClause(S, LoopScope);
      HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
      CGF.EmitOMPReductionClauseInit(S, LoopScope);
      (void)LoopScope.Privatize();

      // Emit static non-chunked loop.
      CGF.CGM.getOpenMPRuntime().emitForInit(
          CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
          /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
          LB.getAddress(), UB.getAddress(), ST.getAddress());
      // UB = min(UB, GlobalUB);
      auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
      auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
          CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
      CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
      // IV = LB;
      CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
      // while (idx <= UB) { BODY; ++idx; }
      CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
                           [](CodeGenFunction &) {});
      // Tell the runtime we are done.
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
      CGF.EmitOMPReductionClauseFinal(S);

      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivates)
        CGF.EmitOMPLastprivateClauseFinal(
            S, CGF.Builder.CreateIsNotNull(
                   CGF.EmitLoadOfScalar(IL, S.getLocStart())));
    };

    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, CodeGen);
    // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
    // clause. Otherwise the barrier will be generated by the codegen for the
    // directive.
    if (HasLastprivates && S.getSingleClause(OMPC_nowait)) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                 OMPD_unknown);
    }
    return OMPD_sections;
  }
  // If only one section is found - no need to generate loop, emit as a single
  // region.
  bool HasFirstprivates;
  // No need to generate reductions for sections with single section region, we
  // can use original shared variables for all operations.
  bool HasReductions = !S.getClausesOfKind(OMPC_reduction).empty();
  // No need to generate lastprivates for sections with single section region,
  // we can use original shared variable for all calculations with barrier at
  // the end of the sections.
  bool HasLastprivates = !S.getClausesOfKind(OMPC_lastprivate).empty();
  auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) {
    CodeGenFunction::OMPPrivateScope SingleScope(CGF);
    HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();

    CGF.EmitStmt(Stmt);
    CGF.EnsureInsertPoint();
  };
  CGF.CGM.getOpenMPRuntime().emitSingleRegion(CGF, CodeGen, S.getLocStart(),
                                              llvm::None, llvm::None,
                                              llvm::None, llvm::None);
  // Emit barrier for firstprivates, lastprivates or reductions only if
  // 'sections' directive has 'nowait' clause. Otherwise the barrier will be
  // generated by the codegen for the directive.
  if ((HasFirstprivates || HasLastprivates || HasReductions) &&
      S.getSingleClause(OMPC_nowait)) {
    // Emit implicit barrier to synchronize threads and avoid data races on
    // initialization of firstprivate variables.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_unknown);
  }
  return OMPD_single;
}

void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  OpenMPDirectiveKind EmittedAs = emitSections(*this, S);
  // Emit an implicit barrier at the end.
  if (!S.getSingleClause(OMPC_nowait)) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs);
  }
}

void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);
}

void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
  llvm::SmallVector<const Expr *, 8> DestExprs;
  llvm::SmallVector<const Expr *, 8> SrcExprs;
  llvm::SmallVector<const Expr *, 8> AssignmentOps;
  // Check if there are any 'copyprivate' clauses associated with this
  // 'single' construct.
  // Build a list of copyprivate variables along with helper expressions
  // (<source>, <destination>, <destination>=<source> expressions)
  for (auto &&I = S.getClausesOfKind(OMPC_copyprivate); I; ++I) {
    auto *C = cast<OMPCopyprivateClause>(*I);
    CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
    DestExprs.append(C->destination_exprs().begin(),
                     C->destination_exprs().end());
    SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
    AssignmentOps.append(C->assignment_ops().begin(),
                         C->assignment_ops().end());
  }
  LexicalScope Scope(*this, S.getSourceRange());
  // Emit code for 'single' region along with 'copyprivate' clauses.
  bool HasFirstprivates;
  auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) {
    CodeGenFunction::OMPPrivateScope SingleScope(CGF);
    HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();

    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
                                          CopyprivateVars, DestExprs, SrcExprs,
                                          AssignmentOps);
  // Emit an implicit barrier at the end (to avoid data race on firstprivate
  // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
  if ((!S.getSingleClause(OMPC_nowait) || HasFirstprivates) &&
      CopyprivateVars.empty()) {
    CGM.getOpenMPRuntime().emitBarrierCall(
        *this, S.getLocStart(),
        S.getSingleClause(OMPC_nowait) ? OMPD_unknown : OMPD_single);
  }
}

void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
}

void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitCriticalRegion(
      *this, S.getDirectiveName().getAsString(), CodeGen, S.getLocStart());
}

void CodeGenFunction::EmitOMPParallelForDirective(
    const OMPParallelForDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  LexicalScope Scope(*this, S.getSourceRange());
  (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitOMPWorksharingLoop(S);
    // Emit implicit barrier at the end of parallel region, but this barrier
    // is at the end of 'for' directive, so emit it as the implicit barrier for
    // this 'for' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_for);
  };
  emitCommonOMPParallelDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPParallelForSimdDirective(
    const OMPParallelForSimdDirective &) {
  llvm_unreachable("CodeGen for 'omp parallel for simd' is not supported yet.");
}

void CodeGenFunction::EmitOMPParallelSectionsDirective(
    const OMPParallelSectionsDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'sections' directive.
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    (void)emitSections(CGF, S);
    // Emit implicit barrier at the end of parallel region.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_unknown);
  };
  emitCommonOMPParallelDirective(*this, S, CodeGen);
}

1428 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
1429 // Emit outlined function for task construct.
1430 LexicalScope Scope(*this, S.getSourceRange());
1431 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
1432 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
1433 auto *I = CS->getCapturedDecl()->param_begin();
1434 auto *PartId = std::next(I);
1435 // The first function argument for tasks is a thread id, the second one is a
1436 // part id (0 for tied tasks, >=0 for untied task).
1437 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
1438 // Get list of private variables.
1439 llvm::SmallVector<const Expr *, 8> PrivateVars;
1440 llvm::SmallVector<const Expr *, 8> PrivateCopies;
1441 for (auto &&I = S.getClausesOfKind(OMPC_private); I; ++I) {
1442 auto *C = cast<OMPPrivateClause>(*I);
1443 auto IRef = C->varlist_begin();
1444 for (auto *IInit : C->private_copies()) {
1445 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
1446 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
1447 PrivateVars.push_back(*IRef);
1448 PrivateCopies.push_back(IInit);
1453 EmittedAsPrivate.clear();
1454 // Get list of firstprivate variables.
1455 llvm::SmallVector<const Expr *, 8> FirstprivateVars;
1456 llvm::SmallVector<const Expr *, 8> FirstprivateCopies;
1457 llvm::SmallVector<const Expr *, 8> FirstprivateInits;
1458 for (auto &&I = S.getClausesOfKind(OMPC_firstprivate); I; ++I) {
1459 auto *C = cast<OMPFirstprivateClause>(*I);
1460 auto IRef = C->varlist_begin();
1461 auto IElemInitRef = C->inits().begin();
1462 for (auto *IInit : C->private_copies()) {
1463 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
1464 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
1465 FirstprivateVars.push_back(*IRef);
1466 FirstprivateCopies.push_back(IInit);
1467 FirstprivateInits.push_back(*IElemInitRef);
1469 ++IRef, ++IElemInitRef;
1472 auto &&CodeGen = [PartId, &S, &PrivateVars, &FirstprivateVars](
1473 CodeGenFunction &CGF) {
1474 // Set proper addresses for generated private copies.
1475 auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
1476 OMPPrivateScope Scope(CGF);
1477 if (!PrivateVars.empty() || !FirstprivateVars.empty()) {
1478 auto *CopyFn = CGF.Builder.CreateAlignedLoad(
1479 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)),
1480 CGF.PointerAlignInBytes);
1481 auto *PrivatesPtr = CGF.Builder.CreateAlignedLoad(
1482 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)),
1483 CGF.PointerAlignInBytes);
1485 llvm::SmallVector<std::pair<const VarDecl *, llvm::Value *>, 16>
1487 llvm::SmallVector<llvm::Value *, 16> CallArgs;
1488 CallArgs.push_back(PrivatesPtr);
1489 for (auto *E : PrivateVars) {
1490 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1492 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
1493 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
1494 CallArgs.push_back(PrivatePtr);
1496 for (auto *E : FirstprivateVars) {
1497 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1499 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
1500 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
1501 CallArgs.push_back(PrivatePtr);
1503 CGF.EmitRuntimeCall(CopyFn, CallArgs);
1504 for (auto &&Pair : PrivatePtrs) {
1506 CGF.Builder.CreateAlignedLoad(Pair.second, CGF.PointerAlignInBytes);
1507 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
1510 (void)Scope.Privatize();
1512 // TODO: emit code for untied tasks.
1514 CGF.EmitStmt(CS->getCapturedStmt());
1517 CGM.getOpenMPRuntime().emitTaskOutlinedFunction(S, *I, CodeGen);
1518 // Check if we should emit tied or untied task.
1519 bool Tied = !S.getSingleClause(OMPC_untied);
1520 // Check if the task is final
1521 llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
1522 if (auto *Clause = S.getSingleClause(OMPC_final)) {
1523 // If the condition constant folds and can be elided, try to avoid emitting
1524 // the condition and the dead arm of the if/else.
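// For example (illustrative, not in the original source): 'final(1)' folds
// to a constant, so only Final.setInt(true) is recorded and no branch is
// emitted, while 'final(n > 10)' stores a pointer to the evaluated boolean.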
1525 auto *Cond = cast<OMPFinalClause>(Clause)->getCondition();
1526 bool CondConstant;
1527 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
1528 Final.setInt(CondConstant);
1529 else
1530 Final.setPointer(EvaluateExprAsBool(Cond));
1531 } else {
1532 // By default the task is not final.
1533 Final.setInt(/*IntVal=*/false);
1534 }
1535 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
1536 const Expr *IfCond = nullptr;
1537 if (auto C = S.getSingleClause(OMPC_if)) {
1538 IfCond = cast<OMPIfClause>(C)->getCondition();
1539 }
1540 CGM.getOpenMPRuntime().emitTaskCall(
1541 *this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy,
1542 CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars,
1543 FirstprivateCopies, FirstprivateInits);
1544 }
1546 void CodeGenFunction::EmitOMPTaskyieldDirective(
1547 const OMPTaskyieldDirective &S) {
1548 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
1549 }
1551 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
1552 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
1553 }
1555 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
1556 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
1557 }
1559 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
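// Illustrative example (not part of the original source): for
//   #pragma omp flush(a, b)
// the lambda below returns the list <a, b>, while a bare
//   #pragma omp flush
// yields an empty list; either way the runtime flush emission is invoked.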
1560 CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
1561 if (auto C = S.getSingleClause(/*K*/ OMPC_flush)) {
1562 auto FlushClause = cast<OMPFlushClause>(C);
1563 return llvm::makeArrayRef(FlushClause->varlist_begin(),
1564 FlushClause->varlist_end());
1565 }
1566 return llvm::None;
1567 }(), S.getLocStart());
1568 }
1570 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
1571 LexicalScope Scope(*this, S.getSourceRange());
1572 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1573 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1574 CGF.EnsureInsertPoint();
1575 };
1576 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart());
1577 }
1579 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
1580 QualType SrcType, QualType DestType) {
1581 assert(CGF.hasScalarEvaluationKind(DestType) &&
1582 "DestType must have scalar evaluation kind.");
1583 assert(!Val.isAggregate() && "Must be a scalar or complex.");
1584 return Val.isScalar()
1585 ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType)
1586 : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
1587 DestType);
1588 }
1590 static CodeGenFunction::ComplexPairTy
1591 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
1592 QualType DestType) {
1593 assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
1594 "DestType must have complex evaluation kind.");
1595 CodeGenFunction::ComplexPairTy ComplexVal;
1596 if (Val.isScalar()) {
1597 // Convert the input element to the element type of the complex.
1598 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
1599 auto ScalarVal =
1600 CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestElementType);
1601 ComplexVal = CodeGenFunction::ComplexPairTy(
1602 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
1603 } else {
1604 assert(Val.isComplex() && "Must be a scalar or complex.");
1605 auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
1606 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
1607 ComplexVal.first = CGF.EmitScalarConversion(
1608 Val.getComplexVal().first, SrcElementType, DestElementType);
1609 ComplexVal.second = CGF.EmitScalarConversion(
1610 Val.getComplexVal().second, SrcElementType, DestElementType);
1611 }
1612 return ComplexVal;
1613 }
1615 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
1616 LValue LVal, RValue RVal) {
1617 if (LVal.isGlobalReg()) {
1618 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
1619 } else {
1620 CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent
1621 : llvm::Monotonic,
1622 LVal.isVolatile(), /*IsInit=*/false);
1623 }
1624 }
1626 static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal,
1627 QualType RValTy) {
1628 switch (CGF.getEvaluationKind(LVal.getType())) {
1629 case TEK_Scalar:
1630 CGF.EmitStoreThroughLValue(
1631 RValue::get(convertToScalarValue(CGF, RVal, RValTy, LVal.getType())),
1632 LVal);
1633 break;
1634 case TEK_Complex:
1635 CGF.EmitStoreOfComplex(
1636 convertToComplexValue(CGF, RVal, RValTy, LVal.getType()), LVal,
1637 /*isInit=*/false);
1638 break;
1639 case TEK_Aggregate:
1640 llvm_unreachable("Must be a scalar or complex.");
1641 }
1642 }
1644 static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
1645 const Expr *X, const Expr *V,
1646 SourceLocation Loc) {
1647 // v = x;
1648 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
1649 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
1650 LValue XLValue = CGF.EmitLValue(X);
1651 LValue VLValue = CGF.EmitLValue(V);
1652 RValue Res = XLValue.isGlobalReg()
1653 ? CGF.EmitLoadOfLValue(XLValue, Loc)
1654 : CGF.EmitAtomicLoad(XLValue, Loc,
1655 IsSeqCst ? llvm::SequentiallyConsistent
1656 : llvm::Monotonic,
1657 XLValue.isVolatile());
1658 // OpenMP, 2.12.6, atomic Construct
1659 // Any atomic construct with a seq_cst clause forces the atomically
1660 // performed operation to include an implicit flush operation without a
1661 // list.
1662 if (IsSeqCst)
1663 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
1664 emitSimpleStore(CGF, VLValue, Res, X->getType().getNonReferenceType());
1665 }
1667 static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
1668 const Expr *X, const Expr *E,
1669 SourceLocation Loc) {
1670 // x = expr;
1671 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
1672 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
1673 // OpenMP, 2.12.6, atomic Construct
1674 // Any atomic construct with a seq_cst clause forces the atomically
1675 // performed operation to include an implicit flush operation without a
1676 // list.
1677 if (IsSeqCst)
1678 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
1679 }
1681 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
1682 RValue Update,
1683 BinaryOperatorKind BO,
1684 llvm::AtomicOrdering AO,
1685 bool IsXLHSInRHSPart) {
1686 auto &Context = CGF.CGM.getContext();
1687 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
1688 // expression is simple and atomic is allowed for the given type for the
1689 // target platform.
1690 if (BO == BO_Comma || !Update.isScalar() ||
1691 !Update.getScalarVal()->getType()->isIntegerTy() ||
1692 !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
1693 (Update.getScalarVal()->getType() !=
1694 X.getAddress()->getType()->getPointerElementType())) ||
1695 !X.getAddress()->getType()->getPointerElementType()->isIntegerTy() ||
1696 !Context.getTargetInfo().hasBuiltinAtomic(
1697 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
1698 return std::make_pair(false, RValue::get(nullptr));
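// For example (illustrative, not in the original source): 'x += 1' where
// 'x' is a plain 'int' passes all of the checks above and is emitted as a
// single 'atomicrmw add'; a 'float' or bitfield 'x' fails them, so false is
// returned and the caller uses the generic update path instead.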
1700 llvm::AtomicRMWInst::BinOp RMWOp;
1701 switch (BO) {
1702 case BO_Add:
1703 RMWOp = llvm::AtomicRMWInst::Add;
1704 break;
1705 case BO_Sub:
1706 if (!IsXLHSInRHSPart)
1707 return std::make_pair(false, RValue::get(nullptr));
1708 RMWOp = llvm::AtomicRMWInst::Sub;
1709 break;
1710 case BO_And:
1711 RMWOp = llvm::AtomicRMWInst::And;
1712 break;
1713 case BO_Or:
1714 RMWOp = llvm::AtomicRMWInst::Or;
1715 break;
1716 case BO_Xor:
1717 RMWOp = llvm::AtomicRMWInst::Xor;
1718 break;
1719 case BO_LT:
1720 RMWOp = X.getType()->hasSignedIntegerRepresentation()
1721 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
1722 : llvm::AtomicRMWInst::Max)
1723 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
1724 : llvm::AtomicRMWInst::UMax);
1725 break;
1726 case BO_GT:
1727 RMWOp = X.getType()->hasSignedIntegerRepresentation()
1728 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
1729 : llvm::AtomicRMWInst::Min)
1730 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
1731 : llvm::AtomicRMWInst::UMin);
1732 break;
1733 case BO_Assign:
1734 RMWOp = llvm::AtomicRMWInst::Xchg;
1743 return std::make_pair(false, RValue::get(nullptr));
1761 llvm_unreachable("Unsupported atomic update operation");
1762 }
1763 auto *UpdateVal = Update.getScalarVal();
1764 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
1765 UpdateVal = CGF.Builder.CreateIntCast(
1766 IC, X.getAddress()->getType()->getPointerElementType(),
1767 X.getType()->hasSignedIntegerRepresentation());
1768 }
1769 auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
1770 return std::make_pair(true, RValue::get(Res));
1771 }
1773 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
1774 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
1775 llvm::AtomicOrdering AO, SourceLocation Loc,
1776 const llvm::function_ref<RValue(RValue)> &CommonGen) {
1777 // Update expressions are allowed to have the following forms:
1778 // x binop= expr; -> xrval binop expr;
1779 // x++, ++x -> xrval + 1;
1780 // x--, --x -> xrval - 1;
1781 // x = x binop expr; -> xrval binop expr
1782 // x = expr Op x; -> expr binop xrval;
1783 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
1784 if (!Res.first) {
1785 if (X.isGlobalReg()) {
1786 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
1787 // 'xrval'.
1788 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
1789 } else {
1790 // Perform compare-and-swap procedure.
1791 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
1792 }
1793 }
1794 return Res;
1795 }
1797 static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
1798 const Expr *X, const Expr *E,
1799 const Expr *UE, bool IsXLHSInRHSPart,
1800 SourceLocation Loc) {
1801 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
1802 "Update expr in 'atomic update' must be a binary operator.");
1803 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
1804 // Update expressions are allowed to have the following forms:
1805 // x binop= expr; -> xrval binop expr;
1806 // x++, ++x -> xrval + 1;
1807 // x--, --x -> xrval - 1;
1808 // x = x binop expr; -> xrval binop expr
1809 // x = expr Op x; -> expr binop xrval;
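// Illustrative example (not part of the original source): for
//   #pragma omp atomic update
//     x = n - x;
// 'x' appears only on the right-hand side of the binary operator, so
// IsXLHSInRHSPart is false; a subtraction in that form cannot be lowered to
// 'atomicrmw sub' and a compare-and-swap update is generated instead.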
1810 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
1811 LValue XLValue = CGF.EmitLValue(X);
1812 RValue ExprRValue = CGF.EmitAnyExpr(E);
1813 auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
1814 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
1815 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
1816 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
1817 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
1818 auto &&Gen =
1819 [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
1820 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
1821 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
1822 return CGF.EmitAnyExpr(UE);
1823 };
1824 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
1825 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
1826 // OpenMP, 2.12.6, atomic Construct
1827 // Any atomic construct with a seq_cst clause forces the atomically
1828 // performed operation to include an implicit flush operation without a
1829 // list.
1830 if (IsSeqCst)
1831 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
1832 }
1834 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
1835 QualType SourceType, QualType ResType) {
1836 switch (CGF.getEvaluationKind(ResType)) {
1837 case TEK_Scalar:
1838 return RValue::get(convertToScalarValue(CGF, Value, SourceType, ResType));
1839 case TEK_Complex: {
1840 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType);
1841 return RValue::getComplex(Res.first, Res.second);
1842 }
1843 case TEK_Aggregate:
1844 break;
1845 }
1846 llvm_unreachable("Must be a scalar or complex.");
1847 }
1849 static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
1850 bool IsPostfixUpdate, const Expr *V,
1851 const Expr *X, const Expr *E,
1852 const Expr *UE, bool IsXLHSInRHSPart,
1853 SourceLocation Loc) {
1854 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
1855 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
1856 RValue NewVVal;
1857 LValue VLValue = CGF.EmitLValue(V);
1858 LValue XLValue = CGF.EmitLValue(X);
1859 RValue ExprRValue = CGF.EmitAnyExpr(E);
1860 auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
1861 QualType NewVValType;
1862 if (UE) {
1863 // 'x' is updated with some additional value.
1864 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
1865 "Update expr in 'atomic capture' must be a binary operator.");
1866 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
1867 // Update expressions are allowed to have the following forms:
1868 // x binop= expr; -> xrval binop expr;
1869 // x++, ++x -> xrval + 1;
1870 // x--, --x -> xrval - 1;
1871 // x = x binop expr; -> xrval binop expr
1872 // x = expr Op x; -> expr binop xrval;
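// Illustrative example (not part of the original source): for
//   #pragma omp atomic capture
//     v = x++;
// IsPostfixUpdate is true, so 'v' receives the value 'x' had before the
// update, whereas 'v = ++x;' would store the updated value.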
1873 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
1874 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
1875 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
1876 NewVValType = XRValExpr->getType();
1877 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
1878 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
1879 IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
1880 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
1881 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
1882 RValue Res = CGF.EmitAnyExpr(UE);
1883 NewVVal = IsPostfixUpdate ? XRValue : Res;
1884 return Res;
1885 };
1886 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
1887 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
1888 if (Res.first) {
1889 // 'atomicrmw' instruction was generated.
1890 if (IsPostfixUpdate) {
1891 // Use old value from 'atomicrmw'.
1892 NewVVal = Res.second;
1893 } else {
1894 // 'atomicrmw' does not provide new value, so evaluate it using old
1895 // value of 'x'.
1896 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
1897 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
1898 NewVVal = CGF.EmitAnyExpr(UE);
1899 }
1900 }
1901 } else {
1902 // 'x' is simply rewritten with some 'expr'.
1903 NewVValType = X->getType().getNonReferenceType();
1904 ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
1905 X->getType().getNonReferenceType());
1906 auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
1907 NewVVal = XRValue;
1908 return ExprRValue;
1909 };
1910 // Try to perform atomicrmw xchg, otherwise simple exchange.
1911 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
1912 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
1913 Loc, Gen);
1914 if (Res.first) {
1915 // 'atomicrmw' instruction was generated.
1916 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
1917 }
1918 }
1919 // Emit post-update store to 'v' of old/new 'x' value.
1920 emitSimpleStore(CGF, VLValue, NewVVal, NewVValType);
1921 // OpenMP, 2.12.6, atomic Construct
1922 // Any atomic construct with a seq_cst clause forces the atomically
1923 // performed operation to include an implicit flush operation without a
1924 // list.
1925 if (IsSeqCst)
1926 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
1927 }
1929 static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
1930 bool IsSeqCst, bool IsPostfixUpdate,
1931 const Expr *X, const Expr *V, const Expr *E,
1932 const Expr *UE, bool IsXLHSInRHSPart,
1933 SourceLocation Loc) {
1934 switch (Kind) {
1935 case OMPC_read:
1936 EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
1937 break;
1938 case OMPC_write:
1939 EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
1940 break;
1941 case OMPC_unknown:
1942 case OMPC_update:
1943 EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
1944 break;
1945 case OMPC_capture:
1946 EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
1947 IsXLHSInRHSPart, Loc);
1948 break;
1951 case OMPC_num_threads:
1953 case OMPC_firstprivate:
1954 case OMPC_lastprivate:
1955 case OMPC_reduction:
1964 case OMPC_copyprivate:
1966 case OMPC_proc_bind:
1971 case OMPC_threadprivate:
1972 case OMPC_mergeable:
1973 llvm_unreachable("Clause is not allowed in 'omp atomic'.");
1974 }
1975 }
1977 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
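// Illustrative note (not part of the original source): for
//   #pragma omp atomic capture seq_cst
// IsSeqCst is true and Kind becomes OMPC_capture (the first clause that is
// not seq_cst); with no clause at all Kind stays OMPC_unknown and the
// construct is handled as an update.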
1978 bool IsSeqCst = S.getSingleClause(/*K=*/OMPC_seq_cst);
1979 OpenMPClauseKind Kind = OMPC_unknown;
1980 for (auto *C : S.clauses()) {
1981 // Find first clause (skip seq_cst clause, if it is first).
1982 if (C->getClauseKind() != OMPC_seq_cst) {
1983 Kind = C->getClauseKind();
1984 break;
1985 }
1986 }
1988 const auto *CS =
1989 S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
1990 if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
1991 enterFullExpression(EWC);
1992 }
1993 // Processing for statements under 'atomic capture'.
1994 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
1995 for (const auto *C : Compound->body()) {
1996 if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
1997 enterFullExpression(EWC);
1998 }
1999 }
2000 }
2002 LexicalScope Scope(*this, S.getSourceRange());
2003 auto &&CodeGen = [&S, Kind, IsSeqCst](CodeGenFunction &CGF) {
2004 EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
2005 S.getV(), S.getExpr(), S.getUpdateExpr(),
2006 S.isXLHSInRHSPart(), S.getLocStart());
2007 };
2008 CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);
2009 }
2011 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) {
2012 llvm_unreachable("CodeGen for 'omp target' is not supported yet.");
2013 }
2015 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
2016 llvm_unreachable("CodeGen for 'omp teams' is not supported yet.");
2017 }