//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
14 #include "CGOpenMPRuntime.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/Stmt.h"
19 #include "clang/AST/StmtOpenMP.h"
20 using namespace clang;
21 using namespace CodeGen;
23 void CodeGenFunction::GenerateOpenMPCapturedVars(
24 const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
25 const RecordDecl *RD = S.getCapturedRecordDecl();
26 auto CurField = RD->field_begin();
27 auto CurCap = S.captures().begin();
28 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
29 E = S.capture_init_end();
30 I != E; ++I, ++CurField, ++CurCap) {
31 if (CurField->hasCapturedVLAType()) {
32 auto VAT = CurField->getCapturedVLAType();
33 auto *Val = VLASizeMap[VAT->getSizeExpr()];
34 CapturedVars.push_back(Val);
35 } else if (CurCap->capturesThis())
36 CapturedVars.push_back(CXXThisValue);
37 else if (CurCap->capturesVariableByCopy())
38 CapturedVars.push_back(
39 EmitLoadOfLValue(EmitLValue(*I), SourceLocation()).getScalarVal());
    else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
    }
  }
}
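
// Illustrative sketch (not part of the original source): for a region such as
//
//   int n; double *p;
//   #pragma omp parallel shared(p) firstprivate(n)
//
// the vector built above would roughly contain the loaded value of 'n'
// (capture by copy), the address of 'p' (capture by reference) and, for any
// VLA captured in the region, its size value taken from VLASizeMap.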
47 static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
48 StringRef Name, LValue AddrLV,
49 bool isReferenceType = false) {
50 ASTContext &Ctx = CGF.getContext();
52 auto *CastedPtr = CGF.EmitScalarConversion(
53 AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
54 Ctx.getPointerType(DstType), SourceLocation());
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress();
59 // If we are dealing with references we need to return the address of the
60 // reference instead of the reference of the value.
61 if (isReferenceType) {
62 QualType RefType = Ctx.getLValueReferenceType(DstType);
63 auto *RefVal = TmpAddr.getPointer();
64 TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
65 auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
    CGF.EmitScalarInit(RefVal, TmpLVal);
  }

  return TmpAddr;
}
llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
77 const CapturedDecl *CD = S.getCapturedDecl();
78 const RecordDecl *RD = S.getCapturedRecordDecl();
79 assert(CD->hasBody() && "missing CapturedDecl body");
81 // Build the argument list.
82 ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
86 auto I = S.captures().begin();
87 for (auto *FD : RD->fields()) {
88 QualType ArgType = FD->getType();
89 IdentifierInfo *II = nullptr;
90 VarDecl *CapVar = nullptr;
92 // If this is a capture by copy and the type is not a pointer, the outlined
93 // function argument type should be uintptr and the value properly casted to
94 // uintptr. This is necessary given that the runtime library is only able to
95 // deal with pointers. We can pass in the same way the VLA type sizes to the
97 if ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
98 I->capturesVariableArrayType())
99 ArgType = Ctx.getUIntPtrType();
101 if (I->capturesVariable() || I->capturesVariableByCopy()) {
102 CapVar = I->getCapturedVar();
103 II = CapVar->getIdentifier();
104 } else if (I->capturesThis())
105 II = &getContext().Idents.get("this");
    else {
      assert(I->capturesVariableArrayType());
      II = &getContext().Idents.get("vla");
    }
110 if (ArgType->isVariablyModifiedType())
111 ArgType = getContext().getVariableArrayDecayedType(ArgType);
112 Args.push_back(ImplicitParamDecl::Create(getContext(), nullptr,
113 FD->getLocation(), II, ArgType));
    ++I;
  }
  Args.append(std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
              CD->param_end());
120 // Create the function declaration.
121 FunctionType::ExtInfo ExtInfo;
122 const CGFunctionInfo &FuncInfo =
123 CGM.getTypes().arrangeFreeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
124 /*IsVariadic=*/false);
125 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
127 llvm::Function *F = llvm::Function::Create(
128 FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
129 CapturedStmtInfo->getHelperName(), &CGM.getModule());
130 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);
134 // Generate the function.
135 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
136 CD->getBody()->getLocStart());
137 unsigned Cnt = CD->getContextParamPosition();
138 I = S.captures().begin();
139 for (auto *FD : RD->fields()) {
140 // If we are capturing a pointer by copy we don't need to do anything, just
141 // use the value that we get from the arguments.
142 if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      setAddrOfLocalVar(I->getCapturedVar(), GetAddrOfLocalVar(Args[Cnt]));
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal =
        MakeAddrLValue(GetAddrOfLocalVar(Args[Cnt]), Args[Cnt]->getType(),
                       AlignmentSource::Decl);
151 if (FD->hasCapturedVLAType()) {
152 LValue CastedArgLVal =
153 MakeAddrLValue(castValueFromUintptr(*this, FD->getType(),
154 Args[Cnt]->getName(), ArgLVal),
155 FD->getType(), AlignmentSource::Decl);
      auto *ExprArg =
          EmitLoadOfLValue(CastedArgLVal, SourceLocation()).getScalarVal();
158 auto VAT = FD->getCapturedVLAType();
159 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
160 } else if (I->capturesVariable()) {
161 auto *Var = I->getCapturedVar();
162 QualType VarTy = Var->getType();
163 Address ArgAddr = ArgLVal.getAddress();
164 if (!VarTy->isReferenceType()) {
165 ArgAddr = EmitLoadOfReference(
166 ArgAddr, ArgLVal.getType()->castAs<ReferenceType>());
      }
      setAddrOfLocalVar(
          Var, Address(ArgAddr.getPointer(), getContext().getDeclAlign(Var)));
170 } else if (I->capturesVariableByCopy()) {
171 assert(!FD->getType()->isAnyPointerType() &&
172 "Not expecting a captured pointer.");
173 auto *Var = I->getCapturedVar();
174 QualType VarTy = Var->getType();
175 setAddrOfLocalVar(I->getCapturedVar(),
176 castValueFromUintptr(*this, FD->getType(),
177 Args[Cnt]->getName(), ArgLVal,
178 VarTy->isReferenceType()));
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue =
          EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation()).getScalarVal();
    }
    ++Cnt;
    ++I;
  }
188 PGO.assignRegionCounters(GlobalDecl(CD), F);
189 CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}
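
// Illustrative sketch (assumption, not from the original source): for a
// parallel region the helper emitted above has roughly the shape
//
//   void .omp_outlined.(kmp_int32 *gtid, kmp_int32 *btid, <captures...>)
//
// where by-copy scalars travel as uintptr-sized arguments and are converted
// back to their declared types via castValueFromUintptr before the body runs.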
195 //===----------------------------------------------------------------------===//
196 // OpenMP Directive Emission
197 //===----------------------------------------------------------------------===//
198 void CodeGenFunction::EmitOMPAggregateAssign(
199 Address DestAddr, Address SrcAddr, QualType OriginalType,
200 const llvm::function_ref<void(Address, Address)> &CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;
204 // Drill down to the base element type on both arrays.
205 auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
206 auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
207 SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
209 auto SrcBegin = SrcAddr.getPointer();
210 auto DestBegin = DestAddr.getPointer();
211 // Cast from pointer to array type to pointer to single element.
212 auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
213 // The basic structure here is a while-do loop.
214 auto BodyBB = createBasicBlock("omp.arraycpy.body");
215 auto DoneBB = createBasicBlock("omp.arraycpy.done");
  auto IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
218 Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
220 // Enter the loop body, making that address the current address.
  auto EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);
224 CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
226 llvm::PHINode *SrcElementPHI =
227 Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
228 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
229 Address SrcElementCurrent =
230 Address(SrcElementPHI,
231 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
233 llvm::PHINode *DestElementPHI =
234 Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
235 DestElementPHI->addIncoming(DestBegin, EntryBB);
236 Address DestElementCurrent =
237 Address(DestElementPHI,
238 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
241 CopyGen(DestElementCurrent, SrcElementCurrent);
243 // Shift the address forward by one element.
244 auto DestElementNext = Builder.CreateConstGEP1_32(
245 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
246 auto SrcElementNext = Builder.CreateConstGEP1_32(
247 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
248 // Check whether we've reached the end.
  auto Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
251 Builder.CreateCondBr(Done, DoneBB, BodyBB);
252 DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
253 SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());
256 EmitBlock(DoneBB, /*IsFinished=*/true);
259 /// \brief Emit initialization of arrays of complex types.
260 /// \param DestAddr Address of the array.
261 /// \param Type Type of array.
262 /// \param Init Initial expression of array.
263 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
264 QualType Type, const Expr *Init) {
  // Perform element-by-element initialization.
  QualType ElementTy;
268 // Drill down to the base element type on both arrays.
269 auto ArrayTy = Type->getAsArrayTypeUnsafe();
270 auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
  DestAddr =
      CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
274 auto DestBegin = DestAddr.getPointer();
275 // Cast from pointer to array type to pointer to single element.
276 auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
277 // The basic structure here is a while-do loop.
278 auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
279 auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
  auto IsEmpty =
      CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
282 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
284 // Enter the loop body, making that address the current address.
285 auto EntryBB = CGF.Builder.GetInsertBlock();
286 CGF.EmitBlock(BodyBB);
288 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
290 llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
291 DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
292 DestElementPHI->addIncoming(DestBegin, EntryBB);
293 Address DestElementCurrent =
294 Address(DestElementPHI,
295 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  // Emit copy.
  {
    CodeGenFunction::RunCleanupsScope InitScope(CGF);
    CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
                         /*IsInitializer=*/false);
  }
304 // Shift the address forward by one element.
305 auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
306 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
307 // Check whether we've reached the end.
  auto Done =
      CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
310 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
311 DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
314 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
317 void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
318 Address SrcAddr, const VarDecl *DestVD,
319 const VarDecl *SrcVD, const Expr *Copy) {
320 if (OriginalType->isArrayType()) {
321 auto *BO = dyn_cast<BinaryOperator>(Copy);
322 if (BO && BO->getOpcode() == BO_Assign) {
323 // Perform simple memcpy for simple copying.
324 EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
328 EmitOMPAggregateAssign(
329 DestAddr, SrcAddr, OriginalType,
330 [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
331 // Working with the single array element, so have to remap
332 // destination and source variables to corresponding array
334 CodeGenFunction::OMPPrivateScope Remap(*this);
335 Remap.addPrivate(DestVD, [DestElement]() -> Address {
              return DestElement;
            });
            Remap.addPrivate(
                SrcVD, [SrcElement]() -> Address { return SrcElement; });
340 (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
346 CodeGenFunction::OMPPrivateScope Remap(*this);
347 Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
348 Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
349 (void)Remap.Privatize();
350 // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}
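
// Illustrative note (not from the original source): for an element type with
// a user-defined assignment operator, e.g.
//
//   struct S { S &operator=(const S &); };
//   S dst[10], src[10];
//
// the array branch above runs the copy expression once per element, while a
// trivially copyable array is lowered to a single aggregate assignment.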
355 bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
356 OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
359 llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
360 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
361 auto IRef = C->varlist_begin();
362 auto InitsRef = C->inits().begin();
363 for (auto IInit : C->private_copies()) {
364 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
365 if (EmittedAsFirstprivate.count(OrigVD) == 0) {
366 EmittedAsFirstprivate.insert(OrigVD);
367 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(
            const_cast<VarDecl *>(OrigVD),
            /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                OrigVD) != nullptr,
            (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
375 Address OriginalAddr = EmitLValue(&DRE).getAddress();
376 QualType Type = OrigVD->getType();
377 if (Type->isArrayType()) {
378 // Emit VarDecl with copy init for arrays.
379 // Get the address of the original variable captured in current
381 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
382 auto Emission = EmitAutoVarAlloca(*VD);
383 auto *Init = VD->getInit();
384 if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
385 // Perform simple memcpy.
386 EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
                                   Type);
            } else {
              EmitOMPAggregateAssign(
390 Emission.getAllocatedAddress(), OriginalAddr, Type,
391 [this, VDInit, Init](Address DestElement,
392 Address SrcElement) {
393 // Clean up any temporaries needed by the initialization.
394 RunCleanupsScope InitScope(*this);
395 // Emit initialization for single element.
396 setAddrOfLocalVar(VDInit, SrcElement);
397 EmitAnyExprToMem(Init, DestElement,
398 Init->getType().getQualifiers(),
399 /*IsInitializer*/ false);
                    LocalDeclMap.erase(VDInit);
                  });
            }
            EmitAutoVarCleanups(Emission);
            return Emission.getAllocatedAddress();
          });
        } else {
407 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
408 // Emit private VarDecl with copy init.
            // Remap temp VDInit variable to the address of the original
            // variable (for proper handling of captured global variables).
            setAddrOfLocalVar(VDInit, OriginalAddr);
            EmitDecl(*VD);
            LocalDeclMap.erase(VDInit);
415 return GetAddrOfLocalVar(VD);
418 assert(IsRegistered &&
419 "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef, ++InitsRef;
    }
  }
  return !EmittedAsFirstprivate.empty();
}
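
// Illustrative sketch (not from the original source): for
//
//   int a = 1; int b[2] = {2, 3};
//   #pragma omp parallel firstprivate(a, b)
//
// each thread gets private copies initialized from the originals: 'a' through
// the plain private-copy path and 'b' through the array path above (memcpy
// for trivial initializers, element-by-element otherwise).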
429 void CodeGenFunction::EmitOMPPrivateClause(
430 const OMPExecutableDirective &D,
431 CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
434 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
435 for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
436 auto IRef = C->varlist_begin();
437 for (auto IInit : C->private_copies()) {
438 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
439 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
          // Emit private VarDecl with copy init.
          EmitDecl(*VD);
          return GetAddrOfLocalVar(VD);
        });
447 assert(IsRegistered && "private var already registered as private");
448 // Silence the warning about unused variable.
456 bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
459 // threadprivate_var1 = master_threadprivate_var1;
460 // operator=(threadprivate_var2, master_threadprivate_var2);
462 // __kmpc_barrier(&loc, global_tid);
463 llvm::DenseSet<const VarDecl *> CopiedVars;
464 llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
465 for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
466 auto IRef = C->varlist_begin();
467 auto ISrcRef = C->source_exprs().begin();
468 auto IDestRef = C->destination_exprs().begin();
469 for (auto *AssignOp : C->assignment_ops()) {
470 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
471 QualType Type = VD->getType();
472 if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
474 // Get the address of the master variable. If we are emitting code with
475 // TLS support, the address is passed from the master as field in the
476 // captured declaration.
477 Address MasterAddr = Address::invalid();
478 if (getLangOpts().OpenMPUseTLS &&
479 getContext().getTargetInfo().isTLSSupported()) {
480 assert(CapturedStmtInfo->lookup(VD) &&
481 "Copyin threadprivates should have been captured!");
482 DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
483 VK_LValue, (*IRef)->getExprLoc());
484 MasterAddr = EmitLValue(&DRE).getAddress();
485 LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
492 // Get the address of the threadprivate variable.
493 Address PrivateAddr = EmitLValue(*IRef).getAddress();
494 if (CopiedVars.size() == 1) {
495 // At first check if current thread is a master thread. If it is, no
496 // need to copy data.
497 CopyBegin = createBasicBlock("copyin.not.master");
498 CopyEnd = createBasicBlock("copyin.not.master.end");
499 Builder.CreateCondBr(
500 Builder.CreateICmpNE(
501 Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
502 Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
506 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
507 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
508 EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef, ++ISrcRef, ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}
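
// Illustrative sketch (not from the original source): for
//
//   int tp;
//   #pragma omp threadprivate(tp)
//   #pragma omp parallel copyin(tp)
//
// the pointer comparison emitted above skips the copy on the master thread;
// every other thread copies the master's 'tp' into its own instance.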
523 bool CodeGenFunction::EmitOMPLastprivateClauseInit(
524 const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
527 bool HasAtLeastOneLastprivate = false;
528 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
529 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
530 HasAtLeastOneLastprivate = true;
531 auto IRef = C->varlist_begin();
532 auto IDestRef = C->destination_exprs().begin();
533 for (auto *IInit : C->private_copies()) {
534 // Keep the address of the original variable for future update at the end
536 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
537 if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
538 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
          DeclRefExpr DRE(
              const_cast<VarDecl *>(OrigVD),
              /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                  OrigVD) != nullptr,
              (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
547 // Check if the variable is also a firstprivate: in this case IInit is
548 // not generated. Initialization of this variable will happen in codegen
        // for 'firstprivate' clause.
        if (IInit) {
          auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            return GetAddrOfLocalVar(VD);
          });
558 assert(IsRegistered &&
559 "lastprivate var already registered as private");
566 return HasAtLeastOneLastprivate;
569 void CodeGenFunction::EmitOMPLastprivateClauseFinal(
570 const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
573 // Emit following code:
574 // if (<IsLastIterCond>) {
575 // orig_var1 = private_orig_var1;
577 // orig_varn = private_orig_varn;
579 llvm::BasicBlock *ThenBB = nullptr;
580 llvm::BasicBlock *DoneBB = nullptr;
581 if (IsLastIterCond) {
582 ThenBB = createBasicBlock(".omp.lastprivate.then");
583 DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
587 llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates;
588 if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
589 auto IC = LoopDirective->counters().begin();
590 for (auto F : LoopDirective->finals()) {
591 auto *D = cast<DeclRefExpr>(*IC)->getDecl()->getCanonicalDecl();
592 LoopCountersAndUpdates[D] = F;
596 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
597 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
598 auto IRef = C->varlist_begin();
599 auto ISrcRef = C->source_exprs().begin();
600 auto IDestRef = C->destination_exprs().begin();
601 for (auto *AssignOp : C->assignment_ops()) {
602 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
603 QualType Type = PrivateVD->getType();
604 auto *CanonicalVD = PrivateVD->getCanonicalDecl();
605 if (AlreadyEmittedVars.insert(CanonicalVD).second) {
606 // If lastprivate variable is a loop control variable for loop-based
        // directive, update its value before copying back to the original
        // variable.
609 if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
610 EmitIgnoredExpr(UpExpr);
611 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
612 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
613 // Get the address of the original variable.
614 Address OriginalAddr = GetAddrOfLocalVar(DestVD);
615 // Get the address of the private variable.
616 Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      getNaturalTypeAlignment(RefTy->getPointeeType()));
621 EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}
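
// Illustrative sketch (not from the original source): for
//
//   #pragma omp for lastprivate(x)
//   for (int i = 0; i < n; ++i) x = f(i);
//
// the thread that executes the sequentially last iteration writes its private
// 'x' back to the original variable under the IsLastIterCond check above.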
632 void CodeGenFunction::EmitOMPReductionClauseInit(
633 const OMPExecutableDirective &D,
634 CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
637 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
638 auto ILHS = C->lhs_exprs().begin();
639 auto IRHS = C->rhs_exprs().begin();
640 auto IPriv = C->privates().begin();
641 for (auto IRef : C->varlists()) {
642 auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
643 auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
644 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
645 if (auto *OASE = dyn_cast<OMPArraySectionExpr>(IRef)) {
646 auto *Base = OASE->getBase()->IgnoreParenImpCasts();
647 while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
648 Base = TempOASE->getBase()->IgnoreParenImpCasts();
649 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
650 Base = TempASE->getBase()->IgnoreParenImpCasts();
651 auto *DE = cast<DeclRefExpr>(Base);
652 auto *OrigVD = cast<VarDecl>(DE->getDecl());
653 auto OASELValueLB = EmitOMPArraySectionExpr(OASE);
        auto OASELValueUB =
            EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
656 auto OriginalBaseLValue = EmitLValue(DE);
657 auto BaseLValue = OriginalBaseLValue;
658 auto *Zero = Builder.getInt64(/*C=*/0);
659 llvm::SmallVector<llvm::Value *, 4> Indexes;
660 Indexes.push_back(Zero);
        auto *ItemTy =
            OASELValueLB.getPointer()->getType()->getPointerElementType();
663 auto *Ty = BaseLValue.getPointer()->getType()->getPointerElementType();
664 while (Ty != ItemTy) {
665 Indexes.push_back(Zero);
666 Ty = Ty->getPointerElementType();
668 BaseLValue = MakeAddrLValue(
669 Address(Builder.CreateInBoundsGEP(BaseLValue.getPointer(), Indexes),
670 OASELValueLB.getAlignment()),
671 OASELValueLB.getType(), OASELValueLB.getAlignmentSource());
672 // Store the address of the original variable associated with the LHS
673 // implicit variable.
674 PrivateScope.addPrivate(LHSVD, [this, OASELValueLB]() -> Address {
675 return OASELValueLB.getAddress();
677 // Emit reduction copy.
678 bool IsRegistered = PrivateScope.addPrivate(
679 OrigVD, [this, PrivateVD, BaseLValue, OASELValueLB, OASELValueUB,
680 OriginalBaseLValue]() -> Address {
681 // Emit VarDecl with copy init for arrays.
682 // Get the address of the original variable captured in current
684 auto *Size = Builder.CreatePtrDiff(OASELValueUB.getPointer(),
685 OASELValueLB.getPointer());
686 Size = Builder.CreateNUWAdd(
687 Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
688 CodeGenFunction::OpaqueValueMapping OpaqueMap(
                  *this, cast<OpaqueValueExpr>(
                             getContext()
                                 .getAsVariableArrayType(PrivateVD->getType())
                                 ->getSizeExpr()),
                  RValue::get(Size));
              EmitVariablyModifiedType(PrivateVD->getType());
695 auto Emission = EmitAutoVarAlloca(*PrivateVD);
696 auto Addr = Emission.getAllocatedAddress();
697 auto *Init = PrivateVD->getInit();
698 EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(), Init);
699 EmitAutoVarCleanups(Emission);
700 // Emit private VarDecl with reduction init.
701 auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
702 OASELValueLB.getPointer());
703 auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
704 Ptr = Builder.CreatePointerBitCastOrAddrSpaceCast(
705 Ptr, OriginalBaseLValue.getPointer()->getType());
706 return Address(Ptr, OriginalBaseLValue.getAlignment());
708 assert(IsRegistered && "private var already registered as private");
709 // Silence the warning about unused variable.
711 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
712 return GetAddrOfLocalVar(PrivateVD);
714 } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(IRef)) {
715 auto *Base = ASE->getBase()->IgnoreParenImpCasts();
716 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
717 Base = TempASE->getBase()->IgnoreParenImpCasts();
718 auto *DE = cast<DeclRefExpr>(Base);
719 auto *OrigVD = cast<VarDecl>(DE->getDecl());
720 auto ASELValue = EmitLValue(ASE);
721 auto OriginalBaseLValue = EmitLValue(DE);
722 auto BaseLValue = OriginalBaseLValue;
723 auto *Zero = Builder.getInt64(/*C=*/0);
724 llvm::SmallVector<llvm::Value *, 4> Indexes;
725 Indexes.push_back(Zero);
        auto *ItemTy =
            ASELValue.getPointer()->getType()->getPointerElementType();
728 auto *Ty = BaseLValue.getPointer()->getType()->getPointerElementType();
729 while (Ty != ItemTy) {
730 Indexes.push_back(Zero);
731 Ty = Ty->getPointerElementType();
733 BaseLValue = MakeAddrLValue(
734 Address(Builder.CreateInBoundsGEP(BaseLValue.getPointer(), Indexes),
735 ASELValue.getAlignment()),
736 ASELValue.getType(), ASELValue.getAlignmentSource());
737 // Store the address of the original variable associated with the LHS
738 // implicit variable.
739 PrivateScope.addPrivate(LHSVD, [this, ASELValue]() -> Address {
740 return ASELValue.getAddress();
742 // Emit reduction copy.
743 bool IsRegistered = PrivateScope.addPrivate(
744 OrigVD, [this, PrivateVD, BaseLValue, ASELValue,
745 OriginalBaseLValue]() -> Address {
746 // Emit private VarDecl with reduction init.
747 EmitDecl(*PrivateVD);
748 auto Addr = GetAddrOfLocalVar(PrivateVD);
749 auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
750 ASELValue.getPointer());
751 auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
752 Ptr = Builder.CreatePointerBitCastOrAddrSpaceCast(
753 Ptr, OriginalBaseLValue.getPointer()->getType());
754 return Address(Ptr, OriginalBaseLValue.getAlignment());
756 assert(IsRegistered && "private var already registered as private");
757 // Silence the warning about unused variable.
759 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
          return GetAddrOfLocalVar(PrivateVD);
        });
      } else {
763 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
764 // Store the address of the original variable associated with the LHS
765 // implicit variable.
766 PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> Address {
767 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
768 CapturedStmtInfo->lookup(OrigVD) != nullptr,
769 IRef->getType(), VK_LValue, IRef->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Emit reduction copy.
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> Address {
775 // Emit private VarDecl with reduction init.
776 EmitDecl(*PrivateVD);
777 return GetAddrOfLocalVar(PrivateVD);
779 assert(IsRegistered && "private var already registered as private");
780 // Silence the warning about unused variable.
782 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
783 return GetAddrOfLocalVar(PrivateVD);
      ++ILHS, ++IRHS, ++IPriv;
    }
  }
}
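
// Illustrative sketch (not from the original source): for
//
//   int s = 0;
//   #pragma omp parallel for reduction(+ : s)
//   for (int i = 0; i < n; ++i) s += a[i];
//
// each thread operates on the private copy of 's' set up above; array-section
// reductions such as reduction(+ : a[lb:len]) take the OMPArraySectionExpr
// branch, which privatizes only the selected slice.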
791 void CodeGenFunction::EmitOMPReductionClauseFinal(
792 const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return;
795 llvm::SmallVector<const Expr *, 8> Privates;
796 llvm::SmallVector<const Expr *, 8> LHSExprs;
797 llvm::SmallVector<const Expr *, 8> RHSExprs;
798 llvm::SmallVector<const Expr *, 8> ReductionOps;
799 bool HasAtLeastOneReduction = false;
800 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
801 HasAtLeastOneReduction = true;
802 Privates.append(C->privates().begin(), C->privates().end());
803 LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
804 RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
805 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
807 if (HasAtLeastOneReduction) {
808 // Emit nowait reduction if nowait clause is present or directive is a
809 // parallel directive (it always has implicit barrier).
810 CGM.getOpenMPRuntime().emitReduction(
811 *this, D.getLocEnd(), Privates, LHSExprs, RHSExprs, ReductionOps,
812 D.getSingleClause<OMPNowaitClause>() ||
813 isOpenMPParallelDirective(D.getDirectiveKind()) ||
814 D.getDirectiveKind() == OMPD_simd,
815 D.getDirectiveKind() == OMPD_simd);
819 static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
820 const OMPExecutableDirective &S,
821 OpenMPDirectiveKind InnermostKind,
822 const RegionCodeGenTy &CodeGen) {
823 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
824 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
825 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
826 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
827 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
828 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
829 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
830 auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
831 /*IgnoreResultAssign*/ true);
832 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
833 CGF, NumThreads, NumThreadsClause->getLocStart());
835 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
836 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
837 CGF.CGM.getOpenMPRuntime().emitProcBindClause(
838 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
840 const Expr *IfCond = nullptr;
841 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
842 if (C->getNameModifier() == OMPD_unknown ||
843 C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }
848 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
849 CapturedVars, IfCond);
852 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
853 LexicalScope Scope(*this, S.getSourceRange());
854 // Emit parallel region as a standalone region.
855 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
856 OMPPrivateScope PrivateScope(CGF);
857 bool Copyins = CGF.EmitOMPCopyinClause(S);
858 bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
859 if (Copyins || Firstprivates) {
860 // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables or propagation of the master
      // thread's values of threadprivate variables to the local instances of
      // those variables in all other implicit threads.
864 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
865 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
866 /*ForceSimpleCall=*/true);
868 CGF.EmitOMPPrivateClause(S, PrivateScope);
869 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
870 (void)PrivateScope.Privatize();
871 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
}
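
// Illustrative sketch (not from the original source): for
//
//   #pragma omp parallel if(cond) num_threads(4) private(x) reduction(+ : s)
//
// the clauses are handled in the order visible above: copyin/firstprivate
// (with an extra barrier if either emitted anything), then private and
// reduction setup, the region body, and finally the reduction combination;
// if/num_threads/proc_bind are forwarded to the runtime call.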
void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
879 RunCleanupsScope BodyScope(*this);
880 // Update counters values on current iteration.
  for (auto I : D.updates()) {
    EmitIgnoredExpr(I);
  }
884 // Update the linear variables.
885 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (auto U : C->updates()) {
      EmitIgnoredExpr(U);
    }
  }
891 // On a continue in the body, jump to the end.
892 auto Continue = getJumpDestInCurrentScope("omp.body.continue");
893 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
895 EmitStmt(D.getBody());
896 // The end (updates/cleanups).
897 EmitBlock(Continue.getBlock());
898 BreakContinueStack.pop_back();
901 void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
904 const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
905 const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
906 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
908 // Start the loop with a block that tests the condition.
909 auto CondBlock = createBasicBlock("omp.inner.for.cond");
910 EmitBlock(CondBlock);
911 LoopStack.push(CondBlock);
913 // If there are any cleanups between here and the loop-exit scope,
914 // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
919 auto LoopBody = createBasicBlock("omp.inner.for.body");
922 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
923 if (ExitBlock != LoopExit.getBlock()) {
924 EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);
929 incrementProfileCounter(&S);
931 // Create a block for the increment.
932 auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  BodyGen(*this);
937 // Emit "IV = IV + 1" and a back-edge to the condition block.
938 EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
941 BreakContinueStack.pop_back();
942 EmitBranch(CondBlock);
944 // Emit the fall-through block.
945 EmitBlock(LoopExit.getBlock());
948 void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
  if (!HaveInsertPoint())
    return;
951 // Emit inits for the linear variables.
952 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
953 for (auto Init : C->inits()) {
954 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
955 auto *OrigVD = cast<VarDecl>(
956 cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())->getDecl());
957 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
958 CapturedStmtInfo->lookup(OrigVD) != nullptr,
959 VD->getInit()->getType(), VK_LValue,
960 VD->getInit()->getExprLoc());
961 AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
962 EmitExprAsInit(&DRE, VD,
963 MakeAddrLValue(Emission.getAllocatedAddress(), VD->getType()),
964 /*capturedByInit=*/false);
965 EmitAutoVarCleanups(Emission);
967 // Emit the linear steps for the linear clauses.
968 // If a step is not constant, it is pre-calculated before the loop.
969 if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
970 if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
971 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
}
978 static void emitLinearClauseFinal(CodeGenFunction &CGF,
979 const OMPLoopDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;
982 // Emit the final values of the linear variables.
983 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
984 auto IC = C->varlist_begin();
985 for (auto F : C->finals()) {
986 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
987 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
988 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
989 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
990 Address OrigAddr = CGF.EmitLValue(&DRE).getAddress();
991 CodeGenFunction::OMPPrivateScope VarScope(CGF);
992 VarScope.addPrivate(OrigVD,
993 [OrigAddr]() -> Address { return OrigAddr; });
994 (void)VarScope.Privatize();
995 CGF.EmitIgnoredExpr(F);
1001 static void emitAlignedClause(CodeGenFunction &CGF,
1002 const OMPExecutableDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;
1005 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
1006 unsigned ClauseAlignment = 0;
1007 if (auto AlignmentExpr = Clause->getAlignment()) {
      auto *AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
    }
1012 for (auto E : Clause->varlists()) {
1013 unsigned Alignment = ClauseAlignment;
1014 if (Alignment == 0) {
1015 // OpenMP [2.8.1, Description]
1016 // If no optional parameter is specified, implementation-defined default
1017 // alignments for SIMD instructions on the target platforms are assumed.
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
1024 assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
1025 "alignment is not power of 2");
1026 if (Alignment != 0) {
1027 llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.EmitAlignmentAssumption(PtrValue, Alignment);
      }
    }
  }
}
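
// Illustrative note (not from the original source): for
//
//   #pragma omp simd aligned(p : 32)
//
// the loop above emits an alignment assumption (EmitAlignmentAssumption) for
// 'p' with value 32; with no explicit alignment the target's default SIMD
// alignment for the pointee type is used instead.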
1034 static void emitPrivateLoopCounters(CodeGenFunction &CGF,
1035 CodeGenFunction::OMPPrivateScope &LoopScope,
1036 ArrayRef<Expr *> Counters,
1037 ArrayRef<Expr *> PrivateCounters) {
  if (!CGF.HaveInsertPoint())
    return;
1040 auto I = PrivateCounters.begin();
1041 for (auto *E : Counters) {
1042 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1043 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
1044 Address Addr = Address::invalid();
1045 (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
1046 // Emit var without initialization.
1047 auto VarEmission = CGF.EmitAutoVarAlloca(*PrivateVD);
1048 CGF.EmitAutoVarCleanups(VarEmission);
      Addr = VarEmission.getAllocatedAddress();
      return Addr;
    });
    (void)LoopScope.addPrivate(VD, [&]() -> Address { return Addr; });
    ++I;
  }
}
1057 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
1058 const Expr *Cond, llvm::BasicBlock *TrueBlock,
1059 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  if (!CGF.HaveInsertPoint())
    return;
  {
1063 CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
1064 emitPrivateLoopCounters(CGF, PreCondScope, S.counters(),
1065 S.private_counters());
1066 (void)PreCondScope.Privatize();
1067 // Get initial values of real counters.
1068 for (auto I : S.inits()) {
1069 CGF.EmitIgnoredExpr(I);
1072 // Check that loop is executed at least one time.
1073 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
static void
emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
1078 CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!CGF.HaveInsertPoint())
    return;
1081 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1082 auto CurPrivate = C->privates().begin();
1083 for (auto *E : C->varlists()) {
1084 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
1087 bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
1088 // Emit private VarDecl with copy init.
1089 CGF.EmitVarDecl(*PrivateVD);
1090 return CGF.GetAddrOfLocalVar(PrivateVD);
1092 assert(IsRegistered && "linear var already registered as private");
1093 // Silence the warning about unused variable.
1100 static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     bool IsMonotonic) {
  if (!CGF.HaveInsertPoint())
    return;
1105 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
1106 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
1107 /*ignoreResult=*/true);
1108 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1109 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1110 // In presence of finite 'safelen', it may be unsafe to mark all
1111 // the memory instructions parallel, because loop-carried
1112 // dependences of 'safelen' iterations are possible.
    if (!IsMonotonic)
      CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
1115 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
1116 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
1117 /*ignoreResult=*/true);
1118 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1119 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1120 // In presence of finite 'safelen', it may be unsafe to mark all
1121 // the memory instructions parallel, because loop-carried
1122 // dependences of 'safelen' iterations are possible.
1123 CGF.LoopStack.setParallel(false);
void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
                                      bool IsMonotonic) {
1129 // Walk clauses and process safelen/lastprivate.
1130 LoopStack.setParallel(!IsMonotonic);
1131 LoopStack.setVectorizeEnable(true);
1132 emitSimdlenSafelenClause(*this, D, IsMonotonic);
1135 void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
  if (!HaveInsertPoint())
    return;
1138 auto IC = D.counters().begin();
1139 for (auto F : D.finals()) {
1140 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
1141 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
1142 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1143 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1144 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
1145 Address OrigAddr = EmitLValue(&DRE).getAddress();
1146 OMPPrivateScope VarScope(*this);
1147 VarScope.addPrivate(OrigVD,
1148 [OrigAddr]() -> Address { return OrigAddr; });
1149 (void)VarScope.Privatize();
1154 emitLinearClauseFinal(*this, D);
1157 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
1158 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1160 // for (IV in 0..LastIteration) BODY;
1161 // <Final counter/linear vars updates>;
1165 // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
1169 llvm::BasicBlock *ContBlock = nullptr;
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
1174 auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
1175 ContBlock = CGF.createBasicBlock("simd.if.end");
1176 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
1177 CGF.getProfileCount(&S));
1178 CGF.EmitBlock(ThenBlock);
1179 CGF.incrementProfileCounter(&S);
1182 // Emit the loop iteration variable.
1183 const Expr *IVExpr = S.getIterationVariable();
1184 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
1185 CGF.EmitVarDecl(*IVDecl);
1186 CGF.EmitIgnoredExpr(S.getInit());
1188 // Emit the iterations count variable.
1189 // If it is not a variable, Sema decided to calculate iterations count on
1190 // each iteration (e.g., it is foldable into a constant).
1191 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
1192 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
1193 // Emit calculation of the iterations count.
1194 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
1197 CGF.EmitOMPSimdInit(S);
1199 emitAlignedClause(CGF, S);
1200 CGF.EmitOMPLinearClauseInit(S);
    bool HasLastprivateClause;
    {
1203 OMPPrivateScope LoopScope(CGF);
1204 emitPrivateLoopCounters(CGF, LoopScope, S.counters(),
1205 S.private_counters());
1206 emitPrivateLinearVars(CGF, S, LoopScope);
1207 CGF.EmitOMPPrivateClause(S, LoopScope);
1208 CGF.EmitOMPReductionClauseInit(S, LoopScope);
1209 HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
1210 (void)LoopScope.Privatize();
      CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                           S.getInc(),
1213 [&S](CodeGenFunction &CGF) {
1214 CGF.EmitOMPLoopBody(S, JumpDest());
1215 CGF.EmitStopPoint(&S);
1217 [](CodeGenFunction &) {});
1218 // Emit final copy of the lastprivate variables at the end of loops.
1219 if (HasLastprivateClause) {
1220 CGF.EmitOMPLastprivateClauseFinal(S);
1222 CGF.EmitOMPReductionClauseFinal(S);
1224 CGF.EmitOMPSimdFinal(S);
1225 // Emit: if (PreCond) - end.
1227 CGF.EmitBranch(ContBlock);
1228 CGF.EmitBlock(ContBlock, true);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}
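
// Illustrative sketch (not from the original source): for
//
//   #pragma omp simd safelen(8)
//   for (int i = 0; i < n; ++i) a[i] = b[i] + c[i];
//
// the directive is emitted as an inlined region; the loop gets vectorization
// metadata (width taken from simdlen/safelen), and a finite safelen keeps the
// memory accesses from being marked fully parallel.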
1234 void CodeGenFunction::EmitOMPForOuterLoop(
1235 OpenMPScheduleClauseKind ScheduleKind, bool IsMonotonic,
1236 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
1237 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1238 auto &RT = CGM.getOpenMPRuntime();
1240 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
1241 const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);
  assert((Ordered ||
          !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");
1249 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1250 // When schedule(dynamic,chunk_size) is specified, the iterations are
1251 // distributed to threads in the team in chunks as the threads request them.
1252 // Each thread executes a chunk of iterations, then requests another chunk,
1253 // until no chunks remain to be distributed. Each chunk contains chunk_size
1254 // iterations, except for the last chunk to be distributed, which may have
1255 // fewer iterations. When no chunk_size is specified, it defaults to 1.
1257 // When schedule(guided,chunk_size) is specified, the iterations are assigned
1258 // to threads in the team in chunks as the executing threads request them.
1259 // Each thread executes a chunk of iterations, then requests another chunk,
1260 // until no chunks remain to be assigned. For a chunk_size of 1, the size of
1261 // each chunk is proportional to the number of unassigned iterations divided
1262 // by the number of threads in the team, decreasing to 1. For a chunk_size
1263 // with value k (greater than 1), the size of each chunk is determined in the
1264 // same way, with the restriction that the chunks do not contain fewer than k
1265 // iterations (except for the last chunk to be assigned, which may have fewer
1266 // than k iterations).
1268 // When schedule(auto) is specified, the decision regarding scheduling is
1269 // delegated to the compiler and/or runtime system. The programmer gives the
1270 // implementation the freedom to choose any possible mapping of iterations to
1271 // threads in the team.
1273 // When schedule(runtime) is specified, the decision regarding scheduling is
1274 // deferred until run time, and the schedule and chunk size are taken from the
1275 // run-sched-var ICV. If the ICV is set to auto, the schedule is
1276 // implementation defined
1278 // while(__kmpc_dispatch_next(&LB, &UB)) {
1280 // while (idx <= UB) { BODY; ++idx;
1281 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
1285 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1286 // When schedule(static, chunk_size) is specified, iterations are divided into
1287 // chunks of size chunk_size, and the chunks are assigned to the threads in
1288 // the team in a round-robin fashion in the order of the thread number.
1290 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
1291 // while (idx <= UB) { BODY; ++idx; } // inner loop
1297 const Expr *IVExpr = S.getIterationVariable();
1298 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1299 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1301 if (DynamicOrOrdered) {
1302 llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());
1303 RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind,
1304 IVSize, IVSigned, Ordered, UBVal, Chunk);
  } else {
    RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
                         IVSize, IVSigned, Ordered, IL, LB, UB, ST, Chunk);
  }
1310 auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
1312 // Start the loop with a block that tests the condition.
1313 auto CondBlock = createBasicBlock("omp.dispatch.cond");
1314 EmitBlock(CondBlock);
1315 LoopStack.push(CondBlock);
1317 llvm::Value *BoolCondVal = nullptr;
1318 if (!DynamicOrOrdered) {
1319 // UB = min(UB, GlobalUB)
1320 EmitIgnoredExpr(S.getEnsureUpperBound());
1322 EmitIgnoredExpr(S.getInit());
    BoolCondVal = EvaluateExprAsBool(S.getCond());
  } else {
    BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned,
                                 IL, LB, UB, ST);
  }
1330 // If there are any cleanups between here and the loop-exit scope,
1331 // create a block to stage a loop exit along.
1332 auto ExitBlock = LoopExit.getBlock();
1333 if (LoopScope.requiresCleanups())
1334 ExitBlock = createBasicBlock("omp.dispatch.cleanup");
1336 auto LoopBody = createBasicBlock("omp.dispatch.body");
1337 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
1338 if (ExitBlock != LoopExit.getBlock()) {
1339 EmitBlock(ExitBlock);
1340 EmitBranchThroughCleanup(LoopExit);
1342 EmitBlock(LoopBody);
1344 // Emit "IV = LB" (in case of static schedule, we have already calculated new
1345 // LB for loop condition and emitted it above).
1346 if (DynamicOrOrdered)
1347 EmitIgnoredExpr(S.getInit());
1349 // Create a block for the increment.
1350 auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
1351 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1353 // Generate !llvm.loop.parallel metadata for loads and stores for loops
1354 // with dynamic/guided scheduling and without ordered clause.
1355 if (!isOpenMPSimdDirective(S.getDirectiveKind()))
    LoopStack.setParallel(!IsMonotonic);
  else
    EmitOMPSimdInit(S, IsMonotonic);
1360 SourceLocation Loc = S.getLocStart();
1361 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
1362 [&S, LoopExit](CodeGenFunction &CGF) {
1363 CGF.EmitOMPLoopBody(S, LoopExit);
                     CGF.EmitStopPoint(&S);
                   },
                   [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
                     if (Ordered) {
                       CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
                           CGF, Loc, IVSize, IVSigned);
                     }
                   });
1373 EmitBlock(Continue.getBlock());
1374 BreakContinueStack.pop_back();
1375 if (!DynamicOrOrdered) {
1376 // Emit "LB = LB + Stride", "UB = UB + Stride".
1377 EmitIgnoredExpr(S.getNextLowerBound());
1378 EmitIgnoredExpr(S.getNextUpperBound());
1381 EmitBranch(CondBlock);
1383 // Emit the fall-through block.
1384 EmitBlock(LoopExit.getBlock());
1386 // Tell the runtime we are done.
1387 if (!DynamicOrOrdered)
1388 RT.emitForStaticFinish(*this, S.getLocEnd());
1391 /// \brief Emit a helper variable and return corresponding lvalue.
1392 static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
1393 const DeclRefExpr *Helper) {
1394 auto VDecl = cast<VarDecl>(Helper->getDecl());
1395 CGF.EmitVarDecl(*VDecl);
1396 return CGF.EmitLValue(Helper);
1400 struct ScheduleKindModifiersTy {
1401 OpenMPScheduleClauseKind Kind;
1402 OpenMPScheduleClauseModifier M1;
1403 OpenMPScheduleClauseModifier M2;
1404 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
1405 OpenMPScheduleClauseModifier M1,
1406 OpenMPScheduleClauseModifier M2)
1407 : Kind(Kind), M1(M1), M2(M2) {}
1411 static std::pair<llvm::Value * /*Chunk*/, ScheduleKindModifiersTy>
emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
                   bool OuterRegion) {
1414 // Detect the loop schedule kind and chunk.
1415 auto ScheduleKind = OMPC_SCHEDULE_unknown;
1416 OpenMPScheduleClauseModifier M1 = OMPC_SCHEDULE_MODIFIER_unknown;
1417 OpenMPScheduleClauseModifier M2 = OMPC_SCHEDULE_MODIFIER_unknown;
1418 llvm::Value *Chunk = nullptr;
1419 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
1420 ScheduleKind = C->getScheduleKind();
1421 M1 = C->getFirstScheduleModifier();
1422 M2 = C->getSecondScheduleModifier();
1423 if (const auto *Ch = C->getChunkSize()) {
1424 if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
1426 const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl());
1427 CGF.EmitVarDecl(*ImpVar);
1428 CGF.EmitStoreThroughLValue(
1429 CGF.EmitAnyExpr(Ch),
1430 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
1431 ImpVar->getType()));
1436 if (!C->getHelperChunkSize() || !OuterRegion) {
1437 Chunk = CGF.EmitScalarExpr(Ch);
1438 Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(),
1439 S.getIterationVariable()->getType(),
1444 return std::make_pair(Chunk, ScheduleKindModifiersTy(ScheduleKind, M1, M2));
1447 bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
1448 // Emit the loop iteration variable.
1449 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
1450 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
1451 EmitVarDecl(*IVDecl);
1453 // Emit the iterations count variable.
1454 // If it is not a variable, Sema decided to calculate iterations count on each
1455 // iteration (e.g., it is foldable into a constant).
1456 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
1457 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
1458 // Emit calculation of the iterations count.
1459 EmitIgnoredExpr(S.getCalcLastIteration());
1462 auto &RT = CGM.getOpenMPRuntime();
1464 bool HasLastprivateClause;
  // Check pre-condition.
  {
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
1471 llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return false;
    } else {
1476 auto *ThenBlock = createBasicBlock("omp.precond.then");
1477 ContBlock = createBasicBlock("omp.precond.end");
1478 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
1479 getProfileCount(&S));
1480 EmitBlock(ThenBlock);
1481 incrementProfileCounter(&S);
1484 emitAlignedClause(*this, S);
1485 EmitOMPLinearClauseInit(S);
    // Emit 'then' code.
    {
      // Emit helper vars inits.
      LValue LB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
      LValue UB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
1498 OMPPrivateScope LoopScope(*this);
1499 if (EmitOMPFirstprivateClause(S, LoopScope)) {
1500 // Emit implicit barrier to synchronize threads and avoid data races on
1501 // initialization of firstprivate variables.
1502 CGM.getOpenMPRuntime().emitBarrierCall(
1503 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
1504 /*ForceSimpleCall=*/true);
1506 EmitOMPPrivateClause(S, LoopScope);
1507 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
1508 EmitOMPReductionClauseInit(S, LoopScope);
1509 emitPrivateLoopCounters(*this, LoopScope, S.counters(),
1510 S.private_counters());
1511 emitPrivateLinearVars(*this, S, LoopScope);
1512 (void)LoopScope.Privatize();
      // Detect the loop schedule kind and chunk.
      llvm::Value *Chunk;
      OpenMPScheduleClauseKind ScheduleKind;
      auto ScheduleInfo = emitScheduleClause(*this, S, /*OuterRegion=*/false);
1519 Chunk = ScheduleInfo.first;
1520 ScheduleKind = ScheduleInfo.second.Kind;
1521 const OpenMPScheduleClauseModifier M1 = ScheduleInfo.second.M1;
1522 const OpenMPScheduleClauseModifier M2 = ScheduleInfo.second.M2;
1523 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1524 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1525 const bool Ordered = S.getSingleClause<OMPOrderedClause>() != nullptr;
1526 // OpenMP 4.5, 2.7.1 Loop Construct, Description.
1527 // If the static schedule kind is specified or if the ordered clause is
1528 // specified, and if no monotonic modifier is specified, the effect will
1529 // be as if the monotonic modifier was specified.
1530 if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr) &&
          !Ordered) {
1533 if (isOpenMPSimdDirective(S.getDirectiveKind()))
1534 EmitOMPSimdInit(S, /*IsMonotonic=*/true);
1535 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1536 // When no chunk_size is specified, the iteration space is divided into
1537 // chunks that are approximately equal in size, and at most one chunk is
1538 // distributed to each thread. Note that the size of the chunks is
1539 // unspecified in this case.
1540 RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
1541 IVSize, IVSigned, Ordered,
1542 IL.getAddress(), LB.getAddress(),
1543 UB.getAddress(), ST.getAddress());
1544 auto LoopExit =
1545 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
1546 // UB = min(UB, GlobalUB);
1547 EmitIgnoredExpr(S.getEnsureUpperBound());
1548 // IV = LB;
1549 EmitIgnoredExpr(S.getInit());
1550 // while (idx <= UB) { BODY; ++idx; }
1551 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
1552 S.getInc(),
1553 [&S, LoopExit](CodeGenFunction &CGF) {
1554 CGF.EmitOMPLoopBody(S, LoopExit);
1555 CGF.EmitStopPoint(&S);
1556 },
1557 [](CodeGenFunction &) {});
1558 EmitBlock(LoopExit.getBlock());
1559 // Tell the runtime we are done.
1560 RT.emitForStaticFinish(*this, S.getLocStart());
1561 } else {
1562 const bool IsMonotonic = Ordered ||
1563 ScheduleKind == OMPC_SCHEDULE_static ||
1564 ScheduleKind == OMPC_SCHEDULE_unknown ||
1565 M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
1566 M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
1567 // Emit the outer loop, which requests its work chunk [LB..UB] from
1568 // runtime and runs the inner loop to process it.
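// A rough sketch of the structure produced here (the concrete __kmpc_*
// runtime entry points are chosen by CGOpenMPRuntime from the schedule kind):
//   while (the runtime hands back another chunk [LB..UB]) {
//     for (IV = LB; IV <= UB; ++IV)
//       BODY;
//   }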
1569 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
1570 LB.getAddress(), UB.getAddress(), ST.getAddress(),
1571 IL.getAddress(), Chunk);
1572 }
1573 EmitOMPReductionClauseFinal(S);
1574 // Emit final copy of the lastprivate variables if IsLastIter != 0.
1575 if (HasLastprivateClause)
1576 EmitOMPLastprivateClauseFinal(
1577 S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
1579 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
1580 EmitOMPSimdFinal(S);
1582 // We're now done with the loop, so jump to the continuation block.
1583 if (ContBlock) {
1584 EmitBranch(ContBlock);
1585 EmitBlock(ContBlock, true);
1586 }
1587 }
1588 return HasLastprivateClause;
1591 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
1592 LexicalScope Scope(*this, S.getSourceRange());
1593 bool HasLastprivates = false;
1594 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
1595 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
1596 };
1597 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
1598 S.hasCancel());
1600 // Emit an implicit barrier at the end.
1601 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
1602 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
1606 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
1607 LexicalScope Scope(*this, S.getSourceRange());
1608 bool HasLastprivates = false;
1609 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
1610 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
1611 };
1612 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
1614 // Emit an implicit barrier at the end.
1615 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
1616 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
1620 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
1621 const Twine &Name,
1622 llvm::Value *Init = nullptr) {
1623 auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
1624 if (Init)
1625 CGF.EmitScalarInit(Init, LVal);
1626 return LVal;
1627 }
1629 OpenMPDirectiveKind
1630 CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
1631 auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
1632 auto *CS = dyn_cast<CompoundStmt>(Stmt);
1633 bool HasLastprivates = false;
1634 auto &&CodeGen = [&S, Stmt, CS, &HasLastprivates](CodeGenFunction &CGF) {
1635 auto &C = CGF.CGM.getContext();
1636 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
1637 // Emit helper vars inits.
1638 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
1639 CGF.Builder.getInt32(0));
1640 auto *GlobalUBVal = CS != nullptr ? CGF.Builder.getInt32(CS->size() - 1)
1641 : CGF.Builder.getInt32(0);
1642 LValue UB =
1643 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
1644 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
1645 CGF.Builder.getInt32(1));
1646 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
1647 CGF.Builder.getInt32(0));
1649 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
1650 OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
1651 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
1652 OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
1653 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
1654 // Generate condition for loop.
1655 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
1656 OK_Ordinary, S.getLocStart(),
1657 /*fpContractable=*/false);
1658 // Increment for loop counter.
1659 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
1660 S.getLocStart());
1661 auto BodyGen = [Stmt, CS, &S, &IV](CodeGenFunction &CGF) {
1662 // Iterate through all sections and emit a switch construct:
1663 // switch (IV) {
1664 // case 0:
1665 // <SectionStmt[0]>;
1666 // break;
1667 // ...
1668 // case <NumSection> - 1:
1669 // <SectionStmt[<NumSection> - 1]>;
1670 // break;
1671 // }
1672 // .omp.sections.exit:
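// E.g. for '#pragma omp sections' containing two '#pragma omp section'
// blocks, IV runs over {0, 1} and the switch dispatches to the matching
// section body.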
1673 auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
1674 auto *SwitchStmt = CGF.Builder.CreateSwitch(
1675 CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
1676 CS == nullptr ? 1 : CS->size());
1677 if (CS) {
1678 unsigned CaseNumber = 0;
1679 for (auto *SubStmt : CS->children()) {
1680 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
1681 CGF.EmitBlock(CaseBB);
1682 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
1683 CGF.EmitStmt(SubStmt);
1684 CGF.EmitBranch(ExitBB);
1685 ++CaseNumber;
1686 }
1687 } else {
1688 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
1689 CGF.EmitBlock(CaseBB);
1690 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
1691 CGF.EmitStmt(Stmt);
1692 CGF.EmitBranch(ExitBB);
1693 }
1694 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1695 };
1697 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
1698 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
1699 // Emit implicit barrier to synchronize threads and avoid data races on
1700 // initialization of firstprivate variables.
1701 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1702 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
1703 /*ForceSimpleCall=*/true);
1705 CGF.EmitOMPPrivateClause(S, LoopScope);
1706 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
1707 CGF.EmitOMPReductionClauseInit(S, LoopScope);
1708 (void)LoopScope.Privatize();
1710 // Emit static non-chunked loop.
1711 CGF.CGM.getOpenMPRuntime().emitForStaticInit(
1712 CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
1713 /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(), LB.getAddress(),
1714 UB.getAddress(), ST.getAddress());
1715 // UB = min(UB, GlobalUB);
1716 auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
1717 auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
1718 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
1719 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
1721 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
1722 // while (idx <= UB) { BODY; ++idx; }
1723 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
1724 [](CodeGenFunction &) {});
1725 // Tell the runtime we are done.
1726 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
1727 CGF.EmitOMPReductionClauseFinal(S);
1729 // Emit final copy of the lastprivate variables if IsLastIter != 0.
1730 if (HasLastprivates)
1731 CGF.EmitOMPLastprivateClauseFinal(
1732 S, CGF.Builder.CreateIsNotNull(
1733 CGF.EmitLoadOfScalar(IL, S.getLocStart())));
1736 bool HasCancel = false;
1737 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
1738 HasCancel = OSD->hasCancel();
1739 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
1740 HasCancel = OPSD->hasCancel();
1741 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
1742 HasCancel);
1743 // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
1744 // clause. Otherwise the barrier will be generated by the codegen for the
1745 // directive.
1746 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
1747 // Emit implicit barrier to synchronize threads and avoid data races on
1748 // initialization of firstprivate variables.
1749 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
1750 OMPD_unknown);
1751 }
1752 return OMPD_sections;
1755 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
1756 LexicalScope Scope(*this, S.getSourceRange());
1757 OpenMPDirectiveKind EmittedAs = EmitSections(S);
1758 // Emit an implicit barrier at the end.
1759 if (!S.getSingleClause<OMPNowaitClause>()) {
1760 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs);
1764 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
1765 LexicalScope Scope(*this, S.getSourceRange());
1766 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1767 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1768 };
1769 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
1770 S.hasCancel());
1771 }
1773 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
1774 llvm::SmallVector<const Expr *, 8> CopyprivateVars;
1775 llvm::SmallVector<const Expr *, 8> DestExprs;
1776 llvm::SmallVector<const Expr *, 8> SrcExprs;
1777 llvm::SmallVector<const Expr *, 8> AssignmentOps;
1778 // Check if there are any 'copyprivate' clauses associated with this
1779 // 'single' construct.
1781 // Build a list of copyprivate variables along with helper expressions
1782 // (<source>, <destination>, <destination>=<source> expressions)
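// For example, '#pragma omp single copyprivate(a)' broadcasts the value of
// 'a' from the thread that executed the region to the copies of 'a' in the
// other threads via the '<destination> = <source>' assignment expressions.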
1783 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
1784 CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
1785 DestExprs.append(C->destination_exprs().begin(),
1786 C->destination_exprs().end());
1787 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
1788 AssignmentOps.append(C->assignment_ops().begin(),
1789 C->assignment_ops().end());
1790 }
1791 LexicalScope Scope(*this, S.getSourceRange());
1792 // Emit code for 'single' region along with 'copyprivate' clauses
1793 bool HasFirstprivates;
1794 auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) {
1795 CodeGenFunction::OMPPrivateScope SingleScope(CGF);
1796 HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
1797 CGF.EmitOMPPrivateClause(S, SingleScope);
1798 (void)SingleScope.Privatize();
1800 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1802 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
1803 CopyprivateVars, DestExprs, SrcExprs,
1804 AssignmentOps);
1805 // Emit an implicit barrier at the end (to avoid data race on firstprivate
1806 // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
1807 if ((!S.getSingleClause<OMPNowaitClause>() || HasFirstprivates) &&
1808 CopyprivateVars.empty()) {
1809 CGM.getOpenMPRuntime().emitBarrierCall(
1810 *this, S.getLocStart(),
1811 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
1815 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
1816 LexicalScope Scope(*this, S.getSourceRange());
1817 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1818 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1820 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
1823 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
1824 LexicalScope Scope(*this, S.getSourceRange());
1825 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1826 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1828 Expr *Hint = nullptr;
1829 if (auto *HintClause = S.getSingleClause<OMPHintClause>())
1830 Hint = HintClause->getHint();
1831 CGM.getOpenMPRuntime().emitCriticalRegion(*this,
1832 S.getDirectiveName().getAsString(),
1833 CodeGen, S.getLocStart(), Hint);
1836 void CodeGenFunction::EmitOMPParallelForDirective(
1837 const OMPParallelForDirective &S) {
1838 // Emit directive as a combined directive that consists of two implicit
1839 // directives: 'parallel' with 'for' directive.
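// E.g. '#pragma omp parallel for' is emitted as a 'parallel' region whose
// body runs the worksharing loop of a 'for' directive.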
1840 LexicalScope Scope(*this, S.getSourceRange());
1841 (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
1842 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1843 CGF.EmitOMPWorksharingLoop(S);
1845 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
1848 void CodeGenFunction::EmitOMPParallelForSimdDirective(
1849 const OMPParallelForSimdDirective &S) {
1850 // Emit directive as a combined directive that consists of two implicit
1851 // directives: 'parallel' with 'for' directive.
1852 LexicalScope Scope(*this, S.getSourceRange());
1853 (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
1854 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1855 CGF.EmitOMPWorksharingLoop(S);
1857 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
1860 void CodeGenFunction::EmitOMPParallelSectionsDirective(
1861 const OMPParallelSectionsDirective &S) {
1862 // Emit directive as a combined directive that consists of two implicit
1863 // directives: 'parallel' with 'sections' directive.
1864 LexicalScope Scope(*this, S.getSourceRange());
1865 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1866 (void)CGF.EmitSections(S);
1868 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
1871 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
1872 // Emit outlined function for task construct.
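// A typical input is, e.g.:
//   #pragma omp task firstprivate(a) depend(in : b)
//     foo(a, b);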
1873 LexicalScope Scope(*this, S.getSourceRange());
1874 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
1875 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
1876 auto *I = CS->getCapturedDecl()->param_begin();
1877 auto *PartId = std::next(I);
1878 // The first function argument for tasks is a thread id, the second one is a
1879 // part id (0 for tied tasks, >=0 for untied task).
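// (Illustration: a tied task is always resumed by the thread that started it,
// so its part id stays 0; an untied task may be re-invoked with a later part
// id after a task scheduling point.)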
1880 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
1881 // Get list of private variables.
1882 llvm::SmallVector<const Expr *, 8> PrivateVars;
1883 llvm::SmallVector<const Expr *, 8> PrivateCopies;
1884 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
1885 auto IRef = C->varlist_begin();
1886 for (auto *IInit : C->private_copies()) {
1887 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
1888 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
1889 PrivateVars.push_back(*IRef);
1890 PrivateCopies.push_back(IInit);
1891 }
1892 ++IRef;
1893 }
1894 }
1895 EmittedAsPrivate.clear();
1896 // Get list of firstprivate variables.
1897 llvm::SmallVector<const Expr *, 8> FirstprivateVars;
1898 llvm::SmallVector<const Expr *, 8> FirstprivateCopies;
1899 llvm::SmallVector<const Expr *, 8> FirstprivateInits;
1900 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
1901 auto IRef = C->varlist_begin();
1902 auto IElemInitRef = C->inits().begin();
1903 for (auto *IInit : C->private_copies()) {
1904 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
1905 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
1906 FirstprivateVars.push_back(*IRef);
1907 FirstprivateCopies.push_back(IInit);
1908 FirstprivateInits.push_back(*IElemInitRef);
1910 ++IRef, ++IElemInitRef;
1913 // Build list of dependences.
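// E.g. 'depend(in : a) depend(out : b)' yields the pairs
// (OMPC_DEPEND_in, a) and (OMPC_DEPEND_out, b).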
1914 llvm::SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 8>
1915 Dependences;
1916 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
1917 for (auto *IRef : C->varlists()) {
1918 Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
1921 auto &&CodeGen = [PartId, &S, &PrivateVars, &FirstprivateVars](
1922 CodeGenFunction &CGF) {
1923 // Set proper addresses for generated private copies.
1924 auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
1925 OMPPrivateScope Scope(CGF);
1926 if (!PrivateVars.empty() || !FirstprivateVars.empty()) {
1927 auto *CopyFn = CGF.Builder.CreateLoad(
1928 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)));
1929 auto *PrivatesPtr = CGF.Builder.CreateLoad(
1930 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)));
1932 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16>
1933 PrivatePtrs;
1934 llvm::SmallVector<llvm::Value *, 16> CallArgs;
1935 CallArgs.push_back(PrivatesPtr);
1936 for (auto *E : PrivateVars) {
1937 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1938 Address PrivatePtr =
1939 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
1940 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
1941 CallArgs.push_back(PrivatePtr.getPointer());
1943 for (auto *E : FirstprivateVars) {
1944 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1945 Address PrivatePtr =
1946 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
1947 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
1948 CallArgs.push_back(PrivatePtr.getPointer());
1950 CGF.EmitRuntimeCall(CopyFn, CallArgs);
1951 for (auto &&Pair : PrivatePtrs) {
1952 Address Replacement(CGF.Builder.CreateLoad(Pair.second),
1953 CGF.getContext().getDeclAlign(Pair.first));
1954 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
1957 (void)Scope.Privatize();
1959 // TODO: emit code for untied tasks.
1961 CGF.EmitStmt(CS->getCapturedStmt());
1963 auto OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
1964 S, *I, OMPD_task, CodeGen);
1965 // Check if we should emit tied or untied task.
1966 bool Tied = !S.getSingleClause<OMPUntiedClause>();
1967 // Check if the task is final
1968 llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
1969 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
1970 // If the condition constant folds and can be elided, try to avoid emitting
1971 // the condition and the dead arm of the if/else.
1972 auto *Cond = Clause->getCondition();
1973 bool CondConstant;
1974 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
1975 Final.setInt(CondConstant);
1976 else
1977 Final.setPointer(EvaluateExprAsBool(Cond));
1978 } else {
1979 // By default the task is not final.
1980 Final.setInt(/*IntVal=*/false);
1981 }
1982 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
1983 const Expr *IfCond = nullptr;
1984 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
1985 if (C->getNameModifier() == OMPD_unknown ||
1986 C->getNameModifier() == OMPD_task) {
1987 IfCond = C->getCondition();
1988 break;
1989 }
1990 }
1991 CGM.getOpenMPRuntime().emitTaskCall(
1992 *this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy,
1993 CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars,
1994 FirstprivateCopies, FirstprivateInits, Dependences);
1997 void CodeGenFunction::EmitOMPTaskyieldDirective(
1998 const OMPTaskyieldDirective &S) {
1999 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
2002 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
2003 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
2006 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
2007 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
2010 void CodeGenFunction::EmitOMPTaskgroupDirective(
2011 const OMPTaskgroupDirective &S) {
2012 LexicalScope Scope(*this, S.getSourceRange());
2013 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2014 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2016 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
2019 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
2020 CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
2021 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) {
2022 return llvm::makeArrayRef(FlushClause->varlist_begin(),
2023 FlushClause->varlist_end());
2024 }
2025 return llvm::None;
2026 }(), S.getLocStart());
2029 void CodeGenFunction::EmitOMPDistributeDirective(
2030 const OMPDistributeDirective &S) {
2031 llvm_unreachable("CodeGen for 'omp distribute' is not supported yet.");
2034 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
2035 const CapturedStmt *S) {
2036 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
2037 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
2038 CGF.CapturedStmtInfo = &CapStmtInfo;
2039 auto *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
2040 Fn->addFnAttr(llvm::Attribute::NoInline);
2041 return Fn;
2042 }
2044 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
2045 if (!S.getAssociatedStmt())
2046 return;
2047 LexicalScope Scope(*this, S.getSourceRange());
2048 auto *C = S.getSingleClause<OMPSIMDClause>();
2049 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF) {
2050 if (C) {
2051 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2052 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
2053 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
2054 auto *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
2055 CGF.EmitNounwindRuntimeCall(OutlinedFn, CapturedVars);
2056 } else {
2057 CGF.EmitStmt(
2058 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2059 }
2060 };
2061 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C);
2064 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
2065 QualType SrcType, QualType DestType,
2066 SourceLocation Loc) {
2067 assert(CGF.hasScalarEvaluationKind(DestType) &&
2068 "DestType must have scalar evaluation kind.");
2069 assert(!Val.isAggregate() && "Must be a scalar or complex.");
2070 return Val.isScalar()
2071 ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType,
2072 Loc)
2073 : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
2074 DestType, Loc);
2075 }
2077 static CodeGenFunction::ComplexPairTy
2078 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
2079 QualType DestType, SourceLocation Loc) {
2080 assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
2081 "DestType must have complex evaluation kind.");
2082 CodeGenFunction::ComplexPairTy ComplexVal;
2083 if (Val.isScalar()) {
2084 // Convert the input element to the element type of the complex.
2085 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
2086 auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
2087 DestElementType, Loc);
2088 ComplexVal = CodeGenFunction::ComplexPairTy(
2089 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
2090 } else {
2091 assert(Val.isComplex() && "Must be a scalar or complex.");
2092 auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
2093 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
2094 ComplexVal.first = CGF.EmitScalarConversion(
2095 Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
2096 ComplexVal.second = CGF.EmitScalarConversion(
2097 Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
2098 }
2099 return ComplexVal;
2100 }
2102 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
2103 LValue LVal, RValue RVal) {
2104 if (LVal.isGlobalReg()) {
2105 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
2106 } else {
2107 CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent
2108 : llvm::Monotonic,
2109 LVal.isVolatile(), /*IsInit=*/false);
2110 }
2111 }
2113 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
2114 QualType RValTy, SourceLocation Loc) {
2115 switch (getEvaluationKind(LVal.getType())) {
2116 case TEK_Scalar:
2117 EmitStoreThroughLValue(RValue::get(convertToScalarValue(
2118 *this, RVal, RValTy, LVal.getType(), Loc)),
2119 LVal);
2120 break;
2121 case TEK_Complex:
2122 EmitStoreOfComplex(
2123 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
2124 /*isInit=*/false);
2125 break;
2126 case TEK_Aggregate:
2127 llvm_unreachable("Must be a scalar or complex.");
2128 }
2129 }
2131 static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
2132 const Expr *X, const Expr *V,
2133 SourceLocation Loc) {
2135 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
2136 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
2137 LValue XLValue = CGF.EmitLValue(X);
2138 LValue VLValue = CGF.EmitLValue(V);
2139 RValue Res = XLValue.isGlobalReg()
2140 ? CGF.EmitLoadOfLValue(XLValue, Loc)
2141 : CGF.EmitAtomicLoad(XLValue, Loc,
2142 IsSeqCst ? llvm::SequentiallyConsistent
2144 XLValue.isVolatile());
2145 // OpenMP, 2.12.6, atomic Construct
2146 // Any atomic construct with a seq_cst clause forces the atomically
2147 // performed operation to include an implicit flush operation without a
2148 // list.
2149 if (IsSeqCst)
2150 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
2151 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
2154 static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
2155 const Expr *X, const Expr *E,
2156 SourceLocation Loc) {
2158 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
2159 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
2160 // OpenMP, 2.12.6, atomic Construct
2161 // Any atomic construct with a seq_cst clause forces the atomically
2162 // performed operation to include an implicit flush operation without a
2163 // list.
2164 if (IsSeqCst)
2165 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
2168 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
2170 BinaryOperatorKind BO,
2171 llvm::AtomicOrdering AO,
2172 bool IsXLHSInRHSPart) {
2173 auto &Context = CGF.CGM.getContext();
2174 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
2175 // expression is simple and atomic is allowed for the given type for the
2176 // target platform.
2177 if (BO == BO_Comma || !Update.isScalar() ||
2178 !Update.getScalarVal()->getType()->isIntegerTy() ||
2179 !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
2180 (Update.getScalarVal()->getType() !=
2181 X.getAddress().getElementType())) ||
2182 !X.getAddress().getElementType()->isIntegerTy() ||
2183 !Context.getTargetInfo().hasBuiltinAtomic(
2184 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
2185 return std::make_pair(false, RValue::get(nullptr));
2187 llvm::AtomicRMWInst::BinOp RMWOp;
2188 switch (BO) {
2189 case BO_Add:
2190 RMWOp = llvm::AtomicRMWInst::Add;
2191 break;
2192 case BO_Sub:
2193 if (!IsXLHSInRHSPart)
2194 return std::make_pair(false, RValue::get(nullptr));
2195 RMWOp = llvm::AtomicRMWInst::Sub;
2196 break;
2197 case BO_And:
2198 RMWOp = llvm::AtomicRMWInst::And;
2199 break;
2200 case BO_Or:
2201 RMWOp = llvm::AtomicRMWInst::Or;
2202 break;
2203 case BO_Xor:
2204 RMWOp = llvm::AtomicRMWInst::Xor;
2205 break;
2206 case BO_LT:
2207 RMWOp = X.getType()->hasSignedIntegerRepresentation()
2208 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
2209 : llvm::AtomicRMWInst::Max)
2210 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
2211 : llvm::AtomicRMWInst::UMax);
2212 break;
2213 case BO_GT:
2214 RMWOp = X.getType()->hasSignedIntegerRepresentation()
2215 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
2216 : llvm::AtomicRMWInst::Min)
2217 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
2218 : llvm::AtomicRMWInst::UMin);
2219 break;
2220 case BO_Assign:
2221 RMWOp = llvm::AtomicRMWInst::Xchg;
2230 return std::make_pair(false, RValue::get(nullptr));
2248 llvm_unreachable("Unsupported atomic update operation");
2250 auto *UpdateVal = Update.getScalarVal();
2251 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
2252 UpdateVal = CGF.Builder.CreateIntCast(
2253 IC, X.getAddress().getElementType(),
2254 X.getType()->hasSignedIntegerRepresentation());
2256 auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
2257 return std::make_pair(true, RValue::get(Res));
2260 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
2261 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
2262 llvm::AtomicOrdering AO, SourceLocation Loc,
2263 const llvm::function_ref<RValue(RValue)> &CommonGen) {
2264 // Update expressions are allowed to have the following forms:
2265 // x binop= expr; -> xrval binop expr;
2266 // x++, ++x -> xrval + 1;
2267 // x--, --x -> xrval - 1;
2268 // x = x binop expr; -> xrval binop expr;
2269 // x = expr Op x; -> expr binop xrval;
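// For example, 'x += expr;' on a simple integer lvalue 'x' is expected to
// lower to a single 'atomicrmw add' via emitOMPAtomicRMW below; otherwise the
// generic compare-and-swap update path is taken.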
2270 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
2271 if (!Res.first) {
2272 if (X.isGlobalReg()) {
2273 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
2274 // 'xrval'.
2275 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
2276 } else {
2277 // Perform compare-and-swap procedure.
2278 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
2279 }
2280 }
2281 return Res;
2282 }
2284 static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
2285 const Expr *X, const Expr *E,
2286 const Expr *UE, bool IsXLHSInRHSPart,
2287 SourceLocation Loc) {
2288 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
2289 "Update expr in 'atomic update' must be a binary operator.");
2290 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
2291 // Update expressions are allowed to have the following forms:
2292 // x binop= expr; -> xrval binop expr;
2293 // x++, ++x -> xrval + 1;
2294 // x--, --x -> xrval - 1;
2295 // x = x binop expr; -> xrval binop expr;
2296 // x = expr Op x; -> expr binop xrval;
2297 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
2298 LValue XLValue = CGF.EmitLValue(X);
2299 RValue ExprRValue = CGF.EmitAnyExpr(E);
2300 auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
2301 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
2302 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
2303 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
2304 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
2305 auto &&Gen =
2306 [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
2307 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
2308 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
2309 return CGF.EmitAnyExpr(UE);
2310 };
2311 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
2312 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
2313 // OpenMP, 2.12.6, atomic Construct
2314 // Any atomic construct with a seq_cst clause forces the atomically
2315 // performed operation to include an implicit flush operation without a
2316 // list.
2317 if (IsSeqCst)
2318 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
2321 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
2322 QualType SourceType, QualType ResType,
2323 SourceLocation Loc) {
2324 switch (CGF.getEvaluationKind(ResType)) {
2325 case TEK_Scalar:
2326 return RValue::get(
2327 convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
2328 case TEK_Complex: {
2329 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
2330 return RValue::getComplex(Res.first, Res.second);
2331 }
2332 case TEK_Aggregate:
2333 break;
2334 }
2335 llvm_unreachable("Must be a scalar or complex.");
2336 }
2338 static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
2339 bool IsPostfixUpdate, const Expr *V,
2340 const Expr *X, const Expr *E,
2341 const Expr *UE, bool IsXLHSInRHSPart,
2342 SourceLocation Loc) {
2343 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
2344 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
2345 RValue NewVVal;
2346 LValue VLValue = CGF.EmitLValue(V);
2347 LValue XLValue = CGF.EmitLValue(X);
2348 RValue ExprRValue = CGF.EmitAnyExpr(E);
2349 auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
2350 QualType NewVValType;
2351 if (UE) {
2352 // 'x' is updated with some additional value.
2353 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
2354 "Update expr in 'atomic capture' must be a binary operator.");
2355 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
2356 // Update expressions are allowed to have the following forms:
2357 // x binop= expr; -> xrval binop expr;
2358 // x++, ++x -> xrval + 1;
2359 // x--, --x -> xrval - 1;
2360 // x = x binop expr; -> xrval binop expr;
2361 // x = expr Op x; -> expr binop xrval;
2362 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
2363 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
2364 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
2365 NewVValType = XRValExpr->getType();
2366 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
2367 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
2368 IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
2369 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
2370 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
2371 RValue Res = CGF.EmitAnyExpr(UE);
2372 NewVVal = IsPostfixUpdate ? XRValue : Res;
2373 return Res;
2374 };
2375 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
2376 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
2377 if (Res.first) {
2378 // 'atomicrmw' instruction was generated.
2379 if (IsPostfixUpdate) {
2380 // Use old value from 'atomicrmw'.
2381 NewVVal = Res.second;
2382 } else {
2383 // 'atomicrmw' does not provide new value, so evaluate it using old
2384 // 'x' value.
2385 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
2386 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
2387 NewVVal = CGF.EmitAnyExpr(UE);
2388 }
2389 }
2390 } else {
2391 // 'x' is simply rewritten with some 'expr'.
2392 NewVValType = X->getType().getNonReferenceType();
2393 ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
2394 X->getType().getNonReferenceType(), Loc);
2395 auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
2396 NewVVal = XRValue;
2397 return ExprRValue;
2398 };
2399 // Try to perform atomicrmw xchg, otherwise simple exchange.
2400 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
2401 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
2402 Loc, Gen);
2403 if (Res.first) {
2404 // 'atomicrmw' instruction was generated.
2405 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
2406 }
2407 }
2408 // Emit post-update store to 'v' of old/new 'x' value.
2409 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
2410 // OpenMP, 2.12.6, atomic Construct
2411 // Any atomic construct with a seq_cst clause forces the atomically
2412 // performed operation to include an implicit flush operation without a
2413 // list.
2414 if (IsSeqCst)
2415 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
2418 static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
2419 bool IsSeqCst, bool IsPostfixUpdate,
2420 const Expr *X, const Expr *V, const Expr *E,
2421 const Expr *UE, bool IsXLHSInRHSPart,
2422 SourceLocation Loc) {
2423 switch (Kind) {
2424 case OMPC_read:
2425 EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
2426 break;
2427 case OMPC_write:
2428 EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
2429 break;
2430 case OMPC_unknown:
2431 case OMPC_update:
2432 EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
2433 break;
2434 case OMPC_capture:
2435 EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
2436 IsXLHSInRHSPart, Loc);
2437 break;
2440 case OMPC_num_threads:
2442 case OMPC_firstprivate:
2443 case OMPC_lastprivate:
2444 case OMPC_reduction:
2454 case OMPC_copyprivate:
2456 case OMPC_proc_bind:
2461 case OMPC_threadprivate:
2463 case OMPC_mergeable:
2468 case OMPC_num_teams:
2469 case OMPC_thread_limit:
2471 case OMPC_grainsize:
2473 case OMPC_num_tasks:
2475 llvm_unreachable("Clause is not allowed in 'omp atomic'.");
2479 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
2480 bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
2481 OpenMPClauseKind Kind = OMPC_unknown;
2482 for (auto *C : S.clauses()) {
2483 // Find first clause (skip seq_cst clause, if it is first).
2484 if (C->getClauseKind() != OMPC_seq_cst) {
2485 Kind = C->getClauseKind();
2486 break;
2487 }
2488 }
2490 const auto *CS =
2491 S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
2492 if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
2493 enterFullExpression(EWC);
2495 // Processing for statements under 'atomic capture'.
2496 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
2497 for (const auto *C : Compound->body()) {
2498 if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
2499 enterFullExpression(EWC);
2504 LexicalScope Scope(*this, S.getSourceRange());
2505 auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF) {
2506 CGF.EmitStopPoint(CS);
2507 EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
2508 S.getV(), S.getExpr(), S.getUpdateExpr(),
2509 S.isXLHSInRHSPart(), S.getLocStart());
2511 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
2514 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
2515 LexicalScope Scope(*this, S.getSourceRange());
2516 const CapturedStmt &CS = *cast<CapturedStmt>(S.getAssociatedStmt());
2518 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
2519 GenerateOpenMPCapturedVars(CS, CapturedVars);
2521 llvm::Function *Fn = nullptr;
2522 llvm::Constant *FnID = nullptr;
2524 // Check if we have any if clause associated with the directive.
2525 const Expr *IfCond = nullptr;
2527 if (auto *C = S.getSingleClause<OMPIfClause>()) {
2528 IfCond = C->getCondition();
2531 // Check if we have any device clause associated with the directive.
2532 const Expr *Device = nullptr;
2533 if (auto *C = S.getSingleClause<OMPDeviceClause>()) {
2534 Device = C->getDevice();
2537 // Check if we have an if clause whose conditional always evaluates to false
2538 // or if we do not have any targets specified. If so the target region is not
2539 // an offload entry point.
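// For instance, '#pragma omp target if(0)' is resolved at compile time and
// the region is not registered as an offload entry point.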
2540 bool IsOffloadEntry = true;
2541 if (IfCond) {
2542 bool Val;
2543 if (ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
2544 IsOffloadEntry = false;
2545 }
2546 if (CGM.getLangOpts().OMPTargetTriples.empty())
2547 IsOffloadEntry = false;
2549 assert(CurFuncDecl && "No parent declaration for target region!");
2550 StringRef ParentName;
2551 // In case we have Ctors/Dtors we use the complete type variant to produce
2552 // the mangling of the device outlined kernel.
2553 if (auto *D = dyn_cast<CXXConstructorDecl>(CurFuncDecl))
2554 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
2555 else if (auto *D = dyn_cast<CXXDestructorDecl>(CurFuncDecl))
2556 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
2557 else
2558 ParentName =
2559 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CurFuncDecl)));
2561 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
2562 IsOffloadEntry);
2564 CGM.getOpenMPRuntime().emitTargetCall(*this, S, Fn, FnID, IfCond, Device,
2565 CapturedVars);
2566 }
2568 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
2569 llvm_unreachable("CodeGen for 'omp teams' is not supported yet.");
2572 void CodeGenFunction::EmitOMPCancellationPointDirective(
2573 const OMPCancellationPointDirective &S) {
2574 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
2575 S.getCancelRegion());
2578 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
2579 const Expr *IfCond = nullptr;
2580 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
2581 if (C->getNameModifier() == OMPD_unknown ||
2582 C->getNameModifier() == OMPD_cancel) {
2583 IfCond = C->getCondition();
2584 break;
2585 }
2586 }
2587 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), IfCond,
2588 S.getCancelRegion());
2591 CodeGenFunction::JumpDest
2592 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
2593 if (Kind == OMPD_parallel || Kind == OMPD_task)
2594 return ReturnBlock;
2595 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
2596 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for);
2597 return BreakContinueStack.back().BreakBlock;
2600 // Generate the instructions for '#pragma omp target data' directive.
2601 void CodeGenFunction::EmitOMPTargetDataDirective(
2602 const OMPTargetDataDirective &S) {
2603 // emit the code inside the construct for now
2604 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2605 CGM.getOpenMPRuntime().emitInlinedDirective(
2606 *this, OMPD_target_data,
2607 [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });
2610 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
2611 // emit the code inside the construct for now
2612 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2613 CGM.getOpenMPRuntime().emitInlinedDirective(
2614 *this, OMPD_taskloop,
2615 [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });
2618 void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
2619 const OMPTaskLoopSimdDirective &S) {
2620 // emit the code inside the construct for now
2621 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2622 CGM.getOpenMPRuntime().emitInlinedDirective(
2623 *this, OMPD_taskloop_simd,
2624 [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });