1 //===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This contains code to emit OpenMP nodes as LLVM code.
12 //===----------------------------------------------------------------------===//
14 #include "CGCleanup.h"
15 #include "CGOpenMPRuntime.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "TargetInfo.h"
19 #include "clang/AST/Stmt.h"
20 #include "clang/AST/StmtOpenMP.h"
21 #include "clang/AST/DeclOpenMP.h"
22 #include "llvm/IR/CallSite.h"
23 using namespace clang;
24 using namespace CodeGen;
27 /// Lexical scope for OpenMP executable constructs, that handles correct codegen
28 /// for captured expressions.
// NOTE(review): this listing has dropped lines (the embedded line numbers jump,
// e.g. 36->38, 40->47) -- closing braces, the 'public:' specifier and parts of
// statements are missing. Verify against the upstream clang sources.
29 class OMPLexicalScope final : public CodeGenFunction::LexicalScope {
// Emit the pre-init statements attached to the directive's clauses so that
// captured expressions are evaluated before the construct itself.
30 void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
31 for (const auto *C : S.clauses()) {
32 if (auto *CPI = OMPClauseWithPreInit::get(C)) {
33 if (auto *PreInit = cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
34 for (const auto *I : PreInit->decls()) {
// Decls without OMPCaptureNoInitAttr are emitted with their initializer;
// the remaining path only allocates storage and registers cleanups.
35 if (!I->hasAttr<OMPCaptureNoInitAttr>())
36 CGF.EmitVarDecl(cast<VarDecl>(*I));
38 CodeGenFunction::AutoVarEmission Emission =
39 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
40 CGF.EmitAutoVarCleanups(Emission);
// Private scope used to remap variables captured by the associated
// captured statement when the construct is emitted inline.
47 CodeGenFunction::OMPPrivateScope InlinedShareds;
// Returns true if VD is captured by an enclosing lambda, captured
// statement, or block in the current function.
49 static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
50 return CGF.LambdaCaptureFields.lookup(VD) ||
51 (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
52 (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
56 OMPLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S,
57 bool AsInlined = false)
58 : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
60 emitPreInitStmt(CGF, S);
62 if (S.hasAssociatedStmt()) {
63 auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
// Remap each variable captured by the associated captured statement so
// references inside the construct resolve through EmitLValue on a fresh
// DeclRefExpr (handles lambda/block/global captures correctly).
64 for (auto &C : CS->captures()) {
65 if (C.capturesVariable() || C.capturesVariableByCopy()) {
66 auto *VD = C.getCapturedVar();
67 DeclRefExpr DRE(const_cast<VarDecl *>(VD),
68 isCapturedVar(CGF, VD) ||
69 (CGF.CapturedStmtInfo &&
70 InlinedShareds.isGlobalVarCaptured(VD)),
71 VD->getType().getNonReferenceType(), VK_LValue,
73 InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
74 return CGF.EmitLValue(&DRE).getAddress();
78 (void)InlinedShareds.Privatize();
84 /// Private scope for OpenMP loop-based directives, that supports capturing
85 /// of used expression from loop statement.
86 class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
// Emit the declarations created while building the loop's iteration-space
// expressions (the directive's "pre-inits") before the loop body.
87 void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
// NOTE(review): S is already an OMPLoopDirective, so this dyn_cast can never
// fail; it reads like a leftover from a more general parameter type.
88 if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
89 if (auto *PreInits = cast_or_null<DeclStmt>(LD->getPreInits())) {
90 for (const auto *I : PreInits->decls())
91 CGF.EmitVarDecl(cast<VarDecl>(*I));
97 OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
98 : CodeGenFunction::RunCleanupsScope(CGF) {
99 emitPreInitStmt(CGF, S);
// Return the size in bytes of Ty as an llvm::Value. For variable-length
// arrays the size is computed at run time from the recorded VLA dimensions.
105 llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
106 auto &C = getContext();
107 llvm::Value *Size = nullptr;
108 auto SizeInChars = C.getTypeSizeInChars(Ty);
109 if (SizeInChars.isZero()) {
110 // getTypeSizeInChars() returns 0 for a VLA.
// Multiply all variable dimensions together, peeling array levels until
// the remaining element type has a constant size.
111 while (auto *VAT = C.getAsVariableArrayType(Ty)) {
112 llvm::Value *ArraySize;
113 std::tie(ArraySize, Ty) = getVLASize(VAT);
114 Size = Size ? Builder.CreateNUWMul(Size, ArraySize) : ArraySize;
116 SizeInChars = C.getTypeSizeInChars(Ty);
117 if (SizeInChars.isZero())
118 return llvm::ConstantInt::get(SizeTy, /*V=*/0);
// Scale the run-time element count by the constant element size.
119 Size = Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
// Common case: completely constant-sized type.
121 Size = CGM.getSize(SizeInChars);
// Collect, in CapturedVars, the values to pass to the outlined function for
// each capture of S: VLA sizes, 'this', by-copy scalars (coerced through a
// uintptr temporary when the field is not a pointer), and addresses for
// by-reference captures.
125 void CodeGenFunction::GenerateOpenMPCapturedVars(
126 const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
127 const RecordDecl *RD = S.getCapturedRecordDecl();
128 auto CurField = RD->field_begin();
129 auto CurCap = S.captures().begin();
// Walk the capture initializers, record fields and captures in lock step.
130 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
131 E = S.capture_init_end();
132 I != E; ++I, ++CurField, ++CurCap) {
133 if (CurField->hasCapturedVLAType()) {
// VLA dimension: pass the size value computed earlier for this expr.
134 auto VAT = CurField->getCapturedVLAType();
135 auto *Val = VLASizeMap[VAT->getSizeExpr()];
136 CapturedVars.push_back(Val);
137 } else if (CurCap->capturesThis())
138 CapturedVars.push_back(CXXThisValue);
139 else if (CurCap->capturesVariableByCopy()) {
141 EmitLoadOfLValue(EmitLValue(*I), SourceLocation()).getScalarVal();
143 // If the field is not a pointer, we need to save the actual value
144 // and load it as a void pointer.
145 if (!CurField->getType()->isAnyPointerType()) {
146 auto &Ctx = getContext();
147 auto DstAddr = CreateMemTemp(
148 Ctx.getUIntPtrType(),
149 Twine(CurCap->getCapturedVar()->getName()) + ".casted");
150 LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
// Reinterpret the uintptr temporary as a pointer to the source type so
// the value can be stored with its natural representation.
152 auto *SrcAddrVal = EmitScalarConversion(
153 DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
154 Ctx.getPointerType(CurField->getType()), SourceLocation());
156 MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());
158 // Store the value using the source type pointer.
159 EmitStoreThroughLValue(RValue::get(CV), SrcLV);
161 // Load the value using the destination type pointer.
162 CV = EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
164 CapturedVars.push_back(CV);
// Remaining case: capture by reference -- pass the variable's address.
166 assert(CurCap->capturesVariable() && "Expected capture by reference.");
167 CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
// Reinterpret an lvalue holding a uintptr-coerced argument as an address of
// DstType. If the destination is a reference type, additionally materialize a
// temporary holding the reference itself and return that temporary's address.
172 static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
173 StringRef Name, LValue AddrLV,
174 bool isReferenceType = false) {
175 ASTContext &Ctx = CGF.getContext();
// Scalar conversion from uintptr back to a DstType pointer.
177 auto *CastedPtr = CGF.EmitScalarConversion(
178 AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
179 Ctx.getPointerType(DstType), SourceLocation());
181 CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
184 // If we are dealing with references we need to return the address of the
185 // reference instead of the reference of the value.
186 if (isReferenceType) {
187 QualType RefType = Ctx.getLValueReferenceType(DstType);
188 auto *RefVal = TmpAddr.getPointer();
189 TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
190 auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
191 CGF.EmitStoreThroughLValue(RValue::get(RefVal), TmpLVal, /*isInit*/ true);
// Build the outlined helper function for a captured statement: construct its
// argument list from the capture record, create the llvm::Function, and emit
// a prologue that re-binds every capture to a local address before emitting
// the captured body.
// NOTE(review): the return-type line of this definition (and several interior
// lines) are missing from this listing -- verify against upstream clang.
198 CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
201 "CapturedStmtInfo should be set when generating the captured function");
202 const CapturedDecl *CD = S.getCapturedDecl();
203 const RecordDecl *RD = S.getCapturedRecordDecl();
204 assert(CD->hasBody() && "missing CapturedDecl body");
206 // Build the argument list.
207 ASTContext &Ctx = CGM.getContext();
208 FunctionArgList Args;
209 Args.append(CD->param_begin(),
210 std::next(CD->param_begin(), CD->getContextParamPosition()));
211 auto I = S.captures().begin();
// One implicit parameter per field of the capture record.
212 for (auto *FD : RD->fields()) {
213 QualType ArgType = FD->getType();
214 IdentifierInfo *II = nullptr;
215 VarDecl *CapVar = nullptr;
217 // If this is a capture by copy and the type is not a pointer, the outlined
218 // function argument type should be uintptr and the value properly casted to
219 // uintptr. This is necessary given that the runtime library is only able to
220 // deal with pointers. We can pass in the same way the VLA type sizes to the
221 // outlined function.
222 if ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
223 I->capturesVariableArrayType())
224 ArgType = Ctx.getUIntPtrType();
// Pick a parameter name from the captured entity.
226 if (I->capturesVariable() || I->capturesVariableByCopy()) {
227 CapVar = I->getCapturedVar();
228 II = CapVar->getIdentifier();
229 } else if (I->capturesThis())
230 II = &getContext().Idents.get("this");
232 assert(I->capturesVariableArrayType());
233 II = &getContext().Idents.get("vla");
235 if (ArgType->isVariablyModifiedType()) {
236 bool IsReference = ArgType->isLValueReferenceType();
238 getContext().getCanonicalParamType(ArgType.getNonReferenceType());
239 if (IsReference && !ArgType->isPointerType()) {
240 ArgType = getContext().getLValueReferenceType(
241 ArgType, /*SpelledAsLValue=*/false);
244 Args.push_back(ImplicitParamDecl::Create(getContext(), nullptr,
245 FD->getLocation(), II, ArgType));
249 std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
252 // Create the function declaration.
253 FunctionType::ExtInfo ExtInfo;
254 const CGFunctionInfo &FuncInfo =
255 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
256 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
// The helper is internal to the module; name comes from CapturedStmtInfo.
258 llvm::Function *F = llvm::Function::Create(
259 FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
260 CapturedStmtInfo->getHelperName(), &CGM.getModule());
261 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
263 F->addFnAttr(llvm::Attribute::NoUnwind);
265 // Generate the function.
266 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
267 CD->getBody()->getLocStart());
268 unsigned Cnt = CD->getContextParamPosition();
269 I = S.captures().begin();
// Prologue: bind each capture to a usable local address/value.
270 for (auto *FD : RD->fields()) {
271 // If we are capturing a pointer by copy we don't need to do anything, just
272 // use the value that we get from the arguments.
273 if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
274 const VarDecl *CurVD = I->getCapturedVar();
275 Address LocalAddr = GetAddrOfLocalVar(Args[Cnt]);
276 // If the variable is a reference we need to materialize it here.
277 if (CurVD->getType()->isReferenceType()) {
278 Address RefAddr = CreateMemTemp(CurVD->getType(), getPointerAlign(),
279 ".materialized_ref");
280 EmitStoreOfScalar(LocalAddr.getPointer(), RefAddr, /*Volatile=*/false,
284 setAddrOfLocalVar(CurVD, LocalAddr);
291 MakeAddrLValue(GetAddrOfLocalVar(Args[Cnt]), Args[Cnt]->getType(),
292 AlignmentSource::Decl);
293 if (FD->hasCapturedVLAType()) {
// VLA size argument: cast back from uintptr and record it in VLASizeMap
// so later getTypeSize()/array emission can find it.
294 LValue CastedArgLVal =
295 MakeAddrLValue(castValueFromUintptr(*this, FD->getType(),
296 Args[Cnt]->getName(), ArgLVal),
297 FD->getType(), AlignmentSource::Decl);
299 EmitLoadOfLValue(CastedArgLVal, SourceLocation()).getScalarVal();
300 auto VAT = FD->getCapturedVLAType();
301 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
302 } else if (I->capturesVariable()) {
// By-reference capture: dereference the incoming pointer/reference to
// obtain the variable's address.
303 auto *Var = I->getCapturedVar();
304 QualType VarTy = Var->getType();
305 Address ArgAddr = ArgLVal.getAddress();
306 if (!VarTy->isReferenceType()) {
307 if (ArgLVal.getType()->isLValueReferenceType()) {
308 ArgAddr = EmitLoadOfReference(
309 ArgAddr, ArgLVal.getType()->castAs<ReferenceType>());
310 } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
311 assert(ArgLVal.getType()->isPointerType());
312 ArgAddr = EmitLoadOfPointer(
313 ArgAddr, ArgLVal.getType()->castAs<PointerType>());
317 Var, Address(ArgAddr.getPointer(), getContext().getDeclAlign(Var)));
318 } else if (I->capturesVariableByCopy()) {
// By-copy non-pointer capture: reinterpret the uintptr argument slot.
319 assert(!FD->getType()->isAnyPointerType() &&
320 "Not expecting a captured pointer.");
321 auto *Var = I->getCapturedVar();
322 QualType VarTy = Var->getType();
323 setAddrOfLocalVar(Var, castValueFromUintptr(*this, FD->getType(),
324 Args[Cnt]->getName(), ArgLVal,
325 VarTy->isReferenceType()));
327 // If 'this' is captured, load it into CXXThisValue.
328 assert(I->capturesThis());
330 EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation()).getScalarVal();
336 PGO.assignRegionCounters(GlobalDecl(CD), F);
337 CapturedStmtInfo->EmitBody(*this, CD->getBody());
338 FinishFunction(CD->getBodyRBrace());
343 //===----------------------------------------------------------------------===//
344 // OpenMP Directive Emission
345 //===----------------------------------------------------------------------===//
// Copy an array element-by-element from SrcAddr to DestAddr, invoking CopyGen
// for each (dest, src) element pair. Used for arrays whose element type needs
// a non-trivial copy. Emits a while-do style loop over the elements.
346 void CodeGenFunction::EmitOMPAggregateAssign(
347 Address DestAddr, Address SrcAddr, QualType OriginalType,
348 const llvm::function_ref<void(Address, Address)> &CopyGen) {
349 // Perform element-by-element initialization.
352 // Drill down to the base element type on both arrays.
353 auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
354 auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
355 SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
357 auto SrcBegin = SrcAddr.getPointer();
358 auto DestBegin = DestAddr.getPointer();
359 // Cast from pointer to array type to pointer to single element.
360 auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
361 // The basic structure here is a while-do loop.
362 auto BodyBB = createBasicBlock("omp.arraycpy.body");
363 auto DoneBB = createBasicBlock("omp.arraycpy.done");
// Skip the loop entirely for zero-length arrays.
365 Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
366 Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
368 // Enter the loop body, making that address the current address.
369 auto EntryBB = Builder.GetInsertBlock();
372 CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
// PHIs carry the current source/destination element pointers around the
// loop; initial incoming values are the array begins from EntryBB.
374 llvm::PHINode *SrcElementPHI =
375 Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
376 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
377 Address SrcElementCurrent =
378 Address(SrcElementPHI,
379 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
381 llvm::PHINode *DestElementPHI =
382 Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
383 DestElementPHI->addIncoming(DestBegin, EntryBB);
384 Address DestElementCurrent =
385 Address(DestElementPHI,
386 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Let the caller emit the per-element copy.
389 CopyGen(DestElementCurrent, SrcElementCurrent);
391 // Shift the address forward by one element.
392 auto DestElementNext = Builder.CreateConstGEP1_32(
393 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
394 auto SrcElementNext = Builder.CreateConstGEP1_32(
395 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
396 // Check whether we've reached the end.
398 Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
399 Builder.CreateCondBr(Done, DoneBB, BodyBB);
400 DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
401 SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());
404 EmitBlock(DoneBB, /*IsFinished=*/true);
407 /// Check if the combiner is a call to UDR combiner and if it is so return the
408 /// UDR decl used for reduction.
// NOTE(review): the return statements of this function are missing from this
// listing (line numbers jump past them) -- verify against upstream clang.
409 static const OMPDeclareReductionDecl *
410 getReductionInit(const Expr *ReductionOp) {
// Pattern-match: CallExpr whose callee is an OpaqueValueExpr wrapping a
// DeclRefExpr to an OMPDeclareReductionDecl.
411 if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
412 if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
414 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
415 if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
// Initialize a reduction private copy. If the user-defined reduction has an
// initializer, emit a call to it with LHS/RHS remapped to Private/Original;
// otherwise default-initialize Private from a null constant of type Ty.
420 static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
421 const OMPDeclareReductionDecl *DRD,
423 Address Private, Address Original,
425 if (DRD->getInitializer()) {
426 std::pair<llvm::Function *, llvm::Function *> Reduction =
427 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
428 auto *CE = cast<CallExpr>(InitOp);
429 auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
// The initializer call's two arguments are address-of expressions over
// DeclRefExprs to the omp_priv / omp_orig placeholder variables.
430 const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
431 const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
432 auto *LHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
433 auto *RHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
// Remap the placeholders to the actual private/original addresses before
// emitting the initializer call.
434 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
435 PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
436 [=]() -> Address { return Private; });
437 PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
438 [=]() -> Address { return Original; });
439 (void)PrivateScope.Privatize();
440 RValue Func = RValue::get(Reduction.second);
441 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
442 CGF.EmitIgnoredExpr(InitOp);
// No user initializer: zero-initialize from a private global null constant.
444 llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
445 auto *GV = new llvm::GlobalVariable(
446 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
447 llvm::GlobalValue::PrivateLinkage, Init, ".init");
448 LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
450 switch (CGF.getEvaluationKind(Ty)) {
452 InitRVal = CGF.EmitLoadOfLValue(LV, SourceLocation());
456 RValue::getComplex(CGF.EmitLoadOfComplex(LV, SourceLocation()));
459 InitRVal = RValue::getAggregate(LV.getAddress());
// Wrap the loaded RValue in an opaque expression and store it to Private.
462 OpaqueValueExpr OVE(SourceLocation(), Ty, VK_RValue);
463 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
464 CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
465 /*IsInitializer=*/false);
469 /// \brief Emit initialization of arrays of complex types.
470 /// \param DestAddr Address of the array.
471 /// \param Type Type of array.
472 /// \param Init Initial expression of array.
473 /// \param SrcAddr Address of the original array.
474 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
475 QualType Type, const Expr *Init,
476 Address SrcAddr = Address::invalid()) {
// If Init is a user-defined-reduction combiner call, use the UDR decl so the
// per-element init can invoke the UDR initializer with src/dest remapped.
477 auto *DRD = getReductionInit(Init);
478 // Perform element-by-element initialization.
481 // Drill down to the base element type on both arrays.
482 auto ArrayTy = Type->getAsArrayTypeUnsafe();
483 auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
485 CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
488 CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
// SrcBegin stays null when no source array was supplied (plain init).
490 llvm::Value *SrcBegin = nullptr;
492 SrcBegin = SrcAddr.getPointer();
493 auto DestBegin = DestAddr.getPointer();
494 // Cast from pointer to array type to pointer to single element.
495 auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
496 // The basic structure here is a while-do loop.
497 auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
498 auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
// Skip the loop for zero-length arrays.
500 CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
501 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
503 // Enter the loop body, making that address the current address.
504 auto EntryBB = CGF.Builder.GetInsertBlock();
505 CGF.EmitBlock(BodyBB);
507 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
// Source element PHI only exists when a source array is present.
509 llvm::PHINode *SrcElementPHI = nullptr;
510 Address SrcElementCurrent = Address::invalid();
512 SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
513 "omp.arraycpy.srcElementPast");
514 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
516 Address(SrcElementPHI,
517 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
519 llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
520 DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
521 DestElementPHI->addIncoming(DestBegin, EntryBB);
522 Address DestElementCurrent =
523 Address(DestElementPHI,
524 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Per-element init: UDR initializer when available, plain expression
// emission otherwise. InitScope cleans up per-element temporaries.
528 CodeGenFunction::RunCleanupsScope InitScope(CGF);
529 if (DRD && (DRD->getInitializer() || !Init)) {
530 emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
531 SrcElementCurrent, ElementTy);
533 CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
534 /*IsInitializer=*/false);
538 // Shift the address forward by one element.
539 auto SrcElementNext = CGF.Builder.CreateConstGEP1_32(
540 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
541 SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
544 // Shift the address forward by one element.
545 auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
546 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
547 // Check whether we've reached the end.
549 CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
550 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
551 DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
554 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
// Emit a copy from SrcAddr to DestAddr using the copy expression Copy, which
// references the pseudo variables SrcVD/DestVD. Arrays with trivial assignment
// become a memcpy; complex arrays are copied element-by-element; everything
// else is copied by emitting Copy with the pseudo variables remapped.
557 void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
558 Address SrcAddr, const VarDecl *DestVD,
559 const VarDecl *SrcVD, const Expr *Copy) {
560 if (OriginalType->isArrayType()) {
561 auto *BO = dyn_cast<BinaryOperator>(Copy);
562 if (BO && BO->getOpcode() == BO_Assign) {
563 // Perform simple memcpy for simple copying.
564 EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
566 // For arrays with complex element types perform element by element
568 EmitOMPAggregateAssign(
569 DestAddr, SrcAddr, OriginalType,
570 [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
571 // Working with the single array element, so have to remap
572 // destination and source variables to corresponding array
574 CodeGenFunction::OMPPrivateScope Remap(*this);
575 Remap.addPrivate(DestVD, [DestElement]() -> Address {
579 SrcVD, [SrcElement]() -> Address { return SrcElement; });
580 (void)Remap.Privatize();
581 EmitIgnoredExpr(Copy);
585 // Remap pseudo source variable to private copy.
586 CodeGenFunction::OMPPrivateScope Remap(*this);
587 Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
588 Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
589 (void)Remap.Privatize();
590 // Emit copying of the whole variable.
591 EmitIgnoredExpr(Copy);
// Emit private copies for the directive's 'firstprivate' variables, each
// initialized from the original. Returns true when at least one firstprivate
// variable is also lastprivate (callers must then emit the final copy-back).
595 bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
596 OMPPrivateScope &PrivateScope) {
597 if (!HaveInsertPoint())
599 bool FirstprivateIsLastprivate = false;
// Collect vars that also appear in lastprivate clauses -- those must not be
// skipped even when captured by copy.
600 llvm::DenseSet<const VarDecl *> Lastprivates;
601 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
602 for (const auto *D : C->varlists())
604 cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
606 llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
607 CGCapturedStmtInfo CapturesInfo(cast<CapturedStmt>(*D.getAssociatedStmt()));
608 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
609 auto IRef = C->varlist_begin();
610 auto InitsRef = C->inits().begin();
611 for (auto IInit : C->private_copies()) {
612 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
613 bool ThisFirstprivateIsLastprivate =
614 Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
615 auto *CapFD = CapturesInfo.lookup(OrigVD);
616 auto *FD = CapturedStmtInfo->lookup(OrigVD);
// Variables already captured by copy (non-reference) need no separate
// firstprivate copy: the capture itself is the private copy.
617 if (!ThisFirstprivateIsLastprivate && FD && (FD == CapFD) &&
618 !FD->getType()->isReferenceType()) {
619 EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
624 FirstprivateIsLastprivate =
625 FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
// Emit each variable at most once across all firstprivate clauses.
626 if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
627 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
628 auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
630 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
631 /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
632 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
633 Address OriginalAddr = EmitLValue(&DRE).getAddress();
634 QualType Type = VD->getType();
635 if (Type->isArrayType()) {
636 // Emit VarDecl with copy init for arrays.
637 // Get the address of the original variable captured in current
639 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
640 auto Emission = EmitAutoVarAlloca(*VD);
641 auto *Init = VD->getInit();
642 if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
643 // Perform simple memcpy.
644 EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
647 EmitOMPAggregateAssign(
648 Emission.getAllocatedAddress(), OriginalAddr, Type,
649 [this, VDInit, Init](Address DestElement,
650 Address SrcElement) {
651 // Clean up any temporaries needed by the initialization.
652 RunCleanupsScope InitScope(*this);
653 // Emit initialization for single element.
654 setAddrOfLocalVar(VDInit, SrcElement)
655 EmitAnyExprToMem(Init, DestElement,
656 Init->getType().getQualifiers(),
657 /*IsInitializer*/ false);
658 LocalDeclMap.erase(VDInit);
661 EmitAutoVarCleanups(Emission);
662 return Emission.getAllocatedAddress();
// Non-array case: remap VDInit to the original address while emitting
// the private copy's initializer, then drop the temporary mapping.
665 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
666 // Emit private VarDecl with copy init.
667 // Remap temp VDInit variable to the address of the original
669 // (for proper handling of captured global variables).
670 setAddrOfLocalVar(VDInit, OriginalAddr);
672 LocalDeclMap.erase(VDInit);
673 return GetAddrOfLocalVar(VD);
676 assert(IsRegistered &&
677 "firstprivate var already registered as private");
678 // Silence the warning about unused variable.
685 return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
// Emit uninitialized-by-original private copies for the directive's 'private'
// clause variables and register them in PrivateScope.
688 void CodeGenFunction::EmitOMPPrivateClause(
689 const OMPExecutableDirective &D,
690 CodeGenFunction::OMPPrivateScope &PrivateScope) {
691 if (!HaveInsertPoint())
693 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
694 for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
695 auto IRef = C->varlist_begin();
696 for (auto IInit : C->private_copies()) {
697 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
// Emit each variable only once across all private clauses.
698 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
699 auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
701 PrivateScope.addPrivate(OrigVD, [&]() -> Address {
702 // Emit private VarDecl with copy init.
704 return GetAddrOfLocalVar(VD);
706 assert(IsRegistered && "private var already registered as private");
707 // Silence the warning about unused variable.
// Emit copying of 'copyin' threadprivate variables from the master thread's
// copies into the current thread's copies, guarded so the master thread
// skips the copy. Returns whether any copyin variable was processed
// (per the visible control flow; the return statement itself is among the
// lines dropped from this listing).
715 bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
716 if (!HaveInsertPoint())
718 // threadprivate_var1 = master_threadprivate_var1;
719 // operator=(threadprivate_var2, master_threadprivate_var2);
721 // __kmpc_barrier(&loc, global_tid);
722 llvm::DenseSet<const VarDecl *> CopiedVars;
723 llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
724 for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
725 auto IRef = C->varlist_begin();
726 auto ISrcRef = C->source_exprs().begin();
727 auto IDestRef = C->destination_exprs().begin();
728 for (auto *AssignOp : C->assignment_ops()) {
729 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
730 QualType Type = VD->getType();
731 if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
732 // Get the address of the master variable. If we are emitting code with
733 // TLS support, the address is passed from the master as field in the
734 // captured declaration.
735 Address MasterAddr = Address::invalid();
736 if (getLangOpts().OpenMPUseTLS &&
737 getContext().getTargetInfo().isTLSSupported()) {
738 assert(CapturedStmtInfo->lookup(VD) &&
739 "Copyin threadprivates should have been captured!");
740 DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
741 VK_LValue, (*IRef)->getExprLoc());
742 MasterAddr = EmitLValue(&DRE).getAddress();
// Drop the temporary mapping so later references re-resolve normally.
743 LocalDeclMap.erase(VD);
746 Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
747 : CGM.GetAddrOfGlobal(VD),
748 getContext().getDeclAlign(VD));
750 // Get the address of the threadprivate variable.
751 Address PrivateAddr = EmitLValue(*IRef).getAddress();
752 if (CopiedVars.size() == 1) {
753 // At first check if current thread is a master thread. If it is, no
754 // need to copy data.
755 CopyBegin = createBasicBlock("copyin.not.master");
756 CopyEnd = createBasicBlock("copyin.not.master.end");
// Master test: the thread is the master iff its threadprivate address
// equals the master's address; only non-master threads copy.
757 Builder.CreateCondBr(
758 Builder.CreateICmpNE(
759 Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
760 Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy)),
762 EmitBlock(CopyBegin);
764 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
765 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
766 EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
774 // Exit out of copying procedure for non-master thread.
775 EmitBlock(CopyEnd, /*IsFinished=*/true);
// Register private copies for 'lastprivate' variables and remap the pseudo
// destination variables to the original addresses (needed for the final
// copy-back). Returns true if the directive has at least one lastprivate var.
781 bool CodeGenFunction::EmitOMPLastprivateClauseInit(
782 const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
783 if (!HaveInsertPoint())
785 bool HasAtLeastOneLastprivate = false;
// For simd directives, loop control variables get their privates from the
// simd codegen, so skip emitting copies for them here.
786 llvm::DenseSet<const VarDecl *> SIMDLCVs;
787 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
788 auto *LoopDirective = cast<OMPLoopDirective>(&D);
789 for (auto *C : LoopDirective->counters()) {
791 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
794 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
795 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
796 HasAtLeastOneLastprivate = true;
797 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()))
799 auto IRef = C->varlist_begin();
800 auto IDestRef = C->destination_exprs().begin();
801 for (auto *IInit : C->private_copies()) {
802 // Keep the address of the original variable for future update at the end
804 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
805 // Taskloops do not require additional initialization, it is done in
806 // runtime support library.
807 if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
808 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
// The pseudo destination variable resolves to the original's address.
809 PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
811 const_cast<VarDecl *>(OrigVD),
812 /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
814 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
815 return EmitLValue(&DRE).getAddress();
817 // Check if the variable is also a firstprivate: in this case IInit is
818 // not generated. Initialization of this variable will happen in codegen
819 // for 'firstprivate' clause.
820 if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
821 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
822 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
823 // Emit private VarDecl with copy init.
825 return GetAddrOfLocalVar(VD);
827 assert(IsRegistered &&
828 "lastprivate var already registered as private");
836 return HasAtLeastOneLastprivate;
// Emit the final copy-back of 'lastprivate' values into the original
// variables, optionally guarded by IsLastIterCond (only the thread that
// executed the last iteration performs the copy).
839 void CodeGenFunction::EmitOMPLastprivateClauseFinal(
840 const OMPExecutableDirective &D, bool NoFinals,
841 llvm::Value *IsLastIterCond) {
842 if (!HaveInsertPoint())
844 // Emit following code:
845 // if (<IsLastIterCond>) {
846 // orig_var1 = private_orig_var1;
848 // orig_varn = private_orig_varn;
850 llvm::BasicBlock *ThenBB = nullptr;
851 llvm::BasicBlock *DoneBB = nullptr;
852 if (IsLastIterCond) {
853 ThenBB = createBasicBlock(".omp.lastprivate.then");
854 DoneBB = createBasicBlock(".omp.lastprivate.done");
855 Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
858 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
// Map loop counters to their 'final' update expressions so a lastprivate
// loop counter is brought to its post-loop value before being copied back.
859 llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
860 if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
861 auto IC = LoopDirective->counters().begin();
862 for (auto F : LoopDirective->finals()) {
864 cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
866 AlreadyEmittedVars.insert(D);
868 LoopCountersAndUpdates[D] = F;
872 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
873 auto IRef = C->varlist_begin();
874 auto ISrcRef = C->source_exprs().begin();
875 auto IDestRef = C->destination_exprs().begin();
876 for (auto *AssignOp : C->assignment_ops()) {
877 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
878 QualType Type = PrivateVD->getType();
879 auto *CanonicalVD = PrivateVD->getCanonicalDecl();
880 if (AlreadyEmittedVars.insert(CanonicalVD).second) {
881 // If lastprivate variable is a loop control variable for loop-based
882 // directive, update its value before copyin back to original
884 if (auto *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
885 EmitIgnoredExpr(FinalExpr);
886 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
887 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
888 // Get the address of the original variable.
889 Address OriginalAddr = GetAddrOfLocalVar(DestVD);
890 // Get the address of the private variable.
891 Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
// Reference-typed privates: load through the reference first.
892 if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
894 Address(Builder.CreateLoad(PrivateAddr),
895 getNaturalTypeAlignment(RefTy->getPointeeType()));
896 EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
902 if (auto *PostUpdate = C->getPostUpdateExpr())
903 EmitIgnoredExpr(PostUpdate);
906 EmitBlock(DoneBB, /*IsFinished=*/true);
909 static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
910 LValue BaseLV, llvm::Value *Addr) {
911 Address Tmp = Address::invalid();
912 Address TopTmp = Address::invalid();
913 Address MostTopTmp = Address::invalid();
914 BaseTy = BaseTy.getNonReferenceType();
915 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
916 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
917 Tmp = CGF.CreateMemTemp(BaseTy);
918 if (TopTmp.isValid())
919 CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
923 BaseTy = BaseTy->getPointeeType();
925 llvm::Type *Ty = BaseLV.getPointer()->getType();
927 Ty = Tmp.getElementType();
928 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
930 CGF.Builder.CreateStore(Addr, Tmp);
933 return Address(Addr, BaseLV.getAlignment());
936 static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
938 BaseTy = BaseTy.getNonReferenceType();
939 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
940 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
941 if (auto *PtrTy = BaseTy->getAs<PointerType>())
942 BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
944 BaseLV = CGF.EmitLoadOfReferenceLValue(BaseLV.getAddress(),
945 BaseTy->castAs<ReferenceType>());
947 BaseTy = BaseTy->getPointeeType();
949 return CGF.MakeAddrLValue(
951 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
952 BaseLV.getPointer(), CGF.ConvertTypeForMem(ElTy)->getPointerTo()),
953 BaseLV.getAlignment()),
954 BaseLV.getType(), BaseLV.getAlignmentSource());
/// Emit initialization of the private copies for every variable listed in
/// the 'reduction' clauses of directive \p D, and register each private
/// address in \p PrivateScope.  Three list-item forms are handled below:
/// OpenMP array sections, array subscript expressions, and plain variables
/// (array-typed or scalar).  For each item the LHS helper variable is bound
/// to the original storage and the RHS helper to the private copy.
957 void CodeGenFunction::EmitOMPReductionClauseInit(
958 const OMPExecutableDirective &D,
959 CodeGenFunction::OMPPrivateScope &PrivateScope) {
// Bail out when there is no insertion point (elided 'return;' follows in
// the full source — numbering jumps 960->962).
960 if (!HaveInsertPoint())
962 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
// Parallel iteration over the clause's helper expression lists: LHS/RHS
// helper vars, private copies and the reduction-op expressions.
963 auto ILHS = C->lhs_exprs().begin();
964 auto IRHS = C->rhs_exprs().begin();
965 auto IPriv = C->privates().begin();
966 auto IRed = C->reduction_ops().begin();
967 for (auto IRef : C->varlists()) {
968 auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
969 auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
970 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
// DRD: presumably the user-defined ('declare reduction') initializer for
// this reduction op, null for builtin reductions — confirm against
// getReductionInit's definition.
971 auto *DRD = getReductionInit(*IRed);
// --- Case 1: list item is an array section (possibly nested). ---
972 if (auto *OASE = dyn_cast<OMPArraySectionExpr>(IRef)) {
// Strip nested sections/subscripts to find the underlying DeclRefExpr.
973 auto *Base = OASE->getBase()->IgnoreParenImpCasts();
974 while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
975 Base = TempOASE->getBase()->IgnoreParenImpCasts();
976 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
977 Base = TempASE->getBase()->IgnoreParenImpCasts();
978 auto *DE = cast<DeclRefExpr>(Base);
979 auto *OrigVD = cast<VarDecl>(DE->getDecl());
// Lower bound lvalue of the section; the upper-bound lvalue is emitted
// on the (partially elided) next lines with IsLowerBound=false.
980 auto OASELValueLB = EmitOMPArraySectionExpr(OASE);
982 EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
983 auto OriginalBaseLValue = EmitLValue(DE);
985 loadToBegin(*this, OrigVD->getType(), OASELValueLB.getType(),
987 // Store the address of the original variable associated with the LHS
988 // implicit variable.
989 PrivateScope.addPrivate(LHSVD, [this, OASELValueLB]() -> Address {
990 return OASELValueLB.getAddress();
992 // Emit reduction copy.
993 bool IsRegistered = PrivateScope.addPrivate(
994 OrigVD, [this, OrigVD, PrivateVD, BaseLValue, OASELValueLB,
995 OASELValueUB, OriginalBaseLValue, DRD, IRed]() -> Address {
996 // Emit VarDecl with copy init for arrays.
997 // Get the address of the original variable captured in current
// Section length = (UB - LB) + 1 elements; used as the VLA size below.
999 auto *Size = Builder.CreatePtrDiff(OASELValueUB.getPointer(),
1000 OASELValueLB.getPointer());
1001 Size = Builder.CreateNUWAdd(
1002 Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
// Map the VLA size expression to the computed Size, then emit the
// variably-modified private type with that size.
1003 CodeGenFunction::OpaqueValueMapping OpaqueMap(
1004 *this, cast<OpaqueValueExpr>(
1006 .getAsVariableArrayType(PrivateVD->getType())
1009 EmitVariablyModifiedType(PrivateVD->getType());
1010 auto Emission = EmitAutoVarAlloca(*PrivateVD);
1011 auto Addr = Emission.getAllocatedAddress();
1012 auto *Init = PrivateVD->getInit();
// Initialize each element of the private section (user-defined
// initializer when DRD is set, otherwise the declared init).
1013 EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(),
1015 OASELValueLB.getAddress());
1016 EmitAutoVarCleanups(Emission);
1017 // Emit private VarDecl with reduction init.
// Rebase: offset of the original base from the section start, applied
// to the freshly allocated private storage.
1018 auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
1019 OASELValueLB.getPointer());
1020 auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
1021 return castToBase(*this, OrigVD->getType(),
1022 OASELValueLB.getType(), OriginalBaseLValue,
1025 assert(IsRegistered && "private var already registered as private");
1026 // Silence the warning about unused variable.
// RHS helper resolves to the private copy itself.
1028 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
1029 return GetAddrOfLocalVar(PrivateVD);
// --- Case 2: list item is an array subscript expression. ---
1031 } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(IRef)) {
1032 auto *Base = ASE->getBase()->IgnoreParenImpCasts();
1033 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1034 Base = TempASE->getBase()->IgnoreParenImpCasts();
1035 auto *DE = cast<DeclRefExpr>(Base);
1036 auto *OrigVD = cast<VarDecl>(DE->getDecl());
1037 auto ASELValue = EmitLValue(ASE);
1038 auto OriginalBaseLValue = EmitLValue(DE);
1039 LValue BaseLValue = loadToBegin(
1040 *this, OrigVD->getType(), ASELValue.getType(), OriginalBaseLValue);
1041 // Store the address of the original variable associated with the LHS
1042 // implicit variable.
1043 PrivateScope.addPrivate(LHSVD, [this, ASELValue]() -> Address {
1044 return ASELValue.getAddress();
1046 // Emit reduction copy.
1047 bool IsRegistered = PrivateScope.addPrivate(
1048 OrigVD, [this, OrigVD, PrivateVD, BaseLValue, ASELValue,
1049 OriginalBaseLValue, DRD, IRed]() -> Address {
1050 // Emit private VarDecl with reduction init.
1051 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1052 auto Addr = Emission.getAllocatedAddress();
// Prefer the user-defined reduction initializer when present (or when
// the private decl has no init of its own); else the default init.
1053 if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1054 emitInitWithReductionInitializer(*this, DRD, *IRed, Addr,
1055 ASELValue.getAddress(),
1056 ASELValue.getType());
1058 EmitAutoVarInit(Emission);
1059 EmitAutoVarCleanups(Emission);
// Rebase the private storage by the subscript's offset from the base.
1060 auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
1061 ASELValue.getPointer());
1062 auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
1063 return castToBase(*this, OrigVD->getType(), ASELValue.getType(),
1064 OriginalBaseLValue, Ptr);
1066 assert(IsRegistered && "private var already registered as private");
1067 // Silence the warning about unused variable.
// RHS helper views the private copy with the helper's own element type.
1069 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
1070 return Builder.CreateElementBitCast(
1071 GetAddrOfLocalVar(PrivateVD), ConvertTypeForMem(RHSVD->getType()),
// --- Case 3: plain variable reference. ---
1075 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
1076 QualType Type = PrivateVD->getType();
// 3a: array-typed variable — aggregate init over all elements.
1077 if (getContext().getAsArrayType(Type)) {
1078 // Store the address of the original variable associated with the LHS
1079 // implicit variable.
1080 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1081 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1082 IRef->getType(), VK_LValue, IRef->getExprLoc());
1083 Address OriginalAddr = EmitLValue(&DRE).getAddress();
1084 PrivateScope.addPrivate(LHSVD, [this, &OriginalAddr,
1085 LHSVD]() -> Address {
1086 OriginalAddr = Builder.CreateElementBitCast(
1087 OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
1088 return OriginalAddr;
1090 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
// VLA-typed private copies need the size expression mapped before the
// type can be emitted (parallel to the array-section case above).
1091 if (Type->isVariablyModifiedType()) {
1092 CodeGenFunction::OpaqueValueMapping OpaqueMap(
1093 *this, cast<OpaqueValueExpr>(
1095 .getAsVariableArrayType(PrivateVD->getType())
1098 getTypeSize(OrigVD->getType().getNonReferenceType())));
1099 EmitVariablyModifiedType(Type);
1101 auto Emission = EmitAutoVarAlloca(*PrivateVD);
1102 auto Addr = Emission.getAllocatedAddress();
1103 auto *Init = PrivateVD->getInit();
1104 EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(),
1105 DRD ? *IRed : Init, OriginalAddr);
1106 EmitAutoVarCleanups(Emission);
1107 return Emission.getAllocatedAddress();
1109 assert(IsRegistered && "private var already registered as private");
1110 // Silence the warning about unused variable.
1112 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
1113 return Builder.CreateElementBitCast(
1114 GetAddrOfLocalVar(PrivateVD),
1115 ConvertTypeForMem(RHSVD->getType()), "rhs.begin");
// 3b: scalar variable (the 'else' for 3a is elided in this view).
1118 // Store the address of the original variable associated with the LHS
1119 // implicit variable.
1120 Address OriginalAddr = Address::invalid();
1121 PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef,
1122 &OriginalAddr]() -> Address {
1123 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1124 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1125 IRef->getType(), VK_LValue, IRef->getExprLoc());
1126 OriginalAddr = EmitLValue(&DRE).getAddress();
1127 return OriginalAddr;
1129 // Emit reduction copy.
1130 bool IsRegistered = PrivateScope.addPrivate(
1131 OrigVD, [this, PrivateVD, OriginalAddr, DRD, IRed]() -> Address {
1132 // Emit private VarDecl with reduction init.
1133 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1134 auto Addr = Emission.getAllocatedAddress();
1135 if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1136 emitInitWithReductionInitializer(*this, DRD, *IRed, Addr,
1138 PrivateVD->getType());
1140 EmitAutoVarInit(Emission);
1141 EmitAutoVarCleanups(Emission);
1144 assert(IsRegistered && "private var already registered as private");
1145 // Silence the warning about unused variable.
1147 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
1148 return GetAddrOfLocalVar(PrivateVD);
/// Emit the end-of-region reduction: collect the helper expression lists
/// from every 'reduction' clause of \p D and hand them to the OpenMP
/// runtime's reduction emitter.  The reduction is emitted as 'nowait' when
/// a nowait clause is present, the directive is a parallel directive
/// (implicit barrier), or the directive is simd.
1160 void CodeGenFunction::EmitOMPReductionClauseFinal(
1161 const OMPExecutableDirective &D) {
// No insertion point => nothing to emit (elided 'return;' follows).
1162 if (!HaveInsertPoint())
1164 llvm::SmallVector<const Expr *, 8> Privates;
1165 llvm::SmallVector<const Expr *, 8> LHSExprs;
1166 llvm::SmallVector<const Expr *, 8> RHSExprs;
1167 llvm::SmallVector<const Expr *, 8> ReductionOps;
1168 bool HasAtLeastOneReduction = false;
// Concatenate the helper lists across all reduction clauses.
1169 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1170 HasAtLeastOneReduction = true;
1171 Privates.append(C->privates().begin(), C->privates().end());
1172 LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
1173 RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
1174 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
1176 if (HasAtLeastOneReduction) {
1177 // Emit nowait reduction if nowait clause is present or directive is a
1178 // parallel directive (it always has implicit barrier).
1179 CGM.getOpenMPRuntime().emitReduction(
1180 *this, D.getLocEnd(), Privates, LHSExprs, RHSExprs, ReductionOps,
1181 D.getSingleClause<OMPNowaitClause>() ||
1182 isOpenMPParallelDirective(D.getDirectiveKind()) ||
1183 D.getDirectiveKind() == OMPD_simd,
// Last argument flags the simd variant of the reduction.
1184 D.getDirectiveKind() == OMPD_simd);
/// Emit the post-update expressions attached to the 'reduction' clauses of
/// \p D.  \p CondGen may supply a guard condition: when it returns non-null
/// on the first post-update found, the updates are wrapped in a conditional
/// block (.omp.reduction.pu / .omp.reduction.pu.done).
1188 static void emitPostUpdateForReductionClause(
1189 CodeGenFunction &CGF, const OMPExecutableDirective &D,
1190 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
// No insertion point => nothing to emit (elided 'return;' follows).
1191 if (!CGF.HaveInsertPoint())
1193 llvm::BasicBlock *DoneBB = nullptr;
1194 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1195 if (auto *PostUpdate = C->getPostUpdateExpr()) {
// The conditional block is created lazily, on the first post-update
// (guard on DoneBB is on an elided line — numbering jumps 1195->1197).
1197 if (auto *Cond = CondGen(CGF)) {
1198 // If the first post-update expression is found, emit conditional
1199 // block if it was requested.
1200 auto *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
1201 DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
1202 CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1203 CGF.EmitBlock(ThenBB);
1206 CGF.EmitIgnoredExpr(PostUpdate);
// Close the conditional region, if one was opened.
1210 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
/// Shared lowering for parallel-like directives: outline the captured
/// statement of \p S into a function, emit num_threads/proc_bind clause
/// runtime calls, find the applicable 'if' condition, and emit the parallel
/// runtime call with the captured variables.
1213 static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
1214 const OMPExecutableDirective &S,
1215 OpenMPDirectiveKind InnermostKind,
1216 const RegionCodeGenTy &CodeGen) {
1217 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
// Outline the region body; the first captured param is the context arg.
1218 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().
1219 emitParallelOrTeamsOutlinedFunction(S,
1220 *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
1221 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
// Scope so cleanups from evaluating the clause expression run promptly.
1222 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
1223 auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
1224 /*IgnoreResultAssign*/ true);
1225 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
1226 CGF, NumThreads, NumThreadsClause->getLocStart());
1228 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
1229 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
1230 CGF.CGM.getOpenMPRuntime().emitProcBindClause(
1231 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
// Select the 'if' clause that applies to the parallel region: either an
// unmodified 'if' or one with the 'parallel' name modifier.
1233 const Expr *IfCond = nullptr;
1234 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
1235 if (C->getNameModifier() == OMPD_unknown ||
1236 C->getNameModifier() == OMPD_parallel) {
1237 IfCond = C->getCondition();
1242 OMPLexicalScope Scope(CGF, S);
1243 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
1244 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
1245 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
1246 CapturedVars, IfCond);
/// Lower '#pragma omp parallel': build the region body (copyin,
/// firstprivate, private and reduction setup around the captured
/// statement) and emit it through the common parallel lowering, followed
/// by unconditional reduction post-updates.
1249 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
1250 // Emit parallel region as a standalone region.
1251 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1252 OMPPrivateScope PrivateScope(CGF);
1253 bool Copyins = CGF.EmitOMPCopyinClause(S);
1254 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
// Barrier is emitted when copyin occurred (the guard on 'Copyins' sits
// on an elided line — numbering jumps 1254->1256).
1256 // Emit implicit barrier to synchronize threads and avoid data races on
1257 // propagation master's thread values of threadprivate variables to local
1258 // instances of that variables of all other implicit threads.
1259 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1260 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
1261 /*ForceSimpleCall=*/true);
1263 CGF.EmitOMPPrivateClause(S, PrivateScope);
1264 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1265 (void)PrivateScope.Privatize();
1266 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1267 CGF.EmitOMPReductionClauseFinal(S);
1269 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
// Null condition generator => post-updates are emitted unconditionally.
1270 emitPostUpdateForReductionClause(
1271 *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
/// Emit one iteration of an OpenMP loop: update the loop counters and
/// linear variables from the iteration variable, then emit the body with a
/// continue destination that jumps to the per-iteration cleanup block.
1274 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
1275 JumpDest LoopExit) {
1276 RunCleanupsScope BodyScope(*this);
1277 // Update counters values on current iteration.
1278 for (auto I : D.updates()) {
1281 // Update the linear variables.
1282 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1283 for (auto *U : C->updates())
1287 // On a continue in the body, jump to the end.
1288 auto Continue = getJumpDestInCurrentScope("omp.body.continue");
// 'continue' in the body branches to Continue; 'break' is not allowed in
// the loop body, LoopExit serves the break slot of the stack entry.
1289 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1291 EmitStmt(D.getBody());
1292 // The end (updates/cleanups).
1293 EmitBlock(Continue.getBlock());
1294 BreakContinueStack.pop_back();
/// Emit the generic inner loop skeleton used by OpenMP loop directives:
///   omp.inner.for.cond -> omp.inner.for.body -> omp.inner.for.inc -> cond
/// \p BodyGen emits the loop body, \p PostIncGen runs after the increment,
/// and \p RequiresCleanup routes the exit edge through a cleanup block.
1297 void CodeGenFunction::EmitOMPInnerLoop(
1298 const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
1299 const Expr *IncExpr,
1300 const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
1301 const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
1302 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
1304 // Start the loop with a block that tests the condition.
1305 auto CondBlock = createBasicBlock("omp.inner.for.cond");
1306 EmitBlock(CondBlock);
// Register the loop with the loop-metadata stack for debug locations.
1307 const SourceRange &R = S.getSourceRange();
1308 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1309 SourceLocToDebugLoc(R.getEnd()));
1311 // If there are any cleanups between here and the loop-exit scope,
1312 // create a block to stage a loop exit along.
1313 auto ExitBlock = LoopExit.getBlock();
1314 if (RequiresCleanup)
1315 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
1317 auto LoopBody = createBasicBlock("omp.inner.for.body");
// Branch on the loop condition with profile weighting for the body edge.
1320 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
1321 if (ExitBlock != LoopExit.getBlock()) {
1322 EmitBlock(ExitBlock);
1323 EmitBranchThroughCleanup(LoopExit);
1326 EmitBlock(LoopBody);
1327 incrementProfileCounter(&S);
1329 // Create a block for the increment.
1330 auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
1331 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
// BodyGen is invoked on an elided line here (numbering jumps 1331->1335).
1335 // Emit "IV = IV + 1" and a back-edge to the condition block.
1336 EmitBlock(Continue.getBlock());
1337 EmitIgnoredExpr(IncExpr);
// PostIncGen runs on an elided line before the stack pop (1337->1339).
1339 BreakContinueStack.pop_back();
1340 EmitBranch(CondBlock);
1342 // Emit the fall-through block.
1343 EmitBlock(LoopExit.getBlock());
/// Emit initializers for 'linear' clause variables of \p D: capture each
/// linear variable's start value, and pre-calculate non-constant linear
/// steps before the loop.
1346 void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
// No insertion point => nothing to emit (elided 'return;' follows).
1347 if (!HaveInsertPoint())
1349 // Emit inits for the linear variables.
1350 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1351 for (auto *Init : C->inits()) {
1352 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
// When the init refers directly to the original variable, emit a load
// of the original (possibly captured) variable as the initializer.
1353 if (auto *Ref = dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
1354 AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
1355 auto *OrigVD = cast<VarDecl>(Ref->getDecl());
1356 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1357 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1358 VD->getInit()->getType(), VK_LValue,
1359 VD->getInit()->getExprLoc());
1360 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
1362 /*capturedByInit=*/false);
1363 EmitAutoVarCleanups(Emission);
1367 // Emit the linear steps for the linear clauses.
1368 // If a step is not constant, it is pre-calculated before the loop.
1369 if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
1370 if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
1371 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
1372 // Emit calculation of the linear step.
1373 EmitIgnoredExpr(CS);
/// Emit the final values of 'linear' clause variables of \p D, copying the
/// computed final value back into the original variable, plus any clause
/// post-update expressions.  \p CondGen may supply a guard condition that
/// wraps the whole copy-back in a conditional block (.omp.linear.pu).
1378 void CodeGenFunction::EmitOMPLinearClauseFinal(
1379 const OMPLoopDirective &D,
1380 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
// No insertion point => nothing to emit (elided 'return;' follows).
1381 if (!HaveInsertPoint())
1383 llvm::BasicBlock *DoneBB = nullptr;
1384 // Emit the final values of the linear variables.
1385 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1386 auto IC = C->varlist_begin();
1387 for (auto *F : C->finals()) {
// Lazily open the conditional block on the first final, if requested.
1389 if (auto *Cond = CondGen(*this)) {
1390 // If the first post-update expression is found, emit conditional
1391 // block if it was requested.
1392 auto *ThenBB = createBasicBlock(".omp.linear.pu");
1393 DoneBB = createBasicBlock(".omp.linear.pu.done");
1394 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
// Privatize the original variable to its real address so the final
// expression F stores into the original storage.
1398 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
1399 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1400 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1401 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
1402 Address OrigAddr = EmitLValue(&DRE).getAddress();
1403 CodeGenFunction::OMPPrivateScope VarScope(*this);
1404 VarScope.addPrivate(OrigVD, [OrigAddr]() -> Address { return OrigAddr; });
1405 (void)VarScope.Privatize();
1409 if (auto *PostUpdate = C->getPostUpdateExpr())
1410 EmitIgnoredExpr(PostUpdate);
// Close the conditional region, if one was opened.
1413 EmitBlock(DoneBB, /*IsFinished=*/true);
/// Emit llvm.assume-style alignment assumptions for the pointers listed in
/// 'aligned' clauses of \p D.  A clause without an explicit alignment uses
/// the target's default SIMD alignment for the pointee type.
1416 static void emitAlignedClause(CodeGenFunction &CGF,
1417 const OMPExecutableDirective &D) {
// No insertion point => nothing to emit (elided 'return;' follows).
1418 if (!CGF.HaveInsertPoint())
1420 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
1421 unsigned ClauseAlignment = 0;
// Explicit alignment must fold to a constant (Sema guarantees this —
// presumably; the cast below would assert otherwise).
1422 if (auto AlignmentExpr = Clause->getAlignment()) {
1424 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
1425 ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
1427 for (auto E : Clause->varlists()) {
1428 unsigned Alignment = ClauseAlignment;
1429 if (Alignment == 0) {
1430 // OpenMP [2.8.1, Description]
1431 // If no optional parameter is specified, implementation-defined default
1432 // alignments for SIMD instructions on the target platforms are assumed.
1435 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
1436 E->getType()->getPointeeType()))
1439 assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
1440 "alignment is not power of 2");
1441 if (Alignment != 0) {
1442 llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
1443 CGF.EmitAlignmentAssumption(PtrValue, Alignment);
/// Privatize the loop counter variables of loop directive \p S in
/// \p LoopScope: bind each original counter to a freshly emitted private
/// counter, and — when the original is itself local/captured/global —
/// also bind the private counter decl back to the original's address.
1449 void CodeGenFunction::EmitOMPPrivateLoopCounters(
1450 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
// No insertion point => nothing to emit (elided 'return;' follows).
1451 if (!HaveInsertPoint())
1453 auto I = S.private_counters().begin();
1454 for (auto *E : S.counters()) {
1455 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1456 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
1457 (void)LoopScope.addPrivate(VD, [&]() -> Address {
1458 // Emit var without initialization.
// Avoid re-allocating the private counter if already emitted.
1459 if (!LocalDeclMap.count(PrivateVD)) {
1460 auto VarEmission = EmitAutoVarAlloca(*PrivateVD);
1461 EmitAutoVarCleanups(VarEmission);
1463 DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
1464 /*RefersToEnclosingVariableOrCapture=*/false,
1465 (*I)->getType(), VK_LValue, (*I)->getExprLoc());
1466 return EmitLValue(&DRE).getAddress();
// If the original counter has real storage visible here, map the
// private decl back to it as well.
1468 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
1469 VD->hasGlobalStorage()) {
1470 (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
1471 DeclRefExpr DRE(const_cast<VarDecl *>(VD),
1472 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
1473 E->getType(), VK_LValue, E->getExprLoc());
1474 return EmitLValue(&DRE).getAddress();
/// Emit the loop pre-condition check of \p S: privatize the loop counters,
/// emit the counters' initial values, then branch on \p Cond to
/// \p TrueBlock / \p FalseBlock with \p TrueCount profile weight.
1481 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
1482 const Expr *Cond, llvm::BasicBlock *TrueBlock,
1483 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
// No insertion point => nothing to emit (elided 'return;' follows).
1484 if (!CGF.HaveInsertPoint())
1487 CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
1488 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
1489 (void)PreCondScope.Privatize();
1490 // Get initial values of real counters.
1491 for (auto I : S.inits()) {
1492 CGF.EmitIgnoredExpr(I);
1495 // Check that loop is executed at least one time.
1496 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
/// Privatize the variables of 'linear' clauses of \p D in \p PrivateScope.
/// For simd directives, linear variables that are also loop counters are
/// skipped from the scope registration (they are handled as counters);
/// their private decl is still emitted.
1499 void CodeGenFunction::EmitOMPLinearClause(
1500 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
// No insertion point => nothing to emit (elided 'return;' follows).
1501 if (!HaveInsertPoint())
// Collect canonical decls of simd loop counters to exclude them below.
1503 llvm::DenseSet<const VarDecl *> SIMDLCVs;
1504 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
1505 auto *LoopDirective = cast<OMPLoopDirective>(&D);
1506 for (auto *C : LoopDirective->counters()) {
1508 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
1511 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1512 auto CurPrivate = C->privates().begin();
1513 for (auto *E : C->varlists()) {
1514 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1516 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
1517 if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
1518 bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
1519 // Emit private VarDecl with copy init.
1520 EmitVarDecl(*PrivateVD);
1521 return GetAddrOfLocalVar(PrivateVD);
1523 assert(IsRegistered && "linear var already registered as private");
1524 // Silence the warning about unused variable.
// Loop-counter case: emit the private decl without scope registration.
1527 EmitVarDecl(*PrivateVD);
/// Apply 'simdlen'/'safelen' clauses of \p D to the loop metadata stack:
/// set the vectorize width from the clause value, and mark the loop
/// non-parallel when a finite 'safelen' bounds the legal dependence
/// distance.
1533 static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
1534 const OMPExecutableDirective &D,
// No insertion point => nothing to emit (elided 'return;' follows).
1536 if (!CGF.HaveInsertPoint())
1538 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
1539 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
1540 /*ignoreResult=*/true);
1541 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1542 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1543 // In presence of finite 'safelen', it may be unsafe to mark all
1544 // the memory instructions parallel, because loop-carried
1545 // dependences of 'safelen' iterations are possible.
// simdlen + safelen together: parallel only when no safelen is given.
1547 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
1548 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
1549 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
1550 /*ignoreResult=*/true);
1551 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1552 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1553 // In presence of finite 'safelen', it may be unsafe to mark all
1554 // the memory instructions parallel, because loop-carried
1555 // dependences of 'safelen' iterations are possible.
1556 CGF.LoopStack.setParallel(false);
/// Configure loop metadata for a simd region: enable vectorization, mark
/// the loop parallel unless the schedule is monotonic, then apply the
/// simdlen/safelen clauses.
1560 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
1562 // Walk clauses and process safelen/lastprivate.
1563 LoopStack.setParallel(!IsMonotonic);
1564 LoopStack.setVectorizeEnable(true);
1565 emitSimdlenSafelenClause(*this, D, IsMonotonic);
/// Emit the final values of the loop counters of simd directive \p D into
/// the original variables.  \p CondGen may supply a guard condition that
/// wraps the copy-back in a conditional block (.omp.final.then/.done).
1568 void CodeGenFunction::EmitOMPSimdFinal(
1569 const OMPLoopDirective &D,
1570 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
// No insertion point => nothing to emit (elided 'return;' follows).
1571 if (!HaveInsertPoint())
1573 llvm::BasicBlock *DoneBB = nullptr;
// Iterate counters and their private twins in lockstep with finals().
1574 auto IC = D.counters().begin();
1575 auto IPC = D.private_counters().begin();
1576 for (auto F : D.finals()) {
1577 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
1578 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
// Counters that came from captured expressions need special addressing.
1579 auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
// Only copy back when the original counter has storage visible here.
1580 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
1581 OrigVD->hasGlobalStorage() || CED) {
// Lazily open the conditional block on the first counter, if requested.
1583 if (auto *Cond = CondGen(*this)) {
1584 // If the first post-update expression is found, emit conditional
1585 // block if it was requested.
1586 auto *ThenBB = createBasicBlock(".omp.final.then");
1587 DoneBB = createBasicBlock(".omp.final.done");
1588 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
// Resolve the target address: through the captured expression's init
// for OMPCapturedExprDecl, otherwise through the private counter decl.
1592 Address OrigAddr = Address::invalid();
1594 OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
1596 DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
1597 /*RefersToEnclosingVariableOrCapture=*/false,
1598 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
1599 OrigAddr = EmitLValue(&DRE).getAddress();
// Privatize the original to OrigAddr so the final expr stores there.
1601 OMPPrivateScope VarScope(*this);
1602 VarScope.addPrivate(OrigVD,
1603 [OrigAddr]() -> Address { return OrigAddr; });
1604 (void)VarScope.Privatize();
// Close the conditional region, if one was opened.
1611 EmitBlock(DoneBB, /*IsFinished=*/true);
/// Lower '#pragma omp simd': emit the loop pre-condition (with constant
/// folding), the iteration variable and iteration count, privatize
/// counters/linear/private/reduction/lastprivate variables, emit the inner
/// loop as a vectorizable loop, then the final copies and post-updates —
/// all as an inlined (non-outlined) directive.
1614 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
1615 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1616 OMPLoopScope PreInitScope(CGF, S);
1618 // for (IV in 0..LastIteration) BODY;
1619 // <Final counter/linear vars updates>;
1623 // Emit: if (PreCond) - begin.
1624 // If the condition constant folds and can be elided, avoid emitting the
// ContBlock stays null when the pre-condition folds away entirely.
1627 llvm::BasicBlock *ContBlock = nullptr;
1628 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
// Non-foldable pre-condition: emit a real branch (the folded-case body
// is on elided lines — numbering jumps 1628->1632).
1632 auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
1633 ContBlock = CGF.createBasicBlock("simd.if.end");
1634 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
1635 CGF.getProfileCount(&S));
1636 CGF.EmitBlock(ThenBlock);
1637 CGF.incrementProfileCounter(&S);
1640 // Emit the loop iteration variable.
1641 const Expr *IVExpr = S.getIterationVariable();
1642 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
1643 CGF.EmitVarDecl(*IVDecl);
1644 CGF.EmitIgnoredExpr(S.getInit());
1646 // Emit the iterations count variable.
1647 // If it is not a variable, Sema decided to calculate iterations count on
1648 // each iteration (e.g., it is foldable into a constant).
1649 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
1650 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
1651 // Emit calculation of the iterations count.
1652 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
// Mark the loop vectorizable before emitting privatization and body.
1655 CGF.EmitOMPSimdInit(S);
1657 emitAlignedClause(CGF, S);
1658 CGF.EmitOMPLinearClauseInit(S);
1660 OMPPrivateScope LoopScope(CGF);
1661 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
1662 CGF.EmitOMPLinearClause(S, LoopScope);
1663 CGF.EmitOMPPrivateClause(S, LoopScope);
1664 CGF.EmitOMPReductionClauseInit(S, LoopScope);
1665 bool HasLastprivateClause =
1666 CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
1667 (void)LoopScope.Privatize();
1668 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
1670 [&S](CodeGenFunction &CGF) {
// Simd body has no dispatch loop: an empty JumpDest is passed as exit.
1671 CGF.EmitOMPLoopBody(S, JumpDest());
1672 CGF.EmitStopPoint(&S);
1674 [](CodeGenFunction &) {});
// Null condition generators: finals/post-updates emitted unconditionally.
1675 CGF.EmitOMPSimdFinal(
1676 S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1677 // Emit final copy of the lastprivate variables at the end of loops.
1678 if (HasLastprivateClause)
1679 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
1680 CGF.EmitOMPReductionClauseFinal(S);
1681 emitPostUpdateForReductionClause(
1682 CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1684 CGF.EmitOMPLinearClauseFinal(
1685 S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1686 // Emit: if (PreCond) - end.
1688 CGF.EmitBranch(ContBlock);
1689 CGF.EmitBlock(ContBlock, true);
// Simd is emitted inline in the current function, not outlined.
1692 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1693 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
/// Emit the outer dispatch loop for worksharing schedules that need one
/// (dynamic/guided/runtime/auto or chunked static):
///   omp.dispatch.cond -> omp.dispatch.body -> (inner loop) ->
///   omp.dispatch.inc -> cond, then omp.dispatch.end.
/// For static schedules the bounds are advanced by stride each round; for
/// dynamic/ordered schedules the runtime's 'for next' call drives the loop.
1696 void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
1697 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
1698 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1699 auto &RT = CGM.getOpenMPRuntime();
1701 const Expr *IVExpr = S.getIterationVariable();
// IV width/signedness select the matching runtime entry point.
1702 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1703 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1705 auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
1707 // Start the loop with a block that tests the condition.
1708 auto CondBlock = createBasicBlock("omp.dispatch.cond");
1709 EmitBlock(CondBlock);
1710 const SourceRange &R = S.getSourceRange();
1711 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1712 SourceLocToDebugLoc(R.getEnd()));
1714 llvm::Value *BoolCondVal = nullptr;
1715 if (!DynamicOrOrdered) {
1716 // UB = min(UB, GlobalUB)
1717 EmitIgnoredExpr(S.getEnsureUpperBound());
// Static-chunked path: IV = LB, then test the loop condition directly.
1719 EmitIgnoredExpr(S.getInit());
1721 BoolCondVal = EvaluateExprAsBool(S.getCond());
// Dynamic/ordered path: ask the runtime for the next chunk's bounds.
1723 BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, IL,
1727 // If there are any cleanups between here and the loop-exit scope,
1728 // create a block to stage a loop exit along.
1729 auto ExitBlock = LoopExit.getBlock();
1730 if (LoopScope.requiresCleanups())
1731 ExitBlock = createBasicBlock("omp.dispatch.cleanup");
1733 auto LoopBody = createBasicBlock("omp.dispatch.body");
1734 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
1735 if (ExitBlock != LoopExit.getBlock()) {
1736 EmitBlock(ExitBlock);
1737 EmitBranchThroughCleanup(LoopExit);
1739 EmitBlock(LoopBody);
1741 // Emit "IV = LB" (in case of static schedule, we have already calculated new
1742 // LB for loop condition and emitted it above).
1743 if (DynamicOrOrdered)
1744 EmitIgnoredExpr(S.getInit());
1746 // Create a block for the increment.
1747 auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
1748 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1750 // Generate !llvm.loop.parallel metadata for loads and stores for loops
1751 // with dynamic/guided scheduling and without ordered clause.
1752 if (!isOpenMPSimdDirective(S.getDirectiveKind()))
1753 LoopStack.setParallel(!IsMonotonic);
// Simd variants take the full simd loop-metadata setup instead.
1755 EmitOMPSimdInit(S, IsMonotonic);
1757 SourceLocation Loc = S.getLocStart();
// The inner loop walks the chunk; for 'ordered', notify the runtime at
// the end of each iteration.
1758 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
1759 [&S, LoopExit](CodeGenFunction &CGF) {
1760 CGF.EmitOMPLoopBody(S, LoopExit);
1761 CGF.EmitStopPoint(&S);
1763 [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
1765 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
1766 CGF, Loc, IVSize, IVSigned);
1770 EmitBlock(Continue.getBlock());
1771 BreakContinueStack.pop_back();
1772 if (!DynamicOrOrdered) {
1773 // Emit "LB = LB + Stride", "UB = UB + Stride".
1774 EmitIgnoredExpr(S.getNextLowerBound());
1775 EmitIgnoredExpr(S.getNextUpperBound());
1778 EmitBranch(CondBlock);
1780 // Emit the fall-through block.
1781 EmitBlock(LoopExit.getBlock());
1783 // Tell the runtime we are done.
// Static schedules require an explicit 'static fini' runtime call; the
// exit is routed through the cancellation stack so cancel works.
1784 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
1785 if (!DynamicOrOrdered)
1786 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
1788 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
// Emits the outer "dispatch" loop for a worksharing loop whose schedule
// requires repeatedly requesting chunks from the OpenMP runtime (dynamic,
// guided, auto, runtime, ordered, or static chunked). Chooses the runtime
// init call, then delegates the loop nest to EmitOMPOuterLoop.
// NOTE(review): this listing elides some original lines (numbering gaps),
// e.g. the first line of the assert and the else/closing braces.
1791 void CodeGenFunction::EmitOMPForOuterLoop(
1792 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
1793 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
1794 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1795 auto &RT = CGM.getOpenMPRuntime();
1797 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
// An 'ordered' clause forces the dispatch (runtime-driven) code path even
// for otherwise-static schedules.
1798 const bool DynamicOrOrdered =
1799 Ordered || RT.isDynamic(ScheduleKind.Schedule);
// (assert continuation) static non-chunked schedules are handled without an
// outer loop elsewhere, so they must not reach this function.
1802 !RT.isStaticNonchunked(ScheduleKind.Schedule,
1803 /*Chunked=*/Chunk != nullptr)) &&
1804 "static non-chunked schedule does not need outer loop");
1808 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1809 // When schedule(dynamic,chunk_size) is specified, the iterations are
1810 // distributed to threads in the team in chunks as the threads request them.
1811 // Each thread executes a chunk of iterations, then requests another chunk,
1812 // until no chunks remain to be distributed. Each chunk contains chunk_size
1813 // iterations, except for the last chunk to be distributed, which may have
1814 // fewer iterations. When no chunk_size is specified, it defaults to 1.
1816 // When schedule(guided,chunk_size) is specified, the iterations are assigned
1817 // to threads in the team in chunks as the executing threads request them.
1818 // Each thread executes a chunk of iterations, then requests another chunk,
1819 // until no chunks remain to be assigned. For a chunk_size of 1, the size of
1820 // each chunk is proportional to the number of unassigned iterations divided
1821 // by the number of threads in the team, decreasing to 1. For a chunk_size
1822 // with value k (greater than 1), the size of each chunk is determined in the
1823 // same way, with the restriction that the chunks do not contain fewer than k
1824 // iterations (except for the last chunk to be assigned, which may have fewer
1825 // than k iterations).
1827 // When schedule(auto) is specified, the decision regarding scheduling is
1828 // delegated to the compiler and/or runtime system. The programmer gives the
1829 // implementation the freedom to choose any possible mapping of iterations to
1830 // threads in the team.
1832 // When schedule(runtime) is specified, the decision regarding scheduling is
1833 // deferred until run time, and the schedule and chunk size are taken from the
1834 // run-sched-var ICV. If the ICV is set to auto, the schedule is
1835 // implementation defined
1837 // while(__kmpc_dispatch_next(&LB, &UB)) {
1839 // while (idx <= UB) { BODY; ++idx;
1840 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
1844 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1845 // When schedule(static, chunk_size) is specified, iterations are divided into
1846 // chunks of size chunk_size, and the chunks are assigned to the threads in
1847 // the team in a round-robin fashion in the order of the thread number.
1849 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
1850 // while (idx <= UB) { BODY; ++idx; } // inner loop
// Size/signedness of the iteration variable select the 4/8-byte [u]
// runtime entry points.
1856 const Expr *IVExpr = S.getIterationVariable();
1857 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1858 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1860 if (DynamicOrOrdered) {
1861 llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());
1862 RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind, IVSize,
1863 IVSigned, Ordered, UBVal, Chunk);
// (else branch; the 'else' line itself is elided in this listing)
1865 RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
1866 Ordered, IL, LB, UB, ST, Chunk);
// Emit the shared outer-loop skeleton (trailing arguments elided here).
1869 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, Ordered, LB, UB,
// Emits the outer loop for a '#pragma omp distribute' directive. Always a
// static distribute schedule: initializes via emitDistributeStaticInit and
// reuses EmitOMPOuterLoop with DynamicOrOrdered/IsMonotonic/Ordered all false.
1873 void CodeGenFunction::EmitOMPDistributeOuterLoop(
1874 OpenMPDistScheduleClauseKind ScheduleKind,
1875 const OMPDistributeDirective &S, OMPPrivateScope &LoopScope,
1876 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1878 auto &RT = CGM.getOpenMPRuntime();
1881 // Same behavior as a OMPForOuterLoop, except that schedule cannot be
// Iteration-variable width/signedness select the runtime entry point.
1885 const Expr *IVExpr = S.getIterationVariable();
1886 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1887 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1889 RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
1890 IVSize, IVSigned, /* Ordered = */ false,
1891 IL, LB, UB, ST, Chunk);
1893 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false,
1894 S, LoopScope, /* Ordered = */ false, LB, UB, ST, IL, Chunk);
// Codegen for '#pragma omp distribute parallel for': emitted as an inlined
// region (with a cancel-region scope) whose body is the captured statement.
1897 void CodeGenFunction::EmitOMPDistributeParallelForDirective(
1898 const OMPDistributeParallelForDirective &S) {
1899 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1900 CGM.getOpenMPRuntime().emitInlinedDirective(
1901 *this, OMPD_distribute_parallel_for,
1902 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1903 OMPLoopScope PreInitScope(CGF, S);
1904 OMPCancelStackRAII CancelRegion(CGF, OMPD_distribute_parallel_for,
1905 /*HasCancel=*/false);
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
1907 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp distribute parallel for simd': emitted as an
// inlined region whose body is the captured statement.
1911 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
1912 const OMPDistributeParallelForSimdDirective &S) {
1913 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1914 CGM.getOpenMPRuntime().emitInlinedDirective(
1915 *this, OMPD_distribute_parallel_for_simd,
1916 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1917 OMPLoopScope PreInitScope(CGF, S);
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
1919 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp distribute simd': emitted as an inlined region
// whose body is the captured statement.
1923 void CodeGenFunction::EmitOMPDistributeSimdDirective(
1924 const OMPDistributeSimdDirective &S) {
1925 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1926 CGM.getOpenMPRuntime().emitInlinedDirective(
1927 *this, OMPD_distribute_simd,
1928 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1929 OMPLoopScope PreInitScope(CGF, S);
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
1931 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target parallel for simd': emitted as an inlined
// region whose body is the captured statement.
1935 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
1936 const OMPTargetParallelForSimdDirective &S) {
1937 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1938 CGM.getOpenMPRuntime().emitInlinedDirective(
1939 *this, OMPD_target_parallel_for_simd,
1940 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1941 OMPLoopScope PreInitScope(CGF, S);
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
1943 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target simd': emitted as an inlined region whose
// body is the captured statement.
1947 void CodeGenFunction::EmitOMPTargetSimdDirective(
1948 const OMPTargetSimdDirective &S) {
1949 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1950 CGM.getOpenMPRuntime().emitInlinedDirective(
1951 *this, OMPD_target_simd, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1952 OMPLoopScope PreInitScope(CGF, S);
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
1954 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp teams distribute': emitted as an inlined region
// whose body is the captured statement.
1958 void CodeGenFunction::EmitOMPTeamsDistributeDirective(
1959 const OMPTeamsDistributeDirective &S) {
1960 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1961 CGM.getOpenMPRuntime().emitInlinedDirective(
1962 *this, OMPD_teams_distribute,
1963 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1964 OMPLoopScope PreInitScope(CGF, S);
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
1966 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp teams distribute simd': emitted as an inlined
// region whose body is the captured statement.
1970 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
1971 const OMPTeamsDistributeSimdDirective &S) {
1972 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1973 CGM.getOpenMPRuntime().emitInlinedDirective(
1974 *this, OMPD_teams_distribute_simd,
1975 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1976 OMPLoopScope PreInitScope(CGF, S);
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
1978 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp teams distribute parallel for simd': emitted as an
// inlined region whose body is the captured statement.
1982 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
1983 const OMPTeamsDistributeParallelForSimdDirective &S) {
1984 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1985 CGM.getOpenMPRuntime().emitInlinedDirective(
1986 *this, OMPD_teams_distribute_parallel_for_simd,
1987 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1988 OMPLoopScope PreInitScope(CGF, S);
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
1990 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp teams distribute parallel for': emitted as an
// inlined region whose body is the captured statement.
1994 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
1995 const OMPTeamsDistributeParallelForDirective &S) {
1996 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1997 CGM.getOpenMPRuntime().emitInlinedDirective(
1998 *this, OMPD_teams_distribute_parallel_for,
1999 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2000 OMPLoopScope PreInitScope(CGF, S);
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
2002 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target teams': emitted as an inlined region whose
// body is the captured statement. Note: unlike the directives above, no
// OMPLexicalScope/OMPLoopScope is established here (non-loop directive).
2006 void CodeGenFunction::EmitOMPTargetTeamsDirective(
2007 const OMPTargetTeamsDirective &S) {
2008 CGM.getOpenMPRuntime().emitInlinedDirective(
2009 *this, OMPD_target_teams, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
2011 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target teams distribute': emitted as an inlined
// region whose body is the captured statement.
2015 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
2016 const OMPTargetTeamsDistributeDirective &S) {
2017 CGM.getOpenMPRuntime().emitInlinedDirective(
2018 *this, OMPD_target_teams_distribute,
2019 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
2021 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target teams distribute parallel for': emitted as
// an inlined region whose body is the captured statement.
2025 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
2026 const OMPTargetTeamsDistributeParallelForDirective &S) {
2027 CGM.getOpenMPRuntime().emitInlinedDirective(
2028 *this, OMPD_target_teams_distribute_parallel_for,
2029 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
2031 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target teams distribute parallel for simd':
// emitted as an inlined region whose body is the captured statement.
2035 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
2036 const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
2037 CGM.getOpenMPRuntime().emitInlinedDirective(
2038 *this, OMPD_target_teams_distribute_parallel_for_simd,
2039 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
// (the CGF.EmitStmt( line is elided in this listing; this is its argument)
2041 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2045 /// \brief Emit a helper variable and return corresponding lvalue.
///
/// \param CGF    The current code-generation function.
/// \param Helper Reference to a Sema-created helper variable (e.g. the LB/UB/
///               stride/is-last helpers of a worksharing loop).
/// \return LValue for the freshly emitted local variable.
2046 static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
2047 const DeclRefExpr *Helper) {
// Emit the local allocation (and any initializer) for the helper decl...
2048 auto VDecl = cast<VarDecl>(Helper->getDecl());
2049 CGF.EmitVarDecl(*VDecl);
// ...then hand back an lvalue the caller can load/store through.
2050 return CGF.EmitLValue(Helper);
/// Bundles an OpenMP 'schedule' clause kind with its (up to two) modifiers
/// (e.g. monotonic/nonmonotonic/simd), so they can be passed around together.
2054 struct ScheduleKindModifiersTy {
2055 OpenMPScheduleClauseKind Kind;
2056 OpenMPScheduleClauseModifier M1;
2057 OpenMPScheduleClauseModifier M2;
2058 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
2059 OpenMPScheduleClauseModifier M1,
2060 OpenMPScheduleClauseModifier M2)
2061 : Kind(Kind), M1(M1), M2(M2) {}
// Shared codegen for OpenMP worksharing loops ('for', 'for simd', and the
// loop part of combined directives). Emits the IV and helper variables,
// privatization/firstprivate/lastprivate/reduction/linear setup, then either
// a single statically-scheduled inner loop or an outer dispatch loop via
// EmitOMPForOuterLoop, followed by the clause finalization code.
// Returns true if the directive has a lastprivate clause (callers use this
// to decide whether an implicit barrier is needed).
// NOTE(review): this listing elides many original lines (numbering gaps) —
// asserts, else-branches, and closing braces are not all visible.
2065 bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
2066 // Emit the loop iteration variable.
2067 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
2068 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
2069 EmitVarDecl(*IVDecl);
2071 // Emit the iterations count variable.
2072 // If it is not a variable, Sema decided to calculate iterations count on each
2073 // iteration (e.g., it is foldable into a constant).
2074 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2075 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2076 // Emit calculation of the iterations count.
2077 EmitIgnoredExpr(S.getCalcLastIteration());
2080 auto &RT = CGM.getOpenMPRuntime();
2082 bool HasLastprivateClause;
2083 // Check pre-condition.
2085 OMPLoopScope PreInitScope(*this, S);
2086 // Skip the entire loop if we don't meet the precondition.
2087 // If the condition constant folds and can be elided, avoid emitting the
// ContBlock is only created when the precondition cannot be constant-folded;
// it stays null otherwise (checked again at the end before branching).
2090 llvm::BasicBlock *ContBlock = nullptr;
2091 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2095 auto *ThenBlock = createBasicBlock("omp.precond.then");
2096 ContBlock = createBasicBlock("omp.precond.end");
2097 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
2098 getProfileCount(&S));
2099 EmitBlock(ThenBlock);
2100 incrementProfileCounter(&S);
// An 'ordered' clause with a parameter means doacross loops: emit the
// runtime doacross initialization.
2103 bool Ordered = false;
2104 if (auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
2105 if (OrderedClause->getNumForLoops())
2106 RT.emitDoacrossInit(*this, S);
2111 llvm::DenseSet<const Expr *> EmittedFinals;
2112 emitAlignedClause(*this, S);
2113 EmitOMPLinearClauseInit(S);
2114 // Emit helper vars inits.
// LB/UB/ST/IL helpers back the runtime's static-init/dispatch protocol.
2116 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
2118 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
2120 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
2122 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
2124 // Emit 'then' code.
2126 OMPPrivateScope LoopScope(*this);
2127 if (EmitOMPFirstprivateClause(S, LoopScope)) {
2128 // Emit implicit barrier to synchronize threads and avoid data races on
2129 // initialization of firstprivate variables and post-update of
2130 // lastprivate variables.
2131 CGM.getOpenMPRuntime().emitBarrierCall(
2132 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2133 /*ForceSimpleCall=*/true);
2135 EmitOMPPrivateClause(S, LoopScope);
2136 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
2137 EmitOMPReductionClauseInit(S, LoopScope);
2138 EmitOMPPrivateLoopCounters(S, LoopScope);
2139 EmitOMPLinearClause(S, LoopScope);
2140 (void)LoopScope.Privatize();
2142 // Detect the loop schedule kind and chunk.
2143 llvm::Value *Chunk = nullptr;
2144 OpenMPScheduleTy ScheduleKind;
2145 if (auto *C = S.getSingleClause<OMPScheduleClause>()) {
2146 ScheduleKind.Schedule = C->getScheduleKind();
2147 ScheduleKind.M1 = C->getFirstScheduleModifier();
2148 ScheduleKind.M2 = C->getSecondScheduleModifier();
2149 if (const auto *Ch = C->getChunkSize()) {
2150 Chunk = EmitScalarExpr(Ch);
// Convert the chunk expression to the iteration variable's type.
2151 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
2152 S.getIterationVariable()->getType(),
2156 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2157 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2158 // OpenMP 4.5, 2.7.1 Loop Construct, Description.
2159 // If the static schedule kind is specified or if the ordered clause is
2160 // specified, and if no monotonic modifier is specified, the effect will
2161 // be as if the monotonic modifier was specified.
// Fast path: static non-chunked schedule needs no outer dispatch loop —
// each thread gets exactly one contiguous chunk.
2162 if (RT.isStaticNonchunked(ScheduleKind.Schedule,
2163 /* Chunked */ Chunk != nullptr) &&
2165 if (isOpenMPSimdDirective(S.getDirectiveKind()))
2166 EmitOMPSimdInit(S, /*IsMonotonic=*/true);
2167 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2168 // When no chunk_size is specified, the iteration space is divided into
2169 // chunks that are approximately equal in size, and at most one chunk is
2170 // distributed to each thread. Note that the size of the chunks is
2171 // unspecified in this case.
2172 RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
2173 IVSize, IVSigned, Ordered,
2174 IL.getAddress(), LB.getAddress(),
2175 UB.getAddress(), ST.getAddress());
2177 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2178 // UB = min(UB, GlobalUB);
2179 EmitIgnoredExpr(S.getEnsureUpperBound());
2181 EmitIgnoredExpr(S.getInit());
2182 // while (idx <= UB) { BODY; ++idx; }
2183 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
2185 [&S, LoopExit](CodeGenFunction &CGF) {
2186 CGF.EmitOMPLoopBody(S, LoopExit);
2187 CGF.EmitStopPoint(&S);
2189 [](CodeGenFunction &) {});
2190 EmitBlock(LoopExit.getBlock());
2191 // Tell the runtime we are done.
2192 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2193 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
2195 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
// Slow path: every other schedule goes through the outer dispatch loop.
// Monotonic execution order is implied by 'ordered', static/unknown
// schedules, or an explicit monotonic modifier.
2197 const bool IsMonotonic =
2198 Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
2199 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown ||
2200 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
2201 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
2202 // Emit the outer loop, which requests its work chunk [LB..UB] from
2203 // runtime and runs the inner loop to process it.
2204 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
2205 LB.getAddress(), UB.getAddress(), ST.getAddress(),
2206 IL.getAddress(), Chunk);
// For simd variants, emit the simd finalization guarded by IL != 0
// (i.e. only on the thread that executed the last iteration).
2208 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
2210 [&](CodeGenFunction &CGF) -> llvm::Value * {
2211 return CGF.Builder.CreateIsNotNull(
2212 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2215 EmitOMPReductionClauseFinal(S);
2216 // Emit post-update of the reduction variables if IsLastIter != 0.
2217 emitPostUpdateForReductionClause(
2218 *this, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2219 return CGF.Builder.CreateIsNotNull(
2220 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2222 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2223 if (HasLastprivateClause)
2224 EmitOMPLastprivateClauseFinal(
2225 S, isOpenMPSimdDirective(S.getDirectiveKind()),
2226 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
2228 EmitOMPLinearClauseFinal(S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2229 return CGF.Builder.CreateIsNotNull(
2230 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2232 // We're now done with the loop, so jump to the continuation block.
// (guarded by ContBlock != nullptr on an elided line)
2234 EmitBranch(ContBlock);
2235 EmitBlock(ContBlock, true);
2238 return HasLastprivateClause;
// Codegen for '#pragma omp for': runs EmitOMPWorksharingLoop inside an
// inlined 'for' region (with cancellation support), then emits the implicit
// end-of-worksharing barrier unless 'nowait' was given and there are no
// lastprivates.
2241 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
2242 bool HasLastprivates = false;
2243 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2244 PrePostActionTy &) {
2245 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
2246 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
2249 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2250 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
2254 // Emit an implicit barrier at the end.
// Lastprivates force the barrier even with 'nowait', so their final copies
// are visible before other threads read the originals.
2255 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
2256 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
// Codegen for '#pragma omp for simd': same worksharing-loop codegen as
// EmitOMPForDirective, but without a cancel region, inlined as a simd
// region; the trailing barrier still uses the OMPD_for barrier kind.
2260 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
2261 bool HasLastprivates = false;
2262 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2263 PrePostActionTy &) {
2264 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
2267 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2268 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2271 // Emit an implicit barrier at the end.
2272 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
2273 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
// Creates a temporary alloca of type \p Ty for the 'sections' helper
// variables (lb/ub/st/il/iv) and returns it as an lvalue, storing \p Init
// into it when provided.
// NOTE(review): the parameter line declaring the temp's name (original line
// 2278) and the 'if (Init)' guard / return are elided in this listing.
2277 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
2279 llvm::Value *Init = nullptr) {
2280 auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
2282 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
// Shared codegen for '#pragma omp sections' and the sections part of
// '#pragma omp parallel sections'. Lowers the construct to a statically
// scheduled worksharing loop over section indices (kmp_int32 lb/ub/st/il/iv
// helpers) whose body is a switch dispatching to each section statement.
// NOTE(review): this listing elides some original lines (numbering gaps) —
// e.g. several closing braces and the else-branch around the single-section
// case at 2345.
2286 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
2287 auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
// CS is null when the associated statement is a single statement rather
// than a compound of sections; that case emits exactly one case label.
2288 auto *CS = dyn_cast<CompoundStmt>(Stmt);
2289 bool HasLastprivates = false;
2290 auto &&CodeGen = [&S, Stmt, CS, &HasLastprivates](CodeGenFunction &CGF,
2291 PrePostActionTy &) {
2292 auto &C = CGF.CGM.getContext();
2293 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2294 // Emit helper vars inits.
2295 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
2296 CGF.Builder.getInt32(0));
// Global upper bound = number of sections - 1 (0 for a single statement).
2297 auto *GlobalUBVal = CS != nullptr ? CGF.Builder.getInt32(CS->size() - 1)
2298 : CGF.Builder.getInt32(0);
2300 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
2301 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
2302 CGF.Builder.getInt32(1));
2303 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
2304 CGF.Builder.getInt32(0));
2306 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
// Synthesize AST nodes (opaque IV/UB refs, 'IV <= UB' condition, '++IV'
// increment) so the generic inner-loop emitter can drive the dispatch.
2307 OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
2308 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
2309 OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
2310 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
2311 // Generate condition for loop.
2312 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
2313 OK_Ordinary, S.getLocStart(),
2314 /*fpContractable=*/false);
2315 // Increment for loop counter.
2316 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
2318 auto BodyGen = [Stmt, CS, &S, &IV](CodeGenFunction &CGF) {
2319 // Iterate through all sections and emit a switch construct:
2322 // <SectionStmt[0]>;
2325 // case <NumSection> - 1:
2326 // <SectionStmt[<NumSection> - 1]>;
2329 // .omp.sections.exit:
2330 auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
2331 auto *SwitchStmt = CGF.Builder.CreateSwitch(
2332 CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
2333 CS == nullptr ? 1 : CS->size());
// Compound case: one switch case per child section statement.
2335 unsigned CaseNumber = 0;
2336 for (auto *SubStmt : CS->children()) {
2337 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
2338 CGF.EmitBlock(CaseBB);
2339 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
2340 CGF.EmitStmt(SubStmt);
2341 CGF.EmitBranch(ExitBB);
// Single-statement case: a lone case 0 for the whole statement
// (the enclosing else and the EmitStmt line are elided here).
2345 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
2346 CGF.EmitBlock(CaseBB);
2347 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
2349 CGF.EmitBranch(ExitBB);
2351 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2354 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
2355 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
2356 // Emit implicit barrier to synchronize threads and avoid data races on
2357 // initialization of firstprivate variables and post-update of lastprivate
2359 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
2360 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2361 /*ForceSimpleCall=*/true);
2363 CGF.EmitOMPPrivateClause(S, LoopScope);
2364 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
2365 CGF.EmitOMPReductionClauseInit(S, LoopScope);
2366 (void)LoopScope.Privatize();
2368 // Emit static non-chunked loop.
2369 OpenMPScheduleTy ScheduleKind;
2370 ScheduleKind.Schedule = OMPC_SCHEDULE_static;
2371 CGF.CGM.getOpenMPRuntime().emitForStaticInit(
2372 CGF, S.getLocStart(), ScheduleKind, /*IVSize=*/32,
2373 /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(), LB.getAddress(),
2374 UB.getAddress(), ST.getAddress());
2375 // UB = min(UB, GlobalUB);
2376 auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
2377 auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
2378 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
2379 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
// IV = LB — start at this thread's assigned lower bound.
2381 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
2382 // while (idx <= UB) { BODY; ++idx; }
2383 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
2384 [](CodeGenFunction &) {});
2385 // Tell the runtime we are done.
2386 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2387 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
2389 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
2390 CGF.EmitOMPReductionClauseFinal(S);
2391 // Emit post-update of the reduction variables if IsLastIter != 0.
2392 emitPostUpdateForReductionClause(
2393 CGF, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2394 return CGF.Builder.CreateIsNotNull(
2395 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2398 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2399 if (HasLastprivates)
2400 CGF.EmitOMPLastprivateClauseFinal(
2401 S, /*NoFinals=*/false,
2402 CGF.Builder.CreateIsNotNull(
2403 CGF.EmitLoadOfScalar(IL, S.getLocStart())));
// Cancellation is only possible on the 'sections' flavors that allow it.
2406 bool HasCancel = false;
2407 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
2408 HasCancel = OSD->hasCancel();
2409 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
2410 HasCancel = OPSD->hasCancel();
2411 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
2412 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
2414 // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
2415 // clause. Otherwise the barrier will be generated by the codegen for the
2417 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
2418 // Emit implicit barrier to synchronize threads and avoid data races on
2419 // initialization of firstprivate variables.
2420 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
// Codegen for '#pragma omp sections': delegates the body to EmitSections
// (call elided in this listing), then emits the implicit end-of-construct
// barrier unless 'nowait' was specified.
2425 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
2427 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2430 // Emit an implicit barrier at the end.
2431 if (!S.getSingleClause<OMPNowaitClause>()) {
2432 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
// Codegen for a single '#pragma omp section' inside a sections region:
// simply emits the captured statement as an inlined 'section' region.
2437 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
2438 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2439 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2441 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2442 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
// Codegen for '#pragma omp single': collects copyprivate variables and their
// Sema-built copy helper expressions, emits the region via emitSingleRegion
// (which handles the copyprivate broadcast), and adds the implicit barrier
// when neither 'nowait' nor 'copyprivate' removes the need for it.
2446 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
2447 llvm::SmallVector<const Expr *, 8> CopyprivateVars;
2448 llvm::SmallVector<const Expr *, 8> DestExprs;
2449 llvm::SmallVector<const Expr *, 8> SrcExprs;
2450 llvm::SmallVector<const Expr *, 8> AssignmentOps;
2451 // Check if there are any 'copyprivate' clauses associated with this
2452 // 'single' construct.
2453 // Build a list of copyprivate variables along with helper expressions
2454 // (<source>, <destination>, <destination>=<source> expressions)
2455 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
2456 CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
2457 DestExprs.append(C->destination_exprs().begin(),
2458 C->destination_exprs().end());
2459 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
2460 AssignmentOps.append(C->assignment_ops().begin(),
2461 C->assignment_ops().end());
2463 // Emit code for 'single' region along with 'copyprivate' clauses
2464 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
// Privatize firstprivate/private variables inside the single region only.
2466 OMPPrivateScope SingleScope(CGF);
2467 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
2468 CGF.EmitOMPPrivateClause(S, SingleScope);
2469 (void)SingleScope.Privatize();
2470 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2473 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2474 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
2475 CopyprivateVars, DestExprs,
2476 SrcExprs, AssignmentOps);
2478 // Emit an implicit barrier at the end (to avoid data race on firstprivate
2479 // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
2480 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
2481 CGM.getOpenMPRuntime().emitBarrierCall(
2482 *this, S.getLocStart(),
2483 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
// Codegen for '#pragma omp master': emits the captured statement inside a
// runtime-managed master region (only the master thread executes it).
2487 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
2488 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2490 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2492 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2493 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
// Codegen for '#pragma omp critical': emits the captured statement inside a
// named runtime critical region, forwarding the optional 'hint' clause
// expression to the runtime.
2496 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
2497 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2499 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2501 Expr *Hint = nullptr;
2502 if (auto *HintClause = S.getSingleClause<OMPHintClause>())
2503 Hint = HintClause->getHint();
2504 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
// The directive name distinguishes independent critical sections.
2505 CGM.getOpenMPRuntime().emitCriticalRegion(*this,
2506 S.getDirectiveName().getAsString(),
2507 CodeGen, S.getLocStart(), Hint);
2510 void CodeGenFunction::EmitOMPParallelForDirective(
2511 const OMPParallelForDirective &S) {
2512 // Emit directive as a combined directive that consists of two implicit
2513 // directives: 'parallel' with 'for' directive.
2514 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
// The worksharing loop runs inside a cancellable 'parallel for' region.
2515 OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
2516 CGF.EmitOMPWorksharingLoop(S);
2518 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
2521 void CodeGenFunction::EmitOMPParallelForSimdDirective(
2522 const OMPParallelForSimdDirective &S) {
2523 // Emit directive as a combined directive that consists of two implicit
2524 // directives: 'parallel' with 'for' directive.
// No cancel region here: 'parallel for simd' is not cancellable.
2525 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2526 CGF.EmitOMPWorksharingLoop(S);
2528 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
2531 void CodeGenFunction::EmitOMPParallelSectionsDirective(
2532 const OMPParallelSectionsDirective &S) {
2533 // Emit directive as a combined directive that consists of two implicit
2534 // directives: 'parallel' with 'sections' directive.
// The inner region reuses the shared sections-to-loop lowering.
2535 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2536 CGF.EmitSections(S);
2538 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
2541 void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
2542 const RegionCodeGenTy &BodyGen,
2543 const TaskGenTy &TaskGen,
2544 OMPTaskDataTy &Data) {
2545 // Emit outlined function for task construct.
2546 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2547 auto *I = CS->getCapturedDecl()->param_begin();
2548 auto *PartId = std::next(I);
2549 auto *TaskT = std::next(I, 4);
2550 // Check if the task is final
2551 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
2552 // If the condition constant folds and can be elided, try to avoid emitting
2553 // the condition and the dead arm of the if/else.
2554 auto *Cond = Clause->getCondition();
2556 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
2557 Data.Final.setInt(CondConstant);
2559 Data.Final.setPointer(EvaluateExprAsBool(Cond));
2561 // By default the task is not final.
2562 Data.Final.setInt(/*IntVal=*/false);
2564 // Check if the task has 'priority' clause.
2565 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
2566 auto *Prio = Clause->getPriority();
2567 Data.Priority.setInt(/*IntVal=*/true);
2568 Data.Priority.setPointer(EmitScalarConversion(
2569 EmitScalarExpr(Prio), Prio->getType(),
2570 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
2571 Prio->getExprLoc()));
2573 // The first function argument for tasks is a thread id, the second one is a
2574 // part id (0 for tied tasks, >=0 for untied task).
2575 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
2576 // Get list of private variables.
2577 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
2578 auto IRef = C->varlist_begin();
2579 for (auto *IInit : C->private_copies()) {
2580 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2581 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2582 Data.PrivateVars.push_back(*IRef);
2583 Data.PrivateCopies.push_back(IInit);
2588 EmittedAsPrivate.clear();
2589 // Get list of firstprivate variables.
2590 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
2591 auto IRef = C->varlist_begin();
2592 auto IElemInitRef = C->inits().begin();
2593 for (auto *IInit : C->private_copies()) {
2594 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2595 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2596 Data.FirstprivateVars.push_back(*IRef);
2597 Data.FirstprivateCopies.push_back(IInit);
2598 Data.FirstprivateInits.push_back(*IElemInitRef);
2604 // Get list of lastprivate variables (for taskloops).
2605 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
2606 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
2607 auto IRef = C->varlist_begin();
2608 auto ID = C->destination_exprs().begin();
2609 for (auto *IInit : C->private_copies()) {
2610 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2611 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2612 Data.LastprivateVars.push_back(*IRef);
2613 Data.LastprivateCopies.push_back(IInit);
2615 LastprivateDstsOrigs.insert(
2616 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
2617 cast<DeclRefExpr>(*IRef)});
2622 // Build list of dependences.
2623 for (const auto *C : S.getClausesOfKind<OMPDependClause>())
2624 for (auto *IRef : C->varlists())
2625 Data.Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
2626 auto &&CodeGen = [PartId, &S, &Data, CS, &BodyGen, &LastprivateDstsOrigs](
2627 CodeGenFunction &CGF, PrePostActionTy &Action) {
2628 // Set proper addresses for generated private copies.
2629 OMPPrivateScope Scope(CGF);
2630 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
2631 !Data.LastprivateVars.empty()) {
2632 auto *CopyFn = CGF.Builder.CreateLoad(
2633 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)));
2634 auto *PrivatesPtr = CGF.Builder.CreateLoad(
2635 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)));
2637 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
2638 llvm::SmallVector<llvm::Value *, 16> CallArgs;
2639 CallArgs.push_back(PrivatesPtr);
2640 for (auto *E : Data.PrivateVars) {
2641 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2642 Address PrivatePtr = CGF.CreateMemTemp(
2643 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
2644 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2645 CallArgs.push_back(PrivatePtr.getPointer());
2647 for (auto *E : Data.FirstprivateVars) {
2648 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2649 Address PrivatePtr =
2650 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
2651 ".firstpriv.ptr.addr");
2652 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2653 CallArgs.push_back(PrivatePtr.getPointer());
2655 for (auto *E : Data.LastprivateVars) {
2656 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2657 Address PrivatePtr =
2658 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
2659 ".lastpriv.ptr.addr");
2660 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2661 CallArgs.push_back(PrivatePtr.getPointer());
2663 CGF.EmitRuntimeCall(CopyFn, CallArgs);
2664 for (auto &&Pair : LastprivateDstsOrigs) {
2665 auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
2667 const_cast<VarDecl *>(OrigVD),
2668 /*RefersToEnclosingVariableOrCapture=*/CGF.CapturedStmtInfo->lookup(
2670 Pair.second->getType(), VK_LValue, Pair.second->getExprLoc());
2671 Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
2672 return CGF.EmitLValue(&DRE).getAddress();
2675 for (auto &&Pair : PrivatePtrs) {
2676 Address Replacement(CGF.Builder.CreateLoad(Pair.second),
2677 CGF.getContext().getDeclAlign(Pair.first));
2678 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
2681 (void)Scope.Privatize();
2686 auto *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
2687 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
2688 Data.NumberOfParts);
2689 OMPLexicalScope Scope(*this, S);
2690 TaskGen(*this, OutlinedFn, Data);
// Lowers '#pragma omp task': captures the associated statement, collects the
// 'if' and 'untied' clause information, and delegates to
// EmitOMPTaskBasedDirective with a BodyGen callback (emits the task body) and
// a TaskGen callback (emits the runtime task-spawn call).
// NOTE(review): the original numbering jumps (2703-2707, 2712, 2718-2719) —
// the 'OMPTaskDataTy Data' declaration and several closing braces are elided
// from this listing; verify against the full file.
2693 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
2694 // Emit outlined function for task construct.
2695 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2696 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
2697 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
// An 'if' clause with no name modifier, or with the 'task' modifier, selects
// the condition that decides deferred vs. undeferred execution.
2698 const Expr *IfCond = nullptr;
2699 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
2700 if (C->getNameModifier() == OMPD_unknown ||
2701 C->getNameModifier() == OMPD_task) {
2702 IfCond = C->getCondition();
// A task is tied unless an 'untied' clause is present.
2708 // Check if we should emit tied or untied task.
2709 Data.Tied = !S.getSingleClause<OMPUntiedClause>();
2710 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
2711 CGF.EmitStmt(CS->getCapturedStmt());
2713 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
2714 IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
2715 const OMPTaskDataTy &Data) {
2716 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getLocStart(), S, OutlinedFn,
2717 SharedsTy, CapturedStruct, IfCond,
2720 EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
// Lowers '#pragma omp taskyield' to a single runtime taskyield call at the
// directive's start location.
2723 void CodeGenFunction::EmitOMPTaskyieldDirective(
2724 const OMPTaskyieldDirective &S) {
2725 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
// Lowers '#pragma omp barrier' to an explicit runtime barrier call, tagged
// with OMPD_barrier so the runtime knows it is a user-written barrier.
2728 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
2729 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
// Lowers '#pragma omp taskwait' to a single runtime taskwait call.
2732 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
2733 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
// Lowers '#pragma omp taskgroup': the captured body is emitted inline inside a
// runtime-managed taskgroup region. The lexical scope handles captured
// expressions for the inlined (non-outlined) emission mode.
2736 void CodeGenFunction::EmitOMPTaskgroupDirective(
2737 const OMPTaskgroupDirective &S) {
2738 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2740 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2742 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2743 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
// Lowers '#pragma omp flush'. The immediately-invoked lambda produces the
// flush list from the optional flush clause; with no clause the flush applies
// to all accessible storage (the no-clause return path is elided from this
// listing — original line 2751-2752 gap).
2746 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
2747 CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
2748 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) {
2749 return llvm::makeArrayRef(FlushClause->varlist_begin(),
2750 FlushClause->varlist_end());
2753 }(), S.getLocStart());
// Emits the loop nest for '#pragma omp distribute': precondition check,
// helper-variable setup (LB/UB/stride/is-last), privatization, then either a
// single statically-scheduled chunk (static non-chunked schedule) or a
// runtime-driven outer loop that fetches chunks, followed by the lastprivate
// final copies.
// NOTE(review): this listing elides several lines (e.g. 2761, 2769-2770,
// 2775, 2779-2780, 2783-2785, 2792-2793, 2795, 2797/2799/2801/2803 with the
// LB/UB/ST/IL LValue declarations, 2829-2831, 2849, 2853, 2857, 2861, 2866,
// 2872-2873, 2880-2881, 2883) — the helper-variable definitions and closing
// braces are not visible here; verify against the full file.
2756 void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
2757 // Emit the loop iteration variable.
2758 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
2759 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
2760 EmitVarDecl(*IVDecl);
2762 // Emit the iterations count variable.
2763 // If it is not a variable, Sema decided to calculate iterations count on each
2764 // iteration (e.g., it is foldable into a constant).
2765 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2766 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2767 // Emit calculation of the iterations count.
2768 EmitIgnoredExpr(S.getCalcLastIteration());
2771 auto &RT = CGM.getOpenMPRuntime();
2773 bool HasLastprivateClause = false;
2774 // Check pre-condition.
2776 OMPLoopScope PreInitScope(*this, S);
2777 // Skip the entire loop if we don't meet the precondition.
2778 // If the condition constant folds and can be elided, avoid emitting the
2781 llvm::BasicBlock *ContBlock = nullptr;
2782 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2786 auto *ThenBlock = createBasicBlock("omp.precond.then");
2787 ContBlock = createBasicBlock("omp.precond.end");
2788 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
2789 getProfileCount(&S));
2790 EmitBlock(ThenBlock);
2791 incrementProfileCounter(&S);
2794 // Emit 'then' code.
2796 // Emit helper vars inits.
2798 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
2800 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
2802 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
2804 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
2806 OMPPrivateScope LoopScope(*this);
2807 if (EmitOMPFirstprivateClause(S, LoopScope)) {
2808 // Emit implicit barrier to synchronize threads and avoid data races on
2809 // initialization of firstprivate variables and post-update of
2810 // lastprivate variables.
2811 CGM.getOpenMPRuntime().emitBarrierCall(
2812 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2813 /*ForceSimpleCall=*/true);
2815 EmitOMPPrivateClause(S, LoopScope);
2816 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
2817 EmitOMPPrivateLoopCounters(S, LoopScope);
2818 (void)LoopScope.Privatize();
2820 // Detect the distribute schedule kind and chunk.
2821 llvm::Value *Chunk = nullptr;
2822 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
2823 if (auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
2824 ScheduleKind = C->getDistScheduleKind();
// The chunk expression is converted to the iteration variable's type so the
// runtime sees a value of the expected width/signedness.
2825 if (const auto *Ch = C->getChunkSize()) {
2826 Chunk = EmitScalarExpr(Ch);
2827 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
2828 S.getIterationVariable()->getType(),
2832 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2833 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2835 // OpenMP [2.10.8, distribute Construct, Description]
2836 // If dist_schedule is specified, kind must be static. If specified,
2837 // iterations are divided into chunks of size chunk_size, chunks are
2838 // assigned to the teams of the league in a round-robin fashion in the
2839 // order of the team number. When no chunk_size is specified, the
2840 // iteration space is divided into chunks that are approximately equal
2841 // in size, and at most one chunk is distributed to each team of the
2842 // league. The size of the chunks is unspecified in this case.
2843 if (RT.isStaticNonchunked(ScheduleKind,
2844 /* Chunked */ Chunk != nullptr)) {
// Static non-chunked: each team gets exactly one chunk, so a single
// static-init/inner-loop/static-finish sequence suffices.
2845 RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
2846 IVSize, IVSigned, /* Ordered = */ false,
2847 IL.getAddress(), LB.getAddress(),
2848 UB.getAddress(), ST.getAddress())
2850 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2851 // UB = min(UB, GlobalUB);
2852 EmitIgnoredExpr(S.getEnsureUpperBound());
2854 EmitIgnoredExpr(S.getInit());
2855 // while (idx <= UB) { BODY; ++idx; }
2856 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
2858 [&S, LoopExit](CodeGenFunction &CGF) {
2859 CGF.EmitOMPLoopBody(S, LoopExit);
2860 CGF.EmitStopPoint(&S);
2862 [](CodeGenFunction &) {});
2863 EmitBlock(LoopExit.getBlock());
2864 // Tell the runtime we are done.
2865 RT.emitForStaticFinish(*this, S.getLocStart());
2867 // Emit the outer loop, which requests its work chunk [LB..UB] from
2868 // runtime and runs the inner loop to process it.
2869 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope,
2870 LB.getAddress(), UB.getAddress(), ST.getAddress(),
2871 IL.getAddress(), Chunk);
2874 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2875 if (HasLastprivateClause)
2876 EmitOMPLastprivateClauseFinal(
2877 S, /*NoFinals=*/false,
2878 Builder.CreateIsNotNull(
2879 EmitLoadOfScalar(IL, S.getLocStart())));
2882 // We're now done with the loop, so jump to the continuation block.
2884 EmitBranch(ContBlock);
2885 EmitBlock(ContBlock, true);
// Lowers a standalone '#pragma omp distribute' by emitting the distribute
// loop as an inlined region (no outlining); the trailing argument of the
// emitInlinedDirective call is elided from this listing (line 2897 gap).
2890 void CodeGenFunction::EmitOMPDistributeDirective(
2891 const OMPDistributeDirective &S) {
2892 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2893 CGF.EmitOMPDistributeLoop(S);
2895 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2896 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen,
// Outlines the captured statement of an 'ordered simd' region into its own
// function. The function is marked NoInline so the ordered region's body stays
// a distinct call site (the 'return Fn;' and closing brace are elided from
// this listing — lines 2907-2909 gap).
2900 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
2901 const CapturedStmt *S) {
// A fresh CodeGenFunction with suppressNewContext avoids inheriting the
// current function's debug/codegen state into the outlined helper.
2902 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
2903 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
2904 CGF.CapturedStmtInfo = &CapStmtInfo;
2905 auto *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
2906 Fn->addFnAttr(llvm::Attribute::NoInline);
// Lowers '#pragma omp ordered'. Three forms are handled:
//  - no associated statement: only doacross 'depend' clauses, emitted as
//    runtime doacross-ordered calls (an early return after the loop is elided
//    from this listing — lines 2914-2915 gap);
//  - with a 'simd' clause: the body is outlined and called directly;
//  - otherwise: the body is emitted inline inside a runtime ordered region
//    (the '!C' argument suppresses the region calls for the simd form).
2910 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
2911 if (!S.getAssociatedStmt()) {
2912 for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
2913 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
2916 auto *C = S.getSingleClause<OMPSIMDClause>();
2917 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
2918 PrePostActionTy &Action) {
2920 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2921 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
2922 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
2923 auto *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
2924 CGF.EmitNounwindRuntimeCall(OutlinedFn, CapturedVars);
2928 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2931 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2932 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C);
// Converts an RValue (scalar or complex, never aggregate) to a scalar of
// DestType, used by the atomic lowering to coerce loaded/stored values.
// Complex inputs go through complex-to-scalar conversion; the trailing Loc
// arguments of both calls are elided from this listing (2943/2945 gaps).
2935 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
2936 QualType SrcType, QualType DestType,
2937 SourceLocation Loc) {
2938 assert(CGF.hasScalarEvaluationKind(DestType) &&
2939 "DestType must have scalar evaluation kind.");
2940 assert(!Val.isAggregate() && "Must be a scalar or complex.");
2941 return Val.isScalar()
2942 ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType,
2944 : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
// Converts an RValue (scalar or complex) to a complex value of DestType.
// A scalar input becomes the real part with a zero imaginary part; a complex
// input has both components converted element-wise. The final
// 'return ComplexVal;' is elided from this listing (2969-2972 gap).
2948 static CodeGenFunction::ComplexPairTy
2949 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
2950 QualType DestType, SourceLocation Loc) {
2951 assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
2952 "DestType must have complex evaluation kind.");
2953 CodeGenFunction::ComplexPairTy ComplexVal;
2954 if (Val.isScalar()) {
2955 // Convert the input element to the element type of the complex.
2956 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
2957 auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
2958 DestElementType, Loc);
2959 ComplexVal = CodeGenFunction::ComplexPairTy(
2960 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
2962 assert(Val.isComplex() && "Must be a scalar or complex.");
2963 auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
2964 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
2965 ComplexVal.first = CGF.EmitScalarConversion(
2966 Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
2967 ComplexVal.second = CGF.EmitScalarConversion(
2968 Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
// Stores RVal into LVal for 'omp atomic write'. Global-register lvalues
// cannot be stored atomically, so they take the plain store path; everything
// else uses an atomic store with seq_cst or monotonic ordering depending on
// the presence of a 'seq_cst' clause.
2973 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
2974 LValue LVal, RValue RVal) {
2975 if (LVal.isGlobalReg()) {
2976 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
2978 CGF.EmitAtomicStore(RVal, LVal,
2979 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
2980 : llvm::AtomicOrdering::Monotonic,
2981 LVal.isVolatile(), /*IsInit=*/false);
// Non-atomic store of RVal (of type RValTy) into LVal, converting to the
// destination's evaluation kind first. Scalar and complex destinations are
// supported; aggregates are unreachable here. The case labels and the
// complex-path EmitStoreOfComplex call are partially elided from this listing
// (2988/2991-2994/2996-2998 gaps).
2985 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
2986 QualType RValTy, SourceLocation Loc) {
2987 switch (getEvaluationKind(LVal.getType())) {
2989 EmitStoreThroughLValue(RValue::get(convertToScalarValue(
2990 *this, RVal, RValTy, LVal.getType(), Loc)),
2995 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
2999 llvm_unreachable("Must be a scalar or complex.");
// Lowers 'omp atomic read': v = x. 'x' is loaded atomically (or via a plain
// load if it is a global register, which cannot be accessed atomically), an
// implicit flush is emitted when 'seq_cst' is present, and the value is then
// stored non-atomically into 'v'.
3003 static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
3004 const Expr *X, const Expr *V,
3005 SourceLocation Loc) {
3007 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
3008 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
3009 LValue XLValue = CGF.EmitLValue(X);
3010 LValue VLValue = CGF.EmitLValue(V);
3011 RValue Res = XLValue.isGlobalReg()
3012 ? CGF.EmitLoadOfLValue(XLValue, Loc)
3013 : CGF.EmitAtomicLoad(
3015 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3016 : llvm::AtomicOrdering::Monotonic,
3017 XLValue.isVolatile());
3018 // OpenMP, 2.12.6, atomic Construct
3019 // Any atomic construct with a seq_cst clause forces the atomically
3020 // performed operation to include an implicit flush operation without a
// The flush call itself is guarded by IsSeqCst (the 'if' on the elided line
// 3021-3022 — verify against the full file).
3023 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3024 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
// Lowers 'omp atomic write': x = expr. The expression is evaluated outside
// the atomic operation and stored atomically via emitSimpleAtomicStore; a
// seq_cst clause additionally triggers an implicit flush (the guarding 'if'
// is on an elided line — 3036-3037 gap).
3027 static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
3028 const Expr *X, const Expr *E,
3029 SourceLocation Loc) {
3031 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
3032 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
3033 // OpenMP, 2.12.6, atomic Construct
3034 // Any atomic construct with a seq_cst clause forces the atomically
3035 // performed operation to include an implicit flush operation without a
3038 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
// Tries to lower an atomic update as a single 'atomicrmw' instruction.
// Returns {true, old value} on success, or {false, null} when the operation
// cannot be expressed as an atomicrmw (non-integer operands, complex lvalue,
// non-commutative op with 'x' on the wrong side, or no target atomic support)
// — in which case the caller falls back to a compare-and-swap loop.
// NOTE(review): the 'switch (BO)' header and the individual 'case BO_*'
// labels/breaks are elided from this listing (gaps at 3059-3062, 3064-3065,
// 3069-3070, etc.); only the assignments to RMWOp are visible.
3041 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
3043 BinaryOperatorKind BO,
3044 llvm::AtomicOrdering AO,
3045 bool IsXLHSInRHSPart) {
3046 auto &Context = CGF.CGM.getContext();
3047 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
3048 // expression is simple and atomic is allowed for the given type for the
3050 if (BO == BO_Comma || !Update.isScalar() ||
3051 !Update.getScalarVal()->getType()->isIntegerTy() ||
3052 !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
3053 (Update.getScalarVal()->getType() !=
3054 X.getAddress().getElementType())) ||
3055 !X.getAddress().getElementType()->isIntegerTy() ||
3056 !Context.getTargetInfo().hasBuiltinAtomic(
3057 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
3058 return std::make_pair(false, RValue::get(nullptr));
3060 llvm::AtomicRMWInst::BinOp RMWOp;
3063 RMWOp = llvm::AtomicRMWInst::Add;
// Subtraction only maps to atomicrmw sub when 'x' is the LHS (x = x - e);
// 'x = e - x' is not expressible as a single RMW.
3066 if (!IsXLHSInRHSPart)
3067 return std::make_pair(false, RValue::get(nullptr));
3068 RMWOp = llvm::AtomicRMWInst::Sub;
3071 RMWOp = llvm::AtomicRMWInst::And;
3074 RMWOp = llvm::AtomicRMWInst::Or;
3077 RMWOp = llvm::AtomicRMWInst::Xor;
// '<' / '>' forms implement min/max; the chosen RMW op depends on both the
// signedness of 'x' and which side of the comparison 'x' appears on.
3080 RMWOp = X.getType()->hasSignedIntegerRepresentation()
3081 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
3082 : llvm::AtomicRMWInst::Max)
3083 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
3084 : llvm::AtomicRMWInst::UMax);
3087 RMWOp = X.getType()->hasSignedIntegerRepresentation()
3088 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
3089 : llvm::AtomicRMWInst::Min)
3090 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
3091 : llvm::AtomicRMWInst::UMin);
// Plain assignment maps to an exchange.
3094 RMWOp = llvm::AtomicRMWInst::Xchg;
3103 return std::make_pair(false, RValue::get(nullptr));
3121 llvm_unreachable("Unsupported atomic update operation");
3123 auto *UpdateVal = Update.getScalarVal();
// Constant updates may have a different width than 'x'; cast to match.
3124 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
3125 UpdateVal = CGF.Builder.CreateIntCast(
3126 IC, X.getAddress().getElementType(),
3127 X.getType()->hasSignedIntegerRepresentation());
3129 auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
3130 return std::make_pair(true, RValue::get(Res));
// Emits an atomic update of X with E using operator BO. First tries a single
// atomicrmw (emitOMPAtomicRMW); if that fails, falls back to either a plain
// load/compute/store for global-register lvalues or a compare-and-swap loop
// via EmitAtomicUpdate, where CommonGen computes the new value from the old.
// Returns {used-atomicrmw, old value} (the trailing 'return Res;' is elided
// from this listing — 3152-3156 gap).
3133 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
3134 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3135 llvm::AtomicOrdering AO, SourceLocation Loc,
3136 const llvm::function_ref<RValue(RValue)> &CommonGen) {
3137 // Update expressions are allowed to have the following forms:
3138 // x binop= expr; -> xrval + expr;
3139 // x++, ++x -> xrval + 1;
3140 // x--, --x -> xrval - 1;
3141 // x = x binop expr; -> xrval binop expr
3142 // x = expr Op x; - > expr binop xrval;
3143 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
3145 if (X.isGlobalReg()) {
3146 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
3148 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
3150 // Perform compare-and-swap procedure.
3151 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
// Lowers 'omp atomic update': x = x binop expr (in any of the allowed
// syntactic forms). Sema pre-builds UE as a binary operator over two
// OpaqueValueExprs (one standing for the old value of 'x', one for 'expr');
// the Gen callback re-evaluates UE with those opaques bound, which is what the
// CAS fallback in EmitOMPAtomicSimpleUpdateExpr uses.
3157 static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
3158 const Expr *X, const Expr *E,
3159 const Expr *UE, bool IsXLHSInRHSPart,
3160 SourceLocation Loc) {
3161 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
3162 "Update expr in 'atomic update' must be a binary operator.");
3163 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
3164 // Update expressions are allowed to have the following forms:
3165 // x binop= expr; -> xrval + expr;
3166 // x++, ++x -> xrval + 1;
3167 // x--, --x -> xrval - 1;
3168 // x = x binop expr; -> xrval binop expr
3169 // x = expr Op x; - > expr binop xrval;
3170 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
3171 LValue XLValue = CGF.EmitLValue(X);
3172 RValue ExprRValue = CGF.EmitAnyExpr(E);
3173 auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3174 : llvm::AtomicOrdering::Monotonic;
// IsXLHSInRHSPart tells us which opaque operand is the old 'x' value and
// which is the pre-evaluated 'expr'.
3175 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
3176 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
3177 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
3178 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
3180 [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
3181 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3182 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
3183 return CGF.EmitAnyExpr(UE);
3185 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
3186 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
3187 // OpenMP, 2.12.6, atomic Construct
3188 // Any atomic construct with a seq_cst clause forces the atomically
3189 // performed operation to include an implicit flush operation without a
// The flush call is guarded by IsSeqCst (the 'if' is on an elided line —
// 3190-3191 gap).
3192 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
// Converts an RValue to ResType's evaluation kind (scalar or complex),
// wrapping the scalar/complex conversion helpers; aggregates are unreachable.
// The case labels and the scalar-path 'return RValue::get(...)' wrapper are
// partially elided from this listing (3199-3200/3202/3205-3208 gaps).
3195 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
3196 QualType SourceType, QualType ResType,
3197 SourceLocation Loc) {
3198 switch (CGF.getEvaluationKind(ResType)) {
3201 convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
3203 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
3204 return RValue::getComplex(Res.first, Res.second);
3209 llvm_unreachable("Must be a scalar or complex.");
// Lowers 'omp atomic capture': atomically update (or overwrite) 'x' and
// capture its old (postfix form, v = x++) or new (prefix form, v = ++x) value
// into 'v'. Two paths:
//  - UE != nullptr: a real update; reuse the opaque-value machinery from
//    'atomic update' and recompute the new value when an atomicrmw supplied
//    only the old one (the 'if (UE)' guard itself is on an elided line).
//  - otherwise: plain overwrite, lowered as an atomic exchange when possible.
// Finally the captured value is stored into 'v' and a seq_cst flush emitted.
// NOTE(review): the 'RValue NewVVal;' declaration and several closing braces
// are elided from this listing (gaps at 3219, 3226, 3248-3249, 3252,
// 3257/3259, 3263-3265, 3271-3273, 3277-3278, 3281-3282, 3288-3289).
3212 static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
3213 bool IsPostfixUpdate, const Expr *V,
3214 const Expr *X, const Expr *E,
3215 const Expr *UE, bool IsXLHSInRHSPart,
3216 SourceLocation Loc) {
3217 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
3218 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
3220 LValue VLValue = CGF.EmitLValue(V);
3221 LValue XLValue = CGF.EmitLValue(X);
3222 RValue ExprRValue = CGF.EmitAnyExpr(E);
3223 auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3224 : llvm::AtomicOrdering::Monotonic;
3225 QualType NewVValType;
3227 // 'x' is updated with some additional value.
3228 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
3229 "Update expr in 'atomic capture' must be a binary operator.");
3230 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
3231 // Update expressions are allowed to have the following forms:
3232 // x binop= expr; -> xrval + expr;
3233 // x++, ++x -> xrval + 1;
3234 // x--, --x -> xrval - 1;
3235 // x = x binop expr; -> xrval binop expr
3236 // x = expr Op x; - > expr binop xrval;
3237 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
3238 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
3239 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
3240 NewVValType = XRValExpr->getType();
3241 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
// Gen both computes the updated value for the CAS path and records the value
// to be captured into 'v' (old X for postfix, new value for prefix).
3242 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
3243 IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
3244 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3245 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
3246 RValue Res = CGF.EmitAnyExpr(UE);
3247 NewVVal = IsPostfixUpdate ? XRValue : Res;
3250 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
3251 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
3253 // 'atomicrmw' instruction was generated.
3254 if (IsPostfixUpdate) {
3255 // Use old value from 'atomicrmw'.
3256 NewVVal = Res.second;
3258 // 'atomicrmw' does not provide new value, so evaluate it using old
3260 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3261 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
3262 NewVVal = CGF.EmitAnyExpr(UE);
3266 // 'x' is simply rewritten with some 'expr'.
3267 NewVValType = X->getType().getNonReferenceType();
3268 ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
3269 X->getType().getNonReferenceType(), Loc);
3270 auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
3274 // Try to perform atomicrmw xchg, otherwise simple exchange.
3275 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
3276 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
3279 // 'atomicrmw' instruction was generated.
3280 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
3283 // Emit post-update store to 'v' of old/new 'x' value.
3284 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
3285 // OpenMP, 2.12.6, atomic Construct
3286 // Any atomic construct with a seq_cst clause forces the atomically
3287 // performed operation to include an implicit flush operation without a
3290 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
// Dispatches an 'omp atomic' construct to the read/write/update/capture
// emitter matching the clause kind; every other clause kind is listed so the
// switch stays exhaustive and rejects clauses not allowed on 'atomic'.
// The 'switch (Kind)' header, 'case OMPC_*' labels for the four handled kinds
// and many of the listed rejected clauses are elided from this listing.
3293 static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
3294 bool IsSeqCst, bool IsPostfixUpdate,
3295 const Expr *X, const Expr *V, const Expr *E,
3296 const Expr *UE, bool IsXLHSInRHSPart,
3297 SourceLocation Loc) {
3300 EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
3303 EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
3307 EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
3310 EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
3311 IsXLHSInRHSPart, Loc);
3315 case OMPC_num_threads:
3317 case OMPC_firstprivate:
3318 case OMPC_lastprivate:
3319 case OMPC_reduction:
3329 case OMPC_copyprivate:
3331 case OMPC_proc_bind:
3336 case OMPC_threadprivate:
3338 case OMPC_mergeable:
3343 case OMPC_num_teams:
3344 case OMPC_thread_limit:
3346 case OMPC_grainsize:
3348 case OMPC_num_tasks:
3350 case OMPC_dist_schedule:
3351 case OMPC_defaultmap:
3355 case OMPC_use_device_ptr:
3356 case OMPC_is_device_ptr:
3357 llvm_unreachable("Clause is not allowed in 'omp atomic'.");
// Entry point for '#pragma omp atomic'. Determines the operative clause kind
// (the first non-seq_cst clause; default read/update handling when none),
// enters cleanups for ExprWithCleanups statements (including each statement of
// a capture compound), then emits the atomic expression as an inlined region.
3361 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
3362 bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
3363 OpenMPClauseKind Kind = OMPC_unknown;
3364 for (auto *C : S.clauses()) {
3365 // Find first clause (skip seq_cst clause, if it is first).
3366 if (C->getClauseKind() != OMPC_seq_cst) {
3367 Kind = C->getClauseKind();
// Strip the implicit capture so we look at the user's statement directly
// (the 'const auto *CS =' lead-in is on an elided line — 3368-3372 gap).
3373 S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
3374 if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
3375 enterFullExpression(EWC);
3377 // Processing for statements under 'atomic capture'.
3378 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
3379 for (const auto *C : Compound->body()) {
3380 if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
3381 enterFullExpression(EWC);
3386 auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF,
3387 PrePostActionTy &) {
3388 CGF.EmitStopPoint(CS);
3389 EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
3390 S.getV(), S.getExpr(), S.getUpdateExpr(),
3391 S.isXLHSInRHSPart(), S.getLocStart());
3393 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
3394 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
// Outlines the body of a '#pragma omp target' region into a standalone device
// function. Firstprivate/private clauses are privatized inside the region.
// Returns the outlined function and, when this is an offload entry, the
// constant ID used by the offloading tables.
3397 std::pair<llvm::Function * /*OutlinedFn*/, llvm::Constant * /*OutlinedFnID*/>
3398 CodeGenFunction::EmitOMPTargetDirectiveOutlinedFunction(
3399 CodeGenModule &CGM, const OMPTargetDirective &S, StringRef ParentName,
3400 bool IsOffloadEntry) {
3401 llvm::Function *OutlinedFn = nullptr;
3402 llvm::Constant *OutlinedFnID = nullptr;
3403 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3404 OMPPrivateScope PrivateScope(CGF);
3405 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3406 CGF.EmitOMPPrivateClause(S, PrivateScope);
3407 (void)PrivateScope.Privatize();
3410 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3412 // Emit target region as a standalone region.
3413 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
3414 S, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry, CodeGen);
3415 return std::make_pair(OutlinedFn, OutlinedFnID);
// Host-side lowering of '#pragma omp target': captures variables, evaluates
// 'if' and 'device' clauses, decides whether the region is a real offload
// entry (false when the 'if' condition constant-folds to false or no target
// triples are configured), mangles the parent name for the device kernel,
// outlines the region, and emits the runtime target call.
3418 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
3419 const CapturedStmt &CS = *cast<CapturedStmt>(S.getAssociatedStmt());
3421 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3422 GenerateOpenMPCapturedVars(CS, CapturedVars);
3424 llvm::Function *Fn = nullptr;
3425 llvm::Constant *FnID = nullptr;
3427 // Check if we have any if clause associated with the directive.
3428 const Expr *IfCond = nullptr;
3430 if (auto *C = S.getSingleClause<OMPIfClause>()) {
3431 IfCond = C->getCondition();
3434 // Check if we have any device clause associated with the directive.
3435 const Expr *Device = nullptr;
3436 if (auto *C = S.getSingleClause<OMPDeviceClause>()) {
3437 Device = C->getDevice();
3440 // Check if we have an if clause whose conditional always evaluates to false
3441 // or if we do not have any targets specified. If so the target region is not
3442 // an offload entry point.
3443 bool IsOffloadEntry = true;
3446 if (ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
3447 IsOffloadEntry = false;
3449 if (CGM.getLangOpts().OMPTargetTriples.empty())
3450 IsOffloadEntry = false;
3452 assert(CurFuncDecl && "No parent declaration for target region!");
3453 StringRef ParentName;
3454 // In case we have Ctors/Dtors we use the complete type variant to produce
3455 // the mangling of the device outlined kernel.
3456 if (auto *D = dyn_cast<CXXConstructorDecl>(CurFuncDecl))
3457 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
3458 else if (auto *D = dyn_cast<CXXDestructorDecl>(CurFuncDecl))
3459 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
// The plain-function fallback's 'else ParentName =' lead-in is on an elided
// line (3460-3461 gap).
3462 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CurFuncDecl)));
3464 std::tie(Fn, FnID) = EmitOMPTargetDirectiveOutlinedFunction(
3465 CGM, S, ParentName, IsOffloadEntry);
3466 OMPLexicalScope Scope(*this, S);
3467 CGM.getOpenMPRuntime().emitTargetCall(*this, S, Fn, FnID, IfCond, Device,
// Shared lowering for teams-based directives: outlines the region, pushes
// num_teams/thread_limit to the runtime, captures variables and emits the
// runtime teams call.
// NOTE(review): '*dyn_cast<OMPTeamsDirective>(&S)' dereferences the dyn_cast
// result without a null check — undefined behavior if S is ever not an
// OMPTeamsDirective. If all callers guarantee the type, this should be
// cast<>; otherwise the result must be null-checked. Confirm against callers
// before changing (surrounding lines are elided from this listing).
3471 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
3472 const OMPExecutableDirective &S,
3473 OpenMPDirectiveKind InnermostKind,
3474 const RegionCodeGenTy &CodeGen) {
3475 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
3476 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().
3477 emitParallelOrTeamsOutlinedFunction(S,
3478 *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
3480 const OMPTeamsDirective &TD = *dyn_cast<OMPTeamsDirective>(&S);
3481 const OMPNumTeamsClause *NT = TD.getSingleClause<OMPNumTeamsClause>();
3482 const OMPThreadLimitClause *TL = TD.getSingleClause<OMPThreadLimitClause>();
// Null expressions signal "clause absent" to emitNumTeamsClause.
3484 Expr *NumTeams = (NT) ? NT->getNumTeams() : nullptr;
3485 Expr *ThreadLimit = (TL) ? TL->getThreadLimit() : nullptr;
3487 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
3491 OMPLexicalScope Scope(CGF, S);
3492 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3493 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
3494 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getLocStart(), OutlinedFn,
// Lowers '#pragma omp teams': privatizes firstprivate/private variables and
// emits the captured body, delegating outlining and the runtime call to
// emitCommonOMPTeamsDirective.
3498 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
3499 // Emit teams region as a standalone region.
3500 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3501 OMPPrivateScope PrivateScope(CGF);
3502 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3503 CGF.EmitOMPPrivateClause(S, PrivateScope);
3504 (void)PrivateScope.Privatize();
3505 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3507 emitCommonOMPTeamsDirective(*this, S, OMPD_teams, CodeGen);
// Emit '#pragma omp cancellation point': delegate entirely to the OpenMP
// runtime, passing the source location and the kind of region checked for
// cancellation.
3510 void CodeGenFunction::EmitOMPCancellationPointDirective(
3511 const OMPCancellationPointDirective &S) {
3512 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
3513 S.getCancelRegion());
// Emit '#pragma omp cancel': pick the condition of an 'if' clause that is
// either unmodified or carries the 'cancel' name modifier, then forward to
// the runtime.
3516 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
3517 const Expr *IfCond = nullptr;
3518 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3519 if (C->getNameModifier() == OMPD_unknown ||
3520 C->getNameModifier() == OMPD_cancel) {
3521 IfCond = C->getCondition();
// NOTE(review): the loop's early exit and closing braces are on lines
// missing from this excerpt.
3525 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), IfCond,
3526 S.getCancelRegion());
// Return the jump destination a cancelled construct should branch to.
3529 CodeGenFunction::JumpDest
3530 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
// For constructs whose cancellation leaves the whole outlined function, the
// destination presumably is the function's return block; the 'return' for
// this branch is on a line missing from this excerpt.
3531 if (Kind == OMPD_parallel || Kind == OMPD_task ||
3532 Kind == OMPD_target_parallel)
// Loop- and sections-based constructs cancel to the exit block tracked by
// the OMP cancel stack.
3534 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
3535 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
3536 Kind == OMPD_distribute_parallel_for ||
3537 Kind == OMPD_target_parallel_for);
3538 return OMPCancelStack.getExitBlock();
// Privatize the variables of a 'use_device_ptr' clause: for every original
// list item whose matching declaration has a device address recorded in
// \p CaptureDeviceAddrMap, register a private copy initialized from that
// device address.
3541 void CodeGenFunction::EmitOMPUseDevicePtrClause(
3542 const OMPClause &NC, OMPPrivateScope &PrivateScope,
3543 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
3544 const auto &C = cast<OMPUseDevicePtrClause>(NC);
// Walk the original variables, their initializers, and the private copies
// in lockstep.
3545 auto OrigVarIt = C.varlist_begin();
3546 auto InitIt = C.inits().begin();
3547 for (auto PvtVarIt : C.private_copies()) {
3548 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
3549 auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
3550 auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
3552 // In order to identify the right initializer we need to match the
3553 // declaration used by the mapping logic. In some cases we may get
3554 // OMPCapturedExprDecl that refers to the original declaration.
3555 const ValueDecl *MatchingVD = OrigVD;
3556 if (auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
3557 // OMPCapturedExprDecl are used to privatize fields of the current
// structure; in that case match against the referenced member instead.
3559 auto *ME = cast<MemberExpr>(OED->getInit());
3560 assert(isa<CXXThisExpr>(ME->getBase()) &&
3561 "Base should be the current struct!");
3562 MatchingVD = ME->getMemberDecl();
3565 // If we don't have information about the current list item, move on to
// the next one. NOTE(review): the 'continue' and the per-iteration
// iterator increments are on lines missing from this excerpt.
3567 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
3568 if (InitAddrIt == CaptureDeviceAddrMap.end())
3571 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
3572 // Initialize the temporary initialization variable with the address we
3573 // get from the runtime library. We have to cast the source address
3574 // because it is always a void *. References are materialized in the
3575 // privatization scope, so the initialization here disregards the fact
3576 // the original variable is a reference.
3578 getContext().getPointerType(OrigVD->getType().getNonReferenceType());
3579 llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
3580 Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
3581 setAddrOfLocalVar(InitVD, InitAddr);
3583 // Emit the private declaration; it will be initialized by the
3584 // declaration we just added to the local declarations map.
// NOTE(review): the actual emission call (e.g. EmitDecl of the private
// variable) is on a line missing from this excerpt.
3587 // The initialization variable reached its purpose in the emission
3588 // of the previous declaration, so we don't need it anymore.
3589 LocalDeclMap.erase(InitVD);
3591 // Return the address of the private variable.
3592 return GetAddrOfLocalVar(PvtVD);
3594 assert(IsRegistered && "firstprivate var already registered as private");
3595 // Silence the warning about unused variable.
3603 // Generate the instructions for '#pragma omp target data' directive.
3604 void CodeGenFunction::EmitOMPTargetDataDirective(
3605 const OMPTargetDataDirective &S) {
// Request device-pointer information so the use_device_ptr privatization
// below can consult Info.CaptureDeviceAddrMap.
3606 CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
3608 // Create a pre/post action to signal the privatization of the device pointer.
3609 // This action can be replaced by the OpenMP runtime code generation to
3610 // deactivate privatization.
3611 bool PrivatizeDevicePointers = false;
3612 class DevicePointerPrivActionTy : public PrePostActionTy {
3613 bool &PrivatizeDevicePointers;
3616 explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
3617 : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
3618 void Enter(CodeGenFunction &CGF) override {
// Entering the region flips the flag, enabling the privatization path in
// PrivCodeGen below.
3619 PrivatizeDevicePointers = true;
3622 DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
3624 auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
3625 CodeGenFunction &CGF, PrePostActionTy &Action) {
// Innermost codegen just emits the captured statement forming the body.
3626 auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3628 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3631 // Codegen that selects whether to generate the privatization code or not.
3632 auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
3633 &InnermostCodeGen](CodeGenFunction &CGF,
3634 PrePostActionTy &Action) {
3635 RegionCodeGenTy RCG(InnermostCodeGen);
3636 PrivatizeDevicePointers = false;
3638 // Call the pre-action to change the status of PrivatizeDevicePointers if
// needed. NOTE(review): the Action.Enter call appears to be on a line
// missing from this excerpt.
3642 if (PrivatizeDevicePointers) {
3643 OMPPrivateScope PrivateScope(CGF);
3644 // Emit all instances of the use_device_ptr clause.
3645 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
3646 CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
3647 Info.CaptureDeviceAddrMap)
3648 (void)PrivateScope.Privatize();
3654 // Forward the provided action to the privatization codegen.
3655 RegionCodeGenTy PrivRCG(PrivCodeGen);
3656 PrivRCG.setAction(Action);
3658 // Notwithstanding the body of the region is emitted as inlined directive,
3659 // we don't use an inline scope as changes in the references inside the
3660 // region are expected to be visible outside, so we do not privatize them.
3661 OMPLexicalScope Scope(CGF, S);
3662 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
3666 RegionCodeGenTy RCG(CodeGen);
3668 // If we don't have target devices, don't bother emitting the data mapping
// code; just emit the region inline.
3670 if (CGM.getLangOpts().OMPTargetTriples.empty()) {
3675 // Check if we have any if clause associated with the directive.
3676 const Expr *IfCond = nullptr;
3677 if (auto *C = S.getSingleClause<OMPIfClause>())
3678 IfCond = C->getCondition();
3680 // Check if we have any device clause associated with the directive.
3681 const Expr *Device = nullptr;
3682 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3683 Device = C->getDevice();
3685 // Set the action to signal privatization of device pointers.
3686 RCG.setAction(PrivAction);
3688 // Emit region code.
3689 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
// Emit '#pragma omp target enter data' as a standalone runtime call,
// forwarding the optional 'if' and 'device' clause expressions.
3693 void CodeGenFunction::EmitOMPTargetEnterDataDirective(
3694 const OMPTargetEnterDataDirective &S) {
3695 // If we don't have target devices, don't bother emitting the data mapping
// code. NOTE(review): the early 'return' is on a line missing from this
// excerpt.
3697 if (CGM.getLangOpts().OMPTargetTriples.empty())
3700 // Check if we have any if clause associated with the directive.
3701 const Expr *IfCond = nullptr;
3702 if (auto *C = S.getSingleClause<OMPIfClause>())
3703 IfCond = C->getCondition();
3705 // Check if we have any device clause associated with the directive.
3706 const Expr *Device = nullptr;
3707 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3708 Device = C->getDevice();
3710 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
// Emit '#pragma omp target exit data' as a standalone runtime call,
// forwarding the optional 'if' and 'device' clause expressions.
3713 void CodeGenFunction::EmitOMPTargetExitDataDirective(
3714 const OMPTargetExitDataDirective &S) {
3715 // If we don't have target devices, don't bother emitting the data mapping
// code. NOTE(review): the early 'return' is on a line missing from this
// excerpt.
3717 if (CGM.getLangOpts().OMPTargetTriples.empty())
3720 // Check if we have any if clause associated with the directive.
3721 const Expr *IfCond = nullptr;
3722 if (auto *C = S.getSingleClause<OMPIfClause>())
3723 IfCond = C->getCondition();
3725 // Check if we have any device clause associated with the directive.
3726 const Expr *Device = nullptr;
3727 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3728 Device = C->getDevice();
3730 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
// Placeholder: '#pragma omp target parallel' has no codegen yet.
3733 void CodeGenFunction::EmitOMPTargetParallelDirective(
3734 const OMPTargetParallelDirective &S) {
3735 // TODO: codegen for target parallel.
// Placeholder: '#pragma omp target parallel for' has no codegen yet.
3738 void CodeGenFunction::EmitOMPTargetParallelForDirective(
3739 const OMPTargetParallelForDirective &S) {
3740 // TODO: codegen for target parallel for.
3743 /// Emit a helper variable and return corresponding lvalue.
///
/// Registers in \p Privates the variable referenced by \p Helper as private,
/// backed by the address of the outlined-function parameter \p PVD.
3744 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
3745 const ImplicitParamDecl *PVD,
3746 CodeGenFunction::OMPPrivateScope &Privates) {
3747 auto *VDecl = cast<VarDecl>(Helper->getDecl());
3748 Privates.addPrivate(
3749 VDecl, [&CGF, PVD]() -> Address { return CGF.GetAddrOfLocalVar(PVD); });
// Common codegen for 'taskloop'-based directives ('taskloop', 'taskloop
// simd'): outlines the loop as a task and emits the runtime taskloop call.
3752 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
3753 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
3754 // Emit outlined function for task construct.
3755 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
3756 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
3757 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
// Pick the condition of an 'if' clause that is unmodified or carries the
// 'taskloop' name modifier.
3758 const Expr *IfCond = nullptr;
3759 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3760 if (C->getNameModifier() == OMPD_unknown ||
3761 C->getNameModifier() == OMPD_taskloop) {
3762 IfCond = C->getCondition();
// NOTE(review): the declaration of 'Data' (an OMPTaskDataTy, judging by
// its use below and in EmitOMPTaskBasedDirective) is on a line missing
// from this excerpt.
3768 // Check if taskloop must be emitted without taskgroup.
3769 Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
3770 // TODO: Check if we should emit tied or untied task.
3772 // Set scheduling for taskloop
3773 if (const auto* Clause = S.getSingleClause<OMPGrainsizeClause>()) {
// grainsize: the schedule value is a grain size (flag = false).
3775 Data.Schedule.setInt(/*IntVal=*/false);
3776 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
3777 } else if (const auto* Clause = S.getSingleClause<OMPNumTasksClause>()) {
// num_tasks: the schedule value is a task count (flag = true).
3779 Data.Schedule.setInt(/*IntVal=*/true);
3780 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
// BodyGen emits the sequential loop that the outlined task body executes.
3783 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
3785 // for (IV in 0..LastIteration) BODY;
3786 // <Final counter/linear vars updates>;
3790 // Emit: if (PreCond) - begin.
3791 // If the condition constant folds and can be elided, avoid emitting the
// whole precondition check.
3794 llvm::BasicBlock *ContBlock = nullptr;
3795 OMPLoopScope PreInitScope(CGF, S);
3796 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
// Otherwise emit an explicit precondition branch around the loop.
3800 auto *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
3801 ContBlock = CGF.createBasicBlock("taskloop.if.end");
3802 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
3803 CGF.getProfileCount(&S));
3804 CGF.EmitBlock(ThenBlock);
3805 CGF.incrementProfileCounter(&S);
// 'taskloop simd' also gets the simd loop metadata/initialization.
3808 if (isOpenMPSimdDirective(S.getDirectiveKind()))
3809 CGF.EmitOMPSimdInit(S);
3811 OMPPrivateScope LoopScope(CGF);
3812 // Emit helper vars inits.
// The outlined function's parameters at these fixed positions carry the
// runtime-provided loop bounds, stride, and last-iteration flag.
3813 enum { LowerBound = 5, UpperBound, Stride, LastIter };
3814 auto *I = CS->getCapturedDecl()->param_begin();
3815 auto *LBP = std::next(I, LowerBound);
3816 auto *UBP = std::next(I, UpperBound);
3817 auto *STP = std::next(I, Stride);
3818 auto *LIP = std::next(I, LastIter);
3819 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
3821 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
3823 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
3824 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
3826 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
3827 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
3828 (void)LoopScope.Privatize();
3829 // Emit the loop iteration variable.
3830 const Expr *IVExpr = S.getIterationVariable();
3831 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
3832 CGF.EmitVarDecl(*IVDecl);
3833 CGF.EmitIgnoredExpr(S.getInit());
3835 // Emit the iterations count variable.
3836 // If it is not a variable, Sema decided to calculate iterations count on
3837 // each iteration (e.g., it is foldable into a constant).
3838 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
3839 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
3840 // Emit calculation of the iterations count.
3841 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
// Emit the inner sequential loop; the body callback emits the loop body
// and a stop point for debug info.
3844 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
3846 [&S](CodeGenFunction &CGF) {
3847 CGF.EmitOMPLoopBody(S, JumpDest());
3848 CGF.EmitStopPoint(&S);
3850 [](CodeGenFunction &) {});
3851 // Emit: if (PreCond) - end.
3853 CGF.EmitBranch(ContBlock);
3854 CGF.EmitBlock(ContBlock, true);
3856 // Emit final copy of the lastprivate variables if IsLastIter != 0.
3857 if (HasLastprivateClause) {
3858 CGF.EmitOMPLastprivateClauseFinal(
3859 S, isOpenMPSimdDirective(S.getDirectiveKind()),
3860 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
3861 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
3862 (*LIP)->getType(), S.getLocStart())));
// TaskGen wraps the runtime taskloop call in an inlined 'taskloop' region.
3865 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
3866 IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
3867 const OMPTaskDataTy &Data) {
3868 auto &&CodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &) {
3869 OMPLoopScope PreInitScope(CGF, S);
3870 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getLocStart(), S,
3871 OutlinedFn, SharedsTy,
3872 CapturedStruct, IfCond, Data);
3874 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
// Finally, emit the task-based directive with the body and task callbacks.
3877 EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
// '#pragma omp taskloop' shares its codegen with the common taskloop path.
3880 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
3881 EmitOMPTaskLoopBasedDirective(S);
// '#pragma omp taskloop simd' shares its codegen with the common taskloop
// path; the simd-specific handling keys off S.getDirectiveKind() there.
3884 void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
3885 const OMPTaskLoopSimdDirective &S) {
3886 EmitOMPTaskLoopBasedDirective(S);
3889 // Generate the instructions for '#pragma omp target update' directive.
3890 void CodeGenFunction::EmitOMPTargetUpdateDirective(
3891 const OMPTargetUpdateDirective &S) {
3892 // If we don't have target devices, don't bother emitting the data mapping
// code. NOTE(review): the early 'return' is on a line missing from this
// excerpt.
3894 if (CGM.getLangOpts().OMPTargetTriples.empty())
3897 // Check if we have any if clause associated with the directive.
3898 const Expr *IfCond = nullptr;
3899 if (auto *C = S.getSingleClause<OMPIfClause>())
3900 IfCond = C->getCondition();
3902 // Check if we have any device clause associated with the directive.
3903 const Expr *Device = nullptr;
3904 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3905 Device = C->getDevice();
3907 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);