1 //===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This contains code to emit OpenMP nodes as LLVM code.
12 //===----------------------------------------------------------------------===//
14 #include "CGCleanup.h"
15 #include "CGOpenMPRuntime.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "TargetInfo.h"
19 #include "clang/AST/Stmt.h"
20 #include "clang/AST/StmtOpenMP.h"
21 #include "clang/AST/DeclOpenMP.h"
22 #include "llvm/IR/CallSite.h"
23 using namespace clang;
24 using namespace CodeGen;
27 /// Lexical scope for OpenMP executable constructs, that handles correct codegen
28 /// for captured expressions.
// NOTE(review): this region appears truncated (closing braces, access
// specifiers and an apparent else-branch are missing) — confirm against the
// upstream CGStmtOpenMP.cpp before relying on the exact structure.
29 class OMPLexicalScope final : public CodeGenFunction::LexicalScope {
// Emit the pre-init statements attached to the directive's clauses so that
// variables referenced by captured expressions exist before the associated
// statement is emitted.
30 void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
31 for (const auto *C : S.clauses()) {
32 if (auto *CPI = OMPClauseWithPreInit::get(C)) {
33 if (auto *PreInit = cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
34 for (const auto *I : PreInit->decls()) {
// Declarations without OMPCaptureNoInitAttr are emitted in full
// (allocation + initialization + cleanups) via EmitVarDecl.
35 if (!I->hasAttr<OMPCaptureNoInitAttr>())
36 CGF.EmitVarDecl(cast<VarDecl>(*I));
// NOTE(review): a line seems to be missing above (likely `else {`);
// as written this alloca path would run for every decl — verify.
38 CodeGenFunction::AutoVarEmission Emission =
39 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
40 CGF.EmitAutoVarCleanups(Emission);
// Privatization scope used to re-map variables shared into the region.
47 CodeGenFunction::OMPPrivateScope InlinedShareds;
// Returns true if \p VD is captured by an enclosing lambda, captured
// statement, or block, i.e. its address must be resolved through the
// capture machinery rather than taken directly.
49 static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
50 return CGF.LambdaCaptureFields.lookup(VD) ||
51 (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
52 (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
// Constructor: emits the clause pre-init statements and, when the directive
// has an associated captured statement, privatizes each captured variable to
// the address produced by evaluating a synthetic DeclRefExpr for it.
56 OMPLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S,
57 bool AsInlined = false)
58 : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
60 emitPreInitStmt(CGF, S);
62 if (S.hasAssociatedStmt()) {
63 auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
64 for (auto &C : CS->captures()) {
65 if (C.capturesVariable() || C.capturesVariableByCopy()) {
66 auto *VD = C.getCapturedVar();
// Synthetic reference to the captured variable; EmitLValue on it yields
// the variable's address in the enclosing function.
67 DeclRefExpr DRE(const_cast<VarDecl *>(VD),
68 isCapturedVar(CGF, VD) ||
69 (CGF.CapturedStmtInfo &&
70 InlinedShareds.isGlobalVarCaptured(VD)),
71 VD->getType().getNonReferenceType(), VK_LValue,
73 InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
74 return CGF.EmitLValue(&DRE).getAddress();
// Activate the privatization mappings built above.
78 (void)InlinedShareds.Privatize();
84 /// Private scope for OpenMP loop-based directives, that supports capturing
85 /// of used expression from loop statement.
86 class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
// Emit the loop directive's pre-init declarations (helpers captured by the
// loop bounds/iteration expressions) before the loop itself is generated.
87 void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
// NOTE(review): S is already an OMPLoopDirective, so this dyn_cast can
// never fail; kept byte-identical in this comment-only pass.
88 if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
89 if (auto *PreInits = cast_or_null<DeclStmt>(LD->getPreInits())) {
90 for (const auto *I : PreInits->decls())
91 CGF.EmitVarDecl(cast<VarDecl>(*I));
// Constructor: run the pre-init declarations under this cleanup scope.
97 OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
98 : CodeGenFunction::RunCleanupsScope(CGF) {
99 emitPreInitStmt(CGF, S);
105 llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
106 auto &C = getContext();
107 llvm::Value *Size = nullptr;
108 auto SizeInChars = C.getTypeSizeInChars(Ty);
109 if (SizeInChars.isZero()) {
110 // getTypeSizeInChars() returns 0 for a VLA.
111 while (auto *VAT = C.getAsVariableArrayType(Ty)) {
112 llvm::Value *ArraySize;
113 std::tie(ArraySize, Ty) = getVLASize(VAT);
114 Size = Size ? Builder.CreateNUWMul(Size, ArraySize) : ArraySize;
116 SizeInChars = C.getTypeSizeInChars(Ty);
117 if (SizeInChars.isZero())
118 return llvm::ConstantInt::get(SizeTy, /*V=*/0);
119 Size = Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
121 Size = CGM.getSize(SizeInChars);
// Collect the IR values to pass to the outlined captured-statement function:
// VLA sizes as values, 'this' as-is, by-copy captures squeezed through a
// uintptr-sized slot, and by-reference captures as raw pointers.
// NOTE(review): this region appears truncated (e.g. the `auto CV =` that
// should precede the EmitLoadOfLValue line and an `} else {` before the
// final assert are missing) — confirm against upstream.
125 void CodeGenFunction::GenerateOpenMPCapturedVars(
126 const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
127 const RecordDecl *RD = S.getCapturedRecordDecl();
128 auto CurField = RD->field_begin();
129 auto CurCap = S.captures().begin();
// Walk capture inits, record fields and captures in lock-step.
130 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
131 E = S.capture_init_end();
132 I != E; ++I, ++CurField, ++CurCap) {
133 if (CurField->hasCapturedVLAType()) {
// VLA dimensions are passed as the previously computed size values.
134 auto VAT = CurField->getCapturedVLAType();
135 auto *Val = VLASizeMap[VAT->getSizeExpr()];
136 CapturedVars.push_back(Val);
137 } else if (CurCap->capturesThis())
138 CapturedVars.push_back(CXXThisValue);
139 else if (CurCap->capturesVariableByCopy()) {
141 EmitLoadOfLValue(EmitLValue(*I), SourceLocation()).getScalarVal();
143 // If the field is not a pointer, we need to save the actual value
144 // and load it as a void pointer.
145 if (!CurField->getType()->isAnyPointerType()) {
146 auto &Ctx = getContext();
// Temporary uintptr-sized slot used to reinterpret the value.
147 auto DstAddr = CreateMemTemp(
148 Ctx.getUIntPtrType(),
149 Twine(CurCap->getCapturedVar()->getName()) + ".casted");
150 LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
// View the same slot through a pointer to the field's real type.
152 auto *SrcAddrVal = EmitScalarConversion(
153 DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
154 Ctx.getPointerType(CurField->getType()), SourceLocation());
156 MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());
158 // Store the value using the source type pointer.
159 EmitStoreThroughLValue(RValue::get(CV), SrcLV);
161 // Load the value using the destination type pointer.
162 CV = EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
164 CapturedVars.push_back(CV);
// Remaining case: capture by reference — pass the variable's address.
166 assert(CurCap->capturesVariable() && "Expected capture by reference.");
167 CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
172 static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
173 StringRef Name, LValue AddrLV,
174 bool isReferenceType = false) {
175 ASTContext &Ctx = CGF.getContext();
177 auto *CastedPtr = CGF.EmitScalarConversion(
178 AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
179 Ctx.getPointerType(DstType), SourceLocation());
181 CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
184 // If we are dealing with references we need to return the address of the
185 // reference instead of the reference of the value.
186 if (isReferenceType) {
187 QualType RefType = Ctx.getLValueReferenceType(DstType);
188 auto *RefVal = TmpAddr.getPointer();
189 TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
190 auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
191 CGF.EmitStoreThroughLValue(RValue::get(RefVal), TmpLVal, /*isInit*/ true);
// Build the outlined LLVM function for a captured statement: an argument per
// captured entity (by-copy non-pointer captures and VLA sizes widened to
// uintptr), then wire each argument back to the corresponding variable / VLA
// size / CXXThisValue before emitting the captured body.
// NOTE(review): the return-type line of the signature and several interior
// lines are missing from this chunk — confirm against upstream.
198 CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
201 "CapturedStmtInfo should be set when generating the captured function");
202 const CapturedDecl *CD = S.getCapturedDecl();
203 const RecordDecl *RD = S.getCapturedRecordDecl();
204 assert(CD->hasBody() && "missing CapturedDecl body");
206 // Build the argument list.
207 ASTContext &Ctx = CGM.getContext();
208 FunctionArgList Args;
209 Args.append(CD->param_begin(),
210 std::next(CD->param_begin(), CD->getContextParamPosition()));
211 auto I = S.captures().begin();
212 for (auto *FD : RD->fields()) {
213 QualType ArgType = FD->getType();
214 IdentifierInfo *II = nullptr;
215 VarDecl *CapVar = nullptr;
217 // If this is a capture by copy and the type is not a pointer, the outlined
218 // function argument type should be uintptr and the value properly casted to
219 // uintptr. This is necessary given that the runtime library is only able to
220 // deal with pointers. We can pass in the same way the VLA type sizes to the
221 // outlined function.
222 if ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
223 I->capturesVariableArrayType())
224 ArgType = Ctx.getUIntPtrType();
// Pick an argument name: the captured variable's, "this", or "vla".
226 if (I->capturesVariable() || I->capturesVariableByCopy()) {
227 CapVar = I->getCapturedVar();
228 II = CapVar->getIdentifier();
229 } else if (I->capturesThis())
230 II = &getContext().Idents.get("this");
232 assert(I->capturesVariableArrayType());
233 II = &getContext().Idents.get("vla");
// Variably-modified argument types are canonicalized (and re-wrapped as
// references where needed) so the outlined prototype is well-formed.
235 if (ArgType->isVariablyModifiedType()) {
236 bool IsReference = ArgType->isLValueReferenceType();
238 getContext().getCanonicalParamType(ArgType.getNonReferenceType());
239 if (IsReference && !ArgType->isPointerType()) {
240 ArgType = getContext().getLValueReferenceType(
241 ArgType, /*SpelledAsLValue=*/false);
244 Args.push_back(ImplicitParamDecl::Create(getContext(), nullptr,
245 FD->getLocation(), II, ArgType));
249 std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
252 // Create the function declaration.
253 FunctionType::ExtInfo ExtInfo;
254 const CGFunctionInfo &FuncInfo =
255 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
256 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
258 llvm::Function *F = llvm::Function::Create(
259 FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
260 CapturedStmtInfo->getHelperName(), &CGM.getModule());
261 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
263 F->addFnAttr(llvm::Attribute::NoUnwind);
265 // Generate the function.
266 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
267 CD->getBody()->getLocStart());
// Second pass: map each emitted argument back onto the entity it captures.
268 unsigned Cnt = CD->getContextParamPosition();
269 I = S.captures().begin();
270 for (auto *FD : RD->fields()) {
271 // If we are capturing a pointer by copy we don't need to do anything, just
272 // use the value that we get from the arguments.
273 if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
274 const VarDecl *CurVD = I->getCapturedVar();
275 Address LocalAddr = GetAddrOfLocalVar(Args[Cnt]);
276 // If the variable is a reference we need to materialize it here.
277 if (CurVD->getType()->isReferenceType()) {
278 Address RefAddr = CreateMemTemp(CurVD->getType(), getPointerAlign(),
279 ".materialized_ref");
280 EmitStoreOfScalar(LocalAddr.getPointer(), RefAddr, /*Volatile=*/false,
284 setAddrOfLocalVar(CurVD, LocalAddr);
291 MakeAddrLValue(GetAddrOfLocalVar(Args[Cnt]), Args[Cnt]->getType(),
292 AlignmentSource::Decl);
293 if (FD->hasCapturedVLAType()) {
// Decode the VLA size from its uintptr argument and register it so later
// VLA-typed expressions can find it.
294 LValue CastedArgLVal =
295 MakeAddrLValue(castValueFromUintptr(*this, FD->getType(),
296 Args[Cnt]->getName(), ArgLVal),
297 FD->getType(), AlignmentSource::Decl);
299 EmitLoadOfLValue(CastedArgLVal, SourceLocation()).getScalarVal();
300 auto VAT = FD->getCapturedVLAType();
301 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
302 } else if (I->capturesVariable()) {
// By-reference capture: unwrap the pointer/reference indirections to get
// the variable's address, then register it under its declared alignment.
303 auto *Var = I->getCapturedVar();
304 QualType VarTy = Var->getType();
305 Address ArgAddr = ArgLVal.getAddress();
306 if (!VarTy->isReferenceType()) {
307 if (ArgLVal.getType()->isLValueReferenceType()) {
308 ArgAddr = EmitLoadOfReference(
309 ArgAddr, ArgLVal.getType()->castAs<ReferenceType>());
310 } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
311 assert(ArgLVal.getType()->isPointerType());
312 ArgAddr = EmitLoadOfPointer(
313 ArgAddr, ArgLVal.getType()->castAs<PointerType>());
317 Var, Address(ArgAddr.getPointer(), getContext().getDeclAlign(Var)));
318 } else if (I->capturesVariableByCopy()) {
// By-copy non-pointer capture: decode it from the uintptr slot.
319 assert(!FD->getType()->isAnyPointerType() &&
320 "Not expecting a captured pointer.");
321 auto *Var = I->getCapturedVar();
322 QualType VarTy = Var->getType();
323 setAddrOfLocalVar(Var, castValueFromUintptr(*this, FD->getType(),
324 Args[Cnt]->getName(), ArgLVal,
325 VarTy->isReferenceType()));
327 // If 'this' is captured, load it into CXXThisValue.
328 assert(I->capturesThis());
330 EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation()).getScalarVal();
// Emit the captured body and finish the outlined function.
336 PGO.assignRegionCounters(GlobalDecl(CD), F);
337 CapturedStmtInfo->EmitBody(*this, CD->getBody());
338 FinishFunction(CD->getBodyRBrace());
343 //===----------------------------------------------------------------------===//
344 // OpenMP Directive Emission
345 //===----------------------------------------------------------------------===//
// Emit an element-by-element copy loop over an array: drill to the base
// element type, iterate destination/source in lock-step with PHI nodes, and
// invoke \p CopyGen for each element pair.
// NOTE(review): a few interior lines of this chunk are missing (e.g. the
// declarations receiving the CreateICmpEQ results) — confirm against upstream.
346 void CodeGenFunction::EmitOMPAggregateAssign(
347 Address DestAddr, Address SrcAddr, QualType OriginalType,
348 const llvm::function_ref<void(Address, Address)> &CopyGen) {
349 // Perform element-by-element initialization.
352 // Drill down to the base element type on both arrays.
353 auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
354 auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
// Make source element type match the destination's for the copy loop.
355 SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
357 auto SrcBegin = SrcAddr.getPointer();
358 auto DestBegin = DestAddr.getPointer();
359 // Cast from pointer to array type to pointer to single element.
360 auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
361 // The basic structure here is a while-do loop.
362 auto BodyBB = createBasicBlock("omp.arraycpy.body");
363 auto DoneBB = createBasicBlock("omp.arraycpy.done");
// Skip the loop entirely for zero-length arrays.
365 Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
366 Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
368 // Enter the loop body, making that address the current address.
369 auto EntryBB = Builder.GetInsertBlock();
372 CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
// PHIs track the current source/destination element across iterations.
374 llvm::PHINode *SrcElementPHI =
375 Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
376 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
377 Address SrcElementCurrent =
378 Address(SrcElementPHI,
379 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
381 llvm::PHINode *DestElementPHI =
382 Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
383 DestElementPHI->addIncoming(DestBegin, EntryBB);
384 Address DestElementCurrent =
385 Address(DestElementPHI,
386 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Caller-provided per-element copy.
389 CopyGen(DestElementCurrent, SrcElementCurrent);
391 // Shift the address forward by one element.
392 auto DestElementNext = Builder.CreateConstGEP1_32(
393 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
394 auto SrcElementNext = Builder.CreateConstGEP1_32(
395 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
396 // Check whether we've reached the end.
398 Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
399 Builder.CreateCondBr(Done, DoneBB, BodyBB);
400 DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
401 SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());
404 EmitBlock(DoneBB, /*IsFinished=*/true);
407 /// Check if the combiner is a call to UDR combiner and if it is so return the
408 /// UDR decl used for reduction.
409 static const OMPDeclareReductionDecl *
410 getReductionInit(const Expr *ReductionOp) {
411 if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
412 if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
414 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
415 if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
420 static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
421 const OMPDeclareReductionDecl *DRD,
423 Address Private, Address Original,
425 if (DRD->getInitializer()) {
426 std::pair<llvm::Function *, llvm::Function *> Reduction =
427 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
428 auto *CE = cast<CallExpr>(InitOp);
429 auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
430 const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
431 const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
432 auto *LHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
433 auto *RHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
434 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
435 PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
436 [=]() -> Address { return Private; });
437 PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
438 [=]() -> Address { return Original; });
439 (void)PrivateScope.Privatize();
440 RValue Func = RValue::get(Reduction.second);
441 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
442 CGF.EmitIgnoredExpr(InitOp);
444 llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
445 auto *GV = new llvm::GlobalVariable(
446 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
447 llvm::GlobalValue::PrivateLinkage, Init, ".init");
448 LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
450 switch (CGF.getEvaluationKind(Ty)) {
452 InitRVal = CGF.EmitLoadOfLValue(LV, SourceLocation());
456 RValue::getComplex(CGF.EmitLoadOfComplex(LV, SourceLocation()));
459 InitRVal = RValue::getAggregate(LV.getAddress());
462 OpaqueValueExpr OVE(SourceLocation(), Ty, VK_RValue);
463 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
464 CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
465 /*IsInitializer=*/false);
469 /// \brief Emit initialization of arrays of complex types.
470 /// \param DestAddr Address of the array.
471 /// \param Type Type of array.
472 /// \param Init Initial expression of array.
473 /// \param SrcAddr Address of the original array.
// Structure mirrors EmitOMPAggregateAssign: a pointer-walking while-do loop
// with PHIs, but each element is *initialized* (optionally via a UDR
// initializer) instead of copied.
// NOTE(review): several interior lines of this chunk are missing — confirm
// against upstream.
474 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
475 QualType Type, const Expr *Init,
476 Address SrcAddr = Address::invalid()) {
// Non-null when Init is a call to a user-defined reduction combiner.
477 auto *DRD = getReductionInit(Init);
478 // Perform element-by-element initialization.
481 // Drill down to the base element type on both arrays.
482 auto ArrayTy = Type->getAsArrayTypeUnsafe();
483 auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
485 CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
488 CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
// Source pointer is only tracked when a valid SrcAddr was supplied.
490 llvm::Value *SrcBegin = nullptr;
492 SrcBegin = SrcAddr.getPointer();
493 auto DestBegin = DestAddr.getPointer();
494 // Cast from pointer to array type to pointer to single element.
495 auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
496 // The basic structure here is a while-do loop.
497 auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
498 auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
// Skip the loop entirely for zero-length arrays.
500 CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
501 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
503 // Enter the loop body, making that address the current address.
504 auto EntryBB = CGF.Builder.GetInsertBlock();
505 CGF.EmitBlock(BodyBB);
507 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
// Source element PHI is only created when there is a source array.
509 llvm::PHINode *SrcElementPHI = nullptr;
510 Address SrcElementCurrent = Address::invalid();
512 SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
513 "omp.arraycpy.srcElementPast");
514 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
516 Address(SrcElementPHI,
517 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
519 llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
520 DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
521 DestElementPHI->addIncoming(DestBegin, EntryBB);
522 Address DestElementCurrent =
523 Address(DestElementPHI,
524 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Per-element init: UDR initializer when present, otherwise the plain
// initializer expression; cleanups scoped to one element.
528 CodeGenFunction::RunCleanupsScope InitScope(CGF);
529 if (DRD && (DRD->getInitializer() || !Init)) {
530 emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
531 SrcElementCurrent, ElementTy);
533 CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
534 /*IsInitializer=*/false);
538 // Shift the address forward by one element.
539 auto SrcElementNext = CGF.Builder.CreateConstGEP1_32(
540 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
541 SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
544 // Shift the address forward by one element.
545 auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
546 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
547 // Check whether we've reached the end.
549 CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
550 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
551 DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
554 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
// Emit a copy of a variable for OpenMP data-sharing clauses: arrays use
// either a memcpy fast path (plain assignment) or the element-by-element
// loop; scalars/classes re-run the copy expression with the pseudo source
// and destination variables remapped to the given addresses.
557 void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
558 Address SrcAddr, const VarDecl *DestVD,
559 const VarDecl *SrcVD, const Expr *Copy) {
560 if (OriginalType->isArrayType()) {
561 auto *BO = dyn_cast<BinaryOperator>(Copy);
562 if (BO && BO->getOpcode() == BO_Assign) {
563 // Perform simple memcpy for simple copying.
564 EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
566 // For arrays with complex element types perform element by element
568 EmitOMPAggregateAssign(
569 DestAddr, SrcAddr, OriginalType,
570 [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
571 // Working with the single array element, so have to remap
572 // destination and source variables to corresponding array
574 CodeGenFunction::OMPPrivateScope Remap(*this);
575 Remap.addPrivate(DestVD, [DestElement]() -> Address {
579 SrcVD, [SrcElement]() -> Address { return SrcElement; });
580 (void)Remap.Privatize();
// Re-evaluate the copy expression for this single element.
581 EmitIgnoredExpr(Copy);
585 // Remap pseudo source variable to private copy.
586 CodeGenFunction::OMPPrivateScope Remap(*this);
587 Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
588 Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
589 (void)Remap.Privatize();
590 // Emit copying of the whole variable.
591 EmitIgnoredExpr(Copy);
// Emit private copies for all 'firstprivate' variables of directive \p D,
// registering them in \p PrivateScope. Returns true when at least one
// firstprivate variable is also lastprivate (the caller must then emit the
// lastprivate final copy-back even without other lastprivates).
595 bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
596 OMPPrivateScope &PrivateScope) {
597 if (!HaveInsertPoint())
// Collect variables that are also lastprivate on this directive.
599 bool FirstprivateIsLastprivate = false;
600 llvm::DenseSet<const VarDecl *> Lastprivates;
601 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
602 for (const auto *D : C->varlists())
604 cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
606 llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
607 CGCapturedStmtInfo CapturesInfo(cast<CapturedStmt>(*D.getAssociatedStmt()));
608 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
609 auto IRef = C->varlist_begin();
610 auto InitsRef = C->inits().begin();
611 for (auto IInit : C->private_copies()) {
612 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
613 bool ThisFirstprivateIsLastprivate =
614 Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
615 auto *CapFD = CapturesInfo.lookup(OrigVD);
616 auto *FD = CapturedStmtInfo->lookup(OrigVD);
// Fast path: a non-lastprivate variable captured by copy (non-reference
// field) is already a private copy in the outlined function — no extra
// copy needs to be emitted here.
617 if (!ThisFirstprivateIsLastprivate && FD && (FD == CapFD) &&
618 !FD->getType()->isReferenceType()) {
619 EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
624 FirstprivateIsLastprivate =
625 FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
// Emit each variable only once even if it appears in several clauses.
626 if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
627 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
628 auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
// Resolve the original variable's address through the capture machinery.
630 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
631 /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
632 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
633 Address OriginalAddr = EmitLValue(&DRE).getAddress();
634 QualType Type = VD->getType();
635 if (Type->isArrayType()) {
636 // Emit VarDecl with copy init for arrays.
637 // Get the address of the original variable captured in current
639 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
640 auto Emission = EmitAutoVarAlloca(*VD);
641 auto *Init = VD->getInit();
642 if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
643 // Perform simple memcpy.
644 EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
647 EmitOMPAggregateAssign(
648 Emission.getAllocatedAddress(), OriginalAddr, Type,
649 [this, VDInit, Init](Address DestElement,
650 Address SrcElement) {
651 // Clean up any temporaries needed by the initialization.
652 RunCleanupsScope InitScope(*this);
653 // Emit initialization for single element.
654 setAddrOfLocalVar(VDInit, SrcElement);
655 EmitAnyExprToMem(Init, DestElement,
656 Init->getType().getQualifiers(),
657 /*IsInitializer*/ false);
658 LocalDeclMap.erase(VDInit);
661 EmitAutoVarCleanups(Emission);
662 return Emission.getAllocatedAddress();
// Non-array case: emit the private VarDecl with the pseudo init
// variable temporarily bound to the original's address.
665 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
666 // Emit private VarDecl with copy init.
667 // Remap temp VDInit variable to the address of the original
669 // (for proper handling of captured global variables).
670 setAddrOfLocalVar(VDInit, OriginalAddr);
672 LocalDeclMap.erase(VDInit);
673 return GetAddrOfLocalVar(VD);
676 assert(IsRegistered &&
677 "firstprivate var already registered as private");
678 // Silence the warning about unused variable.
685 return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
// Emit uninitialized private copies for all 'private' clause variables of
// directive \p D and register them in \p PrivateScope. Each canonical
// variable is emitted at most once across all private clauses.
688 void CodeGenFunction::EmitOMPPrivateClause(
689 const OMPExecutableDirective &D,
690 CodeGenFunction::OMPPrivateScope &PrivateScope) {
691 if (!HaveInsertPoint())
693 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
694 for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
695 auto IRef = C->varlist_begin();
696 for (auto IInit : C->private_copies()) {
697 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
698 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
699 auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
// Map the original variable to the freshly emitted private copy.
701 PrivateScope.addPrivate(OrigVD, [&]() -> Address {
702 // Emit private VarDecl with copy init.
704 return GetAddrOfLocalVar(VD);
706 assert(IsRegistered && "private var already registered as private");
707 // Silence the warning about unused variable.
// Emit copying of 'copyin' threadprivate variables from the master thread's
// copies into the current thread's copies, guarded so the master thread
// skips the copy. Returns true if any copy was emitted (caller then emits
// the synchronizing barrier).
715 bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
716 if (!HaveInsertPoint())
718 // threadprivate_var1 = master_threadprivate_var1;
719 // operator=(threadprivate_var2, master_threadprivate_var2);
721 // __kmpc_barrier(&loc, global_tid);
722 llvm::DenseSet<const VarDecl *> CopiedVars;
723 llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
724 for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
725 auto IRef = C->varlist_begin();
726 auto ISrcRef = C->source_exprs().begin();
727 auto IDestRef = C->destination_exprs().begin();
728 for (auto *AssignOp : C->assignment_ops()) {
729 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
730 QualType Type = VD->getType();
731 if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
732 // Get the address of the master variable. If we are emitting code with
733 // TLS support, the address is passed from the master as field in the
734 // captured declaration.
735 Address MasterAddr = Address::invalid();
736 if (getLangOpts().OpenMPUseTLS &&
737 getContext().getTargetInfo().isTLSSupported()) {
738 assert(CapturedStmtInfo->lookup(VD) &&
739 "Copyin threadprivates should have been captured!");
740 DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
741 VK_LValue, (*IRef)->getExprLoc());
742 MasterAddr = EmitLValue(&DRE).getAddress();
// Drop the mapping so later references resolve to the thread's copy.
743 LocalDeclMap.erase(VD);
// Non-TLS path: use the variable's static/global address directly.
746 Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
747 : CGM.GetAddrOfGlobal(VD),
748 getContext().getDeclAlign(VD));
750 // Get the address of the threadprivate variable.
751 Address PrivateAddr = EmitLValue(*IRef).getAddress();
752 if (CopiedVars.size() == 1) {
753 // At first check if current thread is a master thread. If it is, no
754 // need to copy data.
755 CopyBegin = createBasicBlock("copyin.not.master");
756 CopyEnd = createBasicBlock("copyin.not.master.end");
// Master detection: master's private address equals the master address.
757 Builder.CreateCondBr(
758 Builder.CreateICmpNE(
759 Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
760 Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy)),
762 EmitBlock(CopyBegin);
764 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
765 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
766 EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
774 // Exit out of copying procedure for non-master thread.
775 EmitBlock(CopyEnd, /*IsFinished=*/true);
// Emit initial private copies for 'lastprivate' variables of directive \p D
// and remember the original variables' addresses (via the pseudo destination
// variables) for the final copy-back. Returns true when the directive has at
// least one lastprivate clause.
781 bool CodeGenFunction::EmitOMPLastprivateClauseInit(
782 const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
783 if (!HaveInsertPoint())
785 bool HasAtLeastOneLastprivate = false;
// For simd directives, loop counters get privatized elsewhere — collect
// them so no second private copy is emitted here.
786 llvm::DenseSet<const VarDecl *> SIMDLCVs;
787 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
788 auto *LoopDirective = cast<OMPLoopDirective>(&D);
789 for (auto *C : LoopDirective->counters()) {
791 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
794 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
795 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
796 HasAtLeastOneLastprivate = true;
797 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()))
799 auto IRef = C->varlist_begin();
800 auto IDestRef = C->destination_exprs().begin();
801 for (auto *IInit : C->private_copies()) {
802 // Keep the address of the original variable for future update at the end
804 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
805 // Taskloops do not require additional initialization, it is done in
806 // runtime support library.
807 if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
808 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
// Bind the pseudo destination variable to the original's address so the
// final copy-back can find it.
809 PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
811 const_cast<VarDecl *>(OrigVD),
812 /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
814 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
815 return EmitLValue(&DRE).getAddress();
817 // Check if the variable is also a firstprivate: in this case IInit is
818 // not generated. Initialization of this variable will happen in codegen
819 // for 'firstprivate' clause.
820 if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
821 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
822 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
823 // Emit private VarDecl with copy init.
825 return GetAddrOfLocalVar(VD);
827 assert(IsRegistered &&
828 "lastprivate var already registered as private");
836 return HasAtLeastOneLastprivate;
// Emit the final copy-back of 'lastprivate' variables into the originals,
// optionally guarded by \p IsLastIterCond (only the thread that executed the
// sequentially-last iteration performs the copies). Loop counters that are
// lastprivate are first brought to their final value.
839 void CodeGenFunction::EmitOMPLastprivateClauseFinal(
840 const OMPExecutableDirective &D, bool NoFinals,
841 llvm::Value *IsLastIterCond) {
842 if (!HaveInsertPoint())
844 // Emit following code:
845 // if (<IsLastIterCond>) {
846 // orig_var1 = private_orig_var1;
848 // orig_varn = private_orig_varn;
850 llvm::BasicBlock *ThenBB = nullptr;
851 llvm::BasicBlock *DoneBB = nullptr;
852 if (IsLastIterCond) {
853 ThenBB = createBasicBlock(".omp.lastprivate.then");
854 DoneBB = createBasicBlock(".omp.lastprivate.done");
855 Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
// Map loop counters to their 'final' update expressions so lastprivate
// counters are updated before being copied back.
858 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
859 llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
860 if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
861 auto IC = LoopDirective->counters().begin();
862 for (auto F : LoopDirective->finals()) {
864 cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
866 AlreadyEmittedVars.insert(D);
868 LoopCountersAndUpdates[D] = F;
872 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
873 auto IRef = C->varlist_begin();
874 auto ISrcRef = C->source_exprs().begin();
875 auto IDestRef = C->destination_exprs().begin();
876 for (auto *AssignOp : C->assignment_ops()) {
877 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
878 QualType Type = PrivateVD->getType();
879 auto *CanonicalVD = PrivateVD->getCanonicalDecl();
880 if (AlreadyEmittedVars.insert(CanonicalVD).second) {
881 // If lastprivate variable is a loop control variable for loop-based
882 // directive, update its value before copyin back to original
884 if (auto *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
885 EmitIgnoredExpr(FinalExpr);
886 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
887 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
888 // Get the address of the original variable.
889 Address OriginalAddr = GetAddrOfLocalVar(DestVD);
890 // Get the address of the private variable.
891 Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
// Reference-typed privates: copy from the referenced storage.
892 if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
894 Address(Builder.CreateLoad(PrivateAddr),
895 getNaturalTypeAlignment(RefTy->getPointeeType()));
896 EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
// Clause post-update expressions run after all copies.
902 if (auto *PostUpdate = C->getPostUpdateExpr())
903 EmitIgnoredExpr(PostUpdate);
906 EmitBlock(DoneBB, /*IsFinished=*/true);
// Rebuilds the pointer/reference indirection chain of a base lvalue so that
// `Addr` (the address of a private copy) can be handed back to callers in
// place of the original variable. For each pointer/reference level between
// BaseTy and the element type ElTy a memory temporary is created and the
// temporaries are chained together with stores; finally Addr is bitcast to
// the innermost level's type and stored.
// NOTE(review): several lines are elided in this view (numbering gaps at
// 920-922, 924, 926, 929, 931-932) — the loop's tail, the cast condition and
// the returned-address selection are not fully visible; confirm against the
// full file before relying on the details below.
909 static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
910 LValue BaseLV, llvm::Value *Addr) {
911 Address Tmp = Address::invalid();
912 Address TopTmp = Address::invalid();
913 Address MostTopTmp = Address::invalid();
// Strip an outer reference; only pointer/reference levels are walked.
914 BaseTy = BaseTy.getNonReferenceType();
915 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
916 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
// One temporary per indirection level; link it into the previous level.
917 Tmp = CGF.CreateMemTemp(BaseTy);
918 if (TopTmp.isValid())
919 CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
923 BaseTy = BaseTy->getPointeeType();
// Cast Addr to the expected element type of the innermost temporary (or the
// base lvalue's own pointer type when no temporaries were needed).
925 llvm::Type *Ty = BaseLV.getPointer()->getType();
927 Ty = Tmp.getElementType();
928 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
930 CGF.Builder.CreateStore(Addr, Tmp);
// The result reuses the original base lvalue's alignment.
933 return Address(Addr, BaseLV.getAlignment());
// Follows (loads through) every pointer/reference level of BaseLV until the
// pointee type matches ElTy, then returns an lvalue for that innermost
// storage, with its pointer bitcast to ElTy*. Used to get at the "begin" of
// a reduction item reached through pointers/references.
// NOTE(review): lines 937, 943, 946, 948 and 950 are elided in this view
// (the BaseLV parameter line, the `else` of the pointer/reference split and
// part of the MakeAddrLValue call) — verify against the full file.
936 static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
938 BaseTy = BaseTy.getNonReferenceType();
939 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
940 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
// Pointers and references load differently; pick the matching helper.
941 if (auto *PtrTy = BaseTy->getAs<PointerType>())
942 BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
944 BaseLV = CGF.EmitLoadOfReferenceLValue(BaseLV.getAddress(),
945 BaseTy->castAs<ReferenceType>());
947 BaseTy = BaseTy->getPointeeType();
// Re-type the final pointer as ElTy* while keeping the discovered
// alignment/type/alignment-source of the walked lvalue.
949 return CGF.MakeAddrLValue(
951 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
952 BaseLV.getPointer(), CGF.ConvertTypeForMem(ElTy)->getPointerTo()),
953 BaseLV.getAlignment()),
954 BaseLV.getType(), BaseLV.getAlignmentSource());
// Emits initialization of the private copies required by every `reduction`
// clause of directive D and registers the original variable, the LHS/RHS
// helper variables and the private copy in PrivateScope. Three shapes of
// reduction item are handled: OpenMP array sections, array subscripts, and
// plain variables (the last split again into array-typed and scalar).
// NOTE(review): this view elides many lines (closing braces/parens, `else`
// branches, lambda terminators — see the numbering gaps). Comments below
// describe only the visible code; confirm details against the full file.
957 void CodeGenFunction::EmitOMPReductionClauseInit(
958 const OMPExecutableDirective &D,
959 CodeGenFunction::OMPPrivateScope &PrivateScope) {
// Nothing to do when codegen is unreachable here.
960 if (!HaveInsertPoint())
962 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
// The clause stores parallel lists: for each reduced variable there is a
// matching LHS expr, RHS expr, private copy and reduction-op expression.
963 auto ILHS = C->lhs_exprs().begin();
964 auto IRHS = C->rhs_exprs().begin();
965 auto IPriv = C->privates().begin();
966 auto IRed = C->reduction_ops().begin();
967 for (auto IRef : C->varlists()) {
968 auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
969 auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
970 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
// DRD: user-declared reduction (declare reduction) initializer, if any.
971 auto *DRD = getReductionInit(*IRed);
// --- Case 1: reduction over an OpenMP array section. ---
972 if (auto *OASE = dyn_cast<OMPArraySectionExpr>(IRef)) {
// Peel nested sections/subscripts down to the underlying DeclRefExpr.
973 auto *Base = OASE->getBase()->IgnoreParenImpCasts();
974 while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
975 Base = TempOASE->getBase()->IgnoreParenImpCasts();
976 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
977 Base = TempASE->getBase()->IgnoreParenImpCasts();
978 auto *DE = cast<DeclRefExpr>(Base);
979 auto *OrigVD = cast<VarDecl>(DE->getDecl());
// Lower/upper bound lvalues of the section, plus the base lvalue.
980 auto OASELValueLB = EmitOMPArraySectionExpr(OASE);
982 EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
983 auto OriginalBaseLValue = EmitLValue(DE);
985 loadToBegin(*this, OrigVD->getType(), OASELValueLB.getType(),
987 // Store the address of the original variable associated with the LHS
988 // implicit variable.
989 PrivateScope.addPrivate(LHSVD, [this, OASELValueLB]() -> Address {
990 return OASELValueLB.getAddress();
992 // Emit reduction copy.
993 bool IsRegistered = PrivateScope.addPrivate(
994 OrigVD, [this, OrigVD, PrivateVD, BaseLValue, OASELValueLB,
995 OASELValueUB, OriginalBaseLValue, DRD, IRed]() -> Address {
996 // Emit VarDecl with copy init for arrays.
997 // Get the address of the original variable captured in current
// Section length = UB - LB + 1 elements (NUW: bounds are ordered).
999 auto *Size = Builder.CreatePtrDiff(OASELValueUB.getPointer(),
1000 OASELValueLB.getPointer());
1001 Size = Builder.CreateNUWAdd(
1002 Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
// Bind the VLA size expression to the computed Size for the alloca.
1003 CodeGenFunction::OpaqueValueMapping OpaqueMap(
1004 *this, cast<OpaqueValueExpr>(
1006 .getAsVariableArrayType(PrivateVD->getType())
1009 EmitVariablyModifiedType(PrivateVD->getType());
1010 auto Emission = EmitAutoVarAlloca(*PrivateVD);
1011 auto Addr = Emission.getAllocatedAddress();
1012 auto *Init = PrivateVD->getInit();
1013 EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(),
1015 OASELValueLB.getAddress());
1016 EmitAutoVarCleanups(Emission);
1017 // Emit private VarDecl with reduction init.
// Re-point the returned address so that indexing from the original
// base lands inside the private copy.
1018 auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
1019 OASELValueLB.getPointer());
1020 auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
1021 return castToBase(*this, OrigVD->getType(),
1022 OASELValueLB.getType(), OriginalBaseLValue,
1025 assert(IsRegistered && "private var already registered as private");
1026 // Silence the warning about unused variable.
// RHS helper aliases the private copy directly.
1028 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
1029 return GetAddrOfLocalVar(PrivateVD);
// --- Case 2: reduction over a single array element. ---
1031 } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(IRef)) {
1032 auto *Base = ASE->getBase()->IgnoreParenImpCasts();
1033 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1034 Base = TempASE->getBase()->IgnoreParenImpCasts();
1035 auto *DE = cast<DeclRefExpr>(Base);
1036 auto *OrigVD = cast<VarDecl>(DE->getDecl());
1037 auto ASELValue = EmitLValue(ASE);
1038 auto OriginalBaseLValue = EmitLValue(DE);
1039 LValue BaseLValue = loadToBegin(
1040 *this, OrigVD->getType(), ASELValue.getType(), OriginalBaseLValue);
1041 // Store the address of the original variable associated with the LHS
1042 // implicit variable.
1043 PrivateScope.addPrivate(LHSVD, [this, ASELValue]() -> Address {
1044 return ASELValue.getAddress();
1046 // Emit reduction copy.
1047 bool IsRegistered = PrivateScope.addPrivate(
1048 OrigVD, [this, OrigVD, PrivateVD, BaseLValue, ASELValue,
1049 OriginalBaseLValue, DRD, IRed]() -> Address {
1050 // Emit private VarDecl with reduction init.
1051 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1052 auto Addr = Emission.getAllocatedAddress();
// Prefer the user-declared reduction initializer when present (or
// when the private copy has no default init of its own).
1053 if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1054 emitInitWithReductionInitializer(*this, DRD, *IRed, Addr,
1055 ASELValue.getAddress(),
1056 ASELValue.getType());
1058 EmitAutoVarInit(Emission);
1059 EmitAutoVarCleanups(Emission);
// Shift so the original element's offset maps into the private copy.
1060 auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
1061 ASELValue.getPointer());
1062 auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
1063 return castToBase(*this, OrigVD->getType(), ASELValue.getType(),
1064 OriginalBaseLValue, Ptr);
1066 assert(IsRegistered && "private var already registered as private");
1067 // Silence the warning about unused variable.
1069 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
1070 return Builder.CreateElementBitCast(
1071 GetAddrOfLocalVar(PrivateVD), ConvertTypeForMem(RHSVD->getType()),
// --- Case 3: reduction over a plain variable. ---
1075 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
1076 QualType Type = PrivateVD->getType();
// 3a: array-typed variable (incl. VLAs) — aggregate init + bitcasts.
1077 if (getContext().getAsArrayType(Type)) {
1078 // Store the address of the original variable associated with the LHS
1079 // implicit variable.
1080 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1081 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1082 IRef->getType(), VK_LValue, IRef->getExprLoc());
1083 Address OriginalAddr = EmitLValue(&DRE).getAddress();
1084 PrivateScope.addPrivate(LHSVD, [this, &OriginalAddr,
1085 LHSVD]() -> Address {
1086 OriginalAddr = Builder.CreateElementBitCast(
1087 OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
1088 return OriginalAddr;
1090 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
// VLA private copies need their size expression bound first.
1091 if (Type->isVariablyModifiedType()) {
1092 CodeGenFunction::OpaqueValueMapping OpaqueMap(
1093 *this, cast<OpaqueValueExpr>(
1095 .getAsVariableArrayType(PrivateVD->getType())
1098 getTypeSize(OrigVD->getType().getNonReferenceType())));
1099 EmitVariablyModifiedType(Type);
1101 auto Emission = EmitAutoVarAlloca(*PrivateVD);
1102 auto Addr = Emission.getAllocatedAddress();
1103 auto *Init = PrivateVD->getInit();
1104 EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(),
1105 DRD ? *IRed : Init, OriginalAddr);
1106 EmitAutoVarCleanups(Emission);
1107 return Emission.getAllocatedAddress();
1109 assert(IsRegistered && "private var already registered as private");
1110 // Silence the warning about unused variable.
1112 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
1113 return Builder.CreateElementBitCast(
1114 GetAddrOfLocalVar(PrivateVD),
1115 ConvertTypeForMem(RHSVD->getType()), "rhs.begin");
// 3b: scalar variable — simple private copy with reduction init.
1118 // Store the address of the original variable associated with the LHS
1119 // implicit variable.
1120 Address OriginalAddr = Address::invalid();
1121 PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef,
1122 &OriginalAddr]() -> Address {
1123 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1124 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1125 IRef->getType(), VK_LValue, IRef->getExprLoc());
1126 OriginalAddr = EmitLValue(&DRE).getAddress();
1127 return OriginalAddr;
1129 // Emit reduction copy.
1130 bool IsRegistered = PrivateScope.addPrivate(
1131 OrigVD, [this, PrivateVD, OriginalAddr, DRD, IRed]() -> Address {
1132 // Emit private VarDecl with reduction init.
1133 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1134 auto Addr = Emission.getAllocatedAddress();
1135 if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1136 emitInitWithReductionInitializer(*this, DRD, *IRed, Addr,
1138 PrivateVD->getType());
1140 EmitAutoVarInit(Emission);
1141 EmitAutoVarCleanups(Emission);
1144 assert(IsRegistered && "private var already registered as private");
1145 // Silence the warning about unused variable.
1147 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
1148 return GetAddrOfLocalVar(PrivateVD);
// Emits the final reduction step for directive D: gathers the parallel
// privates / LHS / RHS / reduction-op expression lists from every reduction
// clause and hands them to the OpenMP runtime's emitReduction. The "nowait"
// flag is set when a nowait clause is present, the directive is a parallel
// directive (implicit barrier already exists), or the directive is simd.
// NOTE(review): the early-return body after HaveInsertPoint() and some
// closing braces are elided in this view.
1160 void CodeGenFunction::EmitOMPReductionClauseFinal(
1161 const OMPExecutableDirective &D) {
1162 if (!HaveInsertPoint())
1164 llvm::SmallVector<const Expr *, 8> Privates;
1165 llvm::SmallVector<const Expr *, 8> LHSExprs;
1166 llvm::SmallVector<const Expr *, 8> RHSExprs;
1167 llvm::SmallVector<const Expr *, 8> ReductionOps;
1168 bool HasAtLeastOneReduction = false;
1169 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1170 HasAtLeastOneReduction = true;
// The four lists stay index-aligned across clauses.
1171 Privates.append(C->privates().begin(), C->privates().end());
1172 LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
1173 RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
1174 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
1176 if (HasAtLeastOneReduction) {
1177 // Emit nowait reduction if nowait clause is present or directive is a
1178 // parallel directive (it always has implicit barrier).
1179 CGM.getOpenMPRuntime().emitReduction(
1180 *this, D.getLocEnd(), Privates, LHSExprs, RHSExprs, ReductionOps,
1181 D.getSingleClause<OMPNowaitClause>() ||
1182 isOpenMPParallelDirective(D.getDirectiveKind()) ||
1183 D.getDirectiveKind() == OMPD_simd,
// Last argument flags the simd flavor of the reduction.
1184 D.getDirectiveKind() == OMPD_simd);
// Emits the post-update expressions of all reduction clauses of D. The first
// post-update found may open a conditional block: CondGen can return a
// condition value (e.g. "am I the last iteration"), in which case the
// updates are emitted under that condition and joined at a ".done" block.
// A null condition from CondGen means the updates are emitted unguarded.
1188 static void emitPostUpdateForReductionClause(
1189 CodeGenFunction &CGF, const OMPExecutableDirective &D,
1190 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1191 if (!CGF.HaveInsertPoint())
// DoneBB doubles as "conditional block already opened" marker.
1193 llvm::BasicBlock *DoneBB = nullptr;
1194 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1195 if (auto *PostUpdate = C->getPostUpdateExpr()) {
1197 if (auto *Cond = CondGen(CGF)) {
1198 // If the first post-update expression is found, emit conditional
1199 // block if it was requested.
1200 auto *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
1201 DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
1202 CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1203 CGF.EmitBlock(ThenBB);
1206 CGF.EmitIgnoredExpr(PostUpdate);
// Join block is only emitted if a conditional block was opened.
1210 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
// Shared lowering for parallel-style directives: outlines the captured
// statement into a function, applies num_threads/proc_bind clauses, finds
// the applicable `if` clause condition, and emits the runtime parallel call
// with the captured variables.
1213 static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
1214 const OMPExecutableDirective &S,
1215 OpenMPDirectiveKind InnermostKind,
1216 const RegionCodeGenTy &CodeGen) {
1217 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
1218 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().
1219 emitParallelOrTeamsOutlinedFunction(S,
1220 *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
// num_threads expression may have its own cleanups; scope them locally.
1221 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
1222 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
1223 auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
1224 /*IgnoreResultAssign*/ true);
1225 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
1226 CGF, NumThreads, NumThreadsClause->getLocStart());
1228 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
1229 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
1230 CGF.CGM.getOpenMPRuntime().emitProcBindClause(
1231 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
// Only an `if` clause with no name modifier or the `parallel` modifier
// applies to the parallel region.
1233 const Expr *IfCond = nullptr;
1234 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
1235 if (C->getNameModifier() == OMPD_unknown ||
1236 C->getNameModifier() == OMPD_parallel) {
1237 IfCond = C->getCondition();
// Capture the variables referenced by the region and call the runtime.
1242 OMPLexicalScope Scope(CGF, S);
1243 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
1244 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
1245 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
1246 CapturedVars, IfCond);
// Lowers `#pragma omp parallel`: builds the region body (copyin,
// firstprivate, private and reduction handling around the captured
// statement) and delegates outlining + the runtime call to
// emitCommonOMPParallelDirective; reduction post-updates are emitted after.
1249 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
1250 // Emit parallel region as a standalone region.
1251 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1252 OMPPrivateScope PrivateScope(CGF);
1253 bool Copyins = CGF.EmitOMPCopyinClause(S);
1254 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
// Visible code emits the barrier unconditionally here; presumably it is
// guarded by `Copyins` on an elided line — confirm against the full file.
1256 // Emit implicit barrier to synchronize threads and avoid data races on
1257 // propagation master's thread values of threadprivate variables to local
1258 // instances of that variables of all other implicit threads.
1259 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1260 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
1261 /*ForceSimpleCall=*/true);
1263 CGF.EmitOMPPrivateClause(S, PrivateScope);
1264 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1265 (void)PrivateScope.Privatize();
1266 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1267 CGF.EmitOMPReductionClauseFinal(S);
1269 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
// No extra condition for the post-update: emit it unguarded.
1270 emitPostUpdateForReductionClause(
1271 *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
// Emits the body of an OpenMP loop directive for one iteration: updates the
// loop counters and linear variables from the iteration variable, then emits
// the user body with a `continue` destination that jumps past it.
1274 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
1275 JumpDest LoopExit) {
1276 RunCleanupsScope BodyScope(*this);
1277 // Update counters values on current iteration.
1278 for (auto I : D.updates()) {
1281 // Update the linear variables.
1282 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1283 for (auto *U : C->updates())
1287 // On a continue in the body, jump to the end.
1288 auto Continue = getJumpDestInCurrentScope("omp.body.continue");
// break -> LoopExit, continue -> Continue inside the user body.
1289 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1291 EmitStmt(D.getBody());
1292 // The end (updates/cleanups).
1293 EmitBlock(Continue.getBlock());
1294 BreakContinueStack.pop_back();
// Emits the innermost while-style loop of an OpenMP worksharing/simd region:
//   cond block -> (cleanup exit) -> body -> inc block -> back-edge -> exit.
// BodyGen emits the body, PostIncGen runs after the increment (e.g. ordered
// dispatch hooks); IncExpr advances the iteration variable.
1297 void CodeGenFunction::EmitOMPInnerLoop(
1298 const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
1299 const Expr *IncExpr,
1300 const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
1301 const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
1302 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
1304 // Start the loop with a block that tests the condition.
1305 auto CondBlock = createBasicBlock("omp.inner.for.cond");
1306 EmitBlock(CondBlock);
// Register the loop with LoopStack so loop metadata gets attached.
1307 const SourceRange &R = S.getSourceRange();
1308 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1309 SourceLocToDebugLoc(R.getEnd()));
1311 // If there are any cleanups between here and the loop-exit scope,
1312 // create a block to stage a loop exit along.
1313 auto ExitBlock = LoopExit.getBlock();
1314 if (RequiresCleanup)
1315 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
1317 auto LoopBody = createBasicBlock("omp.inner.for.body");
1320 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
// Staged exit: run cleanups before leaving the loop scope.
1321 if (ExitBlock != LoopExit.getBlock()) {
1322 EmitBlock(ExitBlock);
1323 EmitBranchThroughCleanup(LoopExit);
1326 EmitBlock(LoopBody);
1327 incrementProfileCounter(&S);
1329 // Create a block for the increment.
1330 auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
1331 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1335 // Emit "IV = IV + 1" and a back-edge to the condition block.
1336 EmitBlock(Continue.getBlock());
1337 EmitIgnoredExpr(IncExpr);
1339 BreakContinueStack.pop_back();
1340 EmitBranch(CondBlock);
1342 // Emit the fall-through block.
1343 EmitBlock(LoopExit.getBlock());
// Emits the initializers for all `linear` clause variables of D: each
// capture is initialized from the original variable it refers to, and any
// non-constant linear step is pre-computed into its save variable before
// the loop.
1346 void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
1347 if (!HaveInsertPoint())
1349 // Emit inits for the linear variables.
1350 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1351 for (auto *Init : C->inits()) {
1352 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
// When the init refers to another variable, copy from the (possibly
// captured) original rather than emitting the init expression directly.
1353 if (auto *Ref = dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
1354 AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
1355 auto *OrigVD = cast<VarDecl>(Ref->getDecl());
1356 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1357 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1358 VD->getInit()->getType(), VK_LValue,
1359 VD->getInit()->getExprLoc());
1360 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
1362 /*capturedByInit=*/false);
1363 EmitAutoVarCleanups(Emission);
1367 // Emit the linear steps for the linear clauses.
1368 // If a step is not constant, it is pre-calculated before the loop.
1369 if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
1370 if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
1371 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
1372 // Emit calculation of the linear step.
1373 EmitIgnoredExpr(CS);
// Emits the final-value assignments for `linear` clause variables after the
// loop. CondGen may supply a guard condition (e.g. "last iteration"); the
// first final expression found then opens a conditional block joined at a
// ".done" block. Post-update expressions are emitted per clause afterwards.
1378 void CodeGenFunction::EmitOMPLinearClauseFinal(
1379 const OMPLoopDirective &D,
1380 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1381 if (!HaveInsertPoint())
// Null until a conditional block has been opened.
1383 llvm::BasicBlock *DoneBB = nullptr;
1384 // Emit the final values of the linear variables.
1385 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1386 auto IC = C->varlist_begin();
1387 for (auto *F : C->finals()) {
1389 if (auto *Cond = CondGen(*this)) {
1390 // If the first post-update expression is found, emit conditional
1391 // block if it was requested.
1392 auto *ThenBB = createBasicBlock(".omp.linear.pu");
1393 DoneBB = createBasicBlock(".omp.linear.pu.done");
1394 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
// Redirect the private variable to the original's address so the
// final expression writes through to the original variable.
1398 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
1399 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1400 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1401 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
1402 Address OrigAddr = EmitLValue(&DRE).getAddress();
1403 CodeGenFunction::OMPPrivateScope VarScope(*this);
1404 VarScope.addPrivate(OrigVD, [OrigAddr]() -> Address { return OrigAddr; });
1405 (void)VarScope.Privatize();
1409 if (auto *PostUpdate = C->getPostUpdateExpr())
1410 EmitIgnoredExpr(PostUpdate);
1413 EmitBlock(DoneBB, /*IsFinished=*/true);
// Emits llvm.assume-style alignment assumptions for every variable listed in
// the directive's `aligned` clauses. An explicit clause alignment (constant-
// folded) is used when given; otherwise the target's default SIMD alignment
// for the pointee type is used.
1416 static void emitAlignedClause(CodeGenFunction &CGF,
1417 const OMPExecutableDirective &D) {
1418 if (!CGF.HaveInsertPoint())
1420 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
1421 unsigned ClauseAlignment = 0;
// Sema guarantees the alignment expression folds to a ConstantInt here.
1422 if (auto AlignmentExpr = Clause->getAlignment()) {
1424 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
1425 ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
1427 for (auto E : Clause->varlists()) {
1428 unsigned Alignment = ClauseAlignment;
1429 if (Alignment == 0) {
1430 // OpenMP [2.8.1, Description]
1431 // If no optional parameter is specified, implementation-defined default
1432 // alignments for SIMD instructions on the target platforms are assumed.
1435 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
1436 E->getType()->getPointeeType()))
1439 assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
1440 "alignment is not power of 2");
// Only emit the assumption when there is a non-trivial alignment.
1441 if (Alignment != 0) {
1442 llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
1443 CGF.EmitAlignmentAssumption(PtrValue, Alignment);
// Registers private copies of the loop control variables of S in LoopScope.
// Each counter gets an uninitialized private alloca; when the original
// counter is itself addressable in this function (local, captured, or
// global) the private counter decl is additionally mapped to the original's
// address.
1449 void CodeGenFunction::EmitOMPPrivateLoopCounters(
1450 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
1451 if (!HaveInsertPoint())
// counters() and private_counters() are index-aligned lists.
1453 auto I = S.private_counters().begin();
1454 for (auto *E : S.counters()) {
1455 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1456 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
1457 (void)LoopScope.addPrivate(VD, [&]() -> Address {
1458 // Emit var without initialization.
// Avoid double emission if the private counter already has storage.
1459 if (!LocalDeclMap.count(PrivateVD)) {
1460 auto VarEmission = EmitAutoVarAlloca(*PrivateVD);
1461 EmitAutoVarCleanups(VarEmission);
1463 DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
1464 /*RefersToEnclosingVariableOrCapture=*/false,
1465 (*I)->getType(), VK_LValue, (*I)->getExprLoc());
1466 return EmitLValue(&DRE).getAddress();
1468 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
1469 VD->hasGlobalStorage()) {
1470 (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
1471 DeclRefExpr DRE(const_cast<VarDecl *>(VD),
1472 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
1473 E->getType(), VK_LValue, E->getExprLoc());
1474 return EmitLValue(&DRE).getAddress();
// Emits the loop pre-condition check for a loop directive: privatizes the
// loop counters, runs their initial-value expressions, then branches on Cond
// to TrueBlock/FalseBlock (TrueCount is the profile weight for the taken
// branch).
1481 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
1482 const Expr *Cond, llvm::BasicBlock *TrueBlock,
1483 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
// Counters are privatized only for the duration of the condition check.
1487 CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
1488 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
1489 (void)PreCondScope.Privatize();
1490 // Get initial values of real counters.
1491 for (auto I : S.inits()) {
1492 CGF.EmitIgnoredExpr(I);
1495 // Check that loop is executed at least one time.
1496 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
// Emits private copies for `linear` clause variables and registers them in
// PrivateScope. For simd directives, linear variables that are also loop
// control variables are skipped (their privatization is handled by the loop
// counter logic); their private decls are still emitted.
1499 void CodeGenFunction::EmitOMPLinearClause(
1500 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
1501 if (!HaveInsertPoint())
// Collect simd loop control variables so they can be excluded below.
1503 llvm::DenseSet<const VarDecl *> SIMDLCVs;
1504 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
1505 auto *LoopDirective = cast<OMPLoopDirective>(&D);
1506 for (auto *C : LoopDirective->counters()) {
1508 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
1511 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1512 auto CurPrivate = C->privates().begin();
1513 for (auto *E : C->varlists()) {
1514 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1516 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
1517 if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
1518 bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
1519 // Emit private VarDecl with copy init.
1520 EmitVarDecl(*PrivateVD);
1521 return GetAddrOfLocalVar(PrivateVD);
1523 assert(IsRegistered && "linear var already registered as private");
1524 // Silence the warning about unused variable.
// Loop-control linear var: emit the private decl without remapping.
1527 EmitVarDecl(*PrivateVD);
// Applies `simdlen`/`safelen` clauses to the current loop's metadata.
// simdlen sets the vectorize width and keeps the loop parallel only if no
// safelen clause is present; safelen alone sets the width and forces the
// loop non-parallel, since loop-carried dependences within `safelen`
// iterations are allowed by the spec.
1533 static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
1534 const OMPExecutableDirective &D,
1536 if (!CGF.HaveInsertPoint())
1538 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
// Clause operand is a constant expression; evaluate it for the width.
1539 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
1540 /*ignoreResult=*/true);
1541 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1542 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1543 // In presence of finite 'safelen', it may be unsafe to mark all
1544 // the memory instructions parallel, because loop-carried
1545 // dependences of 'safelen' iterations are possible.
1547 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
1548 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
1549 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
1550 /*ignoreResult=*/true);
1551 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1552 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1553 // In presence of finite 'safelen', it may be unsafe to mark all
1554 // the memory instructions parallel, because loop-carried
1555 // dependences of 'safelen' iterations are possible.
1556 CGF.LoopStack.setParallel(false);
// Marks the current loop for vectorization: parallel unless the schedule is
// monotonic, vectorize-enable on, then applies simdlen/safelen clauses.
// NOTE(review): the second parameter line (presumably `bool IsMonotonic`) is
// elided in this view.
1560 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
1562 // Walk clauses and process safelen/lastprivate.
1563 LoopStack.setParallel(!IsMonotonic);
1564 LoopStack.setVectorizeEnable(true);
1565 emitSimdlenSafelenClause(*this, D, IsMonotonic);
// Emits the final-value updates of the loop counters after a simd loop, but
// only for counters whose original variable is addressable here (local,
// captured, global, or an OMPCapturedExprDecl). CondGen may guard the whole
// set of updates with a condition block joined at ".omp.final.done".
1568 void CodeGenFunction::EmitOMPSimdFinal(
1569 const OMPLoopDirective &D,
1570 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1571 if (!HaveInsertPoint())
1573 llvm::BasicBlock *DoneBB = nullptr;
// counters/private_counters/finals are index-aligned lists.
1574 auto IC = D.counters().begin();
1575 auto IPC = D.private_counters().begin();
1576 for (auto F : D.finals()) {
1577 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
1578 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
1579 auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
1580 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
1581 OrigVD->hasGlobalStorage() || CED) {
1583 if (auto *Cond = CondGen(*this)) {
1584 // If the first post-update expression is found, emit conditional
1585 // block if it was requested.
1586 auto *ThenBB = createBasicBlock(".omp.final.then");
1587 DoneBB = createBasicBlock(".omp.final.done");
1588 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
// Captured-expression counters resolve through their init expression;
// otherwise go through the private counter's decl.
1592 Address OrigAddr = Address::invalid();
1594 OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
1596 DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
1597 /*RefersToEnclosingVariableOrCapture=*/false,
1598 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
1599 OrigAddr = EmitLValue(&DRE).getAddress();
// Temporarily remap the original variable so the final expression
// writes its result into OrigAddr.
1601 OMPPrivateScope VarScope(*this);
1602 VarScope.addPrivate(OrigVD,
1603 [OrigAddr]() -> Address { return OrigAddr; });
1604 (void)VarScope.Privatize();
1611 EmitBlock(DoneBB, /*IsFinished=*/true);
// Lowers `#pragma omp simd` as an inlined region: emits the pre-condition
// guard, the iteration variable and iteration count, all clause-driven
// privatization (linear, private, reduction, lastprivate), the vectorized
// inner loop, and the final/lastprivate/reduction/linear wrap-up.
1614 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
1615 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1616 OMPLoopScope PreInitScope(CGF, S);
1618 // for (IV in 0..LastIteration) BODY;
1619 // <Final counter/linear vars updates>;
1623 // Emit: if (PreCond) - begin.
1624 // If the condition constant folds and can be elided, avoid emitting the
1627 llvm::BasicBlock *ContBlock = nullptr;
// When the pre-condition folds to a constant the guard blocks are
// skipped entirely (the false case is on elided lines).
1628 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
1632 auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
1633 ContBlock = CGF.createBasicBlock("simd.if.end");
1634 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
1635 CGF.getProfileCount(&S));
1636 CGF.EmitBlock(ThenBlock);
1637 CGF.incrementProfileCounter(&S);
1640 // Emit the loop iteration variable.
1641 const Expr *IVExpr = S.getIterationVariable();
1642 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
1643 CGF.EmitVarDecl(*IVDecl);
1644 CGF.EmitIgnoredExpr(S.getInit());
1646 // Emit the iterations count variable.
1647 // If it is not a variable, Sema decided to calculate iterations count on
1648 // each iteration (e.g., it is foldable into a constant).
1649 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
1650 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
1651 // Emit calculation of the iterations count.
1652 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
1655 CGF.EmitOMPSimdInit(S);
1657 emitAlignedClause(CGF, S);
1658 CGF.EmitOMPLinearClauseInit(S);
// All clause privatization happens inside one scope, then Privatize()
// switches the mappings on before the loop body is emitted.
1660 OMPPrivateScope LoopScope(CGF);
1661 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
1662 CGF.EmitOMPLinearClause(S, LoopScope);
1663 CGF.EmitOMPPrivateClause(S, LoopScope);
1664 CGF.EmitOMPReductionClauseInit(S, LoopScope);
1665 bool HasLastprivateClause =
1666 CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
1667 (void)LoopScope.Privatize();
1668 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
1670 [&S](CodeGenFunction &CGF) {
1671 CGF.EmitOMPLoopBody(S, JumpDest());
1672 CGF.EmitStopPoint(&S);
1674 [](CodeGenFunction &) {});
// All wrap-up passes run unguarded (CondGen returns nullptr).
1675 CGF.EmitOMPSimdFinal(
1676 S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1677 // Emit final copy of the lastprivate variables at the end of loops.
1678 if (HasLastprivateClause)
1679 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
1680 CGF.EmitOMPReductionClauseFinal(S);
1681 emitPostUpdateForReductionClause(
1682 CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1684 CGF.EmitOMPLinearClauseFinal(
1685 S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1686 // Emit: if (PreCond) - end.
1688 CGF.EmitBranch(ContBlock);
1689 CGF.EmitBlock(ContBlock, true);
// simd is emitted inline (no outlining) via the runtime helper.
1692 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1693 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
// Emits the outer dispatch loop used for dynamic/guided/ordered and chunked
// static schedules: each outer iteration fetches the next chunk from the
// runtime (or advances LB/UB by the stride for static schedules), then runs
// the inner loop over that chunk, and finally tells the runtime the loop is
// done.
1696 void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
1697 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
1698 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1699 auto &RT = CGM.getOpenMPRuntime();
1701 const Expr *IVExpr = S.getIterationVariable();
1702 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1703 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1705 auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
1707 // Start the loop with a block that tests the condition.
1708 auto CondBlock = createBasicBlock("omp.dispatch.cond");
1709 EmitBlock(CondBlock);
1710 const SourceRange &R = S.getSourceRange();
1711 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1712 SourceLocToDebugLoc(R.getEnd()));
// Chunk-available test: either the static bound check or the runtime's
// __kmpc_dispatch_next for dynamic/ordered schedules.
1714 llvm::Value *BoolCondVal = nullptr;
1715 if (!DynamicOrOrdered) {
1716 // UB = min(UB, GlobalUB)
1717 EmitIgnoredExpr(S.getEnsureUpperBound());
1719 EmitIgnoredExpr(S.getInit());
1721 BoolCondVal = EvaluateExprAsBool(S.getCond());
1723 BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, IL,
1727 // If there are any cleanups between here and the loop-exit scope,
1728 // create a block to stage a loop exit along.
1729 auto ExitBlock = LoopExit.getBlock();
1730 if (LoopScope.requiresCleanups())
1731 ExitBlock = createBasicBlock("omp.dispatch.cleanup");
1733 auto LoopBody = createBasicBlock("omp.dispatch.body");
1734 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
1735 if (ExitBlock != LoopExit.getBlock()) {
1736 EmitBlock(ExitBlock);
1737 EmitBranchThroughCleanup(LoopExit);
1739 EmitBlock(LoopBody);
1741 // Emit "IV = LB" (in case of static schedule, we have already calculated new
1742 // LB for loop condition and emitted it above).
1743 if (DynamicOrOrdered)
1744 EmitIgnoredExpr(S.getInit());
1746 // Create a block for the increment.
1747 auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
1748 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1750 // Generate !llvm.loop.parallel metadata for loads and stores for loops
1751 // with dynamic/guided scheduling and without ordered clause.
1752 if (!isOpenMPSimdDirective(S.getDirectiveKind()))
1753 LoopStack.setParallel(!IsMonotonic);
1755 EmitOMPSimdInit(S, IsMonotonic);
1757 SourceLocation Loc = S.getLocStart();
// The inner loop runs the chunk; ordered loops notify the runtime at the
// end of each iteration batch via emitForOrderedIterationEnd.
1758 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
1759 [&S, LoopExit](CodeGenFunction &CGF) {
1760 CGF.EmitOMPLoopBody(S, LoopExit);
1761 CGF.EmitStopPoint(&S);
1763 [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
1765 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
1766 CGF, Loc, IVSize, IVSigned);
1770 EmitBlock(Continue.getBlock());
1771 BreakContinueStack.pop_back();
1772 if (!DynamicOrOrdered) {
1773 // Emit "LB = LB + Stride", "UB = UB + Stride".
1774 EmitIgnoredExpr(S.getNextLowerBound());
1775 EmitIgnoredExpr(S.getNextUpperBound());
1778 EmitBranch(CondBlock);
1780 // Emit the fall-through block.
1781 EmitBlock(LoopExit.getBlock());
1783 // Tell the runtime we are done.
// Static schedules need an explicit __kmpc_for_static_fini; the cancel
// stack wraps it so cancellation exits also run it.
1784 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
1785 if (!DynamicOrOrdered)
1786 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
1788 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
// Emit the outer "dispatch" loop for a worksharing loop whose schedule
// requires asking the OpenMP runtime for work chunks (dynamic, guided, auto,
// runtime, ordered, or chunked static). Initializes the runtime dispatch,
// then delegates to EmitOMPOuterLoop for the chunk-request loop.
// NOTE(review): this paste is missing lines (embedded numbering skips); the
// comments below describe only the visible statements.
1791 void CodeGenFunction::EmitOMPForOuterLoop(
1792 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
1793 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
1794 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1795 auto &RT = CGM.getOpenMPRuntime();
1797 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
// An 'ordered' clause also forces dispatch-style codegen, even for static
// schedules.
1798 const bool DynamicOrOrdered =
1799 Ordered || RT.isDynamic(ScheduleKind.Schedule);
// Sanity check: a static non-chunked schedule is handled inline by the
// caller and must never reach this outer-loop path.
1802 !RT.isStaticNonchunked(ScheduleKind.Schedule,
1803 /*Chunked=*/Chunk != nullptr)) &&
1804 "static non-chunked schedule does not need outer loop");
1808 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1809 // When schedule(dynamic,chunk_size) is specified, the iterations are
1810 // distributed to threads in the team in chunks as the threads request them.
1811 // Each thread executes a chunk of iterations, then requests another chunk,
1812 // until no chunks remain to be distributed. Each chunk contains chunk_size
1813 // iterations, except for the last chunk to be distributed, which may have
1814 // fewer iterations. When no chunk_size is specified, it defaults to 1.
1816 // When schedule(guided,chunk_size) is specified, the iterations are assigned
1817 // to threads in the team in chunks as the executing threads request them.
1818 // Each thread executes a chunk of iterations, then requests another chunk,
1819 // until no chunks remain to be assigned. For a chunk_size of 1, the size of
1820 // each chunk is proportional to the number of unassigned iterations divided
1821 // by the number of threads in the team, decreasing to 1. For a chunk_size
1822 // with value k (greater than 1), the size of each chunk is determined in the
1823 // same way, with the restriction that the chunks do not contain fewer than k
1824 // iterations (except for the last chunk to be assigned, which may have fewer
1825 // than k iterations).
1827 // When schedule(auto) is specified, the decision regarding scheduling is
1828 // delegated to the compiler and/or runtime system. The programmer gives the
1829 // implementation the freedom to choose any possible mapping of iterations to
1830 // threads in the team.
1832 // When schedule(runtime) is specified, the decision regarding scheduling is
1833 // deferred until run time, and the schedule and chunk size are taken from the
1834 // run-sched-var ICV. If the ICV is set to auto, the schedule is
1835 // implementation defined
1837 // while(__kmpc_dispatch_next(&LB, &UB)) {
1839 // while (idx <= UB) { BODY; ++idx;
1840 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
1844 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1845 // When schedule(static, chunk_size) is specified, iterations are divided into
1846 // chunks of size chunk_size, and the chunks are assigned to the threads in
1847 // the team in a round-robin fashion in the order of the thread number.
1849 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
1850 // while (idx <= UB) { BODY; ++idx; } // inner loop
// Width/signedness of the iteration variable select the 4/8-byte
// (un)signed runtime entry points.
1856 const Expr *IVExpr = S.getIterationVariable();
1857 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1858 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
// Dispatch-style schedules are initialized with the global upper bound;
// otherwise the static-chunked init receives the bound/stride addresses.
1860 if (DynamicOrOrdered) {
1861 llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());
1862 RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind, IVSize,
1863 IVSigned, Ordered, UBVal, Chunk);
1865 RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
1866 Ordered, IL, LB, UB, ST, Chunk);
// Emit the actual chunk-request loop around the inner loop body.
1869 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, Ordered, LB, UB,
// Emit the outer loop for a 'distribute' directive. Distribute schedules are
// always static here, so this initializes the runtime's static distribution
// and reuses EmitOMPOuterLoop with dynamic/ordered codegen disabled.
1873 void CodeGenFunction::EmitOMPDistributeOuterLoop(
1874 OpenMPDistScheduleClauseKind ScheduleKind,
1875 const OMPDistributeDirective &S, OMPPrivateScope &LoopScope,
1876 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1878 auto &RT = CGM.getOpenMPRuntime();
1881 // Same behavior as a OMPForOuterLoop, except that schedule cannot be
// Width/signedness of the iteration variable select the 4/8-byte
// (un)signed runtime entry points.
1885 const Expr *IVExpr = S.getIterationVariable();
1886 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1887 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1889 RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
1890 IVSize, IVSigned, /* Ordered = */ false,
1891 IL, LB, UB, ST, Chunk);
// Distribute loops are never dynamic, monotonic-relevant, or ordered.
1893 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false,
1894 S, LoopScope, /* Ordered = */ false, LB, UB, ST, IL, Chunk);
// Codegen for '#pragma omp distribute parallel for': emitted as a single
// inlined region via the OpenMP runtime; a cancel region is set up with
// cancellation disabled.
// NOTE(review): the statement that emits the captured body (orig line ~1906)
// is missing from this paste.
1897 void CodeGenFunction::EmitOMPDistributeParallelForDirective(
1898 const OMPDistributeParallelForDirective &S) {
1899 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1900 CGM.getOpenMPRuntime().emitInlinedDirective(
1901 *this, OMPD_distribute_parallel_for,
1902 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1903 OMPLoopScope PreInitScope(CGF, S);
1904 OMPCancelStackRAII CancelRegion(CGF, OMPD_distribute_parallel_for,
1905 /*HasCancel=*/false);
1907 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp distribute parallel for simd': emitted as a single
// inlined region; the OMPLoopScope emits loop pre-init declarations first.
1911 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
1912 const OMPDistributeParallelForSimdDirective &S) {
1913 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1914 CGM.getOpenMPRuntime().emitInlinedDirective(
1915 *this, OMPD_distribute_parallel_for_simd,
1916 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1917 OMPLoopScope PreInitScope(CGF, S);
1919 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp distribute simd': emitted as a single inlined
// region over the captured statement.
1923 void CodeGenFunction::EmitOMPDistributeSimdDirective(
1924 const OMPDistributeSimdDirective &S) {
1925 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1926 CGM.getOpenMPRuntime().emitInlinedDirective(
1927 *this, OMPD_distribute_simd,
1928 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1929 OMPLoopScope PreInitScope(CGF, S);
1931 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target parallel for simd': emitted as a single
// inlined region over the captured statement.
1935 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
1936 const OMPTargetParallelForSimdDirective &S) {
1937 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1938 CGM.getOpenMPRuntime().emitInlinedDirective(
1939 *this, OMPD_target_parallel_for_simd,
1940 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1941 OMPLoopScope PreInitScope(CGF, S);
1943 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target simd': emitted as a single inlined region
// over the captured statement.
1947 void CodeGenFunction::EmitOMPTargetSimdDirective(
1948 const OMPTargetSimdDirective &S) {
1949 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1950 CGM.getOpenMPRuntime().emitInlinedDirective(
1951 *this, OMPD_target_simd, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1952 OMPLoopScope PreInitScope(CGF, S);
1954 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp teams distribute': emitted as a single inlined
// region over the captured statement.
1958 void CodeGenFunction::EmitOMPTeamsDistributeDirective(
1959 const OMPTeamsDistributeDirective &S) {
1960 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1961 CGM.getOpenMPRuntime().emitInlinedDirective(
1962 *this, OMPD_teams_distribute,
1963 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1964 OMPLoopScope PreInitScope(CGF, S);
1966 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp teams distribute simd': emitted as a single
// inlined region over the captured statement.
1970 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
1971 const OMPTeamsDistributeSimdDirective &S) {
1972 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1973 CGM.getOpenMPRuntime().emitInlinedDirective(
1974 *this, OMPD_teams_distribute_simd,
1975 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1976 OMPLoopScope PreInitScope(CGF, S);
1978 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp teams distribute parallel for simd': emitted as a
// single inlined region over the captured statement.
1982 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
1983 const OMPTeamsDistributeParallelForSimdDirective &S) {
1984 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1985 CGM.getOpenMPRuntime().emitInlinedDirective(
1986 *this, OMPD_teams_distribute_parallel_for_simd,
1987 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1988 OMPLoopScope PreInitScope(CGF, S);
1990 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp teams distribute parallel for': emitted as a
// single inlined region over the captured statement.
1994 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
1995 const OMPTeamsDistributeParallelForDirective &S) {
1996 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1997 CGM.getOpenMPRuntime().emitInlinedDirective(
1998 *this, OMPD_teams_distribute_parallel_for,
1999 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2000 OMPLoopScope PreInitScope(CGF, S);
2002 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target teams': emitted as a single inlined region
// over the captured statement. Unlike the loop-based wrappers above, there is
// no OMPLoopScope here (no associated loop).
2006 void CodeGenFunction::EmitOMPTargetTeamsDirective(
2007 const OMPTargetTeamsDirective &S) {
2008 CGM.getOpenMPRuntime().emitInlinedDirective(
2009 *this, OMPD_target_teams, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2011 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target teams distribute': emitted as a single
// inlined region over the captured statement.
2015 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
2016 const OMPTargetTeamsDistributeDirective &S) {
2017 CGM.getOpenMPRuntime().emitInlinedDirective(
2018 *this, OMPD_target_teams_distribute,
2019 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2021 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target teams distribute parallel for': emitted as
// a single inlined region over the captured statement.
2025 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
2026 const OMPTargetTeamsDistributeParallelForDirective &S) {
2027 CGM.getOpenMPRuntime().emitInlinedDirective(
2028 *this, OMPD_target_teams_distribute_parallel_for,
2029 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2031 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target teams distribute parallel for simd':
// emitted as a single inlined region over the captured statement.
2035 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
2036 const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
2037 CGM.getOpenMPRuntime().emitInlinedDirective(
2038 *this, OMPD_target_teams_distribute_parallel_for_simd,
2039 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2041 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Codegen for '#pragma omp target teams distribute simd': emitted as a single
// inlined region over the captured statement.
2045 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective(
2046 const OMPTargetTeamsDistributeSimdDirective &S) {
2047 CGM.getOpenMPRuntime().emitInlinedDirective(
2048 *this, OMPD_target_teams_distribute_simd,
2049 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2051 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2055 /// \brief Emit a helper variable and return corresponding lvalue.
/// Declares the VarDecl behind \p Helper (a DeclRefExpr to a helper variable
/// such as .omp.lb/.omp.ub) in the current function, then returns the lvalue
/// that references it.
2056 static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
2057 const DeclRefExpr *Helper) {
2058 auto VDecl = cast<VarDecl>(Helper->getDecl());
2059 CGF.EmitVarDecl(*VDecl);
2060 return CGF.EmitLValue(Helper);
// Plain aggregate bundling a schedule clause kind with its two optional
// modifiers (e.g. monotonic/nonmonotonic/simd), so they can be passed around
// as one value.
2064 struct ScheduleKindModifiersTy {
2065 OpenMPScheduleClauseKind Kind;
2066 OpenMPScheduleClauseModifier M1;
2067 OpenMPScheduleClauseModifier M2;
2068 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
2069 OpenMPScheduleClauseModifier M1,
2070 OpenMPScheduleClauseModifier M2)
2071 : Kind(Kind), M1(M1), M2(M2) {}
// Emits a worksharing loop ('for'/'for simd' and their combined forms):
// declares the IV and bound helper variables, privatizes clause variables,
// then emits either an inline static non-chunked loop or an outer dispatch
// loop depending on the schedule. Returns true if the directive had a
// lastprivate clause (callers use this to decide on an implicit barrier).
// NOTE(review): this paste is missing lines (embedded numbering skips); the
// comments below describe only the visible statements.
2075 bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
2076 // Emit the loop iteration variable.
2077 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
2078 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
2079 EmitVarDecl(*IVDecl);
2081 // Emit the iterations count variable.
2082 // If it is not a variable, Sema decided to calculate iterations count on each
2083 // iteration (e.g., it is foldable into a constant).
2084 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2085 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2086 // Emit calculation of the iterations count.
2087 EmitIgnoredExpr(S.getCalcLastIteration());
2090 auto &RT = CGM.getOpenMPRuntime();
2092 bool HasLastprivateClause;
2093 // Check pre-condition.
2095 OMPLoopScope PreInitScope(*this, S);
2096 // Skip the entire loop if we don't meet the precondition.
2097 // If the condition constant folds and can be elided, avoid emitting the
2100 llvm::BasicBlock *ContBlock = nullptr;
2101 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2105 auto *ThenBlock = createBasicBlock("omp.precond.then");
2106 ContBlock = createBasicBlock("omp.precond.end");
2107 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
2108 getProfileCount(&S));
2109 EmitBlock(ThenBlock);
2110 incrementProfileCounter(&S);
// An 'ordered(n)' clause with loop count triggers doacross initialization.
2113 bool Ordered = false;
2114 if (auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
2115 if (OrderedClause->getNumForLoops())
2116 RT.emitDoacrossInit(*this, S);
2121 llvm::DenseSet<const Expr *> EmittedFinals;
2122 emitAlignedClause(*this, S);
2123 EmitOMPLinearClauseInit(S);
2124 // Emit helper vars inits.
// Lower bound, upper bound, stride and is-last-iteration flag.
2126 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
2128 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
2130 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
2132 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
2134 // Emit 'then' code.
2136 OMPPrivateScope LoopScope(*this);
2137 if (EmitOMPFirstprivateClause(S, LoopScope)) {
2138 // Emit implicit barrier to synchronize threads and avoid data races on
2139 // initialization of firstprivate variables and post-update of
2140 // lastprivate variables.
2141 CGM.getOpenMPRuntime().emitBarrierCall(
2142 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2143 /*ForceSimpleCall=*/true);
2145 EmitOMPPrivateClause(S, LoopScope);
2146 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
2147 EmitOMPReductionClauseInit(S, LoopScope);
2148 EmitOMPPrivateLoopCounters(S, LoopScope);
2149 EmitOMPLinearClause(S, LoopScope);
2150 (void)LoopScope.Privatize();
2152 // Detect the loop schedule kind and chunk.
2153 llvm::Value *Chunk = nullptr;
2154 OpenMPScheduleTy ScheduleKind;
2155 if (auto *C = S.getSingleClause<OMPScheduleClause>()) {
2156 ScheduleKind.Schedule = C->getScheduleKind();
2157 ScheduleKind.M1 = C->getFirstScheduleModifier();
2158 ScheduleKind.M2 = C->getSecondScheduleModifier();
// The chunk expression is converted to the IV's type for the runtime call.
2159 if (const auto *Ch = C->getChunkSize()) {
2160 Chunk = EmitScalarExpr(Ch);
2161 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
2162 S.getIterationVariable()->getType(),
2166 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2167 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2168 // OpenMP 4.5, 2.7.1 Loop Construct, Description.
2169 // If the static schedule kind is specified or if the ordered clause is
2170 // specified, and if no monotonic modifier is specified, the effect will
2171 // be as if the monotonic modifier was specified.
// Fast path: static non-chunked schedule is lowered inline, with no outer
// dispatch loop.
2172 if (RT.isStaticNonchunked(ScheduleKind.Schedule,
2173 /* Chunked */ Chunk != nullptr) &&
2175 if (isOpenMPSimdDirective(S.getDirectiveKind()))
2176 EmitOMPSimdInit(S, /*IsMonotonic=*/true);
2177 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2178 // When no chunk_size is specified, the iteration space is divided into
2179 // chunks that are approximately equal in size, and at most one chunk is
2180 // distributed to each thread. Note that the size of the chunks is
2181 // unspecified in this case.
2182 RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
2183 IVSize, IVSigned, Ordered,
2184 IL.getAddress(), LB.getAddress(),
2185 UB.getAddress(), ST.getAddress())
2187 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2188 // UB = min(UB, GlobalUB);
2189 EmitIgnoredExpr(S.getEnsureUpperBound());
2191 EmitIgnoredExpr(S.getInit());
2192 // while (idx <= UB) { BODY; ++idx; }
2193 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
2195 [&S, LoopExit](CodeGenFunction &CGF) {
2196 CGF.EmitOMPLoopBody(S, LoopExit);
2197 CGF.EmitStopPoint(&S);
2199 [](CodeGenFunction &) {});
2200 EmitBlock(LoopExit.getBlock());
2201 // Tell the runtime we are done.
2202 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2203 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
2205 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
// Any static/unknown/ordered/monotonic-modified schedule is treated as
// monotonic for the outer-loop codegen.
2207 const bool IsMonotonic =
2208 Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
2209 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown ||
2210 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
2211 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
2212 // Emit the outer loop, which requests its work chunk [LB..UB] from
2213 // runtime and runs the inner loop to process it.
2214 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
2215 LB.getAddress(), UB.getAddress(), ST.getAddress(),
2216 IL.getAddress(), Chunk);
2218 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
2220 [&](CodeGenFunction &CGF) -> llvm::Value * {
2221 return CGF.Builder.CreateIsNotNull(
2222 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2225 EmitOMPReductionClauseFinal(S);
2226 // Emit post-update of the reduction variables if IsLastIter != 0.
2227 emitPostUpdateForReductionClause(
2228 *this, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2229 return CGF.Builder.CreateIsNotNull(
2230 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2232 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2233 if (HasLastprivateClause)
2234 EmitOMPLastprivateClauseFinal(
2235 S, isOpenMPSimdDirective(S.getDirectiveKind()),
2236 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
2238 EmitOMPLinearClauseFinal(S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2239 return CGF.Builder.CreateIsNotNull(
2240 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2242 // We're now done with the loop, so jump to the continuation block.
2244 EmitBranch(ContBlock);
2245 EmitBlock(ContBlock, true);
2248 return HasLastprivateClause;
// Codegen for '#pragma omp for': runs the worksharing-loop emitter inside an
// inlined region (with a cancel region honoring the directive's 'cancel'),
// then emits the implicit end-of-construct barrier unless 'nowait' allows
// skipping it (lastprivates still force the barrier).
2251 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
2252 bool HasLastprivates = false;
2253 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2254 PrePostActionTy &) {
2255 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
2256 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
2259 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2260 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
2264 // Emit an implicit barrier at the end.
2265 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
2266 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
// Codegen for '#pragma omp for simd': same shape as EmitOMPForDirective but
// emitted as an OMPD_simd inlined region and with no cancel region; the
// implicit barrier is still the 'for' barrier.
2270 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
2271 bool HasLastprivates = false;
2272 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2273 PrePostActionTy &) {
2274 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
2277 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2278 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2281 // Emit an implicit barrier at the end.
2282 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
2283 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
// Creates a named stack temporary of type \p Ty, optionally storing \p Init
// into it, and returns it as an lvalue. Used below for the 'sections'
// helper variables (.omp.sections.lb. etc.).
2287 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
2289 llvm::Value *Init = nullptr) {
2290 auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
2292 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
// Shared emitter for 'sections' / 'parallel sections': lowers the construct
// as a static non-chunked worksharing loop over section indices whose body is
// a switch dispatching to each section statement.
// NOTE(review): this paste is missing lines (embedded numbering skips); the
// comments below describe only the visible statements.
2296 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
2297 auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
// CS is null when the associated statement is a single section rather than a
// compound of sections; the fallbacks below handle that case.
2298 auto *CS = dyn_cast<CompoundStmt>(Stmt);
2299 bool HasLastprivates = false;
2300 auto &&CodeGen = [&S, Stmt, CS, &HasLastprivates](CodeGenFunction &CGF,
2301 PrePostActionTy &) {
2302 auto &C = CGF.CGM.getContext();
2303 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2304 // Emit helper vars inits.
// LB=0, UB=#sections-1 (or 0 for a single section), ST=1, IL=0.
2305 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
2306 CGF.Builder.getInt32(0));
2307 auto *GlobalUBVal = CS != nullptr ? CGF.Builder.getInt32(CS->size() - 1)
2308 : CGF.Builder.getInt32(0);
2310 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
2311 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
2312 CGF.Builder.getInt32(1));
2313 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
2314 CGF.Builder.getInt32(0));
// Synthesize AST nodes (IV <= UB condition and ++IV increment) so the
// generic inner-loop emitter can be reused.
2316 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
2317 OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
2318 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
2319 OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
2320 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
2321 // Generate condition for loop.
2322 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
2323 OK_Ordinary, S.getLocStart(),
2324 /*fpContractable=*/false);
2325 // Increment for loop counter.
2326 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
2328 auto BodyGen = [Stmt, CS, &S, &IV](CodeGenFunction &CGF) {
2329 // Iterate through all sections and emit a switch construct:
2332 // <SectionStmt[0]>;
2335 // case <NumSection> - 1:
2336 // <SectionStmt[<NumSection> - 1]>;
2339 // .omp.sections.exit:
2340 auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
2341 auto *SwitchStmt = CGF.Builder.CreateSwitch(
2342 CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
2343 CS == nullptr ? 1 : CS->size());
// One switch case per section statement.
2345 unsigned CaseNumber = 0;
2346 for (auto *SubStmt : CS->children()) {
2347 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
2348 CGF.EmitBlock(CaseBB);
2349 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
2350 CGF.EmitStmt(SubStmt);
2351 CGF.EmitBranch(ExitBB);
// Single-section fallback: one case 0 for the whole statement.
2355 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
2356 CGF.EmitBlock(CaseBB);
2357 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
2359 CGF.EmitBranch(ExitBB);
2361 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2364 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
2365 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
2366 // Emit implicit barrier to synchronize threads and avoid data races on
2367 // initialization of firstprivate variables and post-update of lastprivate
2369 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
2370 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2371 /*ForceSimpleCall=*/true);
2373 CGF.EmitOMPPrivateClause(S, LoopScope);
2374 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
2375 CGF.EmitOMPReductionClauseInit(S, LoopScope);
2376 (void)LoopScope.Privatize();
2378 // Emit static non-chunked loop.
2379 OpenMPScheduleTy ScheduleKind;
2380 ScheduleKind.Schedule = OMPC_SCHEDULE_static;
// Section indices are always 32-bit signed.
2381 CGF.CGM.getOpenMPRuntime().emitForStaticInit(
2382 CGF, S.getLocStart(), ScheduleKind, /*IVSize=*/32,
2383 /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(), LB.getAddress(),
2384 UB.getAddress(), ST.getAddress());
2385 // UB = min(UB, GlobalUB);
2386 auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
2387 auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
2388 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
2389 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
// IV = LB.
2391 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
2392 // while (idx <= UB) { BODY; ++idx; }
2393 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
2394 [](CodeGenFunction &) {});
2395 // Tell the runtime we are done.
2396 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2397 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
2399 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
2400 CGF.EmitOMPReductionClauseFinal(S);
2401 // Emit post-update of the reduction variables if IsLastIter != 0.
2402 emitPostUpdateForReductionClause(
2403 CGF, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2404 return CGF.Builder.CreateIsNotNull(
2405 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2408 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2409 if (HasLastprivates)
2410 CGF.EmitOMPLastprivateClauseFinal(
2411 S, /*NoFinals=*/false,
2412 CGF.Builder.CreateIsNotNull(
2413 CGF.EmitLoadOfScalar(IL, S.getLocStart())));
// Cancellation is possible for 'sections' and 'parallel sections' with a
// 'cancel' inside; set up the cancel region accordingly.
2416 bool HasCancel = false;
2417 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
2418 HasCancel = OSD->hasCancel();
2419 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
2420 HasCancel = OPSD->hasCancel();
2421 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
2422 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
2424 // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
2425 // clause. Otherwise the barrier will be generated by the codegen for the
2427 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
2428 // Emit implicit barrier to synchronize threads and avoid data races on
2429 // initialization of firstprivate variables.
2430 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
// Codegen for '#pragma omp sections': delegates to EmitSections (elided in
// this paste) and emits the implicit end barrier unless 'nowait' is present.
2435 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
2437 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2440 // Emit an implicit barrier at the end.
2441 if (!S.getSingleClause<OMPNowaitClause>()) {
2442 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
// Codegen for a lone '#pragma omp section': the captured body is emitted as
// an inlined OMPD_section region.
2447 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
2448 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2449 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2451 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2452 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
// Codegen for '#pragma omp single': collects copyprivate variables and their
// copy-assignment helper expressions, emits the region through the runtime's
// single-region entry point, and adds the implicit barrier when neither
// 'nowait' nor 'copyprivate' removes the need for it.
2456 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
2457 llvm::SmallVector<const Expr *, 8> CopyprivateVars;
2458 llvm::SmallVector<const Expr *, 8> DestExprs;
2459 llvm::SmallVector<const Expr *, 8> SrcExprs;
2460 llvm::SmallVector<const Expr *, 8> AssignmentOps;
2461 // Check if there are any 'copyprivate' clauses associated with this
2462 // 'single' construct.
2463 // Build a list of copyprivate variables along with helper expressions
2464 // (<source>, <destination>, <destination>=<source> expressions)
2465 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
2466 CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
2467 DestExprs.append(C->destination_exprs().begin(),
2468 C->destination_exprs().end());
2469 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
2470 AssignmentOps.append(C->assignment_ops().begin(),
2471 C->assignment_ops().end());
2473 // Emit code for 'single' region along with 'copyprivate' clauses
// Region body: privatize firstprivate/private variables, then emit the
// captured statement.
2474 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2476 OMPPrivateScope SingleScope(CGF);
2477 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
2478 CGF.EmitOMPPrivateClause(S, SingleScope);
2479 (void)SingleScope.Privatize();
2480 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2483 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2484 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
2485 CopyprivateVars, DestExprs,
2486 SrcExprs, AssignmentOps);
2488 // Emit an implicit barrier at the end (to avoid data race on firstprivate
2489 // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
2490 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
2491 CGM.getOpenMPRuntime().emitBarrierCall(
2492 *this, S.getLocStart(),
2493 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
// Codegen for '#pragma omp master': the captured body is emitted through the
// runtime's master-region entry point.
2497 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
2498 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2500 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2502 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2503 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
// Codegen for '#pragma omp critical': emits the body through the runtime's
// critical-region entry point, keyed by the critical section's name, and
// passes along the optional 'hint' clause expression.
2506 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
2507 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2509 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2511 Expr *Hint = nullptr;
2512 if (auto *HintClause = S.getSingleClause<OMPHintClause>())
2513 Hint = HintClause->getHint();
2514 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2515 CGM.getOpenMPRuntime().emitCriticalRegion(*this,
2516 S.getDirectiveName().getAsString(),
2517 CodeGen, S.getLocStart(), Hint);
// Codegen for the combined '#pragma omp parallel for': the worksharing loop
// (with its cancel region) becomes the body of a 'parallel' region.
2520 void CodeGenFunction::EmitOMPParallelForDirective(
2521 const OMPParallelForDirective &S) {
2522 // Emit directive as a combined directive that consists of two implicit
2523 // directives: 'parallel' with 'for' directive.
2524 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2525 OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
2526 CGF.EmitOMPWorksharingLoop(S);
2528 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
// Codegen for the combined '#pragma omp parallel for simd': the worksharing
// loop becomes the body of a 'parallel' region (inner kind OMPD_simd, no
// cancel region — 'parallel for simd' does not support cancellation).
2531 void CodeGenFunction::EmitOMPParallelForSimdDirective(
2532 const OMPParallelForSimdDirective &S) {
2533 // Emit directive as a combined directive that consists of two implicit
2534 // directives: 'parallel' with 'for' directive.
2535 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2536 CGF.EmitOMPWorksharingLoop(S);
2538 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
// Codegen for the combined '#pragma omp parallel sections': the sections
// emitter becomes the body of a 'parallel' region.
2541 void CodeGenFunction::EmitOMPParallelSectionsDirective(
2542 const OMPParallelSectionsDirective &S) {
2543 // Emit directive as a combined directive that consists of two implicit
2544 // directives: 'parallel' with 'sections' directive.
2545 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2546 CGF.EmitSections(S);
2548 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
/// Common codegen for all task-based directives ('task', 'taskloop', ...):
/// collects clause data into \p Data, emits the outlined task function via
/// \p BodyGen, then invokes \p TaskGen to emit the actual runtime call.
/// NOTE(review): the embedded source line numbers in this listing are
/// non-contiguous, so some statements (closing braces, declarations such as
/// CondConstant, else-branches) are elided from view; comments describe only
/// the visible code.
2551 void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
2552 const RegionCodeGenTy &BodyGen,
2553 const TaskGenTy &TaskGen,
2554 OMPTaskDataTy &Data) {
2555 // Emit outlined function for task construct.
2556 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
// Captured-decl params: *I is the thread id, PartId the part id (2nd param),
// TaskT the task descriptor (5th param).
2557 auto *I = CS->getCapturedDecl()->param_begin();
2558 auto *PartId = std::next(I);
2559 auto *TaskT = std::next(I, 4);
2560 // Check if the task is final
2561 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
2562 // If the condition constant folds and can be elided, try to avoid emitting
2563 // the condition and the dead arm of the if/else.
2564 auto *Cond = Clause->getCondition();
// (declaration of CondConstant sits on an elided line)
2566 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
2567 Data.Final.setInt(CondConstant)
2569 Data.Final.setPointer(EvaluateExprAsBool(Cond));
2571 // By default the task is not final.
2572 Data.Final.setInt(/*IntVal=*/false);
2574 // Check if the task has 'priority' clause.
2575 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
2576 auto *Prio = Clause->getPriority();
2577 Data.Priority.setInt(/*IntVal=*/true);
// Priority is converted to a signed 32-bit integer as the runtime expects.
2578 Data.Priority.setPointer(EmitScalarConversion(
2579 EmitScalarExpr(Prio), Prio->getType(),
2580 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
2581 Prio->getExprLoc()));
2583 // The first function argument for tasks is a thread id, the second one is a
2584 // part id (0 for tied tasks, >=0 for untied task).
// EmittedAsPrivate de-duplicates variables that appear in several clauses;
// only the first occurrence of a canonical decl is recorded.
2585 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
2586 // Get list of private variables.
2587 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
2588 auto IRef = C->varlist_begin();
2589 for (auto *IInit : C->private_copies()) {
2590 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2591 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2592 Data.PrivateVars.push_back(*IRef);
2593 Data.PrivateCopies.push_back(IInit);
2598 EmittedAsPrivate.clear();
2599 // Get list of firstprivate variables.
2600 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
2601 auto IRef = C->varlist_begin();
2602 auto IElemInitRef = C->inits().begin();
2603 for (auto *IInit : C->private_copies()) {
2604 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2605 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2606 Data.FirstprivateVars.push_back(*IRef);
2607 Data.FirstprivateCopies.push_back(IInit);
2608 Data.FirstprivateInits.push_back(*IElemInitRef);
2614 // Get list of lastprivate variables (for taskloops).
// Maps destination copy decls back to the original variable's DeclRefExpr,
// used below to privatize the destinations.
2615 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
2616 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
2617 auto IRef = C->varlist_begin();
2618 auto ID = C->destination_exprs().begin();
2619 for (auto *IInit : C->private_copies()) {
2620 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2621 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2622 Data.LastprivateVars.push_back(*IRef);
2623 Data.LastprivateCopies.push_back(IInit);
2625 LastprivateDstsOrigs.insert(
2626 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
2627 cast<DeclRefExpr>(*IRef)});
2632 // Build list of dependences.
2633 for (const auto *C : S.getClausesOfKind<OMPDependClause>())
2634 for (auto *IRef : C->varlists())
2635 Data.Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
// Body of the outlined task function: wires up private copies, then runs
// BodyGen.
2636 auto &&CodeGen = [PartId, &S, &Data, CS, &BodyGen, &LastprivateDstsOrigs](
2637 CodeGenFunction &CGF, PrePostActionTy &Action) {
2638 // Set proper addresses for generated private copies.
2639 OMPPrivateScope Scope(CGF);
2640 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
2641 !Data.LastprivateVars.empty()) {
// Param 3 of the captured decl is the runtime-provided copy function,
// param 2 the privates block pointer.
2642 auto *CopyFn = CGF.Builder.CreateLoad(
2643 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)));
2644 auto *PrivatesPtr = CGF.Builder.CreateLoad(
2645 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)));
2647 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
2648 llvm::SmallVector<llvm::Value *, 16> CallArgs;
2649 CallArgs.push_back(PrivatesPtr);
2650 for (auto *E : Data.PrivateVars) {
2651 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2652 Address PrivatePtr = CGF.CreateMemTemp(
2653 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
2654 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2655 CallArgs.push_back(PrivatePtr.getPointer());
2657 for (auto *E : Data.FirstprivateVars) {
2658 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2659 Address PrivatePtr =
2660 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
2661 ".firstpriv.ptr.addr");
2662 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2663 CallArgs.push_back(PrivatePtr.getPointer());
2665 for (auto *E : Data.LastprivateVars) {
2666 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2667 Address PrivatePtr =
2668 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
2669 ".lastpriv.ptr.addr");
2670 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2671 CallArgs.push_back(PrivatePtr.getPointer());
// CopyFn fills each *.ptr.addr temp with the address of the corresponding
// private copy inside the task's privates block.
2673 CGF.EmitRuntimeCall(CopyFn, CallArgs);
2674 for (auto &&Pair : LastprivateDstsOrigs) {
2675 auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
// (DeclRefExpr DRE construction spans elided lines here)
2677 const_cast<VarDecl *>(OrigVD),
2678 /*RefersToEnclosingVariableOrCapture=*/CGF.CapturedStmtInfo->lookup(
2680 Pair.second->getType(), VK_LValue, Pair.second->getExprLoc());
2681 Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
2682 return CGF.EmitLValue(&DRE).getAddress();
2685 for (auto &&Pair : PrivatePtrs) {
2686 Address Replacement(CGF.Builder.CreateLoad(Pair.second),
2687 CGF.getContext().getDeclAlign(Pair.first));
2688 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
2691 (void)Scope.Privatize();
// (BodyGen invocation is on elided lines)
2696 auto *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
2697 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
2698 Data.NumberOfParts);
2699 OMPLexicalScope Scope(*this, S);
2700 TaskGen(*this, OutlinedFn, Data);
/// Codegen for '#pragma omp task': captures shareds, resolves the applicable
/// 'if' clause condition, then delegates to EmitOMPTaskBasedDirective with a
/// TaskGen that emits the runtime task call.  NOTE(review): listing has
/// elided lines (e.g. the declaration of Data around line 2717).
2703 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
2704 // Emit outlined function for task construct.
2705 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2706 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
2707 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
2708 const Expr *IfCond = nullptr;
// Only an 'if' clause with no name modifier or with the 'task' modifier
// applies to this directive.
2709 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
2710 if (C->getNameModifier() == OMPD_unknown ||
2711 C->getNameModifier() == OMPD_task) {
2712 IfCond = C->getCondition();
2718 // Check if we should emit tied or untied task.
2719 Data.Tied = !S.getSingleClause<OMPUntiedClause>();
2720 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
2721 CGF.EmitStmt(CS->getCapturedStmt());
2723 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
2724 IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
2725 const OMPTaskDataTy &Data) {
2726 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getLocStart(), S, OutlinedFn,
2727 SharedsTy, CapturedStruct, IfCond,
2730 EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
/// Codegen for '#pragma omp taskyield': a single runtime call.
2733 void CodeGenFunction::EmitOMPTaskyieldDirective(
2734 const OMPTaskyieldDirective &S) {
2735 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
/// Codegen for '#pragma omp barrier': a single runtime barrier call.
2738 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
2739 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
/// Codegen for '#pragma omp taskwait': a single runtime call.
2742 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
2743 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
/// Codegen for '#pragma omp taskgroup': emits the captured body inside a
/// runtime taskgroup region, under an inlined lexical scope.
2746 void CodeGenFunction::EmitOMPTaskgroupDirective(
2747 const OMPTaskgroupDirective &S) {
2748 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2750 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2752 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2753 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
/// Codegen for '#pragma omp flush': passes the flush clause's variable list
/// (if any) to the runtime flush; an immediately-invoked lambda builds the
/// ArrayRef so the empty-clause fallback (on elided lines) stays local.
2756 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
2757 CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
2758 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) {
2759 return llvm::makeArrayRef(FlushClause->varlist_begin(),
2760 FlushClause->varlist_end());
2763 }(), S.getLocStart());
/// Emits the loop for a 'distribute' directive: precondition check, helper
/// vars, privatization, then either a static-nonchunked inner loop or the
/// dynamic/chunked outer loop.  NOTE(review): embedded line numbers are
/// non-contiguous — declarations of IL/LB/UB/ST/LoopExit and several braces
/// are on elided lines; comments describe only the visible code.
2766 void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
2767 // Emit the loop iteration variable.
2768 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
2769 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
2770 EmitVarDecl(*IVDecl);
2772 // Emit the iterations count variable.
2773 // If it is not a variable, Sema decided to calculate iterations count on each
2774 // iteration (e.g., it is foldable into a constant).
2775 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2776 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2777 // Emit calculation of the iterations count.
2778 EmitIgnoredExpr(S.getCalcLastIteration());
2781 auto &RT = CGM.getOpenMPRuntime();
2783 bool HasLastprivateClause = false;
2784 // Check pre-condition.
2786 OMPLoopScope PreInitScope(*this, S);
2787 // Skip the entire loop if we don't meet the precondition.
2788 // If the condition constant folds and can be elided, avoid emitting the
2791 llvm::BasicBlock *ContBlock = nullptr;
2792 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2796 auto *ThenBlock = createBasicBlock("omp.precond.then");
2797 ContBlock = createBasicBlock("omp.precond.end");
2798 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
2799 getProfileCount(&S));
2800 EmitBlock(ThenBlock);
2801 incrementProfileCounter(&S);
2804 // Emit 'then' code.
2806 // Emit helper vars inits.
// LB/UB/ST/IL helper variables (their declarations are on elided lines).
2808 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
2810 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
2812 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
2814 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
2816 OMPPrivateScope LoopScope(*this);
2817 if (EmitOMPFirstprivateClause(S, LoopScope)) {
2818 // Emit implicit barrier to synchronize threads and avoid data races on
2819 // initialization of firstprivate variables and post-update of
2820 // lastprivate variables.
2821 CGM.getOpenMPRuntime().emitBarrierCall(
2822 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2823 /*ForceSimpleCall=*/true);
2825 EmitOMPPrivateClause(S, LoopScope);
2826 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
2827 EmitOMPPrivateLoopCounters(S, LoopScope);
2828 (void)LoopScope.Privatize();
2830 // Detect the distribute schedule kind and chunk.
2831 llvm::Value *Chunk = nullptr;
2832 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
2833 if (auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
2834 ScheduleKind = C->getDistScheduleKind();
2835 if (const auto *Ch = C->getChunkSize()) {
2836 Chunk = EmitScalarExpr(Ch);
// Chunk is converted to the iteration variable's type for the runtime.
2837 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
2838 S.getIterationVariable()->getType(),
2842 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2843 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2845 // OpenMP [2.10.8, distribute Construct, Description]
2846 // If dist_schedule is specified, kind must be static. If specified,
2847 // iterations are divided into chunks of size chunk_size, chunks are
2848 // assigned to the teams of the league in a round-robin fashion in the
2849 // order of the team number. When no chunk_size is specified, the
2850 // iteration space is divided into chunks that are approximately equal
2851 // in size, and at most one chunk is distributed to each team of the
2852 // league. The size of the chunks is unspecified in this case.
2853 if (RT.isStaticNonchunked(ScheduleKind,
2854 /* Chunked */ Chunk != nullptr)) {
2855 RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
2856 IVSize, IVSigned, /* Ordered = */ false,
2857 IL.getAddress(), LB.getAddress(),
2858 UB.getAddress(), ST.getAddress());
2860 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2861 // UB = min(UB, GlobalUB);
2862 EmitIgnoredExpr(S.getEnsureUpperBound());
2864 EmitIgnoredExpr(S.getInit());
2865 // while (idx <= UB) { BODY; ++idx; }
2866 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
2868 [&S, LoopExit](CodeGenFunction &CGF) {
2869 CGF.EmitOMPLoopBody(S, LoopExit);
2870 CGF.EmitStopPoint(&S);
2872 [](CodeGenFunction &) {});
2873 EmitBlock(LoopExit.getBlock());
2874 // Tell the runtime we are done.
2875 RT.emitForStaticFinish(*this, S.getLocStart());
2877 // Emit the outer loop, which requests its work chunk [LB..UB] from
2878 // runtime and runs the inner loop to process it.
2879 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope,
2880 LB.getAddress(), UB.getAddress(), ST.getAddress(),
2881 IL.getAddress(), Chunk);
2884 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2885 if (HasLastprivateClause)
2886 EmitOMPLastprivateClauseFinal(
2887 S, /*NoFinals=*/false,
2888 Builder.CreateIsNotNull(
2889 EmitLoadOfScalar(IL, S.getLocStart())));
2892 // We're now done with the loop, so jump to the continuation block.
2894 EmitBranch(ContBlock);
2895 EmitBlock(ContBlock, true);
/// Codegen for '#pragma omp distribute': emits the distribute loop as an
/// inlined directive region.
2900 void CodeGenFunction::EmitOMPDistributeDirective(
2901 const OMPDistributeDirective &S) {
2902 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2903 CGF.EmitOMPDistributeLoop(S);
2905 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2906 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen,
/// Outlines the captured statement of an 'ordered' region into its own
/// function, marked NoInline.  (The 'return Fn;' is on an elided line.)
2910 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
2911 const CapturedStmt *S) {
// A fresh CodeGenFunction with suppressed context emits the outlined body.
2912 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
2913 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
2914 CGF.CapturedStmtInfo = &CapStmtInfo;
2915 auto *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
2916 Fn->addFnAttr(llvm::Attribute::NoInline);
/// Codegen for '#pragma omp ordered'.  A stand-alone ordered with depend
/// clauses only emits doacross calls; otherwise the body is emitted inside a
/// runtime ordered region (outlined when a 'simd' clause is present —
/// branch structure partly on elided lines).
2920 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
2921 if (!S.getAssociatedStmt()) {
2922 for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
2923 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
2926 auto *C = S.getSingleClause<OMPSIMDClause>();
2927 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
2928 PrePostActionTy &Action) {
2930 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2931 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
2932 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
2933 auto *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
2934 CGF.EmitNounwindRuntimeCall(OutlinedFn, CapturedVars);
2938 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2941 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
// !C: the region is a "threads" ordered region only when no simd clause.
2942 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C);
/// Converts an RValue (scalar or complex) to a scalar of \p DestType.
/// Aggregates are rejected by assertion.
2945 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
2946 QualType SrcType, QualType DestType,
2947 SourceLocation Loc) {
2948 assert(CGF.hasScalarEvaluationKind(DestType) &&
2949 "DestType must have scalar evaluation kind.");
2950 assert(!Val.isAggregate() && "Must be a scalar or complex.");
2951 return Val.isScalar()
2952 ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType,
2954 : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
/// Converts an RValue (scalar or complex) to a complex value of \p DestType.
/// A scalar becomes the real part with a zero imaginary part; a complex is
/// converted element-wise.
2958 static CodeGenFunction::ComplexPairTy
2959 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
2960 QualType DestType, SourceLocation Loc) {
2961 assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
2962 "DestType must have complex evaluation kind.");
2963 CodeGenFunction::ComplexPairTy ComplexVal;
2964 if (Val.isScalar()) {
2965 // Convert the input element to the element type of the complex.
2966 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
2967 auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
2968 DestElementType, Loc);
2969 ComplexVal = CodeGenFunction::ComplexPairTy(
2970 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
2972 assert(Val.isComplex() && "Must be a scalar or complex.");
2973 auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
2974 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
2975 ComplexVal.first = CGF.EmitScalarConversion(
2976 Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
2977 ComplexVal.second = CGF.EmitScalarConversion(
2978 Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
/// Stores \p RVal into \p LVal: plain store for a global-register lvalue,
/// otherwise an atomic store with SequentiallyConsistent (seq_cst clause)
/// or Monotonic ordering.
2983 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
2984 LValue LVal, RValue RVal) {
2985 if (LVal.isGlobalReg()) {
2986 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
2988 CGF.EmitAtomicStore(RVal, LVal,
2989 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
2990 : llvm::AtomicOrdering::Monotonic,
2991 LVal.isVolatile(), /*IsInit=*/false);
/// Stores \p RVal (of type \p RValTy) into \p LVal, converting to the
/// lvalue's evaluation kind (scalar/complex; aggregate is unreachable).
/// Case labels are on elided lines in this listing.
2995 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
2996 QualType RValTy, SourceLocation Loc) {
2997 switch (getEvaluationKind(LVal.getType())) {
2999 EmitStoreThroughLValue(RValue::get(convertToScalarValue(
3000 *this, RVal, RValTy, LVal.getType(), Loc)),
3005 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
3009 llvm_unreachable("Must be a scalar or complex.");
/// Codegen for 'omp atomic read': v = x.  Loads x atomically (or plainly if
/// it is a global register), optionally flushes for seq_cst, then stores the
/// result to v.
3013 static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
3014 const Expr *X, const Expr *V,
3015 SourceLocation Loc) {
3017 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
3018 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
3019 LValue XLValue = CGF.EmitLValue(X);
3020 LValue VLValue = CGF.EmitLValue(V);
3021 RValue Res = XLValue.isGlobalReg()
3022 ? CGF.EmitLoadOfLValue(XLValue, Loc)
3023 : CGF.EmitAtomicLoad(
3025 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3026 : llvm::AtomicOrdering::Monotonic,
3027 XLValue.isVolatile());
3028 // OpenMP, 2.12.6, atomic Construct
3029 // Any atomic construct with a seq_cst clause forces the atomically
3030 // performed operation to include an implicit flush operation without a
// (the 'if (IsSeqCst)' guard is on an elided line)
3033 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3034 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
/// Codegen for 'omp atomic write': x = expr, stored atomically, with an
/// implicit flush for seq_cst.
3037 static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
3038 const Expr *X, const Expr *E,
3039 SourceLocation Loc) {
3041 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
3042 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
3043 // OpenMP, 2.12.6, atomic Construct
3044 // Any atomic construct with a seq_cst clause forces the atomically
3045 // performed operation to include an implicit flush operation without a
// (the 'if (IsSeqCst)' guard is on an elided line)
3048 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
/// Tries to lower an atomic update to a single 'atomicrmw' instruction.
/// Returns {true, old value} on success, {false, null} when the operation
/// cannot be expressed as an integer RMW (caller falls back to cmpxchg).
/// NOTE(review): the 'case BO_*' labels of the switch are on elided lines;
/// the visible assignments map binary operators to AtomicRMWInst opcodes.
3051 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
3053 BinaryOperatorKind BO,
3054 llvm::AtomicOrdering AO,
3055 bool IsXLHSInRHSPart) {
3056 auto &Context = CGF.CGM.getContext();
3057 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
3058 // expression is simple and atomic is allowed for the given type for the
3060 if (BO == BO_Comma || !Update.isScalar() ||
3061 !Update.getScalarVal()->getType()->isIntegerTy() ||
3062 !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
3063 (Update.getScalarVal()->getType() !=
3064 X.getAddress().getElementType())) ||
3065 !X.getAddress().getElementType()->isIntegerTy() ||
3066 !Context.getTargetInfo().hasBuiltinAtomic(
3067 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
3068 return std::make_pair(false, RValue::get(nullptr));
3070 llvm::AtomicRMWInst::BinOp RMWOp;
3073 RMWOp = llvm::AtomicRMWInst::Add;
// Subtraction only works as RMW when x is on the LHS (x = x - e).
3076 if (!IsXLHSInRHSPart)
3077 return std::make_pair(false, RValue::get(nullptr));
3078 RMWOp = llvm::AtomicRMWInst::Sub;
3081 RMWOp = llvm::AtomicRMWInst::And;
3084 RMWOp = llvm::AtomicRMWInst::Or;
3087 RMWOp = llvm::AtomicRMWInst::Xor;
// '<' / '>' forms: the min/max choice depends on signedness and on which
// side of the comparison x appears.
3090 RMWOp = X.getType()->hasSignedIntegerRepresentation()
3091 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
3092 : llvm::AtomicRMWInst::Max)
3093 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
3094 : llvm::AtomicRMWInst::UMax);
3097 RMWOp = X.getType()->hasSignedIntegerRepresentation()
3098 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
3099 : llvm::AtomicRMWInst::Min)
3100 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
3101 : llvm::AtomicRMWInst::UMin);
// Plain assignment maps to an atomic exchange.
3104 RMWOp = llvm::AtomicRMWInst::Xchg;
3113 return std::make_pair(false, RValue::get(nullptr));
3131 llvm_unreachable("Unsupported atomic update operation");
3133 auto *UpdateVal = Update.getScalarVal();
// Constant updates are cast to x's element type with x's signedness.
3134 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
3135 UpdateVal = CGF.Builder.CreateIntCast(
3136 IC, X.getAddress().getElementType(),
3137 X.getType()->hasSignedIntegerRepresentation());
3139 auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
3140 return std::make_pair(true, RValue::get(Res));
/// Emits an atomic update of \p X: first tries a single atomicrmw; on
/// failure uses a plain store (global register) or a compare-and-swap loop
/// driven by \p CommonGen.  (The branch on Res.first is on elided lines.)
3143 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
3144 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3145 llvm::AtomicOrdering AO, SourceLocation Loc,
3146 const llvm::function_ref<RValue(RValue)> &CommonGen) {
3147 // Update expressions are allowed to have the following forms:
3148 // x binop= expr; -> xrval + expr;
3149 // x++, ++x -> xrval + 1;
3150 // x--, --x -> xrval - 1;
3151 // x = x binop expr; -> xrval binop expr
3152 // x = expr Op x; - > expr binop xrval;
3153 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
3155 if (X.isGlobalReg()) {
3156 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
3158 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
3160 // Perform compare-and-swap procedure.
3161 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
/// Codegen for 'omp atomic update': rebuilds the update expression \p UE
/// with x's loaded value and the pre-evaluated expr mapped in as opaque
/// values, then performs the atomic update with an implicit flush for
/// seq_cst.
3167 static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
3168 const Expr *X, const Expr *E,
3169 const Expr *UE, bool IsXLHSInRHSPart,
3170 SourceLocation Loc) {
3171 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
3172 "Update expr in 'atomic update' must be a binary operator.");
3173 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
3174 // Update expressions are allowed to have the following forms:
3175 // x binop= expr; -> xrval + expr;
3176 // x++, ++x -> xrval + 1;
3177 // x--, --x -> xrval - 1;
3178 // x = x binop expr; -> xrval binop expr
3179 // x = expr Op x; - > expr binop xrval;
3180 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
3181 LValue XLValue = CGF.EmitLValue(X);
3182 RValue ExprRValue = CGF.EmitAnyExpr(E);
3183 auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3184 : llvm::AtomicOrdering::Monotonic;
// IsXLHSInRHSPart tells which opaque operand stands for x and which for
// expr in the update expression.
3185 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
3186 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
3187 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
3188 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
// Gen computes the new value from x's old value (the 'auto &&Gen =' header
// is on an elided line).
3190 [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
3191 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3192 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
3193 return CGF.EmitAnyExpr(UE);
3195 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
3196 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
3197 // OpenMP, 2.12.6, atomic Construct
3198 // Any atomic construct with a seq_cst clause forces the atomically
3199 // performed operation to include an implicit flush operation without a
// (the 'if (IsSeqCst)' guard is on an elided line)
3202 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
/// Converts \p Value to \p ResType, dispatching on the result's evaluation
/// kind (scalar or complex; aggregate is unreachable).  Case labels of the
/// switch are on elided lines.
3205 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
3206 QualType SourceType, QualType ResType,
3207 SourceLocation Loc) {
3208 switch (CGF.getEvaluationKind(ResType)) {
3211 convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
3213 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
3214 return RValue::getComplex(Res.first, Res.second);
3219 llvm_unreachable("Must be a scalar or complex.");
/// Codegen for 'omp atomic capture': atomically updates (or overwrites) x
/// and captures x's old or new value into v, depending on IsPostfixUpdate.
/// NOTE(review): several branch headers and the NewVVal declaration are on
/// elided lines; comments describe only the visible code.
3222 static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
3223 bool IsPostfixUpdate, const Expr *V,
3224 const Expr *X, const Expr *E,
3225 const Expr *UE, bool IsXLHSInRHSPart,
3226 SourceLocation Loc) {
3227 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
3228 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
3230 LValue VLValue = CGF.EmitLValue(V);
3231 LValue XLValue = CGF.EmitLValue(X);
3232 RValue ExprRValue = CGF.EmitAnyExpr(E);
3233 auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3234 : llvm::AtomicOrdering::Monotonic;
3235 QualType NewVValType;
3237 // 'x' is updated with some additional value.
3238 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
3239 "Update expr in 'atomic capture' must be a binary operator.");
3240 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
3241 // Update expressions are allowed to have the following forms:
3242 // x binop= expr; -> xrval + expr;
3243 // x++, ++x -> xrval + 1;
3244 // x--, --x -> xrval - 1;
3245 // x = x binop expr; -> xrval binop expr
3246 // x = expr Op x; - > expr binop xrval;
3247 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
3248 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
3249 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
3250 NewVValType = XRValExpr->getType();
3251 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
// Gen also records the value to store into v: old x for postfix capture,
// the updated result otherwise.
3252 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
3253 IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
3254 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3255 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
3256 RValue Res = CGF.EmitAnyExpr(UE);
3257 NewVVal = IsPostfixUpdate ? XRValue : Res;
3260 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
3261 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
3263 // 'atomicrmw' instruction was generated.
3264 if (IsPostfixUpdate) {
3265 // Use old value from 'atomicrmw'.
3266 NewVVal = Res.second;
3268 // 'atomicrmw' does not provide new value, so evaluate it using old
3270 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3271 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
3272 NewVVal = CGF.EmitAnyExpr(UE);
3276 // 'x' is simply rewritten with some 'expr'.
3277 NewVValType = X->getType().getNonReferenceType();
3278 ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
3279 X->getType().getNonReferenceType(), Loc);
3280 auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
3284 // Try to perform atomicrmw xchg, otherwise simple exchange.
3285 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
3286 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
3289 // 'atomicrmw' instruction was generated.
3290 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
3293 // Emit post-update store to 'v' of old/new 'x' value.
3294 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
3295 // OpenMP, 2.12.6, atomic Construct
3296 // Any atomic construct with a seq_cst clause forces the atomically
3297 // performed operation to include an implicit flush operation without a
// (the 'if (IsSeqCst)' guard is on an elided line)
3300 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
/// Dispatches an 'omp atomic' region to the read/write/update/capture
/// emitter matching the leading clause kind.  The 'case OMPC_*' labels that
/// select the emitters are on elided lines; the long trailing case list
/// covers every clause that is invalid on 'atomic' and funnels into
/// llvm_unreachable.
3303 static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
3304 bool IsSeqCst, bool IsPostfixUpdate,
3305 const Expr *X, const Expr *V, const Expr *E,
3306 const Expr *UE, bool IsXLHSInRHSPart,
3307 SourceLocation Loc) {
3310 EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
3313 EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
3317 EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
3320 EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
3321 IsXLHSInRHSPart, Loc);
3325 case OMPC_num_threads:
3327 case OMPC_firstprivate:
3328 case OMPC_lastprivate:
3329 case OMPC_reduction:
3339 case OMPC_copyprivate:
3341 case OMPC_proc_bind:
3346 case OMPC_threadprivate:
3348 case OMPC_mergeable:
3353 case OMPC_num_teams:
3354 case OMPC_thread_limit:
3356 case OMPC_grainsize:
3358 case OMPC_num_tasks:
3360 case OMPC_dist_schedule:
3361 case OMPC_defaultmap:
3365 case OMPC_use_device_ptr:
3366 case OMPC_is_device_ptr:
3367 llvm_unreachable("Clause is not allowed in 'omp atomic'.");
/// Codegen for '#pragma omp atomic': determines the atomic kind from the
/// first non-seq_cst clause, enters full-expression cleanups for the
/// associated statement(s), and emits the region inline.
3371 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
3372 bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
3373 OpenMPClauseKind Kind = OMPC_unknown;
3374 for (auto *C : S.clauses()) {
3375 // Find first clause (skip seq_cst clause, if it is first).
3376 if (C->getClauseKind() != OMPC_seq_cst) {
3377 Kind = C->getClauseKind();
// (declaration of CS from the associated statement is on an elided line)
3383 S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
3384 if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
3385 enterFullExpression(EWC);
3387 // Processing for statements under 'atomic capture'.
3388 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
3389 for (const auto *C : Compound->body()) {
3390 if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
3391 enterFullExpression(EWC);
3396 auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF,
3397 PrePostActionTy &) {
3398 CGF.EmitStopPoint(CS);
3399 EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
3400 S.getV(), S.getExpr(), S.getUpdateExpr(),
3401 S.isXLHSInRHSPart(), S.getLocStart());
3403 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
3404 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
/// Emits the outlined function (and its offload ID) for a 'target' region:
/// the region body runs with first/private clauses privatized.
3407 std::pair<llvm::Function * /*OutlinedFn*/, llvm::Constant * /*OutlinedFnID*/>
3408 CodeGenFunction::EmitOMPTargetDirectiveOutlinedFunction(
3409 CodeGenModule &CGM, const OMPTargetDirective &S, StringRef ParentName,
3410 bool IsOffloadEntry) {
3411 llvm::Function *OutlinedFn = nullptr;
3412 llvm::Constant *OutlinedFnID = nullptr;
3413 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3414 OMPPrivateScope PrivateScope(CGF);
3415 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3416 CGF.EmitOMPPrivateClause(S, PrivateScope);
3417 (void)PrivateScope.Privatize();
3420 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3422 // Emit target region as a standalone region.
3423 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
3424 S, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry, CodeGen);
3425 return std::make_pair(OutlinedFn, OutlinedFnID);
/// Codegen for '#pragma omp target': generates captured vars, resolves
/// if/device clauses, decides whether this is an offload entry point,
/// computes the parent mangled name, outlines the region, and emits the
/// target call.
3428 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
3429 const CapturedStmt &CS = *cast<CapturedStmt>(S.getAssociatedStmt());
3431 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3432 GenerateOpenMPCapturedVars(CS, CapturedVars);
3434 llvm::Function *Fn = nullptr;
3435 llvm::Constant *FnID = nullptr;
3437 // Check if we have any if clause associated with the directive.
3438 const Expr *IfCond = nullptr;
3440 if (auto *C = S.getSingleClause<OMPIfClause>()) {
3441 IfCond = C->getCondition();
3444 // Check if we have any device clause associated with the directive.
3445 const Expr *Device = nullptr;
3446 if (auto *C = S.getSingleClause<OMPDeviceClause>()) {
3447 Device = C->getDevice();
3450 // Check if we have an if clause whose conditional always evaluates to false
3451 // or if we do not have any targets specified. If so the target region is not
3452 // an offload entry point.
3453 bool IsOffloadEntry = true;
// (the 'if (IfCond)' guard and declaration of Val are on elided lines)
3456 if (ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
3457 IsOffloadEntry = false;
3459 if (CGM.getLangOpts().OMPTargetTriples.empty())
3460 IsOffloadEntry = false;
3462 assert(CurFuncDecl && "No parent declaration for target region!");
3463 StringRef ParentName;
3464 // In case we have Ctors/Dtors we use the complete type variant to produce
3465 // the mangling of the device outlined kernel.
3466 if (auto *D = dyn_cast<CXXConstructorDecl>(CurFuncDecl))
3467 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
3468 else if (auto *D = dyn_cast<CXXDestructorDecl>(CurFuncDecl))
3469 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
3472 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CurFuncDecl)));
3474 std::tie(Fn, FnID) = EmitOMPTargetDirectiveOutlinedFunction(
3475 CGM, S, ParentName, IsOffloadEntry);
3476 OMPLexicalScope Scope(*this, S);
3477 CGM.getOpenMPRuntime().emitTargetCall(*this, S, Fn, FnID, IfCond, Device,
/// Shared codegen for 'teams'-family directives: outlines the region,
/// forwards num_teams/thread_limit to the runtime, and emits the teams call.
3481 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
3482 const OMPExecutableDirective &S,
3483 OpenMPDirectiveKind InnermostKind,
3484 const RegionCodeGenTy &CodeGen) {
3485 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
3486 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().
3487 emitParallelOrTeamsOutlinedFunction(S,
3488 *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
// NOTE(review): dereferencing dyn_cast's result is UB when S is not an
// OMPTeamsDirective (dyn_cast returns null); cast<> would assert instead.
// Confirm all callers pass a teams directive, or switch to cast<>.
3490 const OMPTeamsDirective &TD = *dyn_cast<OMPTeamsDirective>(&S);
3491 const OMPNumTeamsClause *NT = TD.getSingleClause<OMPNumTeamsClause>();
3492 const OMPThreadLimitClause *TL = TD.getSingleClause<OMPThreadLimitClause>();
// (the 'if (NT || TL)' guard is on an elided line)
3494 Expr *NumTeams = (NT) ? NT->getNumTeams() : nullptr;
3495 Expr *ThreadLimit = (TL) ? TL->getThreadLimit() : nullptr;
3497 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
3501 OMPLexicalScope Scope(CGF, S);
3502 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3503 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
3504 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getLocStart(), OutlinedFn,
/// Codegen for '#pragma omp teams': privatizes first/private clauses and
/// emits the body through the common teams helper.
3508 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
3509 // Emit teams region as a standalone region.
3510 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3511 OMPPrivateScope PrivateScope(CGF);
3512 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3513 CGF.EmitOMPPrivateClause(S, PrivateScope);
3514 (void)PrivateScope.Privatize();
3515 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3517 emitCommonOMPTeamsDirective(*this, S, OMPD_teams, CodeGen);
3520 void CodeGenFunction::EmitOMPCancellationPointDirective(
3521 const OMPCancellationPointDirective &S) {
3522 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
3523 S.getCancelRegion());
3526 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
3527 const Expr *IfCond = nullptr;
3528 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3529 if (C->getNameModifier() == OMPD_unknown ||
3530 C->getNameModifier() == OMPD_cancel) {
3531 IfCond = C->getCondition();
3535 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), IfCond,
3536 S.getCancelRegion());
3539 CodeGenFunction::JumpDest
3540 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
3541 if (Kind == OMPD_parallel || Kind == OMPD_task ||
3542 Kind == OMPD_target_parallel)
3544 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
3545 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
3546 Kind == OMPD_distribute_parallel_for ||
3547 Kind == OMPD_target_parallel_for);
3548 return OMPCancelStack.getExitBlock();
3551 void CodeGenFunction::EmitOMPUseDevicePtrClause(
3552 const OMPClause &NC, OMPPrivateScope &PrivateScope,
3553 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
3554 const auto &C = cast<OMPUseDevicePtrClause>(NC);
3555 auto OrigVarIt = C.varlist_begin();
3556 auto InitIt = C.inits().begin();
3557 for (auto PvtVarIt : C.private_copies()) {
3558 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
3559 auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
3560 auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
3562 // In order to identify the right initializer we need to match the
3563 // declaration used by the mapping logic. In some cases we may get
3564 // OMPCapturedExprDecl that refers to the original declaration.
3565 const ValueDecl *MatchingVD = OrigVD;
3566 if (auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
3567 // OMPCapturedExprDecl are used to privative fields of the current
3569 auto *ME = cast<MemberExpr>(OED->getInit());
3570 assert(isa<CXXThisExpr>(ME->getBase()) &&
3571 "Base should be the current struct!");
3572 MatchingVD = ME->getMemberDecl();
3575 // If we don't have information about the current list item, move on to
3577 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
3578 if (InitAddrIt == CaptureDeviceAddrMap.end())
3581 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
3582 // Initialize the temporary initialization variable with the address we
3583 // get from the runtime library. We have to cast the source address
3584 // because it is always a void *. References are materialized in the
3585 // privatization scope, so the initialization here disregards the fact
3586 // the original variable is a reference.
3588 getContext().getPointerType(OrigVD->getType().getNonReferenceType());
3589 llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
3590 Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
3591 setAddrOfLocalVar(InitVD, InitAddr);
3593 // Emit private declaration, it will be initialized by the value we
3594 // declaration we just added to the local declarations map.
3597 // The initialization variables reached its purpose in the emission
3598 // ofthe previous declaration, so we don't need it anymore.
3599 LocalDeclMap.erase(InitVD);
3601 // Return the address of the private variable.
3602 return GetAddrOfLocalVar(PvtVD);
3604 assert(IsRegistered && "firstprivate var already registered as private");
3605 // Silence the warning about unused variable.
3613 // Generate the instructions for '#pragma omp target data' directive.
3614 void CodeGenFunction::EmitOMPTargetDataDirective(
3615 const OMPTargetDataDirective &S) {
3616 CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
3618 // Create a pre/post action to signal the privatization of the device pointer.
3619 // This action can be replaced by the OpenMP runtime code generation to
3620 // deactivate privatization.
3621 bool PrivatizeDevicePointers = false;
3622 class DevicePointerPrivActionTy : public PrePostActionTy {
3623 bool &PrivatizeDevicePointers;
3626 explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
3627 : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
3628 void Enter(CodeGenFunction &CGF) override {
3629 PrivatizeDevicePointers = true;
3632 DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
3634 auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
3635 CodeGenFunction &CGF, PrePostActionTy &Action) {
3636 auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3638 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3641 // Codegen that selects wheather to generate the privatization code or not.
3642 auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
3643 &InnermostCodeGen](CodeGenFunction &CGF,
3644 PrePostActionTy &Action) {
3645 RegionCodeGenTy RCG(InnermostCodeGen);
3646 PrivatizeDevicePointers = false;
3648 // Call the pre-action to change the status of PrivatizeDevicePointers if
3652 if (PrivatizeDevicePointers) {
3653 OMPPrivateScope PrivateScope(CGF);
3654 // Emit all instances of the use_device_ptr clause.
3655 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
3656 CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
3657 Info.CaptureDeviceAddrMap);
3658 (void)PrivateScope.Privatize();
3664 // Forward the provided action to the privatization codegen.
3665 RegionCodeGenTy PrivRCG(PrivCodeGen);
3666 PrivRCG.setAction(Action);
3668 // Notwithstanding the body of the region is emitted as inlined directive,
3669 // we don't use an inline scope as changes in the references inside the
3670 // region are expected to be visible outside, so we do not privative them.
3671 OMPLexicalScope Scope(CGF, S);
3672 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
3676 RegionCodeGenTy RCG(CodeGen);
3678 // If we don't have target devices, don't bother emitting the data mapping
3680 if (CGM.getLangOpts().OMPTargetTriples.empty()) {
3685 // Check if we have any if clause associated with the directive.
3686 const Expr *IfCond = nullptr;
3687 if (auto *C = S.getSingleClause<OMPIfClause>())
3688 IfCond = C->getCondition();
3690 // Check if we have any device clause associated with the directive.
3691 const Expr *Device = nullptr;
3692 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3693 Device = C->getDevice();
3695 // Set the action to signal privatization of device pointers.
3696 RCG.setAction(PrivAction);
3698 // Emit region code.
3699 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
3703 void CodeGenFunction::EmitOMPTargetEnterDataDirective(
3704 const OMPTargetEnterDataDirective &S) {
3705 // If we don't have target devices, don't bother emitting the data mapping
3707 if (CGM.getLangOpts().OMPTargetTriples.empty())
3710 // Check if we have any if clause associated with the directive.
3711 const Expr *IfCond = nullptr;
3712 if (auto *C = S.getSingleClause<OMPIfClause>())
3713 IfCond = C->getCondition();
3715 // Check if we have any device clause associated with the directive.
3716 const Expr *Device = nullptr;
3717 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3718 Device = C->getDevice();
3720 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
3723 void CodeGenFunction::EmitOMPTargetExitDataDirective(
3724 const OMPTargetExitDataDirective &S) {
3725 // If we don't have target devices, don't bother emitting the data mapping
3727 if (CGM.getLangOpts().OMPTargetTriples.empty())
3730 // Check if we have any if clause associated with the directive.
3731 const Expr *IfCond = nullptr;
3732 if (auto *C = S.getSingleClause<OMPIfClause>())
3733 IfCond = C->getCondition();
3735 // Check if we have any device clause associated with the directive.
3736 const Expr *Device = nullptr;
3737 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3738 Device = C->getDevice();
3740 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
3743 void CodeGenFunction::EmitOMPTargetParallelDirective(
3744 const OMPTargetParallelDirective &S) {
3745 // TODO: codegen for target parallel.
3748 void CodeGenFunction::EmitOMPTargetParallelForDirective(
3749 const OMPTargetParallelForDirective &S) {
3750 // TODO: codegen for target parallel for.
3753 /// Emit a helper variable and return corresponding lvalue.
3754 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
3755 const ImplicitParamDecl *PVD,
3756 CodeGenFunction::OMPPrivateScope &Privates) {
3757 auto *VDecl = cast<VarDecl>(Helper->getDecl());
3758 Privates.addPrivate(
3759 VDecl, [&CGF, PVD]() -> Address { return CGF.GetAddrOfLocalVar(PVD); });
3762 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
3763 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
3764 // Emit outlined function for task construct.
3765 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
3766 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
3767 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
3768 const Expr *IfCond = nullptr;
3769 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3770 if (C->getNameModifier() == OMPD_unknown ||
3771 C->getNameModifier() == OMPD_taskloop) {
3772 IfCond = C->getCondition();
3778 // Check if taskloop must be emitted without taskgroup.
3779 Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
3780 // TODO: Check if we should emit tied or untied task.
3782 // Set scheduling for taskloop
3783 if (const auto* Clause = S.getSingleClause<OMPGrainsizeClause>()) {
3785 Data.Schedule.setInt(/*IntVal=*/false);
3786 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
3787 } else if (const auto* Clause = S.getSingleClause<OMPNumTasksClause>()) {
3789 Data.Schedule.setInt(/*IntVal=*/true);
3790 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
3793 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
3795 // for (IV in 0..LastIteration) BODY;
3796 // <Final counter/linear vars updates>;
3800 // Emit: if (PreCond) - begin.
3801 // If the condition constant folds and can be elided, avoid emitting the
3804 llvm::BasicBlock *ContBlock = nullptr;
3805 OMPLoopScope PreInitScope(CGF, S);
3806 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
3810 auto *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
3811 ContBlock = CGF.createBasicBlock("taskloop.if.end");
3812 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
3813 CGF.getProfileCount(&S));
3814 CGF.EmitBlock(ThenBlock);
3815 CGF.incrementProfileCounter(&S);
3818 if (isOpenMPSimdDirective(S.getDirectiveKind()))
3819 CGF.EmitOMPSimdInit(S);
3821 OMPPrivateScope LoopScope(CGF);
3822 // Emit helper vars inits.
3823 enum { LowerBound = 5, UpperBound, Stride, LastIter };
3824 auto *I = CS->getCapturedDecl()->param_begin();
3825 auto *LBP = std::next(I, LowerBound);
3826 auto *UBP = std::next(I, UpperBound);
3827 auto *STP = std::next(I, Stride);
3828 auto *LIP = std::next(I, LastIter);
3829 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
3831 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
3833 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
3834 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
3836 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
3837 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
3838 (void)LoopScope.Privatize();
3839 // Emit the loop iteration variable.
3840 const Expr *IVExpr = S.getIterationVariable();
3841 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
3842 CGF.EmitVarDecl(*IVDecl);
3843 CGF.EmitIgnoredExpr(S.getInit());
3845 // Emit the iterations count variable.
3846 // If it is not a variable, Sema decided to calculate iterations count on
3847 // each iteration (e.g., it is foldable into a constant).
3848 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
3849 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
3850 // Emit calculation of the iterations count.
3851 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
3854 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
3856 [&S](CodeGenFunction &CGF) {
3857 CGF.EmitOMPLoopBody(S, JumpDest());
3858 CGF.EmitStopPoint(&S);
3860 [](CodeGenFunction &) {});
3861 // Emit: if (PreCond) - end.
3863 CGF.EmitBranch(ContBlock);
3864 CGF.EmitBlock(ContBlock, true);
3866 // Emit final copy of the lastprivate variables if IsLastIter != 0.
3867 if (HasLastprivateClause) {
3868 CGF.EmitOMPLastprivateClauseFinal(
3869 S, isOpenMPSimdDirective(S.getDirectiveKind()),
3870 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
3871 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
3872 (*LIP)->getType(), S.getLocStart())));
3875 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
3876 IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
3877 const OMPTaskDataTy &Data) {
3878 auto &&CodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &) {
3879 OMPLoopScope PreInitScope(CGF, S);
3880 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getLocStart(), S,
3881 OutlinedFn, SharedsTy,
3882 CapturedStruct, IfCond, Data);
3884 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
3887 EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
3890 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
3891 EmitOMPTaskLoopBasedDirective(S);
3894 void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
3895 const OMPTaskLoopSimdDirective &S) {
3896 EmitOMPTaskLoopBasedDirective(S);
3899 // Generate the instructions for '#pragma omp target update' directive.
3900 void CodeGenFunction::EmitOMPTargetUpdateDirective(
3901 const OMPTargetUpdateDirective &S) {
3902 // If we don't have target devices, don't bother emitting the data mapping
3904 if (CGM.getLangOpts().OMPTargetTriples.empty())
3907 // Check if we have any if clause associated with the directive.
3908 const Expr *IfCond = nullptr;
3909 if (auto *C = S.getSingleClause<OMPIfClause>())
3910 IfCond = C->getCondition();
3912 // Check if we have any device clause associated with the directive.
3913 const Expr *Device = nullptr;
3914 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3915 Device = C->getDevice();
3917 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);