1 //===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This contains code to emit OpenMP nodes as LLVM code.
12 //===----------------------------------------------------------------------===//
14 #include "CGCleanup.h"
15 #include "CGOpenMPRuntime.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "TargetInfo.h"
19 #include "clang/AST/Stmt.h"
20 #include "clang/AST/StmtOpenMP.h"
21 #include "clang/AST/DeclOpenMP.h"
22 #include "llvm/IR/CallSite.h"
23 using namespace clang;
24 using namespace CodeGen;
27 /// Lexical scope for OpenMP executable constructs, that handles correct codegen
28 /// for captured expressions.
// NOTE(review): the embedded line numbers in this listing skip values (e.g.
// 36 -> 38, 40 -> 47, 52 -> 56), so several physical lines of this class are
// missing from the excerpt (closing braces, access specifiers, DRE location
// argument, etc.). Comments below describe only the code that is visible.
29 class OMPLexicalScope final : public CodeGenFunction::LexicalScope {
// Emit the pre-init statements attached to the directive's clauses so that
// helper variables for captured expressions exist before the construct body.
30 void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
31 for (const auto *C : S.clauses()) {
32 if (auto *CPI = OMPClauseWithPreInit::get(C)) {
33 if (auto *PreInit = cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
34 for (const auto *I : PreInit->decls()) {
// Declarations without the "no init" capture attribute are emitted fully;
// otherwise (per the lines visible below) only the alloca + cleanups are
// emitted, skipping the initializer.
35 if (!I->hasAttr<OMPCaptureNoInitAttr>())
36 CGF.EmitVarDecl(cast<VarDecl>(*I));
38 CodeGenFunction::AutoVarEmission Emission =
39 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
40 CGF.EmitAutoVarCleanups(Emission);
// Privatization scope used to remap captured variables to the addresses of
// their enclosing-function copies when the construct is emitted inline.
47 CodeGenFunction::OMPPrivateScope InlinedShareds;
// True when \p VD is captured by an enclosing lambda, captured statement, or
// block, i.e. a DeclRefExpr for it must be marked as referring to an
// enclosing variable or capture.
49 static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
50 return CGF.LambdaCaptureFields.lookup(VD) ||
51 (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
52 (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
// Constructor: emits clause pre-init statements, then (when an associated
// captured statement exists) privatizes every variable captured by value or
// by reference so later EmitLValue calls resolve to the proper address.
56 OMPLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S,
57 bool AsInlined = false)
58 : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
60 emitPreInitStmt(CGF, S);
62 if (S.hasAssociatedStmt()) {
63 auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
64 for (auto &C : CS->captures()) {
65 if (C.capturesVariable() || C.capturesVariableByCopy()) {
66 auto *VD = C.getCapturedVar();
// Build a reference to the captured variable in the enclosing context; the
// second argument flags it as an enclosing-capture reference when needed.
67 DeclRefExpr DRE(const_cast<VarDecl *>(VD),
68 isCapturedVar(CGF, VD) ||
69 (CGF.CapturedStmtInfo &&
70 InlinedShareds.isGlobalVarCaptured(VD)),
71 VD->getType().getNonReferenceType(), VK_LValue,
73 InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
74 return CGF.EmitLValue(&DRE).getAddress();
78 (void)InlinedShareds.Privatize();
84 /// Private scope for OpenMP loop-based directives, that supports capturing
85 /// of used expression from loop statement.
// NOTE(review): embedded numbering skips 91 -> 97 and ends at 99, so the
// closing braces / access specifier of this class are missing from the
// excerpt.
86 class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
// Emit the loop directive's pre-init declarations (helper variables computed
// from the loop statement) before the loop itself is emitted.
87 void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
88 if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
89 if (auto *PreInits = cast_or_null<DeclStmt>(LD->getPreInits())) {
90 for (const auto *I : PreInits->decls())
91 CGF.EmitVarDecl(cast<VarDecl>(*I));
97 OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
98 : CodeGenFunction::RunCleanupsScope(CGF) {
99 emitPreInitStmt(CGF, S);
// Compute the size in bytes of type \p Ty as an llvm::Value. For variable
// length array (VLA) types the size is the product of the runtime VLA
// dimension sizes and the element size; for complete types it is a constant.
// NOTE(review): numbering skips 114 -> 116 and 119 -> 121, so the loop's
// closing brace and the else-branch structure are missing from this excerpt.
105 llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
106 auto &C = getContext();
107 llvm::Value *Size = nullptr;
108 auto SizeInChars = C.getTypeSizeInChars(Ty);
109 if (SizeInChars.isZero()) {
110 // getTypeSizeInChars() returns 0 for a VLA.
111 while (auto *VAT = C.getAsVariableArrayType(Ty)) {
112 llvm::Value *ArraySize;
// getVLASize yields the runtime element count and strips one VLA level off Ty.
113 std::tie(ArraySize, Ty) = getVLASize(VAT);
// Accumulate the product of all VLA dimensions (NUW: sizes are non-negative).
114 Size = Size ? Builder.CreateNUWMul(Size, ArraySize) : ArraySize;
116 SizeInChars = C.getTypeSizeInChars(Ty);
// A zero-sized base element makes the whole VLA zero-sized.
117 if (SizeInChars.isZero())
118 return llvm::ConstantInt::get(SizeTy, /*V=*/0);
// Multiply the dimension product by the (constant) base element size.
119 Size = Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
// Non-VLA path (per line 121): the size is simply the constant byte size.
121 Size = CGM.getSize(SizeInChars);
// Collect, in \p CapturedVars, the values that must be passed to the outlined
// function generated for captured statement \p S: VLA sizes as-is, 'this' as
// the current CXXThisValue, by-copy captures as loaded values (possibly
// bit-casted through a uintptr temporary), and by-reference captures as
// pointers.
// NOTE(review): embedded numbering skips several lines (139 -> 141, 162 ->
// 164 -> 166, etc.), so parts of the by-copy branch and the trailing
// by-reference else-branch are missing from this excerpt.
125 void CodeGenFunction::GenerateOpenMPCapturedVars(
126 const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
127 const RecordDecl *RD = S.getCapturedRecordDecl();
128 auto CurField = RD->field_begin();
129 auto CurCap = S.captures().begin();
// Walk capture initializers, record fields, and captures in lock step.
130 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
131 E = S.capture_init_end();
132 I != E; ++I, ++CurField, ++CurCap) {
133 if (CurField->hasCapturedVLAType()) {
// VLA dimension: pass the previously computed size value directly.
134 auto VAT = CurField->getCapturedVLAType();
135 auto *Val = VLASizeMap[VAT->getSizeExpr()];
136 CapturedVars.push_back(Val);
137 } else if (CurCap->capturesThis())
138 CapturedVars.push_back(CXXThisValue);
139 else if (CurCap->capturesVariableByCopy()) {
141 EmitLoadOfLValue(EmitLValue(*I), SourceLocation()).getScalarVal();
143 // If the field is not a pointer, we need to save the actual value
144 // and load it as a void pointer.
145 if (!CurField->getType()->isAnyPointerType()) {
146 auto &Ctx = getContext();
// Temporary of uintptr type used to reinterpret the value as pointer-sized.
147 auto DstAddr = CreateMemTemp(
148 Ctx.getUIntPtrType(),
149 Twine(CurCap->getCapturedVar()->getName()) + ".casted");
150 LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
// View the same temporary through a pointer to the field's real type.
152 auto *SrcAddrVal = EmitScalarConversion(
153 DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
154 Ctx.getPointerType(CurField->getType()), SourceLocation());
156 MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());
158 // Store the value using the source type pointer.
159 EmitStoreThroughLValue(RValue::get(CV), SrcLV);
161 // Load the value using the destination type pointer.
162 CV = EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
164 CapturedVars.push_back(CV);
// By-reference capture (per line 166's assertion): pass the address.
166 assert(CurCap->capturesVariable() && "Expected capture by reference.");
167 CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
// Reinterpret an address holding a uintptr-typed value as an address of type
// \p DstType; when \p isReferenceType is set, additionally materialize a
// temporary holding the address so callers receive the address OF the
// reference rather than the referenced value.
// NOTE(review): numbering skips 179 -> 181 and ends at 191, so the TmpAddr
// declaration and the final return statement are missing from this excerpt.
172 static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
173 StringRef Name, LValue AddrLV,
174 bool isReferenceType = false) {
175 ASTContext &Ctx = CGF.getContext();
// Convert the raw pointer value from uintptr to a pointer-to-DstType.
177 auto *CastedPtr = CGF.EmitScalarConversion(
178 AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
179 Ctx.getPointerType(DstType), SourceLocation());
181 CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
184 // If we are dealing with references we need to return the address of the
185 // reference instead of the reference of the value.
186 if (isReferenceType) {
187 QualType RefType = Ctx.getLValueReferenceType(DstType);
188 auto *RefVal = TmpAddr.getPointer();
// Fresh temporary named "<Name>.ref" that stores the referenced address.
189 TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
190 auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
191 CGF.EmitStoreThroughLValue(RValue::get(RefVal), TmpLVal, /*isInit*/ true);
// Build the outlined LLVM function for captured statement \p S: construct its
// argument list from the capture record's fields (uintptr for by-copy
// non-pointer captures and VLA sizes), create and start the function, remap
// each argument back to the captured entity (VLA size map entries, local
// variable addresses, or CXXThisValue), emit the body, and finish.
// NOTE(review): the function's return-type line and many interior lines are
// missing from this excerpt (embedded numbering skips, e.g. 198 follows 191,
// 263 follows 261); comments describe only what is visible.
198 CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
201 "CapturedStmtInfo should be set when generating the captured function");
202 const CapturedDecl *CD = S.getCapturedDecl();
203 const RecordDecl *RD = S.getCapturedRecordDecl();
204 assert(CD->hasBody() && "missing CapturedDecl body");
206 // Build the argument list.
207 ASTContext &Ctx = CGM.getContext();
208 FunctionArgList Args;
// Parameters declared before the context parameter are kept as-is.
209 Args.append(CD->param_begin(),
210 std::next(CD->param_begin(), CD->getContextParamPosition()));
211 auto I = S.captures().begin();
212 for (auto *FD : RD->fields()) {
213 QualType ArgType = FD->getType();
214 IdentifierInfo *II = nullptr;
215 VarDecl *CapVar = nullptr;
217 // If this is a capture by copy and the type is not a pointer, the outlined
218 // function argument type should be uintptr and the value properly casted to
219 // uintptr. This is necessary given that the runtime library is only able to
220 // deal with pointers. We can pass in the same way the VLA type sizes to the
221 // outlined function.
222 if ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
223 I->capturesVariableArrayType())
224 ArgType = Ctx.getUIntPtrType();
// Pick a name for the implicit parameter: the variable's own identifier,
// "this", or "vla" for array-type size captures.
226 if (I->capturesVariable() || I->capturesVariableByCopy()) {
227 CapVar = I->getCapturedVar();
228 II = CapVar->getIdentifier();
229 } else if (I->capturesThis())
230 II = &getContext().Idents.get("this");
232 assert(I->capturesVariableArrayType());
233 II = &getContext().Idents.get("vla");
// Variably-modified types are canonicalized; references to them are rebuilt
// as non-spelled-as-lvalue references.
235 if (ArgType->isVariablyModifiedType()) {
236 bool IsReference = ArgType->isLValueReferenceType();
238 getContext().getCanonicalParamType(ArgType.getNonReferenceType());
239 if (IsReference && !ArgType->isPointerType()) {
240 ArgType = getContext().getLValueReferenceType(
241 ArgType, /*SpelledAsLValue=*/false);
244 Args.push_back(ImplicitParamDecl::Create(getContext(), nullptr,
245 FD->getLocation(), II, ArgType));
// Parameters declared after the context parameter are appended afterwards.
249 std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
252 // Create the function declaration.
253 FunctionType::ExtInfo ExtInfo;
254 const CGFunctionInfo &FuncInfo =
255 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
256 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
// Internal linkage: the outlined helper is only referenced from this module.
258 llvm::Function *F = llvm::Function::Create(
259 FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
260 CapturedStmtInfo->getHelperName(), &CGM.getModule());
261 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
263 F->addFnAttr(llvm::Attribute::NoUnwind);
265 // Generate the function.
266 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
267 CD->getBody()->getLocStart());
268 unsigned Cnt = CD->getContextParamPosition();
269 I = S.captures().begin();
// Second pass: wire each incoming argument back to the entity it captures.
270 for (auto *FD : RD->fields()) {
271 // If we are capturing a pointer by copy we don't need to do anything, just
272 // use the value that we get from the arguments.
273 if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
274 const VarDecl *CurVD = I->getCapturedVar();
275 Address LocalAddr = GetAddrOfLocalVar(Args[Cnt]);
276 // If the variable is a reference we need to materialize it here.
277 if (CurVD->getType()->isReferenceType()) {
278 Address RefAddr = CreateMemTemp(CurVD->getType(), getPointerAlign(),
279 ".materialized_ref");
280 EmitStoreOfScalar(LocalAddr.getPointer(), RefAddr, /*Volatile=*/false,
284 setAddrOfLocalVar(CurVD, LocalAddr);
291 MakeAddrLValue(GetAddrOfLocalVar(Args[Cnt]), Args[Cnt]->getType(),
292 AlignmentSource::Decl);
293 if (FD->hasCapturedVLAType()) {
// VLA size argument: cast it back from uintptr and record it in VLASizeMap
// so later uses of the VLA type find its runtime size.
294 LValue CastedArgLVal =
295 MakeAddrLValue(castValueFromUintptr(*this, FD->getType(),
296 Args[Cnt]->getName(), ArgLVal),
297 FD->getType(), AlignmentSource::Decl);
299 EmitLoadOfLValue(CastedArgLVal, SourceLocation()).getScalarVal();
300 auto VAT = FD->getCapturedVLAType();
301 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
302 } else if (I->capturesVariable()) {
// By-reference capture: load through the reference/pointer as needed so the
// local-var map records the address of the underlying object.
303 auto *Var = I->getCapturedVar();
304 QualType VarTy = Var->getType();
305 Address ArgAddr = ArgLVal.getAddress();
306 if (!VarTy->isReferenceType()) {
307 if (ArgLVal.getType()->isLValueReferenceType()) {
308 ArgAddr = EmitLoadOfReference(
309 ArgAddr, ArgLVal.getType()->castAs<ReferenceType>());
310 } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
311 assert(ArgLVal.getType()->isPointerType());
312 ArgAddr = EmitLoadOfPointer(
313 ArgAddr, ArgLVal.getType()->castAs<PointerType>());
317 Var, Address(ArgAddr.getPointer(), getContext().getDeclAlign(Var)));
318 } else if (I->capturesVariableByCopy()) {
// Non-pointer by-copy capture: reinterpret the uintptr argument back to the
// field's real type (materializing a reference temp when needed).
319 assert(!FD->getType()->isAnyPointerType() &&
320 "Not expecting a captured pointer.");
321 auto *Var = I->getCapturedVar();
322 QualType VarTy = Var->getType();
323 setAddrOfLocalVar(Var, castValueFromUintptr(*this, FD->getType(),
324 Args[Cnt]->getName(), ArgLVal,
325 VarTy->isReferenceType()));
327 // If 'this' is captured, load it into CXXThisValue.
328 assert(I->capturesThis());
330 EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation()).getScalarVal();
// Emit the captured body and close out the function.
336 PGO.assignRegionCounters(GlobalDecl(CD), F);
337 CapturedStmtInfo->EmitBody(*this, CD->getBody());
338 FinishFunction(CD->getBodyRBrace());
343 //===----------------------------------------------------------------------===//
344 // OpenMP Directive Emission
345 //===----------------------------------------------------------------------===//
// Copy an array element-by-element: computes the element count, then emits a
// while-style loop with PHI nodes walking source and destination in parallel,
// invoking \p CopyGen on each (dest, src) element pair.
// NOTE(review): embedded numbering skips lines throughout (e.g. 349 -> 352,
// 355 -> 357, 386 -> 389), so some declarations (ElementTy, IsEmpty, Done,
// EmitBlock calls) are missing from this excerpt.
346 void CodeGenFunction::EmitOMPAggregateAssign(
347 Address DestAddr, Address SrcAddr, QualType OriginalType,
348 const llvm::function_ref<void(Address, Address)> &CopyGen) {
349 // Perform element-by-element initialization.
352 // Drill down to the base element type on both arrays.
353 auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
354 auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
355 SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
357 auto SrcBegin = SrcAddr.getPointer();
358 auto DestBegin = DestAddr.getPointer();
359 // Cast from pointer to array type to pointer to single element.
360 auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
361 // The basic structure here is a while-do loop.
362 auto BodyBB = createBasicBlock("omp.arraycpy.body");
363 auto DoneBB = createBasicBlock("omp.arraycpy.done");
// Skip the loop entirely for zero-length arrays.
365 Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
366 Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
368 // Enter the loop body, making that address the current address.
369 auto EntryBB = Builder.GetInsertBlock();
372 CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
// PHI for the current source element pointer (entry value: array start).
374 llvm::PHINode *SrcElementPHI =
375 Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
376 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
377 Address SrcElementCurrent =
378 Address(SrcElementPHI,
379 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// PHI for the current destination element pointer.
381 llvm::PHINode *DestElementPHI =
382 Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
383 DestElementPHI->addIncoming(DestBegin, EntryBB);
384 Address DestElementCurrent =
385 Address(DestElementPHI,
386 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Let the caller-provided generator copy/construct this element.
389 CopyGen(DestElementCurrent, SrcElementCurrent);
391 // Shift the address forward by one element.
392 auto DestElementNext = Builder.CreateConstGEP1_32(
393 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
394 auto SrcElementNext = Builder.CreateConstGEP1_32(
395 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
396 // Check whether we've reached the end.
398 Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
399 Builder.CreateCondBr(Done, DoneBB, BodyBB);
// Back-edge incoming values for both PHIs come from the block that emitted
// CopyGen (which may have created new blocks), hence GetInsertBlock().
400 DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
401 SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());
404 EmitBlock(DoneBB, /*IsFinished=*/true);
407 /// Check if the combiner is a call to UDR combiner and if it is so return the
408 /// UDR decl used for reduction.
// NOTE(review): numbering ends at 415 with no visible return statements, so
// the "return DRD;" / "return nullptr;" tail of this function is missing
// from the excerpt.
409 static const OMPDeclareReductionDecl *
410 getReductionInit(const Expr *ReductionOp) {
// Pattern-match: CallExpr whose callee is an OpaqueValueExpr wrapping a
// DeclRefExpr to an OMPDeclareReductionDecl.
411 if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
412 if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
414 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
415 if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
// Initialize a private reduction copy using a user-defined reduction (UDR)
// initializer when one exists; otherwise emit a null-constant initializer of
// type \p Ty (per the else-branch lines visible below) and store it into
// \p Private through an opaque-value expression.
// NOTE(review): embedded numbering skips lines (e.g. 442 -> 444, switch-case
// labels around 450-462), so parts of the else branch and the switch cases
// are missing from this excerpt.
420 static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
421 const OMPDeclareReductionDecl *DRD,
423 Address Private, Address Original,
425 if (DRD->getInitializer()) {
// UDR path: fetch the (combiner, initializer) function pair and evaluate the
// initializer call with LHS/RHS remapped to the private/original addresses.
426 std::pair<llvm::Function *, llvm::Function *> Reduction =
427 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
428 auto *CE = cast<CallExpr>(InitOp);
429 auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
430 const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
431 const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
432 auto *LHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
433 auto *RHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
434 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
435 PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
436 [=]() -> Address { return Private; });
437 PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
438 [=]() -> Address { return Original; });
439 (void)PrivateScope.Privatize();
// Map the opaque callee to the initializer function (Reduction.second) and
// emit the remapped call for its side effects only.
440 RValue Func = RValue::get(Reduction.second);
441 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
442 CGF.EmitIgnoredExpr(InitOp);
// No UDR initializer: build a private global holding the null constant and
// load it according to the type's evaluation kind.
444 llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
445 auto *GV = new llvm::GlobalVariable(
446 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
447 llvm::GlobalValue::PrivateLinkage, Init, ".init");
448 LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
450 switch (CGF.getEvaluationKind(Ty)) {
452 InitRVal = CGF.EmitLoadOfLValue(LV, SourceLocation());
456 RValue::getComplex(CGF.EmitLoadOfComplex(LV, SourceLocation()));
459 InitRVal = RValue::getAggregate(LV.getAddress());
// Route the loaded init value into Private via a synthetic opaque rvalue.
462 OpaqueValueExpr OVE(SourceLocation(), Ty, VK_RValue);
463 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
464 CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
465 /*IsInitializer=*/false);
469 /// \brief Emit initialization of arrays of complex types.
470 /// \param DestAddr Address of the array.
471 /// \param Type Type of array.
472 /// \param Init Initial expression of array.
473 /// \param SrcAddr Address of the original array.
// NOTE(review): embedded numbering skips lines throughout (483 -> 485,
// 488 -> 490, 505 -> 507, 534 -> 538, 541 -> 544), so several declarations
// (ElementTy, IsEmpty, Done, the DRD source-address guard, EmitBlock calls)
// are missing from this excerpt.
474 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
475 QualType Type, const Expr *Init,
476 Address SrcAddr = Address::invalid()) {
// A non-null DRD means Init is a user-defined-reduction initializer call.
477 auto *DRD = getReductionInit(Init);
478 // Perform element-by-element initialization.
481 // Drill down to the base element type on both arrays.
482 auto ArrayTy = Type->getAsArrayTypeUnsafe();
483 auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
485 CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
488 CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
// SrcBegin stays null when no source array was supplied.
490 llvm::Value *SrcBegin = nullptr;
492 SrcBegin = SrcAddr.getPointer();
493 auto DestBegin = DestAddr.getPointer();
494 // Cast from pointer to array type to pointer to single element.
495 auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
496 // The basic structure here is a while-do loop.
497 auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
498 auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
// Skip the loop entirely for zero-length arrays.
500 CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
501 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
503 // Enter the loop body, making that address the current address.
504 auto EntryBB = CGF.Builder.GetInsertBlock();
505 CGF.EmitBlock(BodyBB);
507 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
// Source-side PHI is only created when a source array exists.
509 llvm::PHINode *SrcElementPHI = nullptr;
510 Address SrcElementCurrent = Address::invalid();
512 SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
513 "omp.arraycpy.srcElementPast");
514 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
516 Address(SrcElementPHI,
517 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Destination-side PHI walking the array being initialized.
519 llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
520 DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
521 DestElementPHI->addIncoming(DestBegin, EntryBB);
522 Address DestElementCurrent =
523 Address(DestElementPHI,
524 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Per-element init: UDR initializer when available, otherwise evaluate Init
// directly into the destination element. Cleanups are scoped per element.
528 CodeGenFunction::RunCleanupsScope InitScope(CGF);
529 if (DRD && (DRD->getInitializer() || !Init)) {
530 emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
531 SrcElementCurrent, ElementTy);
533 CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
534 /*IsInitializer=*/false);
538 // Shift the address forward by one element.
539 auto SrcElementNext = CGF.Builder.CreateConstGEP1_32(
540 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
541 SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
544 // Shift the address forward by one element.
545 auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
546 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
547 // Check whether we've reached the end.
549 CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
550 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
551 DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
554 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
// Copy \p SrcAddr to \p DestAddr for an OpenMP data-sharing clause. Simple
// array assignments become a memcpy-style aggregate assign; arrays with
// non-trivial copy expressions are copied element-by-element with the source
// and destination pseudo-variables remapped per element; scalars/records
// evaluate \p Copy once with SrcVD/DestVD remapped to the given addresses.
// NOTE(review): embedded numbering skips lines (564 -> 566, 581 -> 585), so
// some closing braces and the else-structure are missing from this excerpt.
557 void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
558 Address SrcAddr, const VarDecl *DestVD,
559 const VarDecl *SrcVD, const Expr *Copy) {
560 if (OriginalType->isArrayType()) {
561 auto *BO = dyn_cast<BinaryOperator>(Copy);
562 if (BO && BO->getOpcode() == BO_Assign) {
563 // Perform simple memcpy for simple copying.
564 EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
566 // For arrays with complex element types perform element by element
568 EmitOMPAggregateAssign(
569 DestAddr, SrcAddr, OriginalType,
570 [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
571 // Working with the single array element, so have to remap
572 // destination and source variables to corresponding array
574 CodeGenFunction::OMPPrivateScope Remap(*this);
575 Remap.addPrivate(DestVD, [DestElement]() -> Address {
579 SrcVD, [SrcElement]() -> Address { return SrcElement; });
580 (void)Remap.Privatize();
// Evaluate the copy expression with the per-element remapping in place.
581 EmitIgnoredExpr(Copy);
585 // Remap pseudo source variable to private copy.
586 CodeGenFunction::OMPPrivateScope Remap(*this);
587 Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
588 Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
589 (void)Remap.Privatize();
590 // Emit copying of the whole variable.
591 EmitIgnoredExpr(Copy);
// Emit private copies for all 'firstprivate' clause variables of directive
// \p D, initializing each from the original variable. Returns true when at
// least one firstprivate variable is also lastprivate (callers must then emit
// the lastprivate finalization). Deduplicates variables seen in multiple
// clauses via EmittedAsFirstprivate.
// NOTE(review): embedded numbering skips lines throughout (597 -> 599,
// 619 -> 624, 670 -> 672, 678 -> 685), so early-returns, some closing braces
// and (void)-silencing lines are missing from this excerpt.
595 bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
596 OMPPrivateScope &PrivateScope) {
597 if (!HaveInsertPoint())
// Collect canonical decls of all lastprivate variables first, so that a
// variable that is both firstprivate and lastprivate can be detected.
599 bool FirstprivateIsLastprivate = false;
600 llvm::DenseSet<const VarDecl *> Lastprivates;
601 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
602 for (const auto *D : C->varlists())
604 cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
606 llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
607 CGCapturedStmtInfo CapturesInfo(cast<CapturedStmt>(*D.getAssociatedStmt()));
608 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
609 auto IRef = C->varlist_begin();
610 auto InitsRef = C->inits().begin();
611 for (auto IInit : C->private_copies()) {
612 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
613 bool ThisFirstprivateIsLastprivate =
614 Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
615 auto *CapFD = CapturesInfo.lookup(OrigVD);
616 auto *FD = CapturedStmtInfo->lookup(OrigVD);
// If the variable was already captured by value (and is not also
// lastprivate or a reference), the capture itself serves as the private
// copy — just record it as emitted.
617 if (!ThisFirstprivateIsLastprivate && FD && (FD == CapFD) &&
618 !FD->getType()->isReferenceType()) {
619 EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
624 FirstprivateIsLastprivate =
625 FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
626 if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
627 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
628 auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
// Address of the original variable in the enclosing context.
630 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
631 /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
632 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
633 Address OriginalAddr = EmitLValue(&DRE).getAddress();
634 QualType Type = VD->getType();
635 if (Type->isArrayType()) {
636 // Emit VarDecl with copy init for arrays.
637 // Get the address of the original variable captured in current
639 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
640 auto Emission = EmitAutoVarAlloca(*VD);
641 auto *Init = VD->getInit();
642 if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
643 // Perform simple memcpy.
644 EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
// Non-trivial element initialization: run the init expression once per
// element, remapping VDInit to the current source element.
647 EmitOMPAggregateAssign(
648 Emission.getAllocatedAddress(), OriginalAddr, Type,
649 [this, VDInit, Init](Address DestElement,
650 Address SrcElement) {
651 // Clean up any temporaries needed by the initialization.
652 RunCleanupsScope InitScope(*this);
653 // Emit initialization for single element.
654 setAddrOfLocalVar(VDInit, SrcElement);
655 EmitAnyExprToMem(Init, DestElement,
656 Init->getType().getQualifiers(),
657 /*IsInitializer*/ false);
658 LocalDeclMap.erase(VDInit);
661 EmitAutoVarCleanups(Emission);
662 return Emission.getAllocatedAddress();
// Non-array path: emit the private VarDecl with VDInit temporarily remapped
// to the original variable's address.
665 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
666 // Emit private VarDecl with copy init.
667 // Remap temp VDInit variable to the address of the original
669 // (for proper handling of captured global variables).
670 setAddrOfLocalVar(VDInit, OriginalAddr);
672 LocalDeclMap.erase(VDInit);
673 return GetAddrOfLocalVar(VD);
676 assert(IsRegistered &&
677 "firstprivate var already registered as private");
678 // Silence the warning about unused variable.
685 return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
// Emit uninitialized private copies for all 'private' clause variables of
// directive \p D, registering each in \p PrivateScope. Deduplicates by
// canonical decl so a variable listed in several clauses is emitted once.
// NOTE(review): numbering skips lines (691 -> 693, 699 -> 701, 707 -> 715),
// so the early return body, the IsRegistered declaration and the trailing
// braces are missing from this excerpt.
688 void CodeGenFunction::EmitOMPPrivateClause(
689 const OMPExecutableDirective &D,
690 CodeGenFunction::OMPPrivateScope &PrivateScope) {
691 if (!HaveInsertPoint())
693 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
694 for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
695 auto IRef = C->varlist_begin();
696 for (auto IInit : C->private_copies()) {
697 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
698 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
699 auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
// Map the original variable to the freshly emitted private copy.
701 PrivateScope.addPrivate(OrigVD, [&]() -> Address {
702 // Emit private VarDecl with copy init.
704 return GetAddrOfLocalVar(VD);
706 assert(IsRegistered && "private var already registered as private");
707 // Silence the warning about unused variable.
// Emit copying of 'copyin' threadprivate variables from the master thread's
// copies into the current thread's copies, guarded by a master-vs-worker
// address comparison so the master thread skips the copy.
// NOTE(review): embedded numbering skips lines (716 -> 718, 743 -> 746,
// 766 -> 774, and the tail past 775), so the early return, part of the
// master-address else-branch, iterator increments and the return value are
// missing from this excerpt.
715 bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
716 if (!HaveInsertPoint())
718 // threadprivate_var1 = master_threadprivate_var1;
719 // operator=(threadprivate_var2, master_threadprivate_var2);
721 // __kmpc_barrier(&loc, global_tid);
722 llvm::DenseSet<const VarDecl *> CopiedVars;
723 llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
724 for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
725 auto IRef = C->varlist_begin();
726 auto ISrcRef = C->source_exprs().begin();
727 auto IDestRef = C->destination_exprs().begin();
728 for (auto *AssignOp : C->assignment_ops()) {
729 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
730 QualType Type = VD->getType();
731 if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
732 // Get the address of the master variable. If we are emitting code with
733 // TLS support, the address is passed from the master as field in the
734 // captured declaration.
735 Address MasterAddr = Address::invalid();
736 if (getLangOpts().OpenMPUseTLS &&
737 getContext().getTargetInfo().isTLSSupported()) {
738 assert(CapturedStmtInfo->lookup(VD) &&
739 "Copyin threadprivates should have been captured!");
740 DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
741 VK_LValue, (*IRef)->getExprLoc());
742 MasterAddr = EmitLValue(&DRE).getAddress();
// Drop the TLS capture mapping so later references use the thread's copy.
743 LocalDeclMap.erase(VD);
// Non-TLS path (per lines 746-748): take the global/static address directly.
746 Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
747 : CGM.GetAddrOfGlobal(VD),
748 getContext().getDeclAlign(VD));
750 // Get the address of the threadprivate variable.
751 Address PrivateAddr = EmitLValue(*IRef).getAddress();
752 if (CopiedVars.size() == 1) {
753 // At first check if current thread is a master thread. If it is, no
754 // need to copy data.
755 CopyBegin = createBasicBlock("copyin.not.master");
756 CopyEnd = createBasicBlock("copyin.not.master.end");
// Compare master vs. thread-local addresses: equal means this IS the master.
757 Builder.CreateCondBr(
758 Builder.CreateICmpNE(
759 Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
760 Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy)),
762 EmitBlock(CopyBegin);
764 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
765 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
766 EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
774 // Exit out of copying procedure for non-master thread.
775 EmitBlock(CopyEnd, /*IsFinished=*/true);
// Set up 'lastprivate' clause variables for directive \p D: remap each
// destination pseudo-variable to the original variable's address and emit
// private copies (unless the variable is a SIMD loop counter or is handled by
// a taskloop / firstprivate clause). Returns true if any lastprivate variable
// was seen.
// NOTE(review): embedded numbering skips lines (783 -> 785, 797 -> 799,
// 815 -> 817, 828 -> 836), so the early return, the taskloop 'break/continue'
// body, the DRE construction header and trailing braces are missing from this
// excerpt.
781 bool CodeGenFunction::EmitOMPLastprivateClauseInit(
782 const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
783 if (!HaveInsertPoint())
785 bool HasAtLeastOneLastprivate = false;
// For simd directives, collect loop counters: their privatization is handled
// by the loop emission itself.
786 llvm::DenseSet<const VarDecl *> SIMDLCVs;
787 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
788 auto *LoopDirective = cast<OMPLoopDirective>(&D);
789 for (auto *C : LoopDirective->counters()) {
791 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
794 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
795 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
796 HasAtLeastOneLastprivate = true;
797 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()))
799 auto IRef = C->varlist_begin();
800 auto IDestRef = C->destination_exprs().begin();
801 for (auto *IInit : C->private_copies()) {
802 // Keep the address of the original variable for future update at the end
804 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
805 // Taskloops do not require additional initialization, it is done in
806 // runtime support library.
807 if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
808 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
// The destination pseudo-variable resolves to the ORIGINAL variable so the
// final value can be copied back after the loop.
809 PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
811 const_cast<VarDecl *>(OrigVD),
812 /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
814 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
815 return EmitLValue(&DRE).getAddress();
817 // Check if the variable is also a firstprivate: in this case IInit is
818 // not generated. Initialization of this variable will happen in codegen
819 // for 'firstprivate' clause.
820 if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
821 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
822 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
823 // Emit private VarDecl with copy init.
825 return GetAddrOfLocalVar(VD);
827 assert(IsRegistered &&
828 "lastprivate var already registered as private");
836 return HasAtLeastOneLastprivate;
// Emit the lastprivate finalization: when \p IsLastIterCond is non-null the
// copy-back is guarded by an "is last iteration" branch; loop counters get
// their final-value expressions emitted first (unless \p NoFinals), then each
// lastprivate variable's private value is assigned back to the original.
// NOTE(review): embedded numbering skips lines (842 -> 844, 855 -> 858,
// 864 -> 866 -> 868, 896 -> 902, 903 -> 906), so the early return, the
// NoFinals conditional structure, iterator increments and trailing braces are
// missing from this excerpt.
839 void CodeGenFunction::EmitOMPLastprivateClauseFinal(
840 const OMPExecutableDirective &D, bool NoFinals,
841 llvm::Value *IsLastIterCond) {
842 if (!HaveInsertPoint())
844 // Emit following code:
845 // if (<IsLastIterCond>) {
846 // orig_var1 = private_orig_var1;
848 // orig_varn = private_orig_varn;
850 llvm::BasicBlock *ThenBB = nullptr;
851 llvm::BasicBlock *DoneBB = nullptr;
852 if (IsLastIterCond) {
853 ThenBB = createBasicBlock(".omp.lastprivate.then");
854 DoneBB = createBasicBlock(".omp.lastprivate.done");
855 Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
// For loop directives, remember each counter's final-value expression so the
// counter is updated before being copied back.
858 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
859 llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
860 if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
861 auto IC = LoopDirective->counters().begin();
862 for (auto F : LoopDirective->finals()) {
864 cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
866 AlreadyEmittedVars.insert(D);
868 LoopCountersAndUpdates[D] = F;
872 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
873 auto IRef = C->varlist_begin();
874 auto ISrcRef = C->source_exprs().begin();
875 auto IDestRef = C->destination_exprs().begin();
876 for (auto *AssignOp : C->assignment_ops()) {
877 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
878 QualType Type = PrivateVD->getType();
879 auto *CanonicalVD = PrivateVD->getCanonicalDecl();
880 if (AlreadyEmittedVars.insert(CanonicalVD).second) {
881 // If lastprivate variable is a loop control variable for loop-based
882 // directive, update its value before copyin back to original
884 if (auto *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
885 EmitIgnoredExpr(FinalExpr);
886 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
887 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
888 // Get the address of the original variable.
889 Address OriginalAddr = GetAddrOfLocalVar(DestVD);
890 // Get the address of the private variable.
891 Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
// Reference-typed privates: load through the reference to reach the value.
892 if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
894 Address(Builder.CreateLoad(PrivateAddr),
895 getNaturalTypeAlignment(RefTy->getPointeeType()));
896 EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
// Clause-level post-update expression is emitted after all copies.
902 if (auto *PostUpdate = C->getPostUpdateExpr())
903 EmitIgnoredExpr(PostUpdate);
906 EmitBlock(DoneBB, /*IsFinished=*/true);
// Rebuild an Address usable in place of the original base variable when a
// reduction item is an array element/section reached through a chain of
// pointers/references. Strips pointer/reference levels of \p BaseTy down to
// \p ElTy, materializing a memory temporary per visible level, bitcasts
// \p Addr to the expected element type, stores it into the innermost
// temporary, and returns it with the base lvalue's alignment.
// NOTE(review): several interior statements are elided in this excerpt;
// comments describe only the visible code.
909 static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
910 LValue BaseLV, llvm::Value *Addr) {
911 Address Tmp = Address::invalid();
912 Address TopTmp = Address::invalid();
913 Address MostTopTmp = Address::invalid();
// References are transparent here: peel them before walking pointer levels.
914 BaseTy = BaseTy.getNonReferenceType();
915 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
916 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
// One temporary per indirection level; link it into the previous level.
917 Tmp = CGF.CreateMemTemp(BaseTy);
918 if (TopTmp.isValid())
919 CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
923 BaseTy = BaseTy->getPointeeType();
// Cast the incoming address to the pointer type the consumer expects
// (either the base lvalue's own pointer type or the innermost temp's
// element type).
925 llvm::Type *Ty = BaseLV.getPointer()->getType();
927 Ty = Tmp.getElementType();
928 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
930 CGF.Builder.CreateStore(Addr, Tmp);
933 return Address(Addr, BaseLV.getAlignment());
// Dereference \p BaseLV through every pointer/reference level of \p BaseTy
// until the pointee type matches \p ElTy, then return an lvalue for the
// resulting address cast to \p ElTy's in-memory pointer type. Used to find
// the first element of a reduction array section reached via indirection.
// NOTE(review): part of the signature (the BaseLV parameter line) is elided
// in this excerpt.
936 static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
938 BaseTy = BaseTy.getNonReferenceType();
939 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
940 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
// Pointers and references need different load helpers.
941 if (auto *PtrTy = BaseTy->getAs<PointerType>())
942 BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
944 BaseLV = CGF.EmitLoadOfReferenceLValue(BaseLV.getAddress(),
945 BaseTy->castAs<ReferenceType>());
947 BaseTy = BaseTy->getPointeeType();
// Preserve the original lvalue's type/alignment metadata on the new address.
949 return CGF.MakeAddrLValue(
951 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
952 BaseLV.getPointer(), CGF.ConvertTypeForMem(ElTy)->getPointerTo()),
953 BaseLV.getAlignment()),
954 BaseLV.getType(), BaseLV.getAlignmentSource());
// Emit initialization for all 'reduction' clause items of directive \p D,
// registering in \p PrivateScope:
//   - LHSVD  -> address of the original (shared) item,
//   - OrigVD -> the freshly emitted private copy (reduction-initialized),
//   - RHSVD  -> address of the private copy.
// Three item shapes are handled below: OpenMP array sections, array
// subscripts, and plain variables (with an array-typed sub-case).
// DRD, when non-null, is a user-declared reduction whose initializer clause
// drives the private copy's initialization instead of the default init.
// NOTE(review): this excerpt elides some interior lines (early return,
// closing braces, lambda tails); comments cover only the visible code.
957 void CodeGenFunction::EmitOMPReductionClauseInit(
958 const OMPExecutableDirective &D,
959 CodeGenFunction::OMPPrivateScope &PrivateScope) {
960 if (!HaveInsertPoint())
// Walk the per-clause parallel lists in lock-step with the varlist.
962 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
963 auto ILHS = C->lhs_exprs().begin();
964 auto IRHS = C->rhs_exprs().begin();
965 auto IPriv = C->privates().begin();
966 auto IRed = C->reduction_ops().begin();
967 for (auto IRef : C->varlists()) {
968 auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
969 auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
970 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
971 auto *DRD = getReductionInit(*IRed);
// --- Case 1: reduction over an OpenMP array section, e.g. a[lb:len]. ---
972 if (auto *OASE = dyn_cast<OMPArraySectionExpr>(IRef)) {
// Peel nested sections/subscripts to reach the underlying DeclRefExpr.
973 auto *Base = OASE->getBase()->IgnoreParenImpCasts();
974 while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
975 Base = TempOASE->getBase()->IgnoreParenImpCasts();
976 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
977 Base = TempASE->getBase()->IgnoreParenImpCasts();
978 auto *DE = cast<DeclRefExpr>(Base);
979 auto *OrigVD = cast<VarDecl>(DE->getDecl());
// Lower/upper bound lvalues of the section.
980 auto OASELValueLB = EmitOMPArraySectionExpr(OASE);
982 EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
983 auto OriginalBaseLValue = EmitLValue(DE);
985 loadToBegin(*this, OrigVD->getType(), OASELValueLB.getType(),
987 // Store the address of the original variable associated with the LHS
988 // implicit variable.
989 PrivateScope.addPrivate(LHSVD, [this, OASELValueLB]() -> Address {
990 return OASELValueLB.getAddress();
992 // Emit reduction copy.
993 bool IsRegistered = PrivateScope.addPrivate(
994 OrigVD, [this, OrigVD, PrivateVD, BaseLValue, OASELValueLB,
995 OASELValueUB, OriginalBaseLValue, DRD, IRed]() -> Address {
996 // Emit VarDecl with copy init for arrays.
997 // Get the address of the original variable captured in current
// Section length = (UB - LB) + 1 elements; feeds the VLA size below.
999 auto *Size = Builder.CreatePtrDiff(OASELValueUB.getPointer(),
1000 OASELValueLB.getPointer());
1001 Size = Builder.CreateNUWAdd(
1002 Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
1003 CodeGenFunction::OpaqueValueMapping OpaqueMap(
1004 *this, cast<OpaqueValueExpr>(
1006 .getAsVariableArrayType(PrivateVD->getType())
1009 EmitVariablyModifiedType(PrivateVD->getType());
1010 auto Emission = EmitAutoVarAlloca(*PrivateVD);
1011 auto Addr = Emission.getAllocatedAddress();
1012 auto *Init = PrivateVD->getInit();
1013 EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(),
1015 OASELValueLB.getAddress());
1016 EmitAutoVarCleanups(Emission);
1017 // Emit private VarDecl with reduction init.
// Re-point the "original variable" at the matching offset inside the
// private copy so later uses of OrigVD hit the private storage.
1018 auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
1019 OASELValueLB.getPointer());
1020 auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
1021 return castToBase(*this, OrigVD->getType(),
1022 OASELValueLB.getType(), OriginalBaseLValue,
1025 assert(IsRegistered && "private var already registered as private");
1026 // Silence the warning about unused variable.
1028 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
1029 return GetAddrOfLocalVar(PrivateVD);
// --- Case 2: reduction over a single array element, e.g. a[i]. ---
1031 } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(IRef)) {
1032 auto *Base = ASE->getBase()->IgnoreParenImpCasts();
1033 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1034 Base = TempASE->getBase()->IgnoreParenImpCasts();
1035 auto *DE = cast<DeclRefExpr>(Base);
1036 auto *OrigVD = cast<VarDecl>(DE->getDecl());
1037 auto ASELValue = EmitLValue(ASE);
1038 auto OriginalBaseLValue = EmitLValue(DE);
1039 LValue BaseLValue = loadToBegin(
1040 *this, OrigVD->getType(), ASELValue.getType(), OriginalBaseLValue);
1041 // Store the address of the original variable associated with the LHS
1042 // implicit variable.
1043 PrivateScope.addPrivate(LHSVD, [this, ASELValue]() -> Address {
1044 return ASELValue.getAddress();
1046 // Emit reduction copy.
1047 bool IsRegistered = PrivateScope.addPrivate(
1048 OrigVD, [this, OrigVD, PrivateVD, BaseLValue, ASELValue,
1049 OriginalBaseLValue, DRD, IRed]() -> Address {
1050 // Emit private VarDecl with reduction init.
1051 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1052 auto Addr = Emission.getAllocatedAddress();
// Prefer the user-declared reduction initializer when one is given
// (or when the private decl has no default init of its own).
1053 if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1054 emitInitWithReductionInitializer(*this, DRD, *IRed, Addr,
1055 ASELValue.getAddress(),
1056 ASELValue.getType());
1058 EmitAutoVarInit(Emission);
1059 EmitAutoVarCleanups(Emission);
1060 auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
1061 ASELValue.getPointer());
1062 auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
1063 return castToBase(*this, OrigVD->getType(), ASELValue.getType(),
1064 OriginalBaseLValue, Ptr);
1066 assert(IsRegistered && "private var already registered as private");
1067 // Silence the warning about unused variable.
1069 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
1070 return Builder.CreateElementBitCast(
1071 GetAddrOfLocalVar(PrivateVD), ConvertTypeForMem(RHSVD->getType()),
// --- Case 3: plain variable reference (whole-array or scalar). ---
1075 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
1076 QualType Type = PrivateVD->getType();
// 3a: array-typed item (including VLAs).
1077 if (getContext().getAsArrayType(Type)) {
1078 // Store the address of the original variable associated with the LHS
1079 // implicit variable.
1080 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1081 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1082 IRef->getType(), VK_LValue, IRef->getExprLoc());
1083 Address OriginalAddr = EmitLValue(&DRE).getAddress();
1084 PrivateScope.addPrivate(LHSVD, [this, &OriginalAddr,
1085 LHSVD]() -> Address {
1086 OriginalAddr = Builder.CreateElementBitCast(
1087 OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
1088 return OriginalAddr;
1090 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
// For VLAs the private copy's size expression must be evaluated first.
1091 if (Type->isVariablyModifiedType()) {
1092 CodeGenFunction::OpaqueValueMapping OpaqueMap(
1093 *this, cast<OpaqueValueExpr>(
1095 .getAsVariableArrayType(PrivateVD->getType())
1098 getTypeSize(OrigVD->getType().getNonReferenceType())));
1099 EmitVariablyModifiedType(Type);
1101 auto Emission = EmitAutoVarAlloca(*PrivateVD);
1102 auto Addr = Emission.getAllocatedAddress();
1103 auto *Init = PrivateVD->getInit();
1104 EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(),
1105 DRD ? *IRed : Init, OriginalAddr);
1106 EmitAutoVarCleanups(Emission);
1107 return Emission.getAllocatedAddress();
1109 assert(IsRegistered && "private var already registered as private");
1110 // Silence the warning about unused variable.
1112 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
1113 return Builder.CreateElementBitCast(
1114 GetAddrOfLocalVar(PrivateVD),
1115 ConvertTypeForMem(RHSVD->getType()), "rhs.begin");
// 3b: non-array item.
1118 // Store the address of the original variable associated with the LHS
1119 // implicit variable.
1120 Address OriginalAddr = Address::invalid();
1121 PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef,
1122 &OriginalAddr]() -> Address {
1123 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1124 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1125 IRef->getType(), VK_LValue, IRef->getExprLoc());
1126 OriginalAddr = EmitLValue(&DRE).getAddress();
1127 return OriginalAddr;
1129 // Emit reduction copy.
1130 bool IsRegistered = PrivateScope.addPrivate(
1131 OrigVD, [this, PrivateVD, OriginalAddr, DRD, IRed]() -> Address {
1132 // Emit private VarDecl with reduction init.
1133 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1134 auto Addr = Emission.getAllocatedAddress();
1135 if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1136 emitInitWithReductionInitializer(*this, DRD, *IRed, Addr,
1138 PrivateVD->getType());
1140 EmitAutoVarInit(Emission);
1141 EmitAutoVarCleanups(Emission);
1144 assert(IsRegistered && "private var already registered as private");
1145 // Silence the warning about unused variable.
1147 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
1148 return GetAddrOfLocalVar(PrivateVD);
// Emit the end-of-region combining step for all 'reduction' clauses of \p D:
// collects the per-clause expression lists and hands them to the OpenMP
// runtime, which merges the private copies back into the original variables.
// No-op when the directive carries no reduction clause.
1160 void CodeGenFunction::EmitOMPReductionClauseFinal(
1161 const OMPExecutableDirective &D) {
1162 if (!HaveInsertPoint())
// Flatten all clauses' parallel lists into single vectors for the runtime.
1164 llvm::SmallVector<const Expr *, 8> Privates;
1165 llvm::SmallVector<const Expr *, 8> LHSExprs;
1166 llvm::SmallVector<const Expr *, 8> RHSExprs;
1167 llvm::SmallVector<const Expr *, 8> ReductionOps;
1168 bool HasAtLeastOneReduction = false;
1169 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1170 HasAtLeastOneReduction = true;
1171 Privates.append(C->privates().begin(), C->privates().end());
1172 LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
1173 RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
1174 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
1176 if (HasAtLeastOneReduction) {
1177 // Emit nowait reduction if nowait clause is present or directive is a
1178 // parallel directive (it always has implicit barrier).
// The last argument flags simd-style reduction emission for OMPD_simd.
1179 CGM.getOpenMPRuntime().emitReduction(
1180 *this, D.getLocEnd(), Privates, LHSExprs, RHSExprs, ReductionOps,
1181 D.getSingleClause<OMPNowaitClause>() ||
1182 isOpenMPParallelDirective(D.getDirectiveKind()) ||
1183 D.getDirectiveKind() == OMPD_simd,
1184 D.getDirectiveKind() == OMPD_simd);
// Emit the post-update expressions attached to \p D's reduction clauses.
// \p CondGen may supply a guard value: when it returns non-null for the
// first post-update found, the updates are wrapped in a conditional
// then/done block pair; a null guard means unconditional emission.
1188 static void emitPostUpdateForReductionClause(
1189 CodeGenFunction &CGF, const OMPExecutableDirective &D,
1190 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1191 if (!CGF.HaveInsertPoint())
// DoneBB doubles as the "guard already emitted" marker.
1193 llvm::BasicBlock *DoneBB = nullptr;
1194 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1195 if (auto *PostUpdate = C->getPostUpdateExpr()) {
1197 if (auto *Cond = CondGen(CGF)) {
1198 // If the first post-update expression is found, emit conditional
1199 // block if it was requested.
1200 auto *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
1201 DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
1202 CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1203 CGF.EmitBlock(ThenBB);
1206 CGF.EmitIgnoredExpr(PostUpdate);
// Close the conditional region if one was opened above.
1210 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
// Shared lowering for parallel-style directives: outline the associated
// captured statement into a function, emit num_threads/proc_bind clause
// calls, pick up an applicable 'if' condition, then emit the runtime
// parallel call with the captured variables.
1213 static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
1214 const OMPExecutableDirective &S,
1215 OpenMPDirectiveKind InnermostKind,
1216 const RegionCodeGenTy &CodeGen) {
1217 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
// Outline the region body; the first captured param is the context struct.
1218 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().
1219 emitParallelOrTeamsOutlinedFunction(S,
1220 *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
1221 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
1222 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
1223 auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
1224 /*IgnoreResultAssign*/ true);
1225 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
1226 CGF, NumThreads, NumThreadsClause->getLocStart());
1228 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
1229 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
1230 CGF.CGM.getOpenMPRuntime().emitProcBindClause(
1231 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
// Only an 'if' clause with no modifier or the 'parallel' modifier applies.
1233 const Expr *IfCond = nullptr;
1234 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
1235 if (C->getNameModifier() == OMPD_unknown ||
1236 C->getNameModifier() == OMPD_parallel) {
1237 IfCond = C->getCondition();
1242 OMPLexicalScope Scope(CGF, S);
1243 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
1244 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
1245 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
1246 CapturedVars, IfCond);
// Lower '#pragma omp parallel': build the region body as a callback
// (copyin/firstprivate/private/reduction setup, the captured statement,
// then the reduction finalization), emit it via the common parallel
// helper, and finally emit unconditional reduction post-updates.
1249 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
1250 // Emit parallel region as a standalone region.
1251 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1252 OMPPrivateScope PrivateScope(CGF);
1253 bool Copyins = CGF.EmitOMPCopyinClause(S);
1254 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1256 // Emit implicit barrier to synchronize threads and avoid data races on
1257 // propagation master's thread values of threadprivate variables to local
1258 // instances of that variables of all other implicit threads.
1259 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1260 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
1261 /*ForceSimpleCall=*/true);
1263 CGF.EmitOMPPrivateClause(S, PrivateScope);
1264 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
// Activate all registered privates before emitting the body.
1265 (void)PrivateScope.Privatize();
1266 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1267 CGF.EmitOMPReductionClauseFinal(S);
1269 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
// Null guard => post-updates are emitted unconditionally.
1270 emitPostUpdateForReductionClause(
1271 *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
// Emit one iteration of an OpenMP loop: refresh the loop counters and any
// linear-clause variables from the iteration variable, then emit the body
// inside a continue-destination scope so 'continue' jumps to the
// end-of-body block.
1274 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
1275 JumpDest LoopExit) {
1276 RunCleanupsScope BodyScope(*this);
1277 // Update counters values on current iteration.
1278 for (auto I : D.updates()) {
1281 // Update the linear variables.
1282 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1283 for (auto *U : C->updates())
1287 // On a continue in the body, jump to the end.
1288 auto Continue = getJumpDestInCurrentScope("omp.body.continue");
// break -> LoopExit, continue -> Continue, for the duration of the body.
1289 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1291 EmitStmt(D.getBody());
1292 // The end (updates/cleanups).
1293 EmitBlock(Continue.getBlock());
1294 BreakContinueStack.pop_back();
// Emit the generic inner loop skeleton used by OpenMP worksharing/simd
// lowering: cond block testing \p LoopCond, body emitted via \p BodyGen,
// an increment block applying \p IncExpr plus \p PostIncGen extras, and a
// back-edge to the condition. \p RequiresCleanup routes the loop exit
// through a cleanup staging block.
1297 void CodeGenFunction::EmitOMPInnerLoop(
1298 const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
1299 const Expr *IncExpr,
1300 const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
1301 const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
1302 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
1304 // Start the loop with a block that tests the condition.
1305 auto CondBlock = createBasicBlock("omp.inner.for.cond");
1306 EmitBlock(CondBlock);
// Register the loop with LoopStack so loop metadata/attributes attach here.
1307 const SourceRange &R = S.getSourceRange();
1308 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1309 SourceLocToDebugLoc(R.getEnd()));
1311 // If there are any cleanups between here and the loop-exit scope,
1312 // create a block to stage a loop exit along.
1313 auto ExitBlock = LoopExit.getBlock();
1314 if (RequiresCleanup)
1315 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
1317 auto LoopBody = createBasicBlock("omp.inner.for.body");
1320 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
1321 if (ExitBlock != LoopExit.getBlock()) {
// Exit path must run cleanups before reaching the real loop exit.
1322 EmitBlock(ExitBlock);
1323 EmitBranchThroughCleanup(LoopExit);
1326 EmitBlock(LoopBody);
1327 incrementProfileCounter(&S);
1329 // Create a block for the increment.
1330 auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
1331 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1335 // Emit "IV = IV + 1" and a back-edge to the condition block.
1336 EmitBlock(Continue.getBlock());
1337 EmitIgnoredExpr(IncExpr);
1339 BreakContinueStack.pop_back();
1340 EmitBranch(CondBlock);
1342 // Emit the fall-through block.
1343 EmitBlock(LoopExit.getBlock());
// Emit initial values for 'linear' clause variables of loop directive \p D:
// copy-initialize each private linear variable from its original, and emit
// the pre-computed linear step variable when the step is not a constant.
1346 void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
1347 if (!HaveInsertPoint())
1349 // Emit inits for the linear variables.
1350 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1351 for (auto *Init : C->inits()) {
1352 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
// Initializer referencing another variable: rebuild a DeclRefExpr so the
// original (possibly captured) variable is loaded correctly.
1353 if (auto *Ref = dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
1354 AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
1355 auto *OrigVD = cast<VarDecl>(Ref->getDecl());
1356 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1357 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1358 VD->getInit()->getType(), VK_LValue,
1359 VD->getInit()->getExprLoc());
1360 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
1362 /*capturedByInit=*/false);
1363 EmitAutoVarCleanups(Emission);
1367 // Emit the linear steps for the linear clauses.
1368 // If a step is not constant, it is pre-calculated before the loop.
1369 if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
1370 if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
1371 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
1372 // Emit calculation of the linear step.
1373 EmitIgnoredExpr(CS);
// Emit the final values of 'linear' clause variables after the loop:
// each original variable is privatized to its own address and the final
// expression is emitted, plus any clause post-update expressions.
// \p CondGen may wrap the whole emission in a conditional then/done pair
// (non-null guard on the first final expression).
1378 void CodeGenFunction::EmitOMPLinearClauseFinal(
1379 const OMPLoopDirective &D,
1380 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1381 if (!HaveInsertPoint())
// DoneBB also marks whether the conditional region was opened.
1383 llvm::BasicBlock *DoneBB = nullptr;
1384 // Emit the final values of the linear variables.
1385 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1386 auto IC = C->varlist_begin();
1387 for (auto *F : C->finals()) {
1389 if (auto *Cond = CondGen(*this)) {
1390 // If the first post-update expression is found, emit conditional
1391 // block if it was requested.
1392 auto *ThenBB = createBasicBlock(".omp.linear.pu");
1393 DoneBB = createBasicBlock(".omp.linear.pu.done");
1394 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
// Re-resolve the original variable's address (it may be captured).
1398 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
1399 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1400 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1401 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
1402 Address OrigAddr = EmitLValue(&DRE).getAddress();
// Temporarily map OrigVD to its own address so the final expression
// writes to the original storage.
1403 CodeGenFunction::OMPPrivateScope VarScope(*this);
1404 VarScope.addPrivate(OrigVD, [OrigAddr]() -> Address { return OrigAddr; });
1405 (void)VarScope.Privatize();
1409 if (auto *PostUpdate = C->getPostUpdateExpr())
1410 EmitIgnoredExpr(PostUpdate);
1413 EmitBlock(DoneBB, /*IsFinished=*/true);
// Emit llvm.assume-based alignment assumptions for every pointer listed in
// 'aligned' clauses of \p D. The clause's explicit alignment (a constant)
// is used when present; otherwise the target's default SIMD alignment for
// the pointee type is substituted.
1416 static void emitAlignedClause(CodeGenFunction &CGF,
1417 const OMPExecutableDirective &D) {
1418 if (!CGF.HaveInsertPoint())
1420 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
1421 unsigned ClauseAlignment = 0;
// Sema guarantees the alignment expression folds to a ConstantInt here.
1422 if (auto AlignmentExpr = Clause->getAlignment()) {
1424 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
1425 ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
1427 for (auto E : Clause->varlists()) {
1428 unsigned Alignment = ClauseAlignment;
1429 if (Alignment == 0) {
1430 // OpenMP [2.8.1, Description]
1431 // If no optional parameter is specified, implementation-defined default
1432 // alignments for SIMD instructions on the target platforms are assumed.
1435 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
1436 E->getType()->getPointeeType()))
1439 assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
1440 "alignment is not power of 2");
1441 if (Alignment != 0) {
1442 llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
1443 CGF.EmitAlignmentAssumption(PtrValue, Alignment);
// Register private copies of the loop counters of \p S in \p LoopScope.
// Each original counter VD is mapped to a lazily allocated private counter
// (PrivateVD); when the private counter itself is visible in the current
// context (local, captured, or global), it is mapped back to the original
// variable's address as well.
1449 void CodeGenFunction::EmitOMPPrivateLoopCounters(
1450 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
1451 if (!HaveInsertPoint())
// counters() and private_counters() are parallel lists; walk in lock-step.
1453 auto I = S.private_counters().begin();
1454 for (auto *E : S.counters()) {
1455 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1456 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
1457 (void)LoopScope.addPrivate(VD, [&]() -> Address {
1458 // Emit var without initialization.
// Allocate only once even if the lambda runs after other registrations.
1459 if (!LocalDeclMap.count(PrivateVD)) {
1460 auto VarEmission = EmitAutoVarAlloca(*PrivateVD);
1461 EmitAutoVarCleanups(VarEmission);
1463 DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
1464 /*RefersToEnclosingVariableOrCapture=*/false,
1465 (*I)->getType(), VK_LValue, (*I)->getExprLoc());
1466 return EmitLValue(&DRE).getAddress();
1468 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
1469 VD->hasGlobalStorage()) {
1470 (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
1471 DeclRefExpr DRE(const_cast<VarDecl *>(VD),
1472 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
1473 E->getType(), VK_LValue, E->getExprLoc());
1474 return EmitLValue(&DRE).getAddress();
// Emit the loop pre-condition check for directive \p S: privatize the loop
// counters, emit their initial values, then branch on \p Cond to
// \p TrueBlock (loop executes at least once) or \p FalseBlock.
1481 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
1482 const Expr *Cond, llvm::BasicBlock *TrueBlock,
1483 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
1484 if (!CGF.HaveInsertPoint())
// The condition references the counters, so they must be privatized and
// initialized before it is evaluated.
1487 CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
1488 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
1489 (void)PreCondScope.Privatize();
1490 // Get initial values of real counters.
1491 for (auto I : S.inits()) {
1492 CGF.EmitIgnoredExpr(I);
1495 // Check that loop is executed at least one time.
1496 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
// Emit 'linear' clause privatization for loop directive \p D. Variables that
// are simultaneously SIMD loop counters are excluded from privatization
// (they are privatized as counters); the remaining linear variables get a
// copy-initialized private VarDecl registered in \p PrivateScope.
1499 void CodeGenFunction::EmitOMPLinearClause(
1500 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
1501 if (!HaveInsertPoint())
// Collect canonical decls of loop counters for simd directives so linear
// vars that double as counters can be skipped below.
1503 llvm::DenseSet<const VarDecl *> SIMDLCVs;
1504 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
1505 auto *LoopDirective = cast<OMPLoopDirective>(&D);
1506 for (auto *C : LoopDirective->counters()) {
1508 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
1511 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1512 auto CurPrivate = C->privates().begin();
1513 for (auto *E : C->varlists()) {
1514 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1516 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
1517 if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
1518 bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
1519 // Emit private VarDecl with copy init.
1520 EmitVarDecl(*PrivateVD);
1521 return GetAddrOfLocalVar(PrivateVD);
1523 assert(IsRegistered && "linear var already registered as private");
1524 // Silence the warning about unused variable.
// Counter-coincident linear var: emit the private decl without
// registering it as a privatization of the original.
1527 EmitVarDecl(*PrivateVD);
// Apply 'simdlen'/'safelen' clause values of \p D to the loop metadata:
// both set the vectorize width; a 'safelen' (alone, or alongside 'simdlen')
// additionally forbids marking memory instructions parallel, because
// loop-carried dependences within 'safelen' iterations are allowed.
1533 static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
1534 const OMPExecutableDirective &D,
1536 if (!CGF.HaveInsertPoint())
1538 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
// Sema guarantees the length expression is a constant integer.
1539 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
1540 /*ignoreResult=*/true);
1541 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1542 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1543 // In presence of finite 'safelen', it may be unsafe to mark all
1544 // the memory instructions parallel, because loop-carried
1545 // dependences of 'safelen' iterations are possible.
1547 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
1548 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
1549 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
1550 /*ignoreResult=*/true);
1551 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1552 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1553 // In presence of finite 'safelen', it may be unsafe to mark all
1554 // the memory instructions parallel, because loop-carried
1555 // dependences of 'safelen' iterations are possible.
1556 CGF.LoopStack.setParallel(false);
// Configure loop metadata for a simd region: enable vectorization, mark the
// loop parallel unless the schedule is monotonic, then apply any
// simdlen/safelen clause constraints.
// NOTE(review): the second parameter line (IsMonotonic) is elided in this
// excerpt.
1560 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
1562 // Walk clauses and process safelen/lastprivate.
1563 LoopStack.setParallel(!IsMonotonic);
1564 LoopStack.setVectorizeEnable(true);
1565 emitSimdlenSafelenClause(*this, D, IsMonotonic);
// Emit final values of the simd loop counters back into the original
// variables after the loop, but only for counters that are visible outside
// the region (local, captured, global, or captured-expression decls).
// \p CondGen may guard the whole emission with a conditional then/done
// block pair, opened on the first qualifying counter.
1568 void CodeGenFunction::EmitOMPSimdFinal(
1569 const OMPLoopDirective &D,
1570 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1571 if (!HaveInsertPoint())
1573 llvm::BasicBlock *DoneBB = nullptr;
// counters(), private_counters() and finals() are parallel lists.
1574 auto IC = D.counters().begin();
1575 auto IPC = D.private_counters().begin();
1576 for (auto F : D.finals()) {
1577 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
1578 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
1579 auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
1580 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
1581 OrigVD->hasGlobalStorage() || CED) {
1583 if (auto *Cond = CondGen(*this)) {
1584 // If the first post-update expression is found, emit conditional
1585 // block if it was requested.
1586 auto *ThenBB = createBasicBlock(".omp.final.then");
1587 DoneBB = createBasicBlock(".omp.final.done");
1588 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
// For captured-expression decls take the underlying expression's
// address; otherwise re-resolve the private counter's address.
1592 Address OrigAddr = Address::invalid();
1594 OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
1596 DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
1597 /*RefersToEnclosingVariableOrCapture=*/false,
1598 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
1599 OrigAddr = EmitLValue(&DRE).getAddress();
// Map the original counter to that address while emitting the final.
1601 OMPPrivateScope VarScope(*this);
1602 VarScope.addPrivate(OrigVD,
1603 [OrigAddr]() -> Address { return OrigAddr; });
1604 (void)VarScope.Privatize();
1611 EmitBlock(DoneBB, /*IsFinished=*/true);
// Lower '#pragma omp simd': optionally guard the whole loop with the
// precondition, emit the iteration variable and iteration count, set up
// private/linear/reduction/lastprivate scopes, emit the inner loop with
// simd metadata, and finish with counter finals, lastprivate copies, and
// reduction/linear finalization. The region is emitted inline (no outlined
// function).
1614 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
1615 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1616 OMPLoopScope PreInitScope(CGF, S);
1618 // for (IV in 0..LastIteration) BODY;
1619 // <Final counter/linear vars updates>;
1623 // Emit: if (PreCond) - begin.
1624 // If the condition constant folds and can be elided, avoid emitting the
1627 llvm::BasicBlock *ContBlock = nullptr;
1628 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
// Precondition is not a known constant: emit an explicit runtime check.
1632 auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
1633 ContBlock = CGF.createBasicBlock("simd.if.end");
1634 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
1635 CGF.getProfileCount(&S));
1636 CGF.EmitBlock(ThenBlock);
1637 CGF.incrementProfileCounter(&S);
1640 // Emit the loop iteration variable.
1641 const Expr *IVExpr = S.getIterationVariable();
1642 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
1643 CGF.EmitVarDecl(*IVDecl);
1644 CGF.EmitIgnoredExpr(S.getInit());
1646 // Emit the iterations count variable.
1647 // If it is not a variable, Sema decided to calculate iterations count on
1648 // each iteration (e.g., it is foldable into a constant).
1649 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
1650 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
1651 // Emit calculation of the iterations count.
1652 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
// Mark the loop for vectorization before emitting its body.
1655 CGF.EmitOMPSimdInit(S);
1657 emitAlignedClause(CGF, S);
1658 CGF.EmitOMPLinearClauseInit(S);
// Clause privatization order: counters, linear, private, reduction,
// lastprivate — then activate them all at once.
1660 OMPPrivateScope LoopScope(CGF);
1661 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
1662 CGF.EmitOMPLinearClause(S, LoopScope);
1663 CGF.EmitOMPPrivateClause(S, LoopScope);
1664 CGF.EmitOMPReductionClauseInit(S, LoopScope);
1665 bool HasLastprivateClause =
1666 CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
1667 (void)LoopScope.Privatize();
1668 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
1670 [&S](CodeGenFunction &CGF) {
1671 CGF.EmitOMPLoopBody(S, JumpDest());
1672 CGF.EmitStopPoint(&S);
1674 [](CodeGenFunction &) {});
// Null guards below => unconditional finals/post-updates.
1675 CGF.EmitOMPSimdFinal(
1676 S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1677 // Emit final copy of the lastprivate variables at the end of loops.
1678 if (HasLastprivateClause)
1679 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
1680 CGF.EmitOMPReductionClauseFinal(S);
1681 emitPostUpdateForReductionClause(
1682 CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1684 CGF.EmitOMPLinearClauseFinal(
1685 S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1686 // Emit: if (PreCond) - end.
1688 CGF.EmitBranch(ContBlock);
1689 CGF.EmitBlock(ContBlock, true);
1692 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1693 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
// Emit the outer dispatch loop used for chunked/dynamic worksharing
// schedules. Each outer iteration obtains the next chunk bounds (either by
// clamping static bounds or by calling the runtime's "for next" entry),
// runs the inner loop over that chunk, then advances LB/UB (static case)
// and loops back. Finishes by telling the runtime the loop is done
// (static-finish call, routed through the cancel-region exit).
// \p DynamicOrOrdered selects runtime dispatch vs static chunk stepping.
1696 void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
1697 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
1698 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1699 auto &RT = CGM.getOpenMPRuntime();
// Iteration variable width/signedness drives the runtime entry selection.
1701 const Expr *IVExpr = S.getIterationVariable();
1702 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1703 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1705 auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
1707 // Start the loop with a block that tests the condition.
1708 auto CondBlock = createBasicBlock("omp.dispatch.cond");
1709 EmitBlock(CondBlock);
1710 const SourceRange &R = S.getSourceRange();
1711 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1712 SourceLocToDebugLoc(R.getEnd()));
1714 llvm::Value *BoolCondVal = nullptr;
1715 if (!DynamicOrOrdered) {
1716 // UB = min(UB, GlobalUB)
1717 EmitIgnoredExpr(S.getEnsureUpperBound());
1719 EmitIgnoredExpr(S.getInit());
1721 BoolCondVal = EvaluateExprAsBool(S.getCond());
// Dynamic/ordered: ask the runtime whether another chunk exists.
1723 BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, IL,
1727 // If there are any cleanups between here and the loop-exit scope,
1728 // create a block to stage a loop exit along.
1729 auto ExitBlock = LoopExit.getBlock();
1730 if (LoopScope.requiresCleanups())
1731 ExitBlock = createBasicBlock("omp.dispatch.cleanup");
1733 auto LoopBody = createBasicBlock("omp.dispatch.body");
1734 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
1735 if (ExitBlock != LoopExit.getBlock()) {
1736 EmitBlock(ExitBlock);
1737 EmitBranchThroughCleanup(LoopExit);
1739 EmitBlock(LoopBody);
1741 // Emit "IV = LB" (in case of static schedule, we have already calculated new
1742 // LB for loop condition and emitted it above).
1743 if (DynamicOrOrdered)
1744 EmitIgnoredExpr(S.getInit());
1746 // Create a block for the increment.
1747 auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
1748 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1750 // Generate !llvm.loop.parallel metadata for loads and stores for loops
1751 // with dynamic/guided scheduling and without ordered clause.
1752 if (!isOpenMPSimdDirective(S.getDirectiveKind()))
1753 LoopStack.setParallel(!IsMonotonic);
1755 EmitOMPSimdInit(S, IsMonotonic);
1757 SourceLocation Loc = S.getLocStart();
// Inner loop walks the current chunk; an 'ordered' clause requires the
// per-iteration "ordered iteration end" runtime call in the post-inc hook.
1758 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
1759 [&S, LoopExit](CodeGenFunction &CGF) {
1760 CGF.EmitOMPLoopBody(S, LoopExit);
1761 CGF.EmitStopPoint(&S);
1763 [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
1765 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
1766 CGF, Loc, IVSize, IVSigned);
1770 EmitBlock(Continue.getBlock());
1771 BreakContinueStack.pop_back();
1772 if (!DynamicOrOrdered) {
1773 // Emit "LB = LB + Stride", "UB = UB + Stride".
1774 EmitIgnoredExpr(S.getNextLowerBound());
1775 EmitIgnoredExpr(S.getNextUpperBound());
1778 EmitBranch(CondBlock);
1780 // Emit the fall-through block.
1781 EmitBlock(LoopExit.getBlock());
1783 // Tell the runtime we are done.
1784 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
1785 if (!DynamicOrOrdered)
1786 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
// Route the static-finish call through the cancellation-region exit so it
// also runs when the construct is cancelled.
1788 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
// Emit the outer "dispatch" loop for a worksharing loop whose schedule makes
// the chunk assignment dynamic at run time (dynamic/guided/auto/runtime, an
// ordered clause, or a chunked static schedule). Calls the appropriate
// runtime init entry point, then delegates the actual loop structure to
// EmitOMPOuterLoop. NOTE(review): several interior lines (assert body,
// call continuation) are elided in this view.
1791 void CodeGenFunction::EmitOMPForOuterLoop(
1792 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
1793 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
1794 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1795 auto &RT = CGM.getOpenMPRuntime();
1797 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
1798 const bool DynamicOrOrdered =
1799 Ordered || RT.isDynamic(ScheduleKind.Schedule);
// Static non-chunked loops never reach here; they are emitted inline by
// EmitOMPWorksharingLoop without an outer dispatch loop.
1802 !RT.isStaticNonchunked(ScheduleKind.Schedule,
1803 /*Chunked=*/Chunk != nullptr)) &&
1804 "static non-chunked schedule does not need outer loop");
1808 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1809 // When schedule(dynamic,chunk_size) is specified, the iterations are
1810 // distributed to threads in the team in chunks as the threads request them.
1811 // Each thread executes a chunk of iterations, then requests another chunk,
1812 // until no chunks remain to be distributed. Each chunk contains chunk_size
1813 // iterations, except for the last chunk to be distributed, which may have
1814 // fewer iterations. When no chunk_size is specified, it defaults to 1.
1816 // When schedule(guided,chunk_size) is specified, the iterations are assigned
1817 // to threads in the team in chunks as the executing threads request them.
1818 // Each thread executes a chunk of iterations, then requests another chunk,
1819 // until no chunks remain to be assigned. For a chunk_size of 1, the size of
1820 // each chunk is proportional to the number of unassigned iterations divided
1821 // by the number of threads in the team, decreasing to 1. For a chunk_size
1822 // with value k (greater than 1), the size of each chunk is determined in the
1823 // same way, with the restriction that the chunks do not contain fewer than k
1824 // iterations (except for the last chunk to be assigned, which may have fewer
1825 // than k iterations).
1827 // When schedule(auto) is specified, the decision regarding scheduling is
1828 // delegated to the compiler and/or runtime system. The programmer gives the
1829 // implementation the freedom to choose any possible mapping of iterations to
1830 // threads in the team.
1832 // When schedule(runtime) is specified, the decision regarding scheduling is
1833 // deferred until run time, and the schedule and chunk size are taken from the
1834 // run-sched-var ICV. If the ICV is set to auto, the schedule is
1835 // implementation defined
1837 // while(__kmpc_dispatch_next(&LB, &UB)) {
1839 // while (idx <= UB) { BODY; ++idx;
1840 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
1844 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1845 // When schedule(static, chunk_size) is specified, iterations are divided into
1846 // chunks of size chunk_size, and the chunks are assigned to the threads in
1847 // the team in a round-robin fashion in the order of the thread number.
1849 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
1850 // while (idx <= UB) { BODY; ++idx; } // inner loop
// Iteration-variable width/signedness select the runtime entry point variant.
1856 const Expr *IVExpr = S.getIterationVariable();
1857 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1858 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
// Dynamic/ordered schedules use the dispatch-init entry point with the
// global upper bound; otherwise fall back to static init with chunking.
1860 if (DynamicOrOrdered) {
1861 llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());
1862 RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind, IVSize,
1863 IVSigned, Ordered, UBVal, Chunk);
1865 RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
1866 Ordered, IL, LB, UB, ST, Chunk);
1869 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, Ordered, LB, UB,
// Emit the outer loop for a 'distribute' directive. Initializes the
// distribute schedule via emitDistributeStaticInit and reuses the generic
// EmitOMPOuterLoop machinery with static (non-dynamic, non-ordered) settings.
1873 void CodeGenFunction::EmitOMPDistributeOuterLoop(
1874 OpenMPDistScheduleClauseKind ScheduleKind,
1875 const OMPDistributeDirective &S, OMPPrivateScope &LoopScope,
1876 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1878 auto &RT = CGM.getOpenMPRuntime();
1881 // Same behavior as a OMPForOuterLoop, except that schedule cannot be
1885 const Expr *IVExpr = S.getIterationVariable();
1886 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1887 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1889 RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
1890 IVSize, IVSigned, /* Ordered = */ false,
1891 IL, LB, UB, ST, Chunk);
1893 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false,
1894 S, LoopScope, /* Ordered = */ false, LB, UB, ST, IL, Chunk);
// Lower '#pragma omp distribute parallel for'. As visible here, codegen
// emits the captured statement as an inlined region (with a cancel-region
// RAII marker), rather than outlining it.
1897 void CodeGenFunction::EmitOMPDistributeParallelForDirective(
1898 const OMPDistributeParallelForDirective &S) {
1899 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1900 CGM.getOpenMPRuntime().emitInlinedDirective(
1901 *this, OMPD_distribute_parallel_for,
1902 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1903 OMPLoopScope PreInitScope(CGF, S);
1904 OMPCancelStackRAII CancelRegion(CGF, OMPD_distribute_parallel_for,
1905 /*HasCancel=*/false);
1907 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Lower '#pragma omp distribute parallel for simd' by emitting the captured
// statement as an inlined region under a loop pre-init scope.
1911 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
1912 const OMPDistributeParallelForSimdDirective &S) {
1913 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1914 CGM.getOpenMPRuntime().emitInlinedDirective(
1915 *this, OMPD_distribute_parallel_for_simd,
1916 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1917 OMPLoopScope PreInitScope(CGF, S);
1919 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Lower '#pragma omp distribute simd' by emitting the captured statement as
// an inlined region under a loop pre-init scope.
1923 void CodeGenFunction::EmitOMPDistributeSimdDirective(
1924 const OMPDistributeSimdDirective &S) {
1925 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1926 CGM.getOpenMPRuntime().emitInlinedDirective(
1927 *this, OMPD_distribute_simd,
1928 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1929 OMPLoopScope PreInitScope(CGF, S);
1931 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Lower '#pragma omp target parallel for simd' by emitting the captured
// statement as an inlined region (no device outlining visible at this stage).
1935 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
1936 const OMPTargetParallelForSimdDirective &S) {
1937 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1938 CGM.getOpenMPRuntime().emitInlinedDirective(
1939 *this, OMPD_target_parallel_for_simd,
1940 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1941 OMPLoopScope PreInitScope(CGF, S);
1943 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Lower '#pragma omp target simd' by emitting the captured statement as an
// inlined region under a loop pre-init scope.
1947 void CodeGenFunction::EmitOMPTargetSimdDirective(
1948 const OMPTargetSimdDirective &S) {
1949 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1950 CGM.getOpenMPRuntime().emitInlinedDirective(
1951 *this, OMPD_target_simd, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1952 OMPLoopScope PreInitScope(CGF, S);
1954 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Lower '#pragma omp teams distribute' by emitting the captured statement as
// an inlined region under a loop pre-init scope.
1958 void CodeGenFunction::EmitOMPTeamsDistributeDirective(
1959 const OMPTeamsDistributeDirective &S) {
1960 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1961 CGM.getOpenMPRuntime().emitInlinedDirective(
1962 *this, OMPD_teams_distribute,
1963 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1964 OMPLoopScope PreInitScope(CGF, S);
1966 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Lower '#pragma omp teams distribute simd' by emitting the captured
// statement as an inlined region under a loop pre-init scope.
1970 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
1971 const OMPTeamsDistributeSimdDirective &S) {
1972 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1973 CGM.getOpenMPRuntime().emitInlinedDirective(
1974 *this, OMPD_teams_distribute_simd,
1975 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1976 OMPLoopScope PreInitScope(CGF, S);
1978 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Lower '#pragma omp teams distribute parallel for simd' by emitting the
// captured statement as an inlined region under a loop pre-init scope.
1982 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
1983 const OMPTeamsDistributeParallelForSimdDirective &S) {
1984 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1985 CGM.getOpenMPRuntime().emitInlinedDirective(
1986 *this, OMPD_teams_distribute_parallel_for_simd,
1987 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1988 OMPLoopScope PreInitScope(CGF, S);
1990 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Lower '#pragma omp teams distribute parallel for' by emitting the captured
// statement as an inlined region under a loop pre-init scope.
1994 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
1995 const OMPTeamsDistributeParallelForDirective &S) {
1996 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1997 CGM.getOpenMPRuntime().emitInlinedDirective(
1998 *this, OMPD_teams_distribute_parallel_for,
1999 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2000 OMPLoopScope PreInitScope(CGF, S);
2002 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Lower '#pragma omp target teams' by emitting the captured statement as an
// inlined region. Note: unlike the loop-based combined directives above,
// this one has no OMPLexicalScope/OMPLoopScope in the visible code.
2006 void CodeGenFunction::EmitOMPTargetTeamsDirective(
2007 const OMPTargetTeamsDirective &S) {
2008 CGM.getOpenMPRuntime().emitInlinedDirective(
2009 *this, OMPD_target_teams, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2011 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Lower '#pragma omp target teams distribute' by emitting the captured
// statement as an inlined region.
2015 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
2016 const OMPTargetTeamsDistributeDirective &S) {
2017 CGM.getOpenMPRuntime().emitInlinedDirective(
2018 *this, OMPD_target_teams_distribute,
2019 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2021 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Lower '#pragma omp target teams distribute parallel for' by emitting the
// captured statement as an inlined region.
2025 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
2026 const OMPTargetTeamsDistributeParallelForDirective &S) {
2027 CGM.getOpenMPRuntime().emitInlinedDirective(
2028 *this, OMPD_target_teams_distribute_parallel_for,
2029 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2031 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2035 /// \brief Emit a helper variable and return corresponding lvalue.
///
/// Declares the VarDecl behind \p Helper (allocating its storage) and then
/// evaluates the DeclRefExpr itself to produce an LValue for that storage.
/// Used for the LB/UB/ST/IsLastIter helper variables of worksharing loops.
2036 static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
2037 const DeclRefExpr *Helper) {
2038 auto VDecl = cast<VarDecl>(Helper->getDecl());
2039 CGF.EmitVarDecl(*VDecl);
2040 return CGF.EmitLValue(Helper);
// Bundles a schedule clause kind with its two optional modifiers (e.g.
// monotonic/nonmonotonic/simd) so they can be passed around as one value.
2044 struct ScheduleKindModifiersTy {
2045 OpenMPScheduleClauseKind Kind;
2046 OpenMPScheduleClauseModifier M1;
2047 OpenMPScheduleClauseModifier M2;
2048 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
2049 OpenMPScheduleClauseModifier M1,
2050 OpenMPScheduleClauseModifier M2)
2051 : Kind(Kind), M1(M1), M2(M2) {}
// Emit a worksharing loop ('for'/'for simd' and combined forms). Sets up the
// iteration variable and helper vars, privatizes clause variables, then either
// emits an inlined static non-chunked loop directly or defers to
// EmitOMPForOuterLoop for schedules that need a runtime dispatch loop.
// Returns true if the directive has a lastprivate clause (callers use this to
// decide on an extra barrier). NOTE(review): several interior lines are
// elided in this view (assignments of LB/UB/ST/IL helper lvalues, some
// closing braces); comments below describe only the visible code.
2055 bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
2056 // Emit the loop iteration variable.
2057 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
2058 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
2059 EmitVarDecl(*IVDecl);
2061 // Emit the iterations count variable.
2062 // If it is not a variable, Sema decided to calculate iterations count on each
2063 // iteration (e.g., it is foldable into a constant).
2064 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2065 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2066 // Emit calculation of the iterations count.
2067 EmitIgnoredExpr(S.getCalcLastIteration());
2070 auto &RT = CGM.getOpenMPRuntime();
2072 bool HasLastprivateClause;
2073 // Check pre-condition.
2075 OMPLoopScope PreInitScope(*this, S);
2076 // Skip the entire loop if we don't meet the precondition.
2077 // If the condition constant folds and can be elided, avoid emitting the
2080 llvm::BasicBlock *ContBlock = nullptr;
2081 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
// Precondition could not be folded: emit an explicit conditional branch
// with profile-count weighting.
2085 auto *ThenBlock = createBasicBlock("omp.precond.then");
2086 ContBlock = createBasicBlock("omp.precond.end");
2087 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
2088 getProfileCount(&S));
2089 EmitBlock(ThenBlock);
2090 incrementProfileCounter(&S);
// An 'ordered' clause with a parameter triggers doacross-loop init.
2093 bool Ordered = false;
2094 if (auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
2095 if (OrderedClause->getNumForLoops())
2096 RT.emitDoacrossInit(*this, S);
2101 llvm::DenseSet<const Expr *> EmittedFinals;
2102 emitAlignedClause(*this, S);
2103 EmitOMPLinearClauseInit(S);
2104 // Emit helper vars inits.
2106 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
2108 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
2110 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
2112 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
2114 // Emit 'then' code.
2116 OMPPrivateScope LoopScope(*this);
2117 if (EmitOMPFirstprivateClause(S, LoopScope)) {
2118 // Emit implicit barrier to synchronize threads and avoid data races on
2119 // initialization of firstprivate variables and post-update of
2120 // lastprivate variables.
2121 CGM.getOpenMPRuntime().emitBarrierCall(
2122 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2123 /*ForceSimpleCall=*/true);
2125 EmitOMPPrivateClause(S, LoopScope);
2126 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
2127 EmitOMPReductionClauseInit(S, LoopScope);
2128 EmitOMPPrivateLoopCounters(S, LoopScope);
2129 EmitOMPLinearClause(S, LoopScope);
2130 (void)LoopScope.Privatize();
2132 // Detect the loop schedule kind and chunk.
2133 llvm::Value *Chunk = nullptr;
2134 OpenMPScheduleTy ScheduleKind;
2135 if (auto *C = S.getSingleClause<OMPScheduleClause>()) {
2136 ScheduleKind.Schedule = C->getScheduleKind();
2137 ScheduleKind.M1 = C->getFirstScheduleModifier();
2138 ScheduleKind.M2 = C->getSecondScheduleModifier();
2139 if (const auto *Ch = C->getChunkSize()) {
2140 Chunk = EmitScalarExpr(Ch);
// Chunk is converted to the iteration variable's type for the runtime call.
2141 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
2142 S.getIterationVariable()->getType(),
2146 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2147 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2148 // OpenMP 4.5, 2.7.1 Loop Construct, Description.
2149 // If the static schedule kind is specified or if the ordered clause is
2150 // specified, and if no monotonic modifier is specified, the effect will
2151 // be as if the monotonic modifier was specified.
2152 if (RT.isStaticNonchunked(ScheduleKind.Schedule,
2153 /* Chunked */ Chunk != nullptr) &&
// Static non-chunked path: no outer dispatch loop is needed; each thread
// gets exactly one contiguous chunk.
2155 if (isOpenMPSimdDirective(S.getDirectiveKind()))
2156 EmitOMPSimdInit(S, /*IsMonotonic=*/true);
2157 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2158 // When no chunk_size is specified, the iteration space is divided into
2159 // chunks that are approximately equal in size, and at most one chunk is
2160 // distributed to each thread. Note that the size of the chunks is
2161 // unspecified in this case.
2162 RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
2163 IVSize, IVSigned, Ordered,
2164 IL.getAddress(), LB.getAddress(),
2165 UB.getAddress(), ST.getAddress());
2167 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2168 // UB = min(UB, GlobalUB);
2169 EmitIgnoredExpr(S.getEnsureUpperBound());
2171 EmitIgnoredExpr(S.getInit());
2172 // while (idx <= UB) { BODY; ++idx; }
2173 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
2175 [&S, LoopExit](CodeGenFunction &CGF) {
2176 CGF.EmitOMPLoopBody(S, LoopExit);
2177 CGF.EmitStopPoint(&S);
2179 [](CodeGenFunction &) {});
2180 EmitBlock(LoopExit.getBlock());
2181 // Tell the runtime we are done.
2182 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2183 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
2185 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
// Dynamic/ordered/chunked path: compute monotonicity per OpenMP 4.5 rules
// and let the outer-loop emitter drive chunk acquisition.
2187 const bool IsMonotonic =
2188 Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
2189 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown ||
2190 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
2191 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
2192 // Emit the outer loop, which requests its work chunk [LB..UB] from
2193 // runtime and runs the inner loop to process it.
2194 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
2195 LB.getAddress(), UB.getAddress(), ST.getAddress(),
2196 IL.getAddress(), Chunk);
// Finalization: IL ("is last iteration") gates lastprivate copy-out and
// reduction/linear post-updates.
2198 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
2200 [&](CodeGenFunction &CGF) -> llvm::Value * {
2201 return CGF.Builder.CreateIsNotNull(
2202 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2205 EmitOMPReductionClauseFinal(S);
2206 // Emit post-update of the reduction variables if IsLastIter != 0.
2207 emitPostUpdateForReductionClause(
2208 *this, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2209 return CGF.Builder.CreateIsNotNull(
2210 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2212 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2213 if (HasLastprivateClause)
2214 EmitOMPLastprivateClauseFinal(
2215 S, isOpenMPSimdDirective(S.getDirectiveKind()),
2216 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
2218 EmitOMPLinearClauseFinal(S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2219 return CGF.Builder.CreateIsNotNull(
2220 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2222 // We're now done with the loop, so jump to the continuation block.
2224 EmitBranch(ContBlock);
2225 EmitBlock(ContBlock, true);
2228 return HasLastprivateClause;
// Lower '#pragma omp for': emit the worksharing loop as an inlined region
// (with cancellation support), then add the implicit end-of-construct
// barrier unless 'nowait' is present and no lastprivate needs it.
2231 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
2232 bool HasLastprivates = false;
2233 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2234 PrePostActionTy &) {
2235 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
2236 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
2239 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2240 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
2244 // Emit an implicit barrier at the end.
2245 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
2246 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
// Lower '#pragma omp for simd': same worksharing-loop emission as 'for' but
// inlined as an OMPD_simd region and without a cancel region. The final
// barrier is still an OMPD_for barrier.
2250 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
2251 bool HasLastprivates = false;
2252 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2253 PrePostActionTy &) {
2254 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
2257 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2258 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2261 // Emit an implicit barrier at the end.
2262 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
2263 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
// Create a temporary alloca of type \p Ty named \p Name and wrap it in an
// LValue; if \p Init is provided it is stored as the initial value. Used for
// the lb/ub/st/il/iv helper variables of 'sections' codegen below.
2267 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
2269 llvm::Value *Init = nullptr) {
2270 auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
2272 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
// Shared codegen for '#pragma omp sections' and the sections part of
// 'parallel sections'. Models the construct as a static non-chunked
// worksharing loop over section indices: each iteration's body is a switch
// over the section statements. NOTE(review): some interior lines (e.g. the
// comment text around the switch sketch, a few braces) are elided in this
// view; comments describe only the visible code.
2276 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
2277 auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
2278 auto *CS = dyn_cast<CompoundStmt>(Stmt);
2279 bool HasLastprivates = false;
2280 auto &&CodeGen = [&S, Stmt, CS, &HasLastprivates](CodeGenFunction &CGF,
2281 PrePostActionTy &) {
2282 auto &C = CGF.CGM.getContext();
2283 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2284 // Emit helper vars inits.
2285 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
2286 CGF.Builder.getInt32(0));
// GlobalUB is the number of sections minus one; a single non-compound
// statement counts as one section (UB = 0).
2287 auto *GlobalUBVal = CS != nullptr ? CGF.Builder.getInt32(CS->size() - 1)
2288 : CGF.Builder.getInt32(0);
2290 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
2291 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
2292 CGF.Builder.getInt32(1));
2293 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
2294 CGF.Builder.getInt32(0));
// Synthesize AST nodes (cond: iv <= ub, inc: ++iv) so the generic inner-loop
// emitter can be reused for the section-index loop.
2296 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
2297 OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
2298 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
2299 OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
2300 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
2301 // Generate condition for loop.
2302 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
2303 OK_Ordinary, S.getLocStart(),
2304 /*fpContractable=*/false);
2305 // Increment for loop counter.
2306 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
2308 auto BodyGen = [Stmt, CS, &S, &IV](CodeGenFunction &CGF) {
2309 // Iterate through all sections and emit a switch construct:
2312 // <SectionStmt[0]>;
2315 // case <NumSection> - 1:
2316 // <SectionStmt[<NumSection> - 1]>;
2319 // .omp.sections.exit:
2320 auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
2321 auto *SwitchStmt = CGF.Builder.CreateSwitch(
2322 CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
2323 CS == nullptr ? 1 : CS->size());
2325 unsigned CaseNumber = 0;
2326 for (auto *SubStmt : CS->children()) {
2327 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
2328 CGF.EmitBlock(CaseBB);
2329 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
2330 CGF.EmitStmt(SubStmt);
2331 CGF.EmitBranch(ExitBB);
// Non-compound associated statement: emit it as the single case 0.
2335 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
2336 CGF.EmitBlock(CaseBB);
2337 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
2339 CGF.EmitBranch(ExitBB);
2341 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2344 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
2345 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
2346 // Emit implicit barrier to synchronize threads and avoid data races on
2347 // initialization of firstprivate variables and post-update of lastprivate
2349 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
2350 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2351 /*ForceSimpleCall=*/true);
2353 CGF.EmitOMPPrivateClause(S, LoopScope);
2354 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
2355 CGF.EmitOMPReductionClauseInit(S, LoopScope);
2356 (void)LoopScope.Privatize();
2358 // Emit static non-chunked loop.
2359 OpenMPScheduleTy ScheduleKind;
2360 ScheduleKind.Schedule = OMPC_SCHEDULE_static;
2361 CGF.CGM.getOpenMPRuntime().emitForStaticInit(
2362 CGF, S.getLocStart(), ScheduleKind, /*IVSize=*/32,
2363 /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(), LB.getAddress(),
2364 UB.getAddress(), ST.getAddress());
2365 // UB = min(UB, GlobalUB);
2366 auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
2367 auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
2368 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
2369 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
2371 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
2372 // while (idx <= UB) { BODY; ++idx; }
2373 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
2374 [](CodeGenFunction &) {});
2375 // Tell the runtime we are done.
2376 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2377 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
2379 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
2380 CGF.EmitOMPReductionClauseFinal(S);
2381 // Emit post-update of the reduction variables if IsLastIter != 0.
2382 emitPostUpdateForReductionClause(
2383 CGF, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2384 return CGF.Builder.CreateIsNotNull(
2385 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2388 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2389 if (HasLastprivates)
2390 CGF.EmitOMPLastprivateClauseFinal(
2391 S, /*NoFinals=*/false,
2392 CGF.Builder.CreateIsNotNull(
2393 CGF.EmitLoadOfScalar(IL, S.getLocStart())));
// The cancel region covers both 'sections' and 'parallel sections'.
2396 bool HasCancel = false;
2397 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
2398 HasCancel = OSD->hasCancel();
2399 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
2400 HasCancel = OPSD->hasCancel();
2401 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
2402 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
2404 // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
2405 // clause. Otherwise the barrier will be generated by the codegen for the
2407 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
2408 // Emit implicit barrier to synchronize threads and avoid data races on
2409 // initialization of firstprivate variables.
2410 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
// Lower '#pragma omp sections': delegates to EmitSections (call elided in
// this view) and emits the implicit end barrier unless 'nowait' is present.
2415 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
2417 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2420 // Emit an implicit barrier at the end.
2421 if (!S.getSingleClause<OMPNowaitClause>()) {
2422 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
// Lower a single '#pragma omp section': emit its captured statement as an
// inlined OMPD_section region.
2427 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
2428 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2429 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2431 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2432 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
// Lower '#pragma omp single': collects the copyprivate clause expressions,
// emits the single region via the runtime, and adds the implicit end barrier
// when neither 'nowait' nor 'copyprivate' suppresses it.
2436 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
2437 llvm::SmallVector<const Expr *, 8> CopyprivateVars;
2438 llvm::SmallVector<const Expr *, 8> DestExprs;
2439 llvm::SmallVector<const Expr *, 8> SrcExprs;
2440 llvm::SmallVector<const Expr *, 8> AssignmentOps;
2441 // Check if there are any 'copyprivate' clauses associated with this
2442 // 'single' construct.
2443 // Build a list of copyprivate variables along with helper expressions
2444 // (<source>, <destination>, <destination>=<source> expressions)
2445 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
2446 CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
2447 DestExprs.append(C->destination_exprs().begin(),
2448 C->destination_exprs().end());
2449 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
2450 AssignmentOps.append(C->assignment_ops().begin(),
2451 C->assignment_ops().end());
2453 // Emit code for 'single' region along with 'copyprivate' clauses
2454 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2456 OMPPrivateScope SingleScope(CGF);
2457 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
2458 CGF.EmitOMPPrivateClause(S, SingleScope);
2459 (void)SingleScope.Privatize();
2460 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2463 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2464 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
2465 CopyprivateVars, DestExprs,
2466 SrcExprs, AssignmentOps);
2468 // Emit an implicit barrier at the end (to avoid data race on firstprivate
2469 // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
2470 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
2471 CGM.getOpenMPRuntime().emitBarrierCall(
2472 *this, S.getLocStart(),
// NOTE(review): inside this branch the nowait clause is known absent, so
// this ternary always selects OMPD_single; the condition is redundant.
2473 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
// Lower '#pragma omp master': emit the captured statement inside a
// runtime-managed master region (only the master thread executes it).
2477 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
2478 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2480 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2482 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2483 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
// Lower '#pragma omp critical': emit the captured statement inside a named
// critical region, passing along the optional 'hint' clause expression.
2486 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
2487 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2489 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2491 Expr *Hint = nullptr;
2492 if (auto *HintClause = S.getSingleClause<OMPHintClause>())
2493 Hint = HintClause->getHint();
2494 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2495 CGM.getOpenMPRuntime().emitCriticalRegion(*this,
2496 S.getDirectiveName().getAsString(),
2497 CodeGen, S.getLocStart(), Hint);
// Lower '#pragma omp parallel for' as 'parallel' wrapping the worksharing
// loop; cancellation inside the loop targets OMPD_parallel_for.
2500 void CodeGenFunction::EmitOMPParallelForDirective(
2501 const OMPParallelForDirective &S) {
2502 // Emit directive as a combined directive that consists of two implicit
2503 // directives: 'parallel' with 'for' directive.
2504 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2505 OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
2506 CGF.EmitOMPWorksharingLoop(S);
2508 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
// Lower '#pragma omp parallel for simd' as 'parallel' wrapping the
// worksharing loop; the inner region kind is OMPD_simd, and no cancel
// region is installed ('simd' regions do not support cancellation here).
2511 void CodeGenFunction::EmitOMPParallelForSimdDirective(
2512 const OMPParallelForSimdDirective &S) {
2513 // Emit directive as a combined directive that consists of two implicit
2514 // directives: 'parallel' with 'for' directive.
2515 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2516 CGF.EmitOMPWorksharingLoop(S);
2518 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
// Lower '#pragma omp parallel sections' as 'parallel' wrapping the shared
// EmitSections codegen.
2521 void CodeGenFunction::EmitOMPParallelSectionsDirective(
2522 const OMPParallelSectionsDirective &S) {
2523 // Emit directive as a combined directive that consists of two implicit
2524 // directives: 'parallel' with 'sections' directive.
2525 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2526 CGF.EmitSections(S);
2528 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
2531 void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
2532 const RegionCodeGenTy &BodyGen,
2533 const TaskGenTy &TaskGen,
2534 OMPTaskDataTy &Data) {
2535 // Emit outlined function for task construct.
2536 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2537 auto *I = CS->getCapturedDecl()->param_begin();
2538 auto *PartId = std::next(I);
2539 auto *TaskT = std::next(I, 4);
2540 // Check if the task is final
2541 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
2542 // If the condition constant folds and can be elided, try to avoid emitting
2543 // the condition and the dead arm of the if/else.
2544 auto *Cond = Clause->getCondition();
2546 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
2547 Data.Final.setInt(CondConstant);
2549 Data.Final.setPointer(EvaluateExprAsBool(Cond));
2551 // By default the task is not final.
2552 Data.Final.setInt(/*IntVal=*/false);
2554 // Check if the task has 'priority' clause.
2555 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
2556 auto *Prio = Clause->getPriority();
2557 Data.Priority.setInt(/*IntVal=*/true);
2558 Data.Priority.setPointer(EmitScalarConversion(
2559 EmitScalarExpr(Prio), Prio->getType(),
2560 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
2561 Prio->getExprLoc()));
2563 // The first function argument for tasks is a thread id, the second one is a
2564 // part id (0 for tied tasks, >=0 for untied task).
2565 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
2566 // Get list of private variables.
2567 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
2568 auto IRef = C->varlist_begin();
2569 for (auto *IInit : C->private_copies()) {
2570 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2571 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2572 Data.PrivateVars.push_back(*IRef);
2573 Data.PrivateCopies.push_back(IInit);
2578 EmittedAsPrivate.clear();
2579 // Get list of firstprivate variables.
2580 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
2581 auto IRef = C->varlist_begin();
2582 auto IElemInitRef = C->inits().begin();
2583 for (auto *IInit : C->private_copies()) {
2584 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2585 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2586 Data.FirstprivateVars.push_back(*IRef);
2587 Data.FirstprivateCopies.push_back(IInit);
2588 Data.FirstprivateInits.push_back(*IElemInitRef);
2594 // Get list of lastprivate variables (for taskloops).
2595 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
2596 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
2597 auto IRef = C->varlist_begin();
2598 auto ID = C->destination_exprs().begin();
2599 for (auto *IInit : C->private_copies()) {
2600 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2601 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2602 Data.LastprivateVars.push_back(*IRef);
2603 Data.LastprivateCopies.push_back(IInit);
2605 LastprivateDstsOrigs.insert(
2606 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
2607 cast<DeclRefExpr>(*IRef)});
2612 // Build list of dependences.
2613 for (const auto *C : S.getClausesOfKind<OMPDependClause>())
2614 for (auto *IRef : C->varlists())
2615 Data.Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
2616 auto &&CodeGen = [PartId, &S, &Data, CS, &BodyGen, &LastprivateDstsOrigs](
2617 CodeGenFunction &CGF, PrePostActionTy &Action) {
2618 // Set proper addresses for generated private copies.
2619 OMPPrivateScope Scope(CGF);
2620 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
2621 !Data.LastprivateVars.empty()) {
2622 auto *CopyFn = CGF.Builder.CreateLoad(
2623 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)));
2624 auto *PrivatesPtr = CGF.Builder.CreateLoad(
2625 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)));
2627 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
2628 llvm::SmallVector<llvm::Value *, 16> CallArgs;
2629 CallArgs.push_back(PrivatesPtr);
2630 for (auto *E : Data.PrivateVars) {
2631 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2632 Address PrivatePtr = CGF.CreateMemTemp(
2633 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
2634 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2635 CallArgs.push_back(PrivatePtr.getPointer());
2637 for (auto *E : Data.FirstprivateVars) {
2638 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2639 Address PrivatePtr =
2640 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
2641 ".firstpriv.ptr.addr");
2642 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2643 CallArgs.push_back(PrivatePtr.getPointer());
2645 for (auto *E : Data.LastprivateVars) {
2646 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2647 Address PrivatePtr =
2648 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
2649 ".lastpriv.ptr.addr");
2650 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2651 CallArgs.push_back(PrivatePtr.getPointer());
2653 CGF.EmitRuntimeCall(CopyFn, CallArgs);
2654 for (auto &&Pair : LastprivateDstsOrigs) {
2655 auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
2657 const_cast<VarDecl *>(OrigVD),
2658 /*RefersToEnclosingVariableOrCapture=*/CGF.CapturedStmtInfo->lookup(
2660 Pair.second->getType(), VK_LValue, Pair.second->getExprLoc());
2661 Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
2662 return CGF.EmitLValue(&DRE).getAddress();
2665 for (auto &&Pair : PrivatePtrs) {
2666 Address Replacement(CGF.Builder.CreateLoad(Pair.second),
2667 CGF.getContext().getDeclAlign(Pair.first));
2668 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
2671 (void)Scope.Privatize();
2676 auto *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
2677 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
2678 Data.NumberOfParts);
2679 OMPLexicalScope Scope(*this, S);
2680 TaskGen(*this, OutlinedFn, Data);
// Emit code for the '#pragma omp task' directive: capture the associated
// statement, pick up the task-specific 'if' condition, then delegate the
// heavy lifting (privates, dependences, outlining) to
// EmitOMPTaskBasedDirective via the BodyGen/TaskGen callbacks.
2683 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
2684 // Emit outlined function for task construct.
2685 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2686 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
2687 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
// Find the 'if' clause that applies to this task: either an unmodified
// 'if' or one with the 'task' name modifier.
2688 const Expr *IfCond = nullptr;
2689 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
2690 if (C->getNameModifier() == OMPD_unknown ||
2691 C->getNameModifier() == OMPD_task) {
2692 IfCond = C->getCondition();
2698 // Check if the task has 'untied' clause.
// A task is tied unless an 'untied' clause is present.
2699 Data.Tied = !S.getSingleClause<OMPUntiedClause>();
// BodyGen: emits the captured statement (the task body) inside the
// outlined task function.
2700 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
2701 CGF.EmitStmt(CS->getCapturedStmt());
// TaskGen: once the outlined function exists, emit the runtime task call
// that enqueues/executes it, honoring the 'if' condition.
2703 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
2704 IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
2705 const OMPTaskDataTy &Data) {
2706 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getLocStart(), S, OutlinedFn,
2707 SharedsTy, CapturedStruct, IfCond,
2710 EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
// Emit code for '#pragma omp taskyield': a single runtime call, no
// associated statement or clauses to process.
2713 void CodeGenFunction::EmitOMPTaskyieldDirective(
2714 const OMPTaskyieldDirective &S) {
2715 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
// Emit code for '#pragma omp barrier': delegate to the runtime with an
// explicit OMPD_barrier kind (distinguishes it from implicit barriers).
2718 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
2719 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
// Emit code for '#pragma omp taskwait': a single runtime call that waits
// for completion of child tasks.
2722 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
2723 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
// Emit code for '#pragma omp taskgroup': wrap the captured body in a
// runtime taskgroup region emitted inline in the current function.
2726 void CodeGenFunction::EmitOMPTaskgroupDirective(
2727 const OMPTaskgroupDirective &S) {
2728 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2730 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Inlined lexical scope: captured expressions are emitted in-place
// rather than in an outlined function.
2732 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2733 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
// Emit code for '#pragma omp flush': pass the optional flush variable list
// (empty if no flush clause is present) to the runtime. The immediately
// invoked lambda computes that list.
2736 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
2737 CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
2738 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) {
2739 return llvm::makeArrayRef(FlushClause->varlist_begin(),
2740 FlushClause->varlist_end());
2743 }(), S.getLocStart());
// Emit the loop body of a '#pragma omp distribute' construct: iteration
// variable, precondition check, bound/stride helper variables, privatized
// loop counters, and then either a statically scheduled inner loop or a
// dynamic outer loop requesting chunks from the runtime.
2746 void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
2747 // Emit the loop iteration variable.
2748 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
2749 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
2750 EmitVarDecl(*IVDecl);
2752 // Emit the iterations count variable.
2753 // If it is not a variable, Sema decided to calculate iterations count on each
2754 // iteration (e.g., it is foldable into a constant).
2755 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2756 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2757 // Emit calculation of the iterations count.
2758 EmitIgnoredExpr(S.getCalcLastIteration());
2761 auto &RT = CGM.getOpenMPRuntime();
2763 // Check pre-condition.
// PreInitScope emits any clause pre-init statements before the loop.
2765 OMPLoopScope PreInitScope(*this, S);
2766 // Skip the entire loop if we don't meet the precondition.
2767 // If the condition constant folds and can be elided, avoid emitting the
2770 llvm::BasicBlock *ContBlock = nullptr;
2771 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
// Non-foldable precondition: branch over the whole loop when false.
2775 auto *ThenBlock = createBasicBlock("omp.precond.then");
2776 ContBlock = createBasicBlock("omp.precond.end");
2777 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
2778 getProfileCount(&S));
2779 EmitBlock(ThenBlock);
2780 incrementProfileCounter(&S);
2783 // Emit 'then' code.
2785 // Emit helper vars inits.
// Lower bound / upper bound / stride / is-last-iteration helpers used by
// the runtime's static-init and the inner loop condition.
2787 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
2789 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
2791 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
2793 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
2795 OMPPrivateScope LoopScope(*this);
2796 EmitOMPPrivateLoopCounters(S, LoopScope);
2797 (void)LoopScope.Privatize();
2799 // Detect the distribute schedule kind and chunk.
2800 llvm::Value *Chunk = nullptr;
2801 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
2802 if (auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
2803 ScheduleKind = C->getDistScheduleKind();
2804 if (const auto *Ch = C->getChunkSize()) {
// Chunk expression is converted to the iteration variable's type.
2805 Chunk = EmitScalarExpr(Ch);
2806 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
2807 S.getIterationVariable()->getType(),
2811 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2812 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2814 // OpenMP [2.10.8, distribute Construct, Description]
2815 // If dist_schedule is specified, kind must be static. If specified,
2816 // iterations are divided into chunks of size chunk_size, chunks are
2817 // assigned to the teams of the league in a round-robin fashion in the
2818 // order of the team number. When no chunk_size is specified, the
2819 // iteration space is divided into chunks that are approximately equal
2820 // in size, and at most one chunk is distributed to each team of the
2821 // league. The size of the chunks is unspecified in this case.
// Fast path: static non-chunked schedule — one runtime init call, then a
// simple inner while-loop over the team's [LB..UB] range.
2822 if (RT.isStaticNonchunked(ScheduleKind,
2823 /* Chunked */ Chunk != nullptr)) {
2824 RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
2825 IVSize, IVSigned, /* Ordered = */ false,
2826 IL.getAddress(), LB.getAddress(),
2827 UB.getAddress(), ST.getAddress());
2829 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2830 // UB = min(UB, GlobalUB);
2831 EmitIgnoredExpr(S.getEnsureUpperBound());
// IV = LB;
2833 EmitIgnoredExpr(S.getInit());
2834 // while (idx <= UB) { BODY; ++idx; }
2835 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
2837 [&S, LoopExit](CodeGenFunction &CGF) {
2838 CGF.EmitOMPLoopBody(S, LoopExit);
2839 CGF.EmitStopPoint(&S);
2841 [](CodeGenFunction &) {});
2842 EmitBlock(LoopExit.getBlock());
2843 // Tell the runtime we are done.
2844 RT.emitForStaticFinish(*this, S.getLocStart());
2846 // Emit the outer loop, which requests its work chunk [LB..UB] from
2847 // runtime and runs the inner loop to process it.
2848 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope,
2849 LB.getAddress(), UB.getAddress(), ST.getAddress(),
2850 IL.getAddress(), Chunk);
2854 // We're now done with the loop, so jump to the continuation block.
// ContBlock is only non-null when the precondition was not constant-folded.
2856 EmitBranch(ContBlock);
2857 EmitBlock(ContBlock, true);
// Emit code for '#pragma omp distribute': run EmitOMPDistributeLoop as an
// inlined region under the runtime's distribute handling.
2862 void CodeGenFunction::EmitOMPDistributeDirective(
2863 const OMPDistributeDirective &S) {
2864 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2865 CGF.EmitOMPDistributeLoop(S);
2867 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2868 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen,
// Outline the body of an 'ordered' region into its own function using a
// fresh CodeGenFunction, and mark it noinline so the region boundary is
// preserved in the emitted IR.
2872 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
2873 const CapturedStmt *S) {
// suppressNewContext: reuse the current LLVM context/state for outlining.
2874 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
2875 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
2876 CGF.CapturedStmtInfo = &CapStmtInfo;
2877 auto *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
2878 Fn->addFnAttr(llvm::Attribute::NoInline);
// Emit code for '#pragma omp ordered'. A stand-alone ordered with depend
// clauses lowers to doacross runtime calls; otherwise the associated
// statement is emitted inside an ordered region (outlined when the 'simd'
// clause requires it).
2882 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
// No associated statement: this is the depend form ('ordered depend(...)').
2883 if (!S.getAssociatedStmt()) {
2884 for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
2885 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
2888 auto *C = S.getSingleClause<OMPSIMDClause>();
2889 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
2890 PrePostActionTy &Action) {
// With 'simd': outline the body and call it directly so the region is a
// real function boundary.
2892 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2893 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
2894 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
2895 auto *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
2896 CGF.EmitNounwindRuntimeCall(OutlinedFn, CapturedVars);
2900 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2903 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
// !C: only emit runtime ordered entry/exit when there is no 'simd' clause.
2904 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C);
// Convert an RValue (scalar or complex) of type SrcType to a scalar value
// of DestType. Complex inputs go through complex-to-scalar conversion.
2907 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
2908 QualType SrcType, QualType DestType,
2909 SourceLocation Loc) {
2910 assert(CGF.hasScalarEvaluationKind(DestType) &&
2911 "DestType must have scalar evaluation kind.");
2912 assert(!Val.isAggregate() && "Must be a scalar or complex.");
2913 return Val.isScalar()
2914 ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType,
2916 : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
// Convert an RValue (scalar or complex) of type SrcType to a complex value
// of DestType. A scalar input becomes the real part with a zero imaginary
// part; a complex input has both components converted element-wise.
2920 static CodeGenFunction::ComplexPairTy
2921 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
2922 QualType DestType, SourceLocation Loc) {
2923 assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
2924 "DestType must have complex evaluation kind.");
2925 CodeGenFunction::ComplexPairTy ComplexVal;
2926 if (Val.isScalar()) {
2927 // Convert the input element to the element type of the complex.
2928 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
2929 auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
2930 DestElementType, Loc);
// Imaginary part is zero of the same element type.
2931 ComplexVal = CodeGenFunction::ComplexPairTy(
2932 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
2934 assert(Val.isComplex() && "Must be a scalar or complex.");
2935 auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
2936 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
2937 ComplexVal.first = CGF.EmitScalarConversion(
2938 Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
2939 ComplexVal.second = CGF.EmitScalarConversion(
2940 Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
// Store RVal into LVal atomically. Global-register lvalues cannot be
// addressed, so they take a plain register store; everything else uses an
// atomic store whose ordering depends on the seq_cst clause.
2945 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
2946 LValue LVal, RValue RVal) {
2947 if (LVal.isGlobalReg()) {
2948 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
2950 CGF.EmitAtomicStore(RVal, LVal,
2951 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
2952 : llvm::AtomicOrdering::Monotonic,
2953 LVal.isVolatile(), /*IsInit=*/false);
// Store RVal (of type RValTy) into LVal, converting to LVal's type first.
// Dispatches on the destination's evaluation kind; aggregates are not
// valid targets for OpenMP atomic simple stores.
2957 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
2958 QualType RValTy, SourceLocation Loc) {
2959 switch (getEvaluationKind(LVal.getType())) {
2961 EmitStoreThroughLValue(RValue::get(convertToScalarValue(
2962 *this, RVal, RValTy, LVal.getType(), Loc)),
2967 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
2971 llvm_unreachable("Must be a scalar or complex.");
// Emit '#pragma omp atomic read': atomically load X and store the result
// into V. Global-register X takes a plain load since it has no address.
2975 static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
2976 const Expr *X, const Expr *V,
2977 SourceLocation Loc) {
2979 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
2980 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
2981 LValue XLValue = CGF.EmitLValue(X);
2982 LValue VLValue = CGF.EmitLValue(V);
2983 RValue Res = XLValue.isGlobalReg()
2984 ? CGF.EmitLoadOfLValue(XLValue, Loc)
2985 : CGF.EmitAtomicLoad(
2987 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
2988 : llvm::AtomicOrdering::Monotonic,
2989 XLValue.isVolatile());
2990 // OpenMP, 2.12.6, atomic Construct
2991 // Any atomic construct with a seq_cst clause forces the atomically
2992 // performed operation to include an implicit flush operation without a
2995 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
// The loaded value is stored into V non-atomically (V is private to the
// reading thread per the atomic-read semantics).
2996 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
// Emit '#pragma omp atomic write': atomically store the value of E into X,
// followed by an implicit flush when seq_cst is present.
2999 static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
3000 const Expr *X, const Expr *E,
3001 SourceLocation Loc) {
3003 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
3004 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
3005 // OpenMP, 2.12.6, atomic Construct
3006 // Any atomic construct with a seq_cst clause forces the atomically
3007 // performed operation to include an implicit flush operation without a
3010 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
// Try to lower an atomic update 'x = x binop expr' to a single LLVM
// 'atomicrmw' instruction. Returns {true, old-value} on success, or
// {false, null} when the operation cannot be expressed as atomicrmw and
// the caller must fall back to a compare-and-swap loop.
3013 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
3015 BinaryOperatorKind BO,
3016 llvm::AtomicOrdering AO,
3017 bool IsXLHSInRHSPart) {
3018 auto &Context = CGF.CGM.getContext();
3019 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
3020 // expression is simple and atomic is allowed for the given type for the
// Bail out unless: both operands are integers, X is a simple lvalue, the
// update value's type matches X's storage type (or is a constant we can
// cast), and the target has a builtin atomic for this size/alignment.
3022 if (BO == BO_Comma || !Update.isScalar() ||
3023 !Update.getScalarVal()->getType()->isIntegerTy() ||
3024 !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
3025 (Update.getScalarVal()->getType() !=
3026 X.getAddress().getElementType())) ||
3027 !X.getAddress().getElementType()->isIntegerTy() ||
3028 !Context.getTargetInfo().hasBuiltinAtomic(
3029 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
3030 return std::make_pair(false, RValue::get(nullptr));
// Map the source-level binary operator to an AtomicRMWInst opcode.
3032 llvm::AtomicRMWInst::BinOp RMWOp;
3035 RMWOp = llvm::AtomicRMWInst::Add;
// Subtraction is only atomicrmw-able as 'x - expr' (x on the LHS);
// 'expr - x' cannot be expressed as an RMW.
3038 if (!IsXLHSInRHSPart)
3039 return std::make_pair(false, RValue::get(nullptr));
3040 RMWOp = llvm::AtomicRMWInst::Sub;
3043 RMWOp = llvm::AtomicRMWInst::And;
3046 RMWOp = llvm::AtomicRMWInst::Or;
3049 RMWOp = llvm::AtomicRMWInst::Xor;
// '<' and '>' forms pick min/max; the operand order (IsXLHSInRHSPart) and
// signedness of X select among Min/Max/UMin/UMax.
3052 RMWOp = X.getType()->hasSignedIntegerRepresentation()
3053 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
3054 : llvm::AtomicRMWInst::Max)
3055 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
3056 : llvm::AtomicRMWInst::UMax);
3059 RMWOp = X.getType()->hasSignedIntegerRepresentation()
3060 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
3061 : llvm::AtomicRMWInst::Min)
3062 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
3063 : llvm::AtomicRMWInst::UMin);
// Plain assignment maps to an atomic exchange.
3066 RMWOp = llvm::AtomicRMWInst::Xchg;
3075 return std::make_pair(false, RValue::get(nullptr));
3093 llvm_unreachable("Unsupported atomic update operation");
// Constant update values may need widening/truncation to X's storage type.
3095 auto *UpdateVal = Update.getScalarVal();
3096 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
3097 UpdateVal = CGF.Builder.CreateIntCast(
3098 IC, X.getAddress().getElementType(),
3099 X.getType()->hasSignedIntegerRepresentation());
3101 auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
3102 return std::make_pair(true, RValue::get(Res));
// Emit an atomic update of X. First try a single 'atomicrmw'; if that is
// not possible, either do a plain load/compute/store (global-register X)
// or fall back to a compare-and-swap loop driven by CommonGen.
// Returns emitOMPAtomicRMW's result: {true, old value} when atomicrmw was
// used, {false, null} otherwise.
3105 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
3106 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3107 llvm::AtomicOrdering AO, SourceLocation Loc,
3108 const llvm::function_ref<RValue(RValue)> &CommonGen) {
3109 // Update expressions are allowed to have the following forms:
3110 // x binop= expr; -> xrval + expr;
3111 // x++, ++x -> xrval + 1;
3112 // x--, --x -> xrval - 1;
3113 // x = x binop expr; -> xrval binop expr
3114 // x = expr Op x; - > expr binop xrval;
3115 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
3117 if (X.isGlobalReg()) {
3118 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
3120 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
3122 // Perform compare-and-swap procedure.
3123 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
// Emit '#pragma omp atomic update': evaluate 'expr' once, then atomically
// apply the update expression UE to X. UE references X and expr through
// OpaqueValueExprs, which Gen rebinds to the live values at emission time.
3129 static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
3130 const Expr *X, const Expr *E,
3131 const Expr *UE, bool IsXLHSInRHSPart,
3132 SourceLocation Loc) {
3133 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
3134 "Update expr in 'atomic update' must be a binary operator.");
3135 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
3136 // Update expressions are allowed to have the following forms:
3137 // x binop= expr; -> xrval + expr;
3138 // x++, ++x -> xrval + 1;
3139 // x--, --x -> xrval - 1;
3140 // x = x binop expr; -> xrval binop expr
3141 // x = expr Op x; - > expr binop xrval;
3142 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
3143 LValue XLValue = CGF.EmitLValue(X);
3144 RValue ExprRValue = CGF.EmitAnyExpr(E);
3145 auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3146 : llvm::AtomicOrdering::Monotonic;
// IsXLHSInRHSPart tells which opaque operand of UE stands for X's value
// and which stands for the already-evaluated 'expr'.
3147 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
3148 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
3149 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
3150 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
// Gen computes the new value of X from its current value; used by the
// CAS fallback inside EmitOMPAtomicSimpleUpdateExpr.
3152 [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
3153 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3154 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
3155 return CGF.EmitAnyExpr(UE);
3157 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
3158 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
3159 // OpenMP, 2.12.6, atomic Construct
3160 // Any atomic construct with a seq_cst clause forces the atomically
3161 // performed operation to include an implicit flush operation without a
3164 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
// Convert Value from SourceType to ResType, dispatching on the result's
// evaluation kind (scalar or complex); aggregates are not supported here.
3167 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
3168 QualType SourceType, QualType ResType,
3169 SourceLocation Loc) {
3170 switch (CGF.getEvaluationKind(ResType)) {
3173 convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
3175 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
3176 return RValue::getComplex(Res.first, Res.second);
3181 llvm_unreachable("Must be a scalar or complex.");
// Emit '#pragma omp atomic capture': atomically update or overwrite X and
// capture its old (postfix) or new (prefix) value into V. Two shapes are
// handled: X updated via a binary-operator expression UE, and X simply
// rewritten with 'expr' (exchange).
3184 static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
3185 bool IsPostfixUpdate, const Expr *V,
3186 const Expr *X, const Expr *E,
3187 const Expr *UE, bool IsXLHSInRHSPart,
3188 SourceLocation Loc) {
3189 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
3190 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
3192 LValue VLValue = CGF.EmitLValue(V);
3193 LValue XLValue = CGF.EmitLValue(X);
3194 RValue ExprRValue = CGF.EmitAnyExpr(E);
3195 auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3196 : llvm::AtomicOrdering::Monotonic;
3197 QualType NewVValType;
3199 // 'x' is updated with some additional value.
3200 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
3201 "Update expr in 'atomic capture' must be a binary operator.");
3202 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
3203 // Update expressions are allowed to have the following forms:
3204 // x binop= expr; -> xrval + expr;
3205 // x++, ++x -> xrval + 1;
3206 // x--, --x -> xrval - 1;
3207 // x = x binop expr; -> xrval binop expr
3208 // x = expr Op x; - > expr binop xrval;
3209 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
3210 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
3211 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
3212 NewVValType = XRValExpr->getType();
3213 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
// Gen both computes X's new value and records what will be stored to V:
// the old X value for postfix ('v = x; x op= e'), otherwise the result.
3214 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
3215 IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
3216 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3217 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
3218 RValue Res = CGF.EmitAnyExpr(UE);
3219 NewVVal = IsPostfixUpdate ? XRValue : Res;
3222 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
3223 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
3225 // 'atomicrmw' instruction was generated.
3226 if (IsPostfixUpdate) {
3227 // Use old value from 'atomicrmw'.
3228 NewVVal = Res.second;
3230 // 'atomicrmw' does not provide new value, so evaluate it using old
// Recompute the updated value from the old value atomicrmw returned.
3232 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3233 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
3234 NewVVal = CGF.EmitAnyExpr(UE);
3238 // 'x' is simply rewritten with some 'expr'.
3239 NewVValType = X->getType().getNonReferenceType();
3240 ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
3241 X->getType().getNonReferenceType(), Loc);
3242 auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
3246 // Try to perform atomicrmw xchg, otherwise simple exchange.
3247 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
3248 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
3251 // 'atomicrmw' instruction was generated.
// For exchange: postfix captures the old value, prefix the stored expr.
3252 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
3255 // Emit post-update store to 'v' of old/new 'x' value.
3256 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
3257 // OpenMP, 2.12.6, atomic Construct
3258 // Any atomic construct with a seq_cst clause forces the atomically
3259 // performed operation to include an implicit flush operation without a
3262 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
// Dispatch an 'omp atomic' directive to the read/write/update/capture
// emitter based on the directive's clause kind. All other clause kinds are
// invalid on 'omp atomic' and hit the llvm_unreachable at the end.
3265 static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
3266 bool IsSeqCst, bool IsPostfixUpdate,
3267 const Expr *X, const Expr *V, const Expr *E,
3268 const Expr *UE, bool IsXLHSInRHSPart,
3269 SourceLocation Loc) {
3272 EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
3275 EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
3279 EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
3282 EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
3283 IsXLHSInRHSPart, Loc);
// Exhaustive listing of clause kinds never valid on 'omp atomic'.
3287 case OMPC_num_threads:
3289 case OMPC_firstprivate:
3290 case OMPC_lastprivate:
3291 case OMPC_reduction:
3301 case OMPC_copyprivate:
3303 case OMPC_proc_bind:
3308 case OMPC_threadprivate:
3310 case OMPC_mergeable:
3315 case OMPC_num_teams:
3316 case OMPC_thread_limit:
3318 case OMPC_grainsize:
3320 case OMPC_num_tasks:
3322 case OMPC_dist_schedule:
3323 case OMPC_defaultmap:
3327 case OMPC_use_device_ptr:
3328 case OMPC_is_device_ptr:
3329 llvm_unreachable("Clause is not allowed in 'omp atomic'.");
// Emit code for '#pragma omp atomic': determine the operation kind from
// the first non-seq_cst clause, prepare full-expression cleanups for the
// associated statement(s), and emit the atomic expression as an inlined
// region.
3333 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
3334 bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
3335 OpenMPClauseKind Kind = OMPC_unknown;
3336 for (auto *C : S.clauses()) {
3337 // Find first clause (skip seq_cst clause, if it is first).
3338 if (C->getClauseKind() != OMPC_seq_cst) {
3339 Kind = C->getClauseKind();
// Strip the implicit capture to get at the user's statement.
3345 S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
3346 if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
3347 enterFullExpression(EWC);
3349 // Processing for statements under 'atomic capture'.
// 'atomic capture' may carry a compound statement; enter cleanups for
// each contained full-expression.
3350 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
3351 for (const auto *C : Compound->body()) {
3352 if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
3353 enterFullExpression(EWC);
3358 auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF,
3359 PrePostActionTy &) {
3360 CGF.EmitStopPoint(CS);
3361 EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
3362 S.getV(), S.getExpr(), S.getUpdateExpr(),
3363 S.isXLHSInRHSPart(), S.getLocStart());
3365 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
3366 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
// Outline a '#pragma omp target' region into a standalone function.
// Returns the outlined function and, when this is an offload entry, its
// target-side ID constant (null otherwise).
3369 std::pair<llvm::Function * /*OutlinedFn*/, llvm::Constant * /*OutlinedFnID*/>
3370 CodeGenFunction::EmitOMPTargetDirectiveOutlinedFunction(
3371 CodeGenModule &CGM, const OMPTargetDirective &S, StringRef ParentName,
3372 bool IsOffloadEntry) {
3373 llvm::Function *OutlinedFn = nullptr;
3374 llvm::Constant *OutlinedFnID = nullptr;
3375 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
// Privatize first/private clause variables before emitting the body.
3376 OMPPrivateScope PrivateScope(CGF);
3377 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3378 CGF.EmitOMPPrivateClause(S, PrivateScope);
3379 (void)PrivateScope.Privatize();
3382 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3384 // Emit target region as a standalone region.
3385 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
3386 S, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry, CodeGen);
3387 return std::make_pair(OutlinedFn, OutlinedFnID);
// Emit code for '#pragma omp target': gather captured variables, decide
// whether the region is a real offload entry, outline it (mangled after
// the enclosing function), and emit the runtime target call.
3390 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
3391 const CapturedStmt &CS = *cast<CapturedStmt>(S.getAssociatedStmt());
3393 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3394 GenerateOpenMPCapturedVars(CS, CapturedVars);
3396 llvm::Function *Fn = nullptr;
3397 llvm::Constant *FnID = nullptr;
3399 // Check if we have any if clause associated with the directive.
3400 const Expr *IfCond = nullptr;
3402 if (auto *C = S.getSingleClause<OMPIfClause>()) {
3403 IfCond = C->getCondition();
3406 // Check if we have any device clause associated with the directive.
3407 const Expr *Device = nullptr;
3408 if (auto *C = S.getSingleClause<OMPDeviceClause>()) {
3409 Device = C->getDevice();
3412 // Check if we have an if clause whose conditional always evaluates to false
3413 // or if we do not have any targets specified. If so the target region is not
3414 // an offload entry point.
3415 bool IsOffloadEntry = true;
3418 if (ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
3419 IsOffloadEntry = false;
3421 if (CGM.getLangOpts().OMPTargetTriples.empty())
3422 IsOffloadEntry = false;
3424 assert(CurFuncDecl && "No parent declaration for target region!");
3425 StringRef ParentName;
3426 // In case we have Ctors/Dtors we use the complete type variant to produce
3427 // the mangling of the device outlined kernel.
3428 if (auto *D = dyn_cast<CXXConstructorDecl>(CurFuncDecl))
3429 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
3430 else if (auto *D = dyn_cast<CXXDestructorDecl>(CurFuncDecl))
3431 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
3434 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CurFuncDecl)));
3436 std::tie(Fn, FnID) = EmitOMPTargetDirectiveOutlinedFunction(
3437 CGM, S, ParentName, IsOffloadEntry);
3438 OMPLexicalScope Scope(*this, S);
3439 CGM.getOpenMPRuntime().emitTargetCall(*this, S, Fn, FnID, IfCond, Device,
// Shared helper for teams-based directives: outline the region, forward
// num_teams/thread_limit clauses to the runtime, then emit the teams call
// with the captured variables.
3443 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
3444 const OMPExecutableDirective &S,
3445 OpenMPDirectiveKind InnermostKind,
3446 const RegionCodeGenTy &CodeGen) {
3447 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
3448 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().
3449 emitParallelOrTeamsOutlinedFunction(S,
3450 *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
// NOTE(review): dyn_cast result is dereferenced without a null check; if S
// can ever be a combined (non-OMPTeamsDirective) construct here this is
// UB — cast<> would at least assert. Confirm all callers pass a plain
// teams directive.
3452 const OMPTeamsDirective &TD = *dyn_cast<OMPTeamsDirective>(&S);
3453 const OMPNumTeamsClause *NT = TD.getSingleClause<OMPNumTeamsClause>();
3454 const OMPThreadLimitClause *TL = TD.getSingleClause<OMPThreadLimitClause>();
3456 Expr *NumTeams = (NT) ? NT->getNumTeams() : nullptr;
3457 Expr *ThreadLimit = (TL) ? TL->getThreadLimit() : nullptr;
3459 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
3463 OMPLexicalScope Scope(CGF, S);
3464 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3465 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
3466 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getLocStart(), OutlinedFn,
// Emit code for '#pragma omp teams': privatize clause variables, emit the
// captured body, and route through the common teams emission helper.
3470 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
3471 // Emit teams region as a standalone region.
3472 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3473 OMPPrivateScope PrivateScope(CGF);
3474 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3475 CGF.EmitOMPPrivateClause(S, PrivateScope);
3476 (void)PrivateScope.Privatize();
3477 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3479 emitCommonOMPTeamsDirective(*this, S, OMPD_teams, CodeGen);
// Emit code for '#pragma omp cancellation point': a single runtime call
// carrying the region kind being checked for cancellation.
3482 void CodeGenFunction::EmitOMPCancellationPointDirective(
3483 const OMPCancellationPointDirective &S) {
3484 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
3485 S.getCancelRegion());
// Emit code for '#pragma omp cancel': find the applicable 'if' condition
// (no modifier or the 'cancel' modifier) and emit the runtime cancel call
// for the directive's region kind.
3488 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
3489 const Expr *IfCond = nullptr;
3490 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3491 if (C->getNameModifier() == OMPD_unknown ||
3492 C->getNameModifier() == OMPD_cancel) {
3493 IfCond = C->getCondition();
3497 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), IfCond,
3498 S.getCancelRegion());
// Return the jump destination a cancelled construct of the given kind
// branches to: function-exit-style destination for parallel/task/
// target-parallel, or the cancel-stack exit block for loop/sections kinds.
3501 CodeGenFunction::JumpDest
3502 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
3503 if (Kind == OMPD_parallel || Kind == OMPD_task ||
3504 Kind == OMPD_target_parallel)
// Remaining kinds must be worksharing constructs tracked on OMPCancelStack.
3506 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
3507 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
3508 Kind == OMPD_distribute_parallel_for ||
3509 Kind == OMPD_target_parallel_for);
3510 return OMPCancelStack.getExitBlock();
3513 void CodeGenFunction::EmitOMPUseDevicePtrClause(
3514 const OMPClause &NC, OMPPrivateScope &PrivateScope,
3515 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
3516 const auto &C = cast<OMPUseDevicePtrClause>(NC);
3517 auto OrigVarIt = C.varlist_begin();
3518 auto InitIt = C.inits().begin();
3519 for (auto PvtVarIt : C.private_copies()) {
3520 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
3521 auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
3522 auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
3524 // In order to identify the right initializer we need to match the
3525 // declaration used by the mapping logic. In some cases we may get
3526 // OMPCapturedExprDecl that refers to the original declaration.
3527 const ValueDecl *MatchingVD = OrigVD;
3528 if (auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
3529 // OMPCapturedExprDecl are used to privative fields of the current
3531 auto *ME = cast<MemberExpr>(OED->getInit());
3532 assert(isa<CXXThisExpr>(ME->getBase()) &&
3533 "Base should be the current struct!");
3534 MatchingVD = ME->getMemberDecl();
3537 // If we don't have information about the current list item, move on to
3539 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
3540 if (InitAddrIt == CaptureDeviceAddrMap.end())
3543 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
3544 // Initialize the temporary initialization variable with the address we
3545 // get from the runtime library. We have to cast the source address
3546 // because it is always a void *. References are materialized in the
3547 // privatization scope, so the initialization here disregards the fact
3548 // the original variable is a reference.
3550 getContext().getPointerType(OrigVD->getType().getNonReferenceType());
3551 llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
3552 Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
3553 setAddrOfLocalVar(InitVD, InitAddr);
3555 // Emit private declaration, it will be initialized by the value we
3556 // declaration we just added to the local declarations map.
3559 // The initialization variables reached its purpose in the emission
3560 // ofthe previous declaration, so we don't need it anymore.
3561 LocalDeclMap.erase(InitVD);
3563 // Return the address of the private variable.
3564 return GetAddrOfLocalVar(PvtVD);
3566 assert(IsRegistered && "firstprivate var already registered as private");
3567 // Silence the warning about unused variable.
3575 // Generate the instructions for '#pragma omp target data' directive.
3576 void CodeGenFunction::EmitOMPTargetDataDirective(
3577 const OMPTargetDataDirective &S) {
3578 CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
3580 // Create a pre/post action to signal the privatization of the device pointer.
3581 // This action can be replaced by the OpenMP runtime code generation to
3582 // deactivate privatization.
3583 bool PrivatizeDevicePointers = false;
3584 class DevicePointerPrivActionTy : public PrePostActionTy {
3585 bool &PrivatizeDevicePointers;
3588 explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
3589 : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
3590 void Enter(CodeGenFunction &CGF) override {
3591 PrivatizeDevicePointers = true;
3594 DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
3596 auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
3597 CodeGenFunction &CGF, PrePostActionTy &Action) {
3598 auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3600 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3603 // Codegen that selects wheather to generate the privatization code or not.
3604 auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
3605 &InnermostCodeGen](CodeGenFunction &CGF,
3606 PrePostActionTy &Action) {
3607 RegionCodeGenTy RCG(InnermostCodeGen);
3608 PrivatizeDevicePointers = false;
3610 // Call the pre-action to change the status of PrivatizeDevicePointers if
3614 if (PrivatizeDevicePointers) {
3615 OMPPrivateScope PrivateScope(CGF);
3616 // Emit all instances of the use_device_ptr clause.
3617 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
3618 CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
3619 Info.CaptureDeviceAddrMap);
3620 (void)PrivateScope.Privatize();
3626 // Forward the provided action to the privatization codegen.
3627 RegionCodeGenTy PrivRCG(PrivCodeGen);
3628 PrivRCG.setAction(Action);
3630 // Notwithstanding the body of the region is emitted as inlined directive,
3631 // we don't use an inline scope as changes in the references inside the
3632 // region are expected to be visible outside, so we do not privative them.
3633 OMPLexicalScope Scope(CGF, S);
3634 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
3638 RegionCodeGenTy RCG(CodeGen);
3640 // If we don't have target devices, don't bother emitting the data mapping
3642 if (CGM.getLangOpts().OMPTargetTriples.empty()) {
3647 // Check if we have any if clause associated with the directive.
3648 const Expr *IfCond = nullptr;
3649 if (auto *C = S.getSingleClause<OMPIfClause>())
3650 IfCond = C->getCondition();
3652 // Check if we have any device clause associated with the directive.
3653 const Expr *Device = nullptr;
3654 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3655 Device = C->getDevice();
3657 // Set the action to signal privatization of device pointers.
3658 RCG.setAction(PrivAction);
3660 // Emit region code.
3661 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
3665 void CodeGenFunction::EmitOMPTargetEnterDataDirective(
3666 const OMPTargetEnterDataDirective &S) {
3667 // If we don't have target devices, don't bother emitting the data mapping
3669 if (CGM.getLangOpts().OMPTargetTriples.empty())
3672 // Check if we have any if clause associated with the directive.
3673 const Expr *IfCond = nullptr;
3674 if (auto *C = S.getSingleClause<OMPIfClause>())
3675 IfCond = C->getCondition();
3677 // Check if we have any device clause associated with the directive.
3678 const Expr *Device = nullptr;
3679 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3680 Device = C->getDevice();
3682 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
3685 void CodeGenFunction::EmitOMPTargetExitDataDirective(
3686 const OMPTargetExitDataDirective &S) {
3687 // If we don't have target devices, don't bother emitting the data mapping
3689 if (CGM.getLangOpts().OMPTargetTriples.empty())
3692 // Check if we have any if clause associated with the directive.
3693 const Expr *IfCond = nullptr;
3694 if (auto *C = S.getSingleClause<OMPIfClause>())
3695 IfCond = C->getCondition();
3697 // Check if we have any device clause associated with the directive.
3698 const Expr *Device = nullptr;
3699 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3700 Device = C->getDevice();
3702 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
3705 void CodeGenFunction::EmitOMPTargetParallelDirective(
3706 const OMPTargetParallelDirective &S) {
3707 // TODO: codegen for target parallel.
3710 void CodeGenFunction::EmitOMPTargetParallelForDirective(
3711 const OMPTargetParallelForDirective &S) {
3712 // TODO: codegen for target parallel for.
3715 /// Emit a helper variable and return corresponding lvalue.
3716 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
3717 const ImplicitParamDecl *PVD,
3718 CodeGenFunction::OMPPrivateScope &Privates) {
3719 auto *VDecl = cast<VarDecl>(Helper->getDecl());
3720 Privates.addPrivate(
3721 VDecl, [&CGF, PVD]() -> Address { return CGF.GetAddrOfLocalVar(PVD); });
3724 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
3725 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
3726 // Emit outlined function for task construct.
3727 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
3728 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
3729 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
3730 const Expr *IfCond = nullptr;
3731 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3732 if (C->getNameModifier() == OMPD_unknown ||
3733 C->getNameModifier() == OMPD_taskloop) {
3734 IfCond = C->getCondition();
3740 // Check if taskloop must be emitted without taskgroup.
3741 Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
3742 // TODO: Check if we should emit tied or untied task.
3744 // Set scheduling for taskloop
3745 if (const auto* Clause = S.getSingleClause<OMPGrainsizeClause>()) {
3747 Data.Schedule.setInt(/*IntVal=*/false);
3748 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
3749 } else if (const auto* Clause = S.getSingleClause<OMPNumTasksClause>()) {
3751 Data.Schedule.setInt(/*IntVal=*/true);
3752 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
3755 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
3757 // for (IV in 0..LastIteration) BODY;
3758 // <Final counter/linear vars updates>;
3762 // Emit: if (PreCond) - begin.
3763 // If the condition constant folds and can be elided, avoid emitting the
3766 llvm::BasicBlock *ContBlock = nullptr;
3767 OMPLoopScope PreInitScope(CGF, S);
3768 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
3772 auto *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
3773 ContBlock = CGF.createBasicBlock("taskloop.if.end");
3774 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
3775 CGF.getProfileCount(&S));
3776 CGF.EmitBlock(ThenBlock);
3777 CGF.incrementProfileCounter(&S);
3780 if (isOpenMPSimdDirective(S.getDirectiveKind()))
3781 CGF.EmitOMPSimdInit(S);
3783 OMPPrivateScope LoopScope(CGF);
3784 // Emit helper vars inits.
3785 enum { LowerBound = 5, UpperBound, Stride, LastIter };
3786 auto *I = CS->getCapturedDecl()->param_begin();
3787 auto *LBP = std::next(I, LowerBound);
3788 auto *UBP = std::next(I, UpperBound);
3789 auto *STP = std::next(I, Stride);
3790 auto *LIP = std::next(I, LastIter);
3791 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
3793 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
3795 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
3796 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
3798 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
3799 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
3800 (void)LoopScope.Privatize();
3801 // Emit the loop iteration variable.
3802 const Expr *IVExpr = S.getIterationVariable();
3803 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
3804 CGF.EmitVarDecl(*IVDecl);
3805 CGF.EmitIgnoredExpr(S.getInit());
3807 // Emit the iterations count variable.
3808 // If it is not a variable, Sema decided to calculate iterations count on
3809 // each iteration (e.g., it is foldable into a constant).
3810 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
3811 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
3812 // Emit calculation of the iterations count.
3813 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
3816 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
3818 [&S](CodeGenFunction &CGF) {
3819 CGF.EmitOMPLoopBody(S, JumpDest());
3820 CGF.EmitStopPoint(&S);
3822 [](CodeGenFunction &) {});
3823 // Emit: if (PreCond) - end.
3825 CGF.EmitBranch(ContBlock);
3826 CGF.EmitBlock(ContBlock, true);
3828 // Emit final copy of the lastprivate variables if IsLastIter != 0.
3829 if (HasLastprivateClause) {
3830 CGF.EmitOMPLastprivateClauseFinal(
3831 S, isOpenMPSimdDirective(S.getDirectiveKind()),
3832 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
3833 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
3834 (*LIP)->getType(), S.getLocStart())));
3837 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
3838 IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
3839 const OMPTaskDataTy &Data) {
3840 auto &&CodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &) {
3841 OMPLoopScope PreInitScope(CGF, S);
3842 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getLocStart(), S,
3843 OutlinedFn, SharedsTy,
3844 CapturedStruct, IfCond, Data);
3846 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
3849 EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
3852 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
3853 EmitOMPTaskLoopBasedDirective(S);
3856 void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
3857 const OMPTaskLoopSimdDirective &S) {
3858 EmitOMPTaskLoopBasedDirective(S);
3861 // Generate the instructions for '#pragma omp target update' directive.
3862 void CodeGenFunction::EmitOMPTargetUpdateDirective(
3863 const OMPTargetUpdateDirective &S) {
3864 // If we don't have target devices, don't bother emitting the data mapping
3866 if (CGM.getLangOpts().OMPTargetTriples.empty())
3869 // Check if we have any if clause associated with the directive.
3870 const Expr *IfCond = nullptr;
3871 if (auto *C = S.getSingleClause<OMPIfClause>())
3872 IfCond = C->getCondition();
3874 // Check if we have any device clause associated with the directive.
3875 const Expr *Device = nullptr;
3876 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3877 Device = C->getDevice();
3879 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);