//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
14 #include "CGCleanup.h"
15 #include "CGOpenMPRuntime.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "TargetInfo.h"
19 #include "clang/AST/Stmt.h"
20 #include "clang/AST/StmtOpenMP.h"
21 #include "clang/AST/DeclOpenMP.h"
22 #include "llvm/IR/CallSite.h"
23 using namespace clang;
24 using namespace CodeGen;
namespace {
/// Lexical scope for OpenMP executable constructs that handles correct
/// codegen for captured expressions.
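/// For instance (illustrative only, not code from this file), in
///
///   int NumThreads = ...;
///   #pragma omp parallel num_threads(NumThreads + 1)
///   { ... }
///
/// the clause expression may be captured into a compiler-generated
/// temporary; the corresponding pre-init declarations are emitted by this
/// scope before the construct itself.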
29 class OMPLexicalScope : public CodeGenFunction::LexicalScope {
30 void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
31 for (const auto *C : S.clauses()) {
32 if (auto *CPI = OMPClauseWithPreInit::get(C)) {
33 if (auto *PreInit = cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
34 for (const auto *I : PreInit->decls()) {
35 if (!I->hasAttr<OMPCaptureNoInitAttr>())
36 CGF.EmitVarDecl(cast<VarDecl>(*I));
38 CodeGenFunction::AutoVarEmission Emission =
39 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
40 CGF.EmitAutoVarCleanups(Emission);
47 CodeGenFunction::OMPPrivateScope InlinedShareds;
49 static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
50 return CGF.LambdaCaptureFields.lookup(VD) ||
51 (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
52 (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
56 OMPLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S,
57 bool AsInlined = false, bool EmitPreInitStmt = true)
58 : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
61 emitPreInitStmt(CGF, S);
63 if (S.hasAssociatedStmt()) {
64 auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
65 for (auto &C : CS->captures()) {
66 if (C.capturesVariable() || C.capturesVariableByCopy()) {
67 auto *VD = C.getCapturedVar();
68 DeclRefExpr DRE(const_cast<VarDecl *>(VD),
69 isCapturedVar(CGF, VD) ||
70 (CGF.CapturedStmtInfo &&
71 InlinedShareds.isGlobalVarCaptured(VD)),
72 VD->getType().getNonReferenceType(), VK_LValue,
74 InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
75 return CGF.EmitLValue(&DRE).getAddress();
79 (void)InlinedShareds.Privatize();
/// Lexical scope for the OpenMP parallel construct that handles correct
/// codegen for captured expressions.
87 class OMPParallelScope final : public OMPLexicalScope {
88 bool EmitPreInitStmt(const OMPExecutableDirective &S) {
89 OpenMPDirectiveKind Kind = S.getDirectiveKind();
90 return !(isOpenMPTargetExecutionDirective(Kind) ||
91 isOpenMPLoopBoundSharingDirective(Kind)) &&
92 isOpenMPParallelDirective(Kind);
96 OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
97 : OMPLexicalScope(CGF, S,
99 /*EmitPreInitStmt=*/EmitPreInitStmt(S)) {}
/// Lexical scope for the OpenMP teams construct that handles correct
/// codegen for captured expressions.
104 class OMPTeamsScope final : public OMPLexicalScope {
105 bool EmitPreInitStmt(const OMPExecutableDirective &S) {
106 OpenMPDirectiveKind Kind = S.getDirectiveKind();
107 return !isOpenMPTargetExecutionDirective(Kind) &&
108 isOpenMPTeamsDirective(Kind);
112 OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
113 : OMPLexicalScope(CGF, S,
115 /*EmitPreInitStmt=*/EmitPreInitStmt(S)) {}
/// Private scope for OpenMP loop-based directives that supports capturing
/// of expressions used in the loop statement.
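/// For example (illustrative only), with
///
///   #pragma omp for collapse(2)
///   for (int I = 0; I < Foo(); ++I)
///     for (int J = 0; J < N; ++J) { ... }
///
/// the precomputed iteration-space expressions (e.g. the captured result of
/// 'Foo()') are emitted as pre-init declarations by this scope before the
/// loop itself is emitted.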
120 class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
121 void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
122 if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
123 if (auto *PreInits = cast_or_null<DeclStmt>(LD->getPreInits())) {
124 for (const auto *I : PreInits->decls())
125 CGF.EmitVarDecl(cast<VarDecl>(*I));
131 OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
132 : CodeGenFunction::RunCleanupsScope(CGF) {
133 emitPreInitStmt(CGF, S);
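// getTypeSize (below) also handles variably-modified types. A rough,
// illustrative example: for 'double A[N][M]' the emitted size is
// N * M * sizeof(double), built from NUW multiplications of the VLA
// dimensions and the constant element size.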
139 llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
140 auto &C = getContext();
141 llvm::Value *Size = nullptr;
142 auto SizeInChars = C.getTypeSizeInChars(Ty);
143 if (SizeInChars.isZero()) {
144 // getTypeSizeInChars() returns 0 for a VLA.
145 while (auto *VAT = C.getAsVariableArrayType(Ty)) {
146 llvm::Value *ArraySize;
147 std::tie(ArraySize, Ty) = getVLASize(VAT);
148 Size = Size ? Builder.CreateNUWMul(Size, ArraySize) : ArraySize;
150 SizeInChars = C.getTypeSizeInChars(Ty);
151 if (SizeInChars.isZero())
152 return llvm::ConstantInt::get(SizeTy, /*V=*/0);
153 Size = Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
155 Size = CGM.getSize(SizeInChars);
159 void CodeGenFunction::GenerateOpenMPCapturedVars(
160 const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
161 const RecordDecl *RD = S.getCapturedRecordDecl();
162 auto CurField = RD->field_begin();
163 auto CurCap = S.captures().begin();
164 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
165 E = S.capture_init_end();
166 I != E; ++I, ++CurField, ++CurCap) {
167 if (CurField->hasCapturedVLAType()) {
168 auto VAT = CurField->getCapturedVLAType();
169 auto *Val = VLASizeMap[VAT->getSizeExpr()];
170 CapturedVars.push_back(Val);
171 } else if (CurCap->capturesThis())
172 CapturedVars.push_back(CXXThisValue);
173 else if (CurCap->capturesVariableByCopy()) {
175 EmitLoadOfLValue(EmitLValue(*I), SourceLocation()).getScalarVal();
// If the field is not a pointer, we need to store the actual value in a
// temporary and reload it as a uintptr value.
179 if (!CurField->getType()->isAnyPointerType()) {
180 auto &Ctx = getContext();
181 auto DstAddr = CreateMemTemp(
182 Ctx.getUIntPtrType(),
183 Twine(CurCap->getCapturedVar()->getName()) + ".casted");
184 LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
186 auto *SrcAddrVal = EmitScalarConversion(
187 DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
188 Ctx.getPointerType(CurField->getType()), SourceLocation());
190 MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());
192 // Store the value using the source type pointer.
193 EmitStoreThroughLValue(RValue::get(CV), SrcLV);
195 // Load the value using the destination type pointer.
196 CV = EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
198 CapturedVars.push_back(CV);
200 assert(CurCap->capturesVariable() && "Expected capture by reference.");
201 CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
206 static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
207 StringRef Name, LValue AddrLV,
208 bool isReferenceType = false) {
209 ASTContext &Ctx = CGF.getContext();
211 auto *CastedPtr = CGF.EmitScalarConversion(
212 AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
213 Ctx.getPointerType(DstType), SourceLocation());
215 CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
// If we are dealing with references, we need to return the address of the
// reference instead of the address of the referenced value.
220 if (isReferenceType) {
221 QualType RefType = Ctx.getLValueReferenceType(DstType);
222 auto *RefVal = TmpAddr.getPointer();
223 TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
224 auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
225 CGF.EmitStoreThroughLValue(RValue::get(RefVal), TmpLVal, /*isInit*/ true);
231 static QualType getCanonicalParamType(ASTContext &C, QualType T) {
232 if (T->isLValueReferenceType()) {
233 return C.getLValueReferenceType(
234 getCanonicalParamType(C, T.getNonReferenceType()),
235 /*SpelledAsLValue=*/false);
237 if (T->isPointerType())
238 return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
239 return C.getCanonicalParamType(T);
243 CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
246 "CapturedStmtInfo should be set when generating the captured function");
247 const CapturedDecl *CD = S.getCapturedDecl();
248 const RecordDecl *RD = S.getCapturedRecordDecl();
249 assert(CD->hasBody() && "missing CapturedDecl body");
251 // Build the argument list.
252 ASTContext &Ctx = CGM.getContext();
253 FunctionArgList Args;
254 Args.append(CD->param_begin(),
255 std::next(CD->param_begin(), CD->getContextParamPosition()));
256 auto I = S.captures().begin();
257 for (auto *FD : RD->fields()) {
258 QualType ArgType = FD->getType();
259 IdentifierInfo *II = nullptr;
260 VarDecl *CapVar = nullptr;
// If this is a capture by copy and the type is not a pointer, the outlined
// function argument type should be uintptr and the value properly casted to
// uintptr. This is necessary given that the runtime library is only able to
// deal with pointers. The VLA type sizes are passed to the outlined
// function in the same way.
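// For illustration (hypothetical names, not code from this file): given
//
//   void Foo(int X, int N) {
//     double VLA[N];
//     #pragma omp parallel firstprivate(X) shared(VLA)
//     { ... }
//   }
//
// the by-copy capture of 'X' is passed to the outlined function as a
// uintptr argument holding its bits, and the VLA size value for 'VLA' is
// passed as a uintptr as well; pointer-typed captures are passed unchanged.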
267 if ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
268 I->capturesVariableArrayType())
269 ArgType = Ctx.getUIntPtrType();
271 if (I->capturesVariable() || I->capturesVariableByCopy()) {
272 CapVar = I->getCapturedVar();
273 II = CapVar->getIdentifier();
274 } else if (I->capturesThis())
275 II = &getContext().Idents.get("this");
277 assert(I->capturesVariableArrayType());
278 II = &getContext().Idents.get("vla");
280 if (ArgType->isVariablyModifiedType()) {
282 getCanonicalParamType(getContext(), ArgType.getNonReferenceType());
284 Args.push_back(ImplicitParamDecl::Create(getContext(), nullptr,
285 FD->getLocation(), II, ArgType));
289 std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
292 // Create the function declaration.
293 FunctionType::ExtInfo ExtInfo;
294 const CGFunctionInfo &FuncInfo =
295 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
296 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
298 llvm::Function *F = llvm::Function::Create(
299 FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
300 CapturedStmtInfo->getHelperName(), &CGM.getModule());
301 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
303 F->addFnAttr(llvm::Attribute::NoUnwind);
305 // Generate the function.
306 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
307 CD->getBody()->getLocStart());
308 unsigned Cnt = CD->getContextParamPosition();
309 I = S.captures().begin();
310 for (auto *FD : RD->fields()) {
311 // If we are capturing a pointer by copy we don't need to do anything, just
312 // use the value that we get from the arguments.
313 if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
314 const VarDecl *CurVD = I->getCapturedVar();
315 Address LocalAddr = GetAddrOfLocalVar(Args[Cnt]);
316 // If the variable is a reference we need to materialize it here.
317 if (CurVD->getType()->isReferenceType()) {
318 Address RefAddr = CreateMemTemp(CurVD->getType(), getPointerAlign(),
319 ".materialized_ref");
320 EmitStoreOfScalar(LocalAddr.getPointer(), RefAddr, /*Volatile=*/false,
324 setAddrOfLocalVar(CurVD, LocalAddr);
331 MakeAddrLValue(GetAddrOfLocalVar(Args[Cnt]), Args[Cnt]->getType(),
332 AlignmentSource::Decl);
333 if (FD->hasCapturedVLAType()) {
334 LValue CastedArgLVal =
335 MakeAddrLValue(castValueFromUintptr(*this, FD->getType(),
336 Args[Cnt]->getName(), ArgLVal),
337 FD->getType(), AlignmentSource::Decl);
339 EmitLoadOfLValue(CastedArgLVal, SourceLocation()).getScalarVal();
340 auto VAT = FD->getCapturedVLAType();
341 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
342 } else if (I->capturesVariable()) {
343 auto *Var = I->getCapturedVar();
344 QualType VarTy = Var->getType();
345 Address ArgAddr = ArgLVal.getAddress();
346 if (!VarTy->isReferenceType()) {
347 if (ArgLVal.getType()->isLValueReferenceType()) {
348 ArgAddr = EmitLoadOfReference(
349 ArgAddr, ArgLVal.getType()->castAs<ReferenceType>());
350 } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
351 assert(ArgLVal.getType()->isPointerType());
352 ArgAddr = EmitLoadOfPointer(
353 ArgAddr, ArgLVal.getType()->castAs<PointerType>());
357 Var, Address(ArgAddr.getPointer(), getContext().getDeclAlign(Var)));
358 } else if (I->capturesVariableByCopy()) {
359 assert(!FD->getType()->isAnyPointerType() &&
360 "Not expecting a captured pointer.");
361 auto *Var = I->getCapturedVar();
362 QualType VarTy = Var->getType();
363 setAddrOfLocalVar(Var, castValueFromUintptr(*this, FD->getType(),
364 Args[Cnt]->getName(), ArgLVal,
365 VarTy->isReferenceType()));
367 // If 'this' is captured, load it into CXXThisValue.
368 assert(I->capturesThis());
370 EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation()).getScalarVal();
376 PGO.assignRegionCounters(GlobalDecl(CD), F);
377 CapturedStmtInfo->EmitBody(*this, CD->getBody());
378 FinishFunction(CD->getBodyRBrace());
383 //===----------------------------------------------------------------------===//
384 // OpenMP Directive Emission
385 //===----------------------------------------------------------------------===//
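// EmitOMPAggregateAssign emits, roughly (sketch only, not literal IR):
//
//   dest.end = dest.begin + num.elements;
//   if (dest.begin == dest.end) goto omp.arraycpy.done;
//   omp.arraycpy.body:
//     <CopyGen(dest.element, src.element)>;
//     advance dest.element and src.element by one;
//     if (dest.element != dest.end) goto omp.arraycpy.body;
//   omp.arraycpy.done: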
386 void CodeGenFunction::EmitOMPAggregateAssign(
387 Address DestAddr, Address SrcAddr, QualType OriginalType,
388 const llvm::function_ref<void(Address, Address)> &CopyGen) {
389 // Perform element-by-element initialization.
392 // Drill down to the base element type on both arrays.
393 auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
394 auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
395 SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
397 auto SrcBegin = SrcAddr.getPointer();
398 auto DestBegin = DestAddr.getPointer();
399 // Cast from pointer to array type to pointer to single element.
400 auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
401 // The basic structure here is a while-do loop.
402 auto BodyBB = createBasicBlock("omp.arraycpy.body");
403 auto DoneBB = createBasicBlock("omp.arraycpy.done");
405 Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
406 Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
408 // Enter the loop body, making that address the current address.
409 auto EntryBB = Builder.GetInsertBlock();
412 CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
414 llvm::PHINode *SrcElementPHI =
415 Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
416 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
417 Address SrcElementCurrent =
418 Address(SrcElementPHI,
419 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
421 llvm::PHINode *DestElementPHI =
422 Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
423 DestElementPHI->addIncoming(DestBegin, EntryBB);
424 Address DestElementCurrent =
425 Address(DestElementPHI,
426 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
429 CopyGen(DestElementCurrent, SrcElementCurrent);
431 // Shift the address forward by one element.
432 auto DestElementNext = Builder.CreateConstGEP1_32(
433 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
434 auto SrcElementNext = Builder.CreateConstGEP1_32(
435 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
436 // Check whether we've reached the end.
438 Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
439 Builder.CreateCondBr(Done, DoneBB, BodyBB);
440 DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
441 SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());
444 EmitBlock(DoneBB, /*IsFinished=*/true);
/// Check if the combiner is a call to a UDR combiner and, if so, return the
/// UDR decl used for the reduction.
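/// For example (illustrative only), with
///
///   #pragma omp declare reduction(merge : std::vector<int> :
///       omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
///
/// a 'reduction(merge : V)' clause produces a reduction op that calls the
/// user-defined combiner; this helper recognizes that call and returns the
/// corresponding OMPDeclareReductionDecl.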
449 static const OMPDeclareReductionDecl *
450 getReductionInit(const Expr *ReductionOp) {
451 if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
452 if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
454 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
455 if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
460 static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
461 const OMPDeclareReductionDecl *DRD,
463 Address Private, Address Original,
465 if (DRD->getInitializer()) {
466 std::pair<llvm::Function *, llvm::Function *> Reduction =
467 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
468 auto *CE = cast<CallExpr>(InitOp);
469 auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
470 const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
471 const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
472 auto *LHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
473 auto *RHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
474 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
475 PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
476 [=]() -> Address { return Private; });
477 PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
478 [=]() -> Address { return Original; });
479 (void)PrivateScope.Privatize();
480 RValue Func = RValue::get(Reduction.second);
481 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
482 CGF.EmitIgnoredExpr(InitOp);
484 llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
485 auto *GV = new llvm::GlobalVariable(
486 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
487 llvm::GlobalValue::PrivateLinkage, Init, ".init");
488 LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
490 switch (CGF.getEvaluationKind(Ty)) {
492 InitRVal = CGF.EmitLoadOfLValue(LV, SourceLocation());
496 RValue::getComplex(CGF.EmitLoadOfComplex(LV, SourceLocation()));
499 InitRVal = RValue::getAggregate(LV.getAddress());
502 OpaqueValueExpr OVE(SourceLocation(), Ty, VK_RValue);
503 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
504 CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
505 /*IsInitializer=*/false);
509 /// \brief Emit initialization of arrays of complex types.
510 /// \param DestAddr Address of the array.
511 /// \param Type Type of array.
512 /// \param Init Initial expression of array.
513 /// \param SrcAddr Address of the original array.
514 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
515 QualType Type, const Expr *Init,
516 Address SrcAddr = Address::invalid()) {
517 auto *DRD = getReductionInit(Init);
518 // Perform element-by-element initialization.
521 // Drill down to the base element type on both arrays.
522 auto ArrayTy = Type->getAsArrayTypeUnsafe();
523 auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
525 CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
528 CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
530 llvm::Value *SrcBegin = nullptr;
532 SrcBegin = SrcAddr.getPointer();
533 auto DestBegin = DestAddr.getPointer();
534 // Cast from pointer to array type to pointer to single element.
535 auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
536 // The basic structure here is a while-do loop.
537 auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
538 auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
540 CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
541 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
543 // Enter the loop body, making that address the current address.
544 auto EntryBB = CGF.Builder.GetInsertBlock();
545 CGF.EmitBlock(BodyBB);
547 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
549 llvm::PHINode *SrcElementPHI = nullptr;
550 Address SrcElementCurrent = Address::invalid();
552 SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
553 "omp.arraycpy.srcElementPast");
554 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
556 Address(SrcElementPHI,
557 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
559 llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
560 DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
561 DestElementPHI->addIncoming(DestBegin, EntryBB);
562 Address DestElementCurrent =
563 Address(DestElementPHI,
564 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
568 CodeGenFunction::RunCleanupsScope InitScope(CGF);
569 if (DRD && (DRD->getInitializer() || !Init)) {
570 emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
571 SrcElementCurrent, ElementTy);
573 CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
574 /*IsInitializer=*/false);
// Shift the source address forward by one element.
auto SrcElementNext = CGF.Builder.CreateConstGEP1_32(
    SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
581 SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
584 // Shift the address forward by one element.
585 auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
586 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
587 // Check whether we've reached the end.
589 CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
590 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
591 DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
594 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
597 void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
598 Address SrcAddr, const VarDecl *DestVD,
599 const VarDecl *SrcVD, const Expr *Copy) {
600 if (OriginalType->isArrayType()) {
601 auto *BO = dyn_cast<BinaryOperator>(Copy);
602 if (BO && BO->getOpcode() == BO_Assign) {
// Perform a simple memcpy for simple copying.
EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
} else {
// For arrays with complex element types, perform element-by-element
// copying.
608 EmitOMPAggregateAssign(
609 DestAddr, SrcAddr, OriginalType,
610 [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
// Working with a single array element, so we have to remap the
// destination and source variables to the corresponding array elements.
614 CodeGenFunction::OMPPrivateScope Remap(*this);
Remap.addPrivate(DestVD, [DestElement]() -> Address {
  return DestElement;
});
Remap.addPrivate(
    SrcVD, [SrcElement]() -> Address { return SrcElement; });
620 (void)Remap.Privatize();
621 EmitIgnoredExpr(Copy);
625 // Remap pseudo source variable to private copy.
626 CodeGenFunction::OMPPrivateScope Remap(*this);
627 Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
628 Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
629 (void)Remap.Privatize();
630 // Emit copying of the whole variable.
631 EmitIgnoredExpr(Copy);
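// Example (illustrative only) of what the firstprivate codegen below
// handles:
//
//   int A = 5;
//   S Arr[8];                      // S has a non-trivial copy constructor
//   #pragma omp parallel firstprivate(A, Arr)
//   { ... }
//
// Each implicit thread gets a private 'A' copy-initialized from the
// original, and a private 'Arr' initialized element by element (a plain
// memcpy is used instead when the copy is trivial).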
635 bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
636 OMPPrivateScope &PrivateScope) {
637 if (!HaveInsertPoint())
639 bool FirstprivateIsLastprivate = false;
640 llvm::DenseSet<const VarDecl *> Lastprivates;
641 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
642 for (const auto *D : C->varlists())
644 cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
646 llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
647 CGCapturedStmtInfo CapturesInfo(cast<CapturedStmt>(*D.getAssociatedStmt()));
648 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
649 auto IRef = C->varlist_begin();
650 auto InitsRef = C->inits().begin();
651 for (auto IInit : C->private_copies()) {
652 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
653 bool ThisFirstprivateIsLastprivate =
654 Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
655 auto *CapFD = CapturesInfo.lookup(OrigVD);
656 auto *FD = CapturedStmtInfo->lookup(OrigVD);
657 if (!ThisFirstprivateIsLastprivate && FD && (FD == CapFD) &&
658 !FD->getType()->isReferenceType()) {
659 EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
664 FirstprivateIsLastprivate =
665 FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
666 if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
667 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
668 auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
670 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
671 /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
672 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
673 Address OriginalAddr = EmitLValue(&DRE).getAddress();
674 QualType Type = VD->getType();
675 if (Type->isArrayType()) {
676 // Emit VarDecl with copy init for arrays.
// Get the address of the original variable captured in the current
// captured region.
679 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
680 auto Emission = EmitAutoVarAlloca(*VD);
681 auto *Init = VD->getInit();
682 if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
683 // Perform simple memcpy.
684 EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
687 EmitOMPAggregateAssign(
688 Emission.getAllocatedAddress(), OriginalAddr, Type,
689 [this, VDInit, Init](Address DestElement,
690 Address SrcElement) {
691 // Clean up any temporaries needed by the initialization.
692 RunCleanupsScope InitScope(*this);
693 // Emit initialization for single element.
694 setAddrOfLocalVar(VDInit, SrcElement);
695 EmitAnyExprToMem(Init, DestElement,
696 Init->getType().getQualifiers(),
697 /*IsInitializer*/ false);
698 LocalDeclMap.erase(VDInit);
701 EmitAutoVarCleanups(Emission);
702 return Emission.getAllocatedAddress();
705 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
706 // Emit private VarDecl with copy init.
// Remap temp VDInit variable to the address of the original variable
709 // (for proper handling of captured global variables).
710 setAddrOfLocalVar(VDInit, OriginalAddr);
712 LocalDeclMap.erase(VDInit);
713 return GetAddrOfLocalVar(VD);
716 assert(IsRegistered &&
717 "firstprivate var already registered as private");
718 // Silence the warning about unused variable.
725 return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
728 void CodeGenFunction::EmitOMPPrivateClause(
729 const OMPExecutableDirective &D,
730 CodeGenFunction::OMPPrivateScope &PrivateScope) {
731 if (!HaveInsertPoint())
733 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
734 for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
735 auto IRef = C->varlist_begin();
736 for (auto IInit : C->private_copies()) {
737 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
738 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
739 auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
741 PrivateScope.addPrivate(OrigVD, [&]() -> Address {
742 // Emit private VarDecl with copy init.
744 return GetAddrOfLocalVar(VD);
746 assert(IsRegistered && "private var already registered as private");
747 // Silence the warning about unused variable.
755 bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
756 if (!HaveInsertPoint())
// threadprivate_var1 = master_threadprivate_var1;
// operator=(threadprivate_var2, master_threadprivate_var2);
// ...
// __kmpc_barrier(&loc, global_tid);
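// For example (illustrative only):
//
//   int TPV;
//   #pragma omp threadprivate(TPV)
//   ...
//   #pragma omp parallel copyin(TPV)
//   { ... }
//
// copies the master thread's value of 'TPV' into every other thread's
// threadprivate copy before the region body executes, followed by a
// barrier.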
762 llvm::DenseSet<const VarDecl *> CopiedVars;
763 llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
764 for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
765 auto IRef = C->varlist_begin();
766 auto ISrcRef = C->source_exprs().begin();
767 auto IDestRef = C->destination_exprs().begin();
768 for (auto *AssignOp : C->assignment_ops()) {
769 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
770 QualType Type = VD->getType();
771 if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
// Get the address of the master variable. If we are emitting code with
// TLS support, the address is passed from the master thread as a field in
// the captured declaration.
775 Address MasterAddr = Address::invalid();
776 if (getLangOpts().OpenMPUseTLS &&
777 getContext().getTargetInfo().isTLSSupported()) {
778 assert(CapturedStmtInfo->lookup(VD) &&
779 "Copyin threadprivates should have been captured!");
780 DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
781 VK_LValue, (*IRef)->getExprLoc());
782 MasterAddr = EmitLValue(&DRE).getAddress();
783 LocalDeclMap.erase(VD);
786 Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
787 : CGM.GetAddrOfGlobal(VD),
788 getContext().getDeclAlign(VD));
790 // Get the address of the threadprivate variable.
791 Address PrivateAddr = EmitLValue(*IRef).getAddress();
792 if (CopiedVars.size() == 1) {
// First check whether the current thread is the master thread. If it is,
// no data needs to be copied.
795 CopyBegin = createBasicBlock("copyin.not.master");
796 CopyEnd = createBasicBlock("copyin.not.master.end");
797 Builder.CreateCondBr(
798 Builder.CreateICmpNE(
799 Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
800 Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy)),
802 EmitBlock(CopyBegin);
804 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
805 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
806 EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
814 // Exit out of copying procedure for non-master thread.
815 EmitBlock(CopyEnd, /*IsFinished=*/true);
821 bool CodeGenFunction::EmitOMPLastprivateClauseInit(
822 const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
823 if (!HaveInsertPoint())
825 bool HasAtLeastOneLastprivate = false;
826 llvm::DenseSet<const VarDecl *> SIMDLCVs;
827 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
828 auto *LoopDirective = cast<OMPLoopDirective>(&D);
829 for (auto *C : LoopDirective->counters()) {
831 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
834 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
835 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
836 HasAtLeastOneLastprivate = true;
837 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()))
839 auto IRef = C->varlist_begin();
840 auto IDestRef = C->destination_exprs().begin();
841 for (auto *IInit : C->private_copies()) {
// Keep the address of the original variable for the final update at the
// end of the loop.
844 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
// Taskloops do not require additional initialization; it is done by the
// runtime support library.
847 if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
848 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
849 PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
851 const_cast<VarDecl *>(OrigVD),
852 /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
854 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
855 return EmitLValue(&DRE).getAddress();
857 // Check if the variable is also a firstprivate: in this case IInit is
858 // not generated. Initialization of this variable will happen in codegen
859 // for 'firstprivate' clause.
860 if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
861 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
862 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
863 // Emit private VarDecl with copy init.
865 return GetAddrOfLocalVar(VD);
867 assert(IsRegistered &&
868 "lastprivate var already registered as private");
876 return HasAtLeastOneLastprivate;
879 void CodeGenFunction::EmitOMPLastprivateClauseFinal(
880 const OMPExecutableDirective &D, bool NoFinals,
881 llvm::Value *IsLastIterCond) {
882 if (!HaveInsertPoint())
// Emit the following code:
// if (<IsLastIterCond>) {
//   orig_var1 = private_orig_var1;
//   ...
//   orig_varn = private_orig_varn;
// }
890 llvm::BasicBlock *ThenBB = nullptr;
891 llvm::BasicBlock *DoneBB = nullptr;
892 if (IsLastIterCond) {
893 ThenBB = createBasicBlock(".omp.lastprivate.then");
894 DoneBB = createBasicBlock(".omp.lastprivate.done");
895 Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
898 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
899 llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
900 if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
901 auto IC = LoopDirective->counters().begin();
902 for (auto F : LoopDirective->finals()) {
904 cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
906 AlreadyEmittedVars.insert(D);
908 LoopCountersAndUpdates[D] = F;
912 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
913 auto IRef = C->varlist_begin();
914 auto ISrcRef = C->source_exprs().begin();
915 auto IDestRef = C->destination_exprs().begin();
916 for (auto *AssignOp : C->assignment_ops()) {
917 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
918 QualType Type = PrivateVD->getType();
919 auto *CanonicalVD = PrivateVD->getCanonicalDecl();
920 if (AlreadyEmittedVars.insert(CanonicalVD).second) {
// If the lastprivate variable is a loop control variable of a loop-based
// directive, update its value before copying it back to the original
// variable.
924 if (auto *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
925 EmitIgnoredExpr(FinalExpr);
926 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
927 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
928 // Get the address of the original variable.
929 Address OriginalAddr = GetAddrOfLocalVar(DestVD);
930 // Get the address of the private variable.
931 Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
932 if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
934 Address(Builder.CreateLoad(PrivateAddr),
935 getNaturalTypeAlignment(RefTy->getPointeeType()));
936 EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
942 if (auto *PostUpdate = C->getPostUpdateExpr())
943 EmitIgnoredExpr(PostUpdate);
946 EmitBlock(DoneBB, /*IsFinished=*/true);
949 static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
950 LValue BaseLV, llvm::Value *Addr) {
951 Address Tmp = Address::invalid();
952 Address TopTmp = Address::invalid();
953 Address MostTopTmp = Address::invalid();
954 BaseTy = BaseTy.getNonReferenceType();
955 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
956 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
957 Tmp = CGF.CreateMemTemp(BaseTy);
958 if (TopTmp.isValid())
959 CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
963 BaseTy = BaseTy->getPointeeType();
965 llvm::Type *Ty = BaseLV.getPointer()->getType();
967 Ty = Tmp.getElementType();
968 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
970 CGF.Builder.CreateStore(Addr, Tmp);
973 return Address(Addr, BaseLV.getAlignment());
976 static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
978 BaseTy = BaseTy.getNonReferenceType();
979 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
980 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
981 if (auto *PtrTy = BaseTy->getAs<PointerType>())
982 BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
984 BaseLV = CGF.EmitLoadOfReferenceLValue(BaseLV.getAddress(),
985 BaseTy->castAs<ReferenceType>());
987 BaseTy = BaseTy->getPointeeType();
989 return CGF.MakeAddrLValue(
991 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
992 BaseLV.getPointer(), CGF.ConvertTypeForMem(ElTy)->getPointerTo()),
993 BaseLV.getAlignment()),
994 BaseLV.getType(), BaseLV.getAlignmentSource());
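// Examples (illustrative only) of reduction items handled by the codegen
// below:
//
//   #pragma omp parallel for reduction(+ : Scalar)      // whole variable
//   #pragma omp parallel for reduction(+ : Arr[2:N])    // array section
//   #pragma omp parallel for reduction(+ : Arr[I])      // array element
//
// Each item gets a private copy initialized with the reduction identity
// (or with the initializer of a user-defined reduction) and is combined
// back into the original storage when the region finishes.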
997 void CodeGenFunction::EmitOMPReductionClauseInit(
998 const OMPExecutableDirective &D,
999 CodeGenFunction::OMPPrivateScope &PrivateScope) {
1000 if (!HaveInsertPoint())
1002 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1003 auto ILHS = C->lhs_exprs().begin();
1004 auto IRHS = C->rhs_exprs().begin();
1005 auto IPriv = C->privates().begin();
1006 auto IRed = C->reduction_ops().begin();
1007 for (auto IRef : C->varlists()) {
1008 auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
1009 auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
1010 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
1011 auto *DRD = getReductionInit(*IRed);
1012 if (auto *OASE = dyn_cast<OMPArraySectionExpr>(IRef)) {
1013 auto *Base = OASE->getBase()->IgnoreParenImpCasts();
1014 while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
1015 Base = TempOASE->getBase()->IgnoreParenImpCasts();
1016 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1017 Base = TempASE->getBase()->IgnoreParenImpCasts();
1018 auto *DE = cast<DeclRefExpr>(Base);
1019 auto *OrigVD = cast<VarDecl>(DE->getDecl());
1020 auto OASELValueLB = EmitOMPArraySectionExpr(OASE);
1022 EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
1023 auto OriginalBaseLValue = EmitLValue(DE);
1025 loadToBegin(*this, OrigVD->getType(), OASELValueLB.getType(),
1026 OriginalBaseLValue);
1027 // Store the address of the original variable associated with the LHS
1028 // implicit variable.
1029 PrivateScope.addPrivate(LHSVD, [OASELValueLB]() -> Address {
1030 return OASELValueLB.getAddress();
1032 // Emit reduction copy.
1033 bool IsRegistered = PrivateScope.addPrivate(
1034 OrigVD, [this, OrigVD, PrivateVD, BaseLValue, OASELValueLB,
1035 OASELValueUB, OriginalBaseLValue, DRD, IRed]() -> Address {
1036 // Emit VarDecl with copy init for arrays.
1037 // Get the address of the original variable captured in current
1039 auto *Size = Builder.CreatePtrDiff(OASELValueUB.getPointer(),
1040 OASELValueLB.getPointer());
1041 Size = Builder.CreateNUWAdd(
1042 Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
1043 CodeGenFunction::OpaqueValueMapping OpaqueMap(
1044 *this, cast<OpaqueValueExpr>(
1046 .getAsVariableArrayType(PrivateVD->getType())
1049 EmitVariablyModifiedType(PrivateVD->getType());
1050 auto Emission = EmitAutoVarAlloca(*PrivateVD);
1051 auto Addr = Emission.getAllocatedAddress();
1052 auto *Init = PrivateVD->getInit();
1053 EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(),
1055 OASELValueLB.getAddress());
1056 EmitAutoVarCleanups(Emission);
1057 // Emit private VarDecl with reduction init.
1058 auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
1059 OASELValueLB.getPointer());
1060 auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
1061 return castToBase(*this, OrigVD->getType(),
1062 OASELValueLB.getType(), OriginalBaseLValue,
1065 assert(IsRegistered && "private var already registered as private");
1066 // Silence the warning about unused variable.
1068 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
1069 return GetAddrOfLocalVar(PrivateVD);
1071 } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(IRef)) {
1072 auto *Base = ASE->getBase()->IgnoreParenImpCasts();
1073 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1074 Base = TempASE->getBase()->IgnoreParenImpCasts();
1075 auto *DE = cast<DeclRefExpr>(Base);
1076 auto *OrigVD = cast<VarDecl>(DE->getDecl());
1077 auto ASELValue = EmitLValue(ASE);
1078 auto OriginalBaseLValue = EmitLValue(DE);
1079 LValue BaseLValue = loadToBegin(
1080 *this, OrigVD->getType(), ASELValue.getType(), OriginalBaseLValue);
1081 // Store the address of the original variable associated with the LHS
1082 // implicit variable.
1083 PrivateScope.addPrivate(
1084 LHSVD, [ASELValue]() -> Address { return ASELValue.getAddress(); });
1085 // Emit reduction copy.
1086 bool IsRegistered = PrivateScope.addPrivate(
1087 OrigVD, [this, OrigVD, PrivateVD, BaseLValue, ASELValue,
1088 OriginalBaseLValue, DRD, IRed]() -> Address {
1089 // Emit private VarDecl with reduction init.
1090 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1091 auto Addr = Emission.getAllocatedAddress();
1092 if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1093 emitInitWithReductionInitializer(*this, DRD, *IRed, Addr,
1094 ASELValue.getAddress(),
1095 ASELValue.getType());
1097 EmitAutoVarInit(Emission);
1098 EmitAutoVarCleanups(Emission);
1099 auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
1100 ASELValue.getPointer());
1101 auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
1102 return castToBase(*this, OrigVD->getType(), ASELValue.getType(),
1103 OriginalBaseLValue, Ptr);
1105 assert(IsRegistered && "private var already registered as private");
1106 // Silence the warning about unused variable.
1108 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
1109 return Builder.CreateElementBitCast(
1110 GetAddrOfLocalVar(PrivateVD), ConvertTypeForMem(RHSVD->getType()),
1114 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
1115 QualType Type = PrivateVD->getType();
1116 if (getContext().getAsArrayType(Type)) {
1117 // Store the address of the original variable associated with the LHS
1118 // implicit variable.
1119 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1120 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1121 IRef->getType(), VK_LValue, IRef->getExprLoc());
1122 Address OriginalAddr = EmitLValue(&DRE).getAddress();
1123 PrivateScope.addPrivate(LHSVD, [this, &OriginalAddr,
1124 LHSVD]() -> Address {
1125 OriginalAddr = Builder.CreateElementBitCast(
1126 OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
1127 return OriginalAddr;
1129 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
1130 if (Type->isVariablyModifiedType()) {
1131 CodeGenFunction::OpaqueValueMapping OpaqueMap(
1132 *this, cast<OpaqueValueExpr>(
1134 .getAsVariableArrayType(PrivateVD->getType())
1137 getTypeSize(OrigVD->getType().getNonReferenceType())));
1138 EmitVariablyModifiedType(Type);
1140 auto Emission = EmitAutoVarAlloca(*PrivateVD);
1141 auto Addr = Emission.getAllocatedAddress();
1142 auto *Init = PrivateVD->getInit();
1143 EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(),
1144 DRD ? *IRed : Init, OriginalAddr);
1145 EmitAutoVarCleanups(Emission);
1146 return Emission.getAllocatedAddress();
1148 assert(IsRegistered && "private var already registered as private");
1149 // Silence the warning about unused variable.
1151 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
1152 return Builder.CreateElementBitCast(
1153 GetAddrOfLocalVar(PrivateVD),
1154 ConvertTypeForMem(RHSVD->getType()), "rhs.begin");
1157 // Store the address of the original variable associated with the LHS
1158 // implicit variable.
1159 Address OriginalAddr = Address::invalid();
1160 PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef,
1161 &OriginalAddr]() -> Address {
1162 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1163 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1164 IRef->getType(), VK_LValue, IRef->getExprLoc());
1165 OriginalAddr = EmitLValue(&DRE).getAddress();
1166 return OriginalAddr;
1168 // Emit reduction copy.
1169 bool IsRegistered = PrivateScope.addPrivate(
1170 OrigVD, [this, PrivateVD, OriginalAddr, DRD, IRed]() -> Address {
1171 // Emit private VarDecl with reduction init.
1172 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1173 auto Addr = Emission.getAllocatedAddress();
1174 if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1175 emitInitWithReductionInitializer(*this, DRD, *IRed, Addr,
1177 PrivateVD->getType());
1179 EmitAutoVarInit(Emission);
1180 EmitAutoVarCleanups(Emission);
1183 assert(IsRegistered && "private var already registered as private");
1184 // Silence the warning about unused variable.
1186 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
1187 return GetAddrOfLocalVar(PrivateVD);
1199 void CodeGenFunction::EmitOMPReductionClauseFinal(
1200 const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
1201 if (!HaveInsertPoint())
1203 llvm::SmallVector<const Expr *, 8> Privates;
1204 llvm::SmallVector<const Expr *, 8> LHSExprs;
1205 llvm::SmallVector<const Expr *, 8> RHSExprs;
1206 llvm::SmallVector<const Expr *, 8> ReductionOps;
1207 bool HasAtLeastOneReduction = false;
1208 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1209 HasAtLeastOneReduction = true;
1210 Privates.append(C->privates().begin(), C->privates().end());
1211 LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
1212 RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
1213 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
1215 if (HasAtLeastOneReduction) {
1216 bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
1217 isOpenMPParallelDirective(D.getDirectiveKind()) ||
1218 D.getDirectiveKind() == OMPD_simd;
1219 bool SimpleReduction = D.getDirectiveKind() == OMPD_simd;
// Emit a nowait reduction if the nowait clause is present or the directive
// is a parallel directive (it always has an implicit barrier).
1222 CGM.getOpenMPRuntime().emitReduction(
1223 *this, D.getLocEnd(), Privates, LHSExprs, RHSExprs, ReductionOps,
1224 {WithNowait, SimpleReduction, ReductionKind});
1228 static void emitPostUpdateForReductionClause(
1229 CodeGenFunction &CGF, const OMPExecutableDirective &D,
1230 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1231 if (!CGF.HaveInsertPoint())
1233 llvm::BasicBlock *DoneBB = nullptr;
1234 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1235 if (auto *PostUpdate = C->getPostUpdateExpr()) {
1237 if (auto *Cond = CondGen(CGF)) {
1238 // If the first post-update expression is found, emit conditional
1239 // block if it was requested.
1240 auto *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
1241 DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
1242 CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1243 CGF.EmitBlock(ThenBB);
1246 CGF.EmitIgnoredExpr(PostUpdate);
1250 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
/// Codegen lambda for appending distribute lower and upper bounds to the
/// outlined parallel function. This is necessary for combined constructs
/// such as 'distribute parallel for'.
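/// For example (illustrative only), in
///
///   #pragma omp distribute parallel for
///   for (int I = 0; I < N; ++I) { ... }
///
/// each team receives a 'distribute' chunk, and that chunk's lower and
/// upper bounds are appended as extra arguments to the outlined parallel
/// function so the inner 'for' worksharing only covers that chunk.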
1257 typedef llvm::function_ref<void(CodeGenFunction &,
1258 const OMPExecutableDirective &,
1259 llvm::SmallVectorImpl<llvm::Value *> &)>
1260 CodeGenBoundParametersTy;
1261 } // anonymous namespace
1263 static void emitCommonOMPParallelDirective(
1264 CodeGenFunction &CGF, const OMPExecutableDirective &S,
1265 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1266 const CodeGenBoundParametersTy &CodeGenBoundParameters) {
1267 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1268 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
1269 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
1270 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
1271 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
1272 auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
1273 /*IgnoreResultAssign*/ true);
1274 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
1275 CGF, NumThreads, NumThreadsClause->getLocStart());
1277 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
1278 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
1279 CGF.CGM.getOpenMPRuntime().emitProcBindClause(
1280 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
1282 const Expr *IfCond = nullptr;
1283 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
1284 if (C->getNameModifier() == OMPD_unknown ||
1285 C->getNameModifier() == OMPD_parallel) {
1286 IfCond = C->getCondition();
1291 OMPParallelScope Scope(CGF, S);
1292 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
// Combining 'distribute' with 'for' requires sharing each 'distribute'
// chunk's lower and upper bounds with the 'for' chunking mechanism.
// The following lambda takes care of appending the lower and upper bound
// parameters when necessary.
1297 CodeGenBoundParameters(CGF, S, CapturedVars);
1298 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
1299 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
1300 CapturedVars, IfCond);
1303 static void emitEmptyBoundParameters(CodeGenFunction &,
1304 const OMPExecutableDirective &,
1305 llvm::SmallVectorImpl<llvm::Value *> &) {}
1307 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
1308 // Emit parallel region as a standalone region.
1309 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1310 OMPPrivateScope PrivateScope(CGF);
1311 bool Copyins = CGF.EmitOMPCopyinClause(S);
1312 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
// Emit an implicit barrier to synchronize the threads and avoid data races
// when propagating the master thread's values of threadprivate variables
// to the local instances of those variables in all other implicit threads.
1317 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1318 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
1319 /*ForceSimpleCall=*/true);
1321 CGF.EmitOMPPrivateClause(S, PrivateScope);
1322 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1323 (void)PrivateScope.Privatize();
1324 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1325 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
1327 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
1328 emitEmptyBoundParameters);
1329 emitPostUpdateForReductionClause(
1330 *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1333 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
1334 JumpDest LoopExit) {
1335 RunCleanupsScope BodyScope(*this);
// Update the counter values for the current iteration.
1337 for (auto I : D.updates()) {
1340 // Update the linear variables.
1341 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1342 for (auto *U : C->updates())
1346 // On a continue in the body, jump to the end.
1347 auto Continue = getJumpDestInCurrentScope("omp.body.continue");
1348 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1350 EmitStmt(D.getBody());
1351 // The end (updates/cleanups).
1352 EmitBlock(Continue.getBlock());
1353 BreakContinueStack.pop_back();
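// EmitOMPInnerLoop emits, roughly (sketch only, not literal IR):
//
//   omp.inner.for.cond: if (!LoopCond) goto omp.inner.for.end;
//   omp.inner.for.body: <BodyGen>;
//   omp.inner.for.inc:  IncExpr; <PostIncGen>; goto omp.inner.for.cond;
//   omp.inner.for.end: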
1356 void CodeGenFunction::EmitOMPInnerLoop(
1357 const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
1358 const Expr *IncExpr,
1359 const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
1360 const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
1361 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
1363 // Start the loop with a block that tests the condition.
1364 auto CondBlock = createBasicBlock("omp.inner.for.cond");
1365 EmitBlock(CondBlock);
1366 const SourceRange &R = S.getSourceRange();
1367 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1368 SourceLocToDebugLoc(R.getEnd()));
1370 // If there are any cleanups between here and the loop-exit scope,
1371 // create a block to stage a loop exit along.
1372 auto ExitBlock = LoopExit.getBlock();
1373 if (RequiresCleanup)
1374 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
1376 auto LoopBody = createBasicBlock("omp.inner.for.body");
1379 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
1380 if (ExitBlock != LoopExit.getBlock()) {
1381 EmitBlock(ExitBlock);
1382 EmitBranchThroughCleanup(LoopExit);
1385 EmitBlock(LoopBody);
1386 incrementProfileCounter(&S);
1388 // Create a block for the increment.
1389 auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
1390 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1394 // Emit "IV = IV + 1" and a back-edge to the condition block.
1395 EmitBlock(Continue.getBlock());
1396 EmitIgnoredExpr(IncExpr);
1398 BreakContinueStack.pop_back();
1399 EmitBranch(CondBlock);
1401 // Emit the fall-through block.
1402 EmitBlock(LoopExit.getBlock());
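// Example (illustrative only) of a linear clause handled below:
//
//   int X = 0;
//   #pragma omp simd linear(X : 2)
//   for (int I = 0; I < N; ++I) { ... use X ... }
//
// 'X' gets a privatized copy initialized from the original value, the copy
// is advanced by the (possibly pre-computed) step on every iteration, and
// the final value is written back to 'X' after the loop.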
1405 void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
1406 if (!HaveInsertPoint())
1408 // Emit inits for the linear variables.
1409 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1410 for (auto *Init : C->inits()) {
1411 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
1412 if (auto *Ref = dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
1413 AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
1414 auto *OrigVD = cast<VarDecl>(Ref->getDecl());
1415 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1416 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1417 VD->getInit()->getType(), VK_LValue,
1418 VD->getInit()->getExprLoc());
1419 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
1421 /*capturedByInit=*/false);
1422 EmitAutoVarCleanups(Emission);
1426 // Emit the linear steps for the linear clauses.
1427 // If a step is not constant, it is pre-calculated before the loop.
1428 if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
1429 if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
1430 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
1431 // Emit calculation of the linear step.
1432 EmitIgnoredExpr(CS);
1437 void CodeGenFunction::EmitOMPLinearClauseFinal(
1438 const OMPLoopDirective &D,
1439 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1440 if (!HaveInsertPoint())
1442 llvm::BasicBlock *DoneBB = nullptr;
1443 // Emit the final values of the linear variables.
1444 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1445 auto IC = C->varlist_begin();
1446 for (auto *F : C->finals()) {
1448 if (auto *Cond = CondGen(*this)) {
1449 // If the first post-update expression is found, emit conditional
1450 // block if it was requested.
1451 auto *ThenBB = createBasicBlock(".omp.linear.pu");
1452 DoneBB = createBasicBlock(".omp.linear.pu.done");
1453 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1457 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
1458 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1459 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1460 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
1461 Address OrigAddr = EmitLValue(&DRE).getAddress();
1462 CodeGenFunction::OMPPrivateScope VarScope(*this);
1463 VarScope.addPrivate(OrigVD, [OrigAddr]() -> Address { return OrigAddr; });
1464 (void)VarScope.Privatize();
1468 if (auto *PostUpdate = C->getPostUpdateExpr())
1469 EmitIgnoredExpr(PostUpdate);
1472 EmitBlock(DoneBB, /*IsFinished=*/true);
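// Example (illustrative only) of an aligned clause handled below:
//
//   #pragma omp simd aligned(Ptr : 64)
//   for (int I = 0; I < N; ++I) { ... Ptr[I] ... }
//
// emits an assumption that 'Ptr' is 64-byte aligned; when no explicit
// alignment is given, the target's default SIMD alignment for the pointee
// type is assumed instead.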
1475 static void emitAlignedClause(CodeGenFunction &CGF,
1476 const OMPExecutableDirective &D) {
1477 if (!CGF.HaveInsertPoint())
1479 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
1480 unsigned ClauseAlignment = 0;
1481 if (auto AlignmentExpr = Clause->getAlignment()) {
1483 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
1484 ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
1486 for (auto E : Clause->varlists()) {
1487 unsigned Alignment = ClauseAlignment;
1488 if (Alignment == 0) {
1489 // OpenMP [2.8.1, Description]
1490 // If no optional parameter is specified, implementation-defined default
1491 // alignments for SIMD instructions on the target platforms are assumed.
1494 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
1495 E->getType()->getPointeeType()))
1498 assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
1499 "alignment is not power of 2");
1500 if (Alignment != 0) {
1501 llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
1502 CGF.EmitAlignmentAssumption(PtrValue, Alignment);
1508 void CodeGenFunction::EmitOMPPrivateLoopCounters(
1509 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
1510 if (!HaveInsertPoint())
1512 auto I = S.private_counters().begin();
1513 for (auto *E : S.counters()) {
1514 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1515 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
1516 (void)LoopScope.addPrivate(VD, [&]() -> Address {
1517 // Emit var without initialization.
1518 if (!LocalDeclMap.count(PrivateVD)) {
1519 auto VarEmission = EmitAutoVarAlloca(*PrivateVD);
1520 EmitAutoVarCleanups(VarEmission);
1522 DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
1523 /*RefersToEnclosingVariableOrCapture=*/false,
1524 (*I)->getType(), VK_LValue, (*I)->getExprLoc());
1525 return EmitLValue(&DRE).getAddress();
1527 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
1528 VD->hasGlobalStorage()) {
1529 (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
1530 DeclRefExpr DRE(const_cast<VarDecl *>(VD),
1531 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
1532 E->getType(), VK_LValue, E->getExprLoc());
1533 return EmitLValue(&DRE).getAddress();
1540 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
1541 const Expr *Cond, llvm::BasicBlock *TrueBlock,
1542 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
1543 if (!CGF.HaveInsertPoint())
1546 CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
1547 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
1548 (void)PreCondScope.Privatize();
1549 // Get initial values of real counters.
1550 for (auto I : S.inits()) {
1551 CGF.EmitIgnoredExpr(I);
// Check that the loop is executed at least once.
1555 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
1558 void CodeGenFunction::EmitOMPLinearClause(
1559 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
1560 if (!HaveInsertPoint())
1561 return;
1562 llvm::DenseSet<const VarDecl *> SIMDLCVs;
1563 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
1564 auto *LoopDirective = cast<OMPLoopDirective>(&D);
1565 for (auto *C : LoopDirective->counters()) {
1566 SIMDLCVs.insert(
1567 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
1570 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1571 auto CurPrivate = C->privates().begin();
1572 for (auto *E : C->varlists()) {
1573 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1574 auto *PrivateVD =
1575 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
1576 if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
1577 bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
1578 // Emit private VarDecl with copy init.
1579 EmitVarDecl(*PrivateVD);
1580 return GetAddrOfLocalVar(PrivateVD);
1581 });
1582 assert(IsRegistered && "linear var already registered as private");
1583 // Silence the warning about unused variable.
1584 (void)IsRegistered;
1585 } else
1586 EmitVarDecl(*PrivateVD);
1592 static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
1593 const OMPExecutableDirective &D,
1594 bool IsMonotonic) {
1595 if (!CGF.HaveInsertPoint())
1596 return;
1597 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
1598 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
1599 /*ignoreResult=*/true);
1600 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1601 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1602 // In presence of finite 'safelen', it may be unsafe to mark all
1603 // the memory instructions parallel, because loop-carried
1604 // dependences of 'safelen' iterations are possible.
1605 if (!IsMonotonic)
1606 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
1607 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
1608 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
1609 /*ignoreResult=*/true);
1610 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1611 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1612 // In presence of finite 'safelen', it may be unsafe to mark all
1613 // the memory instructions parallel, because loop-carried
1614 // dependences of 'safelen' iterations are possible.
1615 CGF.LoopStack.setParallel(false);
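/// Mark the current loop as a SIMD loop: enable vectorization on the loop
/// stack and apply any 'simdlen'/'safelen' clauses, which set the requested
/// vectorization width and, for 'safelen', force the loop to be treated as
/// not fully parallel.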
1619 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
1620 bool IsMonotonic) {
1621 // Walk clauses and process safelen/lastprivate.
1622 LoopStack.setParallel(!IsMonotonic);
1623 LoopStack.setVectorizeEnable(true);
1624 emitSimdlenSafelenClause(*this, D, IsMonotonic);
1627 void CodeGenFunction::EmitOMPSimdFinal(
1628 const OMPLoopDirective &D,
1629 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1630 if (!HaveInsertPoint())
1631 return;
1632 llvm::BasicBlock *DoneBB = nullptr;
1633 auto IC = D.counters().begin();
1634 auto IPC = D.private_counters().begin();
1635 for (auto F : D.finals()) {
1636 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
1637 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
1638 auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
1639 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
1640 OrigVD->hasGlobalStorage() || CED) {
1641 if (!DoneBB) {
1642 if (auto *Cond = CondGen(*this)) {
1643 // If the first post-update expression is found, emit conditional
1644 // block if it was requested.
1645 auto *ThenBB = createBasicBlock(".omp.final.then");
1646 DoneBB = createBasicBlock(".omp.final.done");
1647 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1648 EmitBlock(ThenBB);
1651 Address OrigAddr = Address::invalid();
1652 if (CED)
1653 OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
1654 else {
1655 DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
1656 /*RefersToEnclosingVariableOrCapture=*/false,
1657 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
1658 OrigAddr = EmitLValue(&DRE).getAddress();
1660 OMPPrivateScope VarScope(*this);
1661 VarScope.addPrivate(OrigVD,
1662 [OrigAddr]() -> Address { return OrigAddr; });
1663 (void)VarScope.Privatize();
1664 EmitIgnoredExpr(F);
1669 if (DoneBB)
1670 EmitBlock(DoneBB, /*IsFinished=*/true);
1673 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
1674 const OMPLoopDirective &S,
1675 CodeGenFunction::JumpDest LoopExit) {
1676 CGF.EmitOMPLoopBody(S, LoopExit);
1677 CGF.EmitStopPoint(&S);
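// A '#pragma omp simd' loop such as
//   #pragma omp simd
//   for (int i = 0; i < N; ++i) a[i] = b[i] + c[i];
// is emitted below as an ordinary counted loop over the iteration variable,
// annotated with llvm.loop vectorization metadata; no runtime scheduling
// calls are involved.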
1680 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
1681 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1682 OMPLoopScope PreInitScope(CGF, S);
1684 // for (IV in 0..LastIteration) BODY;
1685 // <Final counter/linear vars updates>;
1689 // Emit: if (PreCond) - begin.
1690 // If the condition constant folds and can be elided, avoid emitting the
1691 // condition and the dead arm of the if/else.
1692 bool CondConstant;
1693 llvm::BasicBlock *ContBlock = nullptr;
1694 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
1695 if (!CondConstant)
1696 return;
1697 } else {
1698 auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
1699 ContBlock = CGF.createBasicBlock("simd.if.end");
1700 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
1701 CGF.getProfileCount(&S));
1702 CGF.EmitBlock(ThenBlock);
1703 CGF.incrementProfileCounter(&S);
1706 // Emit the loop iteration variable.
1707 const Expr *IVExpr = S.getIterationVariable();
1708 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
1709 CGF.EmitVarDecl(*IVDecl);
1710 CGF.EmitIgnoredExpr(S.getInit());
1712 // Emit the iterations count variable.
1713 // If it is not a variable, Sema decided to calculate the iteration count on
1714 // each iteration (e.g., it is foldable into a constant).
1715 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
1716 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
1717 // Emit calculation of the iterations count.
1718 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
1721 CGF.EmitOMPSimdInit(S);
1723 emitAlignedClause(CGF, S);
1724 CGF.EmitOMPLinearClauseInit(S);
1726 OMPPrivateScope LoopScope(CGF);
1727 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
1728 CGF.EmitOMPLinearClause(S, LoopScope);
1729 CGF.EmitOMPPrivateClause(S, LoopScope);
1730 CGF.EmitOMPReductionClauseInit(S, LoopScope);
1731 bool HasLastprivateClause =
1732 CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
1733 (void)LoopScope.Privatize();
1734 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
1735 S.getInc(),
1736 [&S](CodeGenFunction &CGF) {
1737 CGF.EmitOMPLoopBody(S, JumpDest());
1738 CGF.EmitStopPoint(&S);
1740 [](CodeGenFunction &) {});
1741 CGF.EmitOMPSimdFinal(
1742 S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1743 // Emit final copy of the lastprivate variables at the end of loops.
1744 if (HasLastprivateClause)
1745 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
1746 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
1747 emitPostUpdateForReductionClause(
1748 CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1750 CGF.EmitOMPLinearClauseFinal(
1751 S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1752 // Emit: if (PreCond) - end.
1753 if (ContBlock) {
1754 CGF.EmitBranch(ContBlock);
1755 CGF.EmitBlock(ContBlock, true);
1758 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1759 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
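// EmitOMPOuterLoop below produces the dispatch loop shared by all worksharing
// schedules that need an outer loop: each trip either rechecks the statically
// computed bounds (static chunked) or asks the runtime for the next chunk
// (dynamic/guided/ordered), and then runs the inner loop over that chunk via
// CodeGenLoop.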
1762 void CodeGenFunction::EmitOMPOuterLoop(
1763 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
1764 CodeGenFunction::OMPPrivateScope &LoopScope,
1765 const CodeGenFunction::OMPLoopArguments &LoopArgs,
1766 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
1767 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
1768 auto &RT = CGM.getOpenMPRuntime();
1770 const Expr *IVExpr = S.getIterationVariable();
1771 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1772 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1774 auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
1776 // Start the loop with a block that tests the condition.
1777 auto CondBlock = createBasicBlock("omp.dispatch.cond");
1778 EmitBlock(CondBlock);
1779 const SourceRange &R = S.getSourceRange();
1780 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1781 SourceLocToDebugLoc(R.getEnd()));
1783 llvm::Value *BoolCondVal = nullptr;
1784 if (!DynamicOrOrdered) {
1785 // UB = min(UB, GlobalUB) or
1786 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g.
1787 // 'distribute parallel for')
1788 EmitIgnoredExpr(LoopArgs.EUB);
1790 EmitIgnoredExpr(LoopArgs.Init);
1792 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
1793 } else {
1794 BoolCondVal =
1795 RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, LoopArgs.IL,
1796 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
1797 }
1799 // If there are any cleanups between here and the loop-exit scope,
1800 // create a block to stage a loop exit along.
1801 auto ExitBlock = LoopExit.getBlock();
1802 if (LoopScope.requiresCleanups())
1803 ExitBlock = createBasicBlock("omp.dispatch.cleanup");
1805 auto LoopBody = createBasicBlock("omp.dispatch.body");
1806 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
1807 if (ExitBlock != LoopExit.getBlock()) {
1808 EmitBlock(ExitBlock);
1809 EmitBranchThroughCleanup(LoopExit);
1811 EmitBlock(LoopBody);
1813 // Emit "IV = LB" (in case of static schedule, we have already calculated new
1814 // LB for loop condition and emitted it above).
1815 if (DynamicOrOrdered)
1816 EmitIgnoredExpr(LoopArgs.Init);
1818 // Create a block for the increment.
1819 auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
1820 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1822 // Generate !llvm.loop.parallel metadata for loads and stores for loops
1823 // with dynamic/guided scheduling and without ordered clause.
1824 if (!isOpenMPSimdDirective(S.getDirectiveKind()))
1825 LoopStack.setParallel(!IsMonotonic);
1826 else
1827 EmitOMPSimdInit(S, IsMonotonic);
1829 SourceLocation Loc = S.getLocStart();
1831 // when 'distribute' is not combined with a 'for':
1832 // while (idx <= UB) { BODY; ++idx; }
1833 // when 'distribute' is combined with a 'for'
1834 // (e.g. 'distribute parallel for')
1835 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
1836 EmitOMPInnerLoop(
1837 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
1838 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
1839 CodeGenLoop(CGF, S, LoopExit);
1841 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) {
1842 CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
1845 EmitBlock(Continue.getBlock());
1846 BreakContinueStack.pop_back();
1847 if (!DynamicOrOrdered) {
1848 // Emit "LB = LB + Stride", "UB = UB + Stride".
1849 EmitIgnoredExpr(LoopArgs.NextLB);
1850 EmitIgnoredExpr(LoopArgs.NextUB);
1853 EmitBranch(CondBlock);
1855 // Emit the fall-through block.
1856 EmitBlock(LoopExit.getBlock());
1858 // Tell the runtime we are done.
1859 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
1860 if (!DynamicOrOrdered)
1861 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
1863 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
1866 void CodeGenFunction::EmitOMPForOuterLoop(
1867 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
1868 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
1869 const OMPLoopArguments &LoopArgs,
1870 const CodeGenDispatchBoundsTy &CGDispatchBounds) {
1871 auto &RT = CGM.getOpenMPRuntime();
1873 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
1874 const bool DynamicOrOrdered =
1875 Ordered || RT.isDynamic(ScheduleKind.Schedule);
1877 assert((Ordered ||
1878 !RT.isStaticNonchunked(ScheduleKind.Schedule,
1879 LoopArgs.Chunk != nullptr)) &&
1880 "static non-chunked schedule does not need outer loop");
1884 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1885 // When schedule(dynamic,chunk_size) is specified, the iterations are
1886 // distributed to threads in the team in chunks as the threads request them.
1887 // Each thread executes a chunk of iterations, then requests another chunk,
1888 // until no chunks remain to be distributed. Each chunk contains chunk_size
1889 // iterations, except for the last chunk to be distributed, which may have
1890 // fewer iterations. When no chunk_size is specified, it defaults to 1.
1892 // When schedule(guided,chunk_size) is specified, the iterations are assigned
1893 // to threads in the team in chunks as the executing threads request them.
1894 // Each thread executes a chunk of iterations, then requests another chunk,
1895 // until no chunks remain to be assigned. For a chunk_size of 1, the size of
1896 // each chunk is proportional to the number of unassigned iterations divided
1897 // by the number of threads in the team, decreasing to 1. For a chunk_size
1898 // with value k (greater than 1), the size of each chunk is determined in the
1899 // same way, with the restriction that the chunks do not contain fewer than k
1900 // iterations (except for the last chunk to be assigned, which may have fewer
1901 // than k iterations).
1903 // When schedule(auto) is specified, the decision regarding scheduling is
1904 // delegated to the compiler and/or runtime system. The programmer gives the
1905 // implementation the freedom to choose any possible mapping of iterations to
1906 // threads in the team.
1908 // When schedule(runtime) is specified, the decision regarding scheduling is
1909 // deferred until run time, and the schedule and chunk size are taken from the
1910 // run-sched-var ICV. If the ICV is set to auto, the schedule is
1911 // implementation defined
1913 // while(__kmpc_dispatch_next(&LB, &UB)) {
1914 // idx = LB;
1915 // while (idx <= UB) { BODY; ++idx;
1916 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
1917 // } // inner loop
1918 // }
1920 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1921 // When schedule(static, chunk_size) is specified, iterations are divided into
1922 // chunks of size chunk_size, and the chunks are assigned to the threads in
1923 // the team in a round-robin fashion in the order of the thread number.
1925 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
1926 // while (idx <= UB) { BODY; ++idx; } // inner loop
1927 // LB = LB + ST;
1928 // UB = UB + ST;
1929 // }
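// For example, a loop written as
//   #pragma omp for schedule(dynamic, 4)
//   for (int i = 0; i < N; ++i) ...
// takes the dynamic path below (emitForDispatchInit and the dispatch-driven
// outer loop sketched above), whereas schedule(static, 4) takes the
// emitForStaticInit path with the strided outer loop.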
1932 const Expr *IVExpr = S.getIterationVariable();
1933 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1934 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1936 if (DynamicOrOrdered) {
1937 auto DispatchBounds = CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
1938 llvm::Value *LBVal = DispatchBounds.first;
1939 llvm::Value *UBVal = DispatchBounds.second;
1940 CGOpenMPRuntime::DispatchRTInput DipatchRTInputValues = {LBVal, UBVal,
1941 LoopArgs.Chunk};
1942 RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind, IVSize,
1943 IVSigned, Ordered, DipatchRTInputValues);
1945 RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
1946 Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
1947 LoopArgs.ST, LoopArgs.Chunk);
1950 auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
1951 const unsigned IVSize,
1952 const bool IVSigned) {
1953 if (Ordered) {
1954 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
1955 IVSigned);
1956 }
1957 };
1959 OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
1960 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
1961 OuterLoopArgs.IncExpr = S.getInc();
1962 OuterLoopArgs.Init = S.getInit();
1963 OuterLoopArgs.Cond = S.getCond();
1964 OuterLoopArgs.NextLB = S.getNextLowerBound();
1965 OuterLoopArgs.NextUB = S.getNextUpperBound();
1966 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
1967 emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
1970 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
1971 const unsigned IVSize, const bool IVSigned) {}
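// EmitOMPDistributeOuterLoop mirrors EmitOMPForOuterLoop for the 'distribute'
// directive: the schedule is always static, and for combined constructs such
// as 'distribute parallel for' the combined bound/increment expressions are
// selected so that each team strides over its assigned chunks.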
1973 void CodeGenFunction::EmitOMPDistributeOuterLoop(
1974 OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
1975 OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
1976 const CodeGenLoopTy &CodeGenLoopContent) {
1978 auto &RT = CGM.getOpenMPRuntime();
1981 // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
1982 // dynamic.
1985 const Expr *IVExpr = S.getIterationVariable();
1986 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1987 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1989 RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind, IVSize,
1990 IVSigned, /* Ordered = */ false, LoopArgs.IL,
1991 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
1992 LoopArgs.Chunk);
1994 // for combined 'distribute' and 'for' the increment expression of distribute
1995 // is stored in DistInc. For 'distribute' alone, it is in Inc.
1996 Expr *IncExpr;
1997 if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
1998 IncExpr = S.getDistInc();
1999 else
2000 IncExpr = S.getInc();
2002 // this routine is shared by 'omp distribute parallel for' and
2003 // 'omp distribute': select the right EUB expression depending on the
2005 OMPLoopArguments OuterLoopArgs;
2006 OuterLoopArgs.LB = LoopArgs.LB;
2007 OuterLoopArgs.UB = LoopArgs.UB;
2008 OuterLoopArgs.ST = LoopArgs.ST;
2009 OuterLoopArgs.IL = LoopArgs.IL;
2010 OuterLoopArgs.Chunk = LoopArgs.Chunk;
2011 OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2012 ? S.getCombinedEnsureUpperBound()
2013 : S.getEnsureUpperBound();
2014 OuterLoopArgs.IncExpr = IncExpr;
2015 OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2016 ? S.getCombinedInit()
2017 : S.getInit();
2018 OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2019 ? S.getCombinedCond()
2020 : S.getCond();
2021 OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2022 ? S.getCombinedNextLowerBound()
2023 : S.getNextLowerBound();
2024 OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2025 ? S.getCombinedNextUpperBound()
2026 : S.getNextUpperBound();
2028 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
2029 LoopScope, OuterLoopArgs, CodeGenLoopContent,
2030 emitEmptyOrdered);
2033 /// Emit a helper variable and return corresponding lvalue.
2034 static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
2035 const DeclRefExpr *Helper) {
2036 auto VDecl = cast<VarDecl>(Helper->getDecl());
2037 CGF.EmitVarDecl(*VDecl);
2038 return CGF.EmitLValue(Helper);
2041 static std::pair<LValue, LValue>
2042 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
2043 const OMPExecutableDirective &S) {
2044 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2045 LValue LB =
2046 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
2047 LValue UB =
2048 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
2050 // When composing 'distribute' with 'for' (e.g. as in 'distribute
2051 // parallel for') we need to use the 'distribute'
2052 // chunk lower and upper bounds rather than the whole loop iteration
2053 // space. These are parameters to the outlined function for 'parallel'
2054 // and we copy the bounds of the previous schedule into the
2055 // current ones.
2056 LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
2057 LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
2058 llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(PrevLB, SourceLocation());
2059 PrevLBVal = CGF.EmitScalarConversion(
2060 PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
2061 LS.getIterationVariable()->getType(), SourceLocation());
2062 llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(PrevUB, SourceLocation());
2063 PrevUBVal = CGF.EmitScalarConversion(
2064 PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
2065 LS.getIterationVariable()->getType(), SourceLocation());
2067 CGF.EmitStoreOfScalar(PrevLBVal, LB);
2068 CGF.EmitStoreOfScalar(PrevUBVal, UB);
2070 return {LB, UB};
2071 }
2073 /// if the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then
2074 /// we need to use the LB and UB expressions generated by the worksharing
2075 /// code generation support, whereas in non combined situations we would
2076 /// just emit 0 and the LastIteration expression
2077 /// This function is necessary because the types of LB and UB differ between
2078 /// the RT emission routines for 'for_static_init' and
2079 /// 'for_dispatch_init'.
2080 static std::pair<llvm::Value *, llvm::Value *>
2081 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
2082 const OMPExecutableDirective &S,
2083 Address LB, Address UB) {
2084 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2085 const Expr *IVExpr = LS.getIterationVariable();
2086 // when implementing a dynamic schedule for a 'for' combined with a
2087 // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
2088 // is not normalized as each team only executes its own assigned
2089 // range of iterations.
2090 QualType IteratorTy = IVExpr->getType();
2091 llvm::Value *LBVal = CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy,
2092 SourceLocation());
2093 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy,
2094 SourceLocation());
2095 return {LBVal, UBVal};
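/// Pass the (combined) 'distribute' lower and upper bounds to the outlined
/// 'parallel' function of a combined construct: both bounds are loaded,
/// widened to size_t, and appended to the captured-variable list.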
2098 static void emitDistributeParallelForDistributeInnerBoundParams(
2099 CodeGenFunction &CGF, const OMPExecutableDirective &S,
2100 llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
2101 const auto &Dir = cast<OMPLoopDirective>(S);
2102 auto LB =
2103 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
2104 auto LBCast = CGF.Builder.CreateIntCast(
2105 CGF.Builder.CreateLoad(LB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
2106 CapturedVars.push_back(LBCast);
2107 auto UB =
2108 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
2110 auto UBCast = CGF.Builder.CreateIntCast(
2111 CGF.Builder.CreateLoad(UB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
2112 CapturedVars.push_back(UBCast);
2115 static void
2116 emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
2117 const OMPLoopDirective &S,
2118 CodeGenFunction::JumpDest LoopExit) {
2119 auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
2120 PrePostActionTy &) {
2121 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
2122 emitDistributeParallelForInnerBounds,
2123 emitDistributeParallelForDispatchBounds);
2126 emitCommonOMPParallelDirective(
2127 CGF, S, OMPD_for, CGInlinedWorksharingLoop,
2128 emitDistributeParallelForDistributeInnerBoundParams);
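// For 'distribute parallel for' the 'distribute' loop below computes a chunk
// of the iteration space for each team, and emitInnerParallelForWhenCombined
// then emits a 'parallel for' worksharing loop over that chunk, using the
// previous (distribute) bounds as the inner loop's iteration space.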
2131 void CodeGenFunction::EmitOMPDistributeParallelForDirective(
2132 const OMPDistributeParallelForDirective &S) {
2133 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2134 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
2135 S.getDistInc());
2136 };
2137 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2138 OMPCancelStackRAII CancelRegion(*this, OMPD_distribute_parallel_for,
2139 /*HasCancel=*/false);
2140 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen,
2141 /*HasCancel=*/false);
2144 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
2145 const OMPDistributeParallelForSimdDirective &S) {
2146 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2147 CGM.getOpenMPRuntime().emitInlinedDirective(
2148 *this, OMPD_distribute_parallel_for_simd,
2149 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2150 OMPLoopScope PreInitScope(CGF, S);
2152 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2156 void CodeGenFunction::EmitOMPDistributeSimdDirective(
2157 const OMPDistributeSimdDirective &S) {
2158 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2159 CGM.getOpenMPRuntime().emitInlinedDirective(
2160 *this, OMPD_distribute_simd,
2161 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2162 OMPLoopScope PreInitScope(CGF, S);
2164 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2168 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
2169 const OMPTargetParallelForSimdDirective &S) {
2170 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2171 CGM.getOpenMPRuntime().emitInlinedDirective(
2172 *this, OMPD_target_parallel_for_simd,
2173 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2174 OMPLoopScope PreInitScope(CGF, S);
2176 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2180 void CodeGenFunction::EmitOMPTargetSimdDirective(
2181 const OMPTargetSimdDirective &S) {
2182 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2183 CGM.getOpenMPRuntime().emitInlinedDirective(
2184 *this, OMPD_target_simd, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2185 OMPLoopScope PreInitScope(CGF, S);
2187 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2191 void CodeGenFunction::EmitOMPTeamsDistributeDirective(
2192 const OMPTeamsDistributeDirective &S) {
2193 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2194 CGM.getOpenMPRuntime().emitInlinedDirective(
2195 *this, OMPD_teams_distribute,
2196 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2197 OMPLoopScope PreInitScope(CGF, S);
2199 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2203 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
2204 const OMPTeamsDistributeSimdDirective &S) {
2205 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2206 CGM.getOpenMPRuntime().emitInlinedDirective(
2207 *this, OMPD_teams_distribute_simd,
2208 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2209 OMPLoopScope PreInitScope(CGF, S);
2211 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2215 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
2216 const OMPTeamsDistributeParallelForSimdDirective &S) {
2217 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2218 CGM.getOpenMPRuntime().emitInlinedDirective(
2219 *this, OMPD_teams_distribute_parallel_for_simd,
2220 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2221 OMPLoopScope PreInitScope(CGF, S);
2223 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2227 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
2228 const OMPTeamsDistributeParallelForDirective &S) {
2229 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2230 CGM.getOpenMPRuntime().emitInlinedDirective(
2231 *this, OMPD_teams_distribute_parallel_for,
2232 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2233 OMPLoopScope PreInitScope(CGF, S);
2235 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2239 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
2240 const OMPTargetTeamsDistributeDirective &S) {
2241 CGM.getOpenMPRuntime().emitInlinedDirective(
2242 *this, OMPD_target_teams_distribute,
2243 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2245 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2249 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
2250 const OMPTargetTeamsDistributeParallelForDirective &S) {
2251 CGM.getOpenMPRuntime().emitInlinedDirective(
2252 *this, OMPD_target_teams_distribute_parallel_for,
2253 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2255 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2259 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
2260 const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
2261 CGM.getOpenMPRuntime().emitInlinedDirective(
2262 *this, OMPD_target_teams_distribute_parallel_for_simd,
2263 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2265 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2269 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective(
2270 const OMPTargetTeamsDistributeSimdDirective &S) {
2271 CGM.getOpenMPRuntime().emitInlinedDirective(
2272 *this, OMPD_target_teams_distribute_simd,
2273 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2275 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2280 struct ScheduleKindModifiersTy {
2281 OpenMPScheduleClauseKind Kind;
2282 OpenMPScheduleClauseModifier M1;
2283 OpenMPScheduleClauseModifier M2;
2284 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
2285 OpenMPScheduleClauseModifier M1,
2286 OpenMPScheduleClauseModifier M2)
2287 : Kind(Kind), M1(M1), M2(M2) {}
2291 bool CodeGenFunction::EmitOMPWorksharingLoop(
2292 const OMPLoopDirective &S, Expr *EUB,
2293 const CodeGenLoopBoundsTy &CodeGenLoopBounds,
2294 const CodeGenDispatchBoundsTy &CGDispatchBounds) {
2295 // Emit the loop iteration variable.
2296 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
2297 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
2298 EmitVarDecl(*IVDecl);
2300 // Emit the iterations count variable.
2301 // If it is not a variable, Sema decided to calculate the iteration count on
2302 // each iteration (e.g., it is foldable into a constant).
2303 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2304 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2305 // Emit calculation of the iterations count.
2306 EmitIgnoredExpr(S.getCalcLastIteration());
2309 auto &RT = CGM.getOpenMPRuntime();
2311 bool HasLastprivateClause;
2312 // Check pre-condition.
2314 OMPLoopScope PreInitScope(*this, S);
2315 // Skip the entire loop if we don't meet the precondition.
2316 // If the condition constant folds and can be elided, avoid emitting the
2317 // condition and the dead arm of the if/else.
2318 bool CondConstant;
2319 llvm::BasicBlock *ContBlock = nullptr;
2320 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2321 if (!CondConstant)
2322 return false;
2323 } else {
2324 auto *ThenBlock = createBasicBlock("omp.precond.then");
2325 ContBlock = createBasicBlock("omp.precond.end");
2326 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
2327 getProfileCount(&S));
2328 EmitBlock(ThenBlock);
2329 incrementProfileCounter(&S);
2332 bool Ordered = false;
2333 if (auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
2334 if (OrderedClause->getNumForLoops())
2335 RT.emitDoacrossInit(*this, S);
2336 else
2337 Ordered = true;
2340 llvm::DenseSet<const Expr *> EmittedFinals;
2341 emitAlignedClause(*this, S);
2342 EmitOMPLinearClauseInit(S);
2343 // Emit helper vars inits.
2345 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S);
2346 LValue LB = Bounds.first;
2347 LValue UB = Bounds.second;
2348 LValue ST =
2349 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
2350 LValue IL =
2351 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
2353 // Emit 'then' code.
2355 OMPPrivateScope LoopScope(*this);
2356 if (EmitOMPFirstprivateClause(S, LoopScope)) {
2357 // Emit implicit barrier to synchronize threads and avoid data races on
2358 // initialization of firstprivate variables and post-update of
2359 // lastprivate variables.
2360 CGM.getOpenMPRuntime().emitBarrierCall(
2361 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2362 /*ForceSimpleCall=*/true);
2364 EmitOMPPrivateClause(S, LoopScope);
2365 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
2366 EmitOMPReductionClauseInit(S, LoopScope);
2367 EmitOMPPrivateLoopCounters(S, LoopScope);
2368 EmitOMPLinearClause(S, LoopScope);
2369 (void)LoopScope.Privatize();
2371 // Detect the loop schedule kind and chunk.
2372 llvm::Value *Chunk = nullptr;
2373 OpenMPScheduleTy ScheduleKind;
2374 if (auto *C = S.getSingleClause<OMPScheduleClause>()) {
2375 ScheduleKind.Schedule = C->getScheduleKind();
2376 ScheduleKind.M1 = C->getFirstScheduleModifier();
2377 ScheduleKind.M2 = C->getSecondScheduleModifier();
2378 if (const auto *Ch = C->getChunkSize()) {
2379 Chunk = EmitScalarExpr(Ch);
2380 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
2381 S.getIterationVariable()->getType(),
2382 S.getLocStart());
2385 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2386 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2387 // OpenMP 4.5, 2.7.1 Loop Construct, Description.
2388 // If the static schedule kind is specified or if the ordered clause is
2389 // specified, and if no monotonic modifier is specified, the effect will
2390 // be as if the monotonic modifier was specified.
2391 if (RT.isStaticNonchunked(ScheduleKind.Schedule,
2392 /* Chunked */ Chunk != nullptr) &&
2393 !Ordered) {
2394 if (isOpenMPSimdDirective(S.getDirectiveKind()))
2395 EmitOMPSimdInit(S, /*IsMonotonic=*/true);
2396 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2397 // When no chunk_size is specified, the iteration space is divided into
2398 // chunks that are approximately equal in size, and at most one chunk is
2399 // distributed to each thread. Note that the size of the chunks is
2400 // unspecified in this case.
2401 RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
2402 IVSize, IVSigned, Ordered,
2403 IL.getAddress(), LB.getAddress(),
2404 UB.getAddress(), ST.getAddress());
2405 auto LoopExit =
2406 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2407 // UB = min(UB, GlobalUB);
2408 EmitIgnoredExpr(S.getEnsureUpperBound());
2410 EmitIgnoredExpr(S.getInit());
2411 // while (idx <= UB) { BODY; ++idx; }
2412 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
2413 S.getInc(),
2414 [&S, LoopExit](CodeGenFunction &CGF) {
2415 CGF.EmitOMPLoopBody(S, LoopExit);
2416 CGF.EmitStopPoint(&S);
2418 [](CodeGenFunction &) {});
2419 EmitBlock(LoopExit.getBlock());
2420 // Tell the runtime we are done.
2421 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2422 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
2424 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
2425 } else {
2426 const bool IsMonotonic =
2427 Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
2428 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown ||
2429 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
2430 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
2431 // Emit the outer loop, which requests its work chunk [LB..UB] from
2432 // runtime and runs the inner loop to process it.
2433 const OMPLoopArguments LoopArguments(LB.getAddress(), UB.getAddress(),
2434 ST.getAddress(), IL.getAddress(),
2435 Chunk);
2436 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
2437 LoopArguments, CGDispatchBounds);
2439 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
2440 EmitOMPSimdFinal(S,
2441 [&](CodeGenFunction &CGF) -> llvm::Value * {
2442 return CGF.Builder.CreateIsNotNull(
2443 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2444 });
2445 }
2446 EmitOMPReductionClauseFinal(
2447 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind())
2448 ? /*Parallel and Simd*/ OMPD_parallel_for_simd
2449 : /*Parallel only*/ OMPD_parallel);
2450 // Emit post-update of the reduction variables if IsLastIter != 0.
2451 emitPostUpdateForReductionClause(
2452 *this, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2453 return CGF.Builder.CreateIsNotNull(
2454 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2456 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2457 if (HasLastprivateClause)
2458 EmitOMPLastprivateClauseFinal(
2459 S, isOpenMPSimdDirective(S.getDirectiveKind()),
2460 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
2462 EmitOMPLinearClauseFinal(S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2463 return CGF.Builder.CreateIsNotNull(
2464 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2466 // We're now done with the loop, so jump to the continuation block.
2467 if (ContBlock) {
2468 EmitBranch(ContBlock);
2469 EmitBlock(ContBlock, true);
2472 return HasLastprivateClause;
2475 /// The following two functions generate expressions for the loop lower
2476 /// and upper bounds in case of static and dynamic (dispatch) schedule
2477 /// of the associated 'for' or 'distribute' loop.
2478 static std::pair<LValue, LValue>
2479 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
2480 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2481 LValue LB =
2482 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
2483 LValue UB =
2484 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
2485 return {LB, UB};
2488 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
2489 /// consider the lower and upper bound expressions generated by the
2490 /// worksharing loop support, but we use 0 and the iteration space size as
2491 /// constants.
2492 static std::pair<llvm::Value *, llvm::Value *>
2493 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
2494 Address LB, Address UB) {
2495 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2496 const Expr *IVExpr = LS.getIterationVariable();
2497 const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
2498 llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
2499 llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
2500 return {LBVal, UBVal};
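// A plain '#pragma omp for' is emitted below as an inlined worksharing loop
// over the normalized iteration space followed, unless a 'nowait' clause is
// present, by an implicit barrier.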
2503 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
2504 bool HasLastprivates = false;
2505 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2506 PrePostActionTy &) {
2507 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
2508 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
2509 emitForLoopBounds,
2510 emitDispatchForLoopBounds);
2513 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2514 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
2515 S.hasCancel());
2516 }
2518 // Emit an implicit barrier at the end.
2519 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
2520 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
2524 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
2525 bool HasLastprivates = false;
2526 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2527 PrePostActionTy &) {
2528 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
2529 emitForLoopBounds,
2530 emitDispatchForLoopBounds);
2533 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2534 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2537 // Emit an implicit barrier at the end.
2538 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
2539 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
2543 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
2544 const Twine &Name,
2545 llvm::Value *Init = nullptr) {
2546 auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
2547 if (Init)
2548 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
2549 return LVal;
2550 }
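// 'sections' regions, e.g.
//   #pragma omp sections
//   {
//     #pragma omp section
//     { S1; }
//     #pragma omp section
//     { S2; }
//   }
// are lowered by EmitSections below as a statically scheduled worksharing
// loop over the section index, whose body switches to the requested section.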
2552 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
2553 auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
2554 auto *CS = dyn_cast<CompoundStmt>(Stmt);
2555 bool HasLastprivates = false;
2556 auto &&CodeGen = [&S, Stmt, CS, &HasLastprivates](CodeGenFunction &CGF,
2557 PrePostActionTy &) {
2558 auto &C = CGF.CGM.getContext();
2559 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2560 // Emit helper vars inits.
2561 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
2562 CGF.Builder.getInt32(0));
2563 auto *GlobalUBVal = CS != nullptr ? CGF.Builder.getInt32(CS->size() - 1)
2564 : CGF.Builder.getInt32(0);
2565 LValue UB =
2566 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
2567 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
2568 CGF.Builder.getInt32(1));
2569 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
2570 CGF.Builder.getInt32(0));
2572 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
2573 OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
2574 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
2575 OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
2576 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
2577 // Generate condition for loop.
2578 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
2579 OK_Ordinary, S.getLocStart(), FPOptions());
2580 // Increment for loop counter.
2581 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
2582 S.getLocStart());
2583 auto BodyGen = [Stmt, CS, &S, &IV](CodeGenFunction &CGF) {
2584 // Iterate through all sections and emit a switch construct:
2585 // switch (IV) {
2586 //   case 0:
2587 //     <SectionStmt[0]>;
2588 //     break;
2589 // ...
2590 //   case <NumSection> - 1:
2591 //     <SectionStmt[<NumSection> - 1]>;
2592 //     break;
2593 // }
2594 // .omp.sections.exit:
2595 auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
2596 auto *SwitchStmt = CGF.Builder.CreateSwitch(
2597 CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
2598 CS == nullptr ? 1 : CS->size());
2599 if (CS) {
2600 unsigned CaseNumber = 0;
2601 for (auto *SubStmt : CS->children()) {
2602 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
2603 CGF.EmitBlock(CaseBB);
2604 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
2605 CGF.EmitStmt(SubStmt);
2606 CGF.EmitBranch(ExitBB);
2607 ++CaseNumber;
2608 }
2609 } else {
2610 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
2611 CGF.EmitBlock(CaseBB);
2612 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
2613 CGF.EmitStmt(Stmt);
2614 CGF.EmitBranch(ExitBB);
2616 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2619 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
2620 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
2621 // Emit implicit barrier to synchronize threads and avoid data races on
2622 // initialization of firstprivate variables and post-update of lastprivate
2624 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
2625 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2626 /*ForceSimpleCall=*/true);
2628 CGF.EmitOMPPrivateClause(S, LoopScope);
2629 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
2630 CGF.EmitOMPReductionClauseInit(S, LoopScope);
2631 (void)LoopScope.Privatize();
2633 // Emit static non-chunked loop.
2634 OpenMPScheduleTy ScheduleKind;
2635 ScheduleKind.Schedule = OMPC_SCHEDULE_static;
2636 CGF.CGM.getOpenMPRuntime().emitForStaticInit(
2637 CGF, S.getLocStart(), ScheduleKind, /*IVSize=*/32,
2638 /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(), LB.getAddress(),
2639 UB.getAddress(), ST.getAddress());
2640 // UB = min(UB, GlobalUB);
2641 auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
2642 auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
2643 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
2644 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
2646 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
2647 // while (idx <= UB) { BODY; ++idx; }
2648 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
2649 [](CodeGenFunction &) {});
2650 // Tell the runtime we are done.
2651 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2652 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
2654 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
2655 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
2656 // Emit post-update of the reduction variables if IsLastIter != 0.
2657 emitPostUpdateForReductionClause(
2658 CGF, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2659 return CGF.Builder.CreateIsNotNull(
2660 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2663 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2664 if (HasLastprivates)
2665 CGF.EmitOMPLastprivateClauseFinal(
2666 S, /*NoFinals=*/false,
2667 CGF.Builder.CreateIsNotNull(
2668 CGF.EmitLoadOfScalar(IL, S.getLocStart())));
2671 bool HasCancel = false;
2672 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
2673 HasCancel = OSD->hasCancel();
2674 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
2675 HasCancel = OPSD->hasCancel();
2676 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
2677 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
2679 // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
2680 // clause. Otherwise the barrier will be generated by the codegen for the
2681 // directive.
2682 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
2683 // Emit implicit barrier to synchronize threads and avoid data races on
2684 // initialization of firstprivate variables.
2685 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
2686 OMPD_unknown);
2690 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
2691 {
2692 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2693 EmitSections(S);
2694 }
2695 // Emit an implicit barrier at the end.
2696 if (!S.getSingleClause<OMPNowaitClause>()) {
2697 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
2698 OMPD_sections);
2702 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
2703 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2704 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2706 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2707 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
2708 S.hasCancel());
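// For 'single', any 'copyprivate' variables listed below are broadcast from
// the thread that executed the region to the other threads of the team using
// the collected <source>, <destination> and assignment helper expressions.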
2711 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
2712 llvm::SmallVector<const Expr *, 8> CopyprivateVars;
2713 llvm::SmallVector<const Expr *, 8> DestExprs;
2714 llvm::SmallVector<const Expr *, 8> SrcExprs;
2715 llvm::SmallVector<const Expr *, 8> AssignmentOps;
2716 // Check if there are any 'copyprivate' clauses associated with this
2717 // 'single' construct.
2718 // Build a list of copyprivate variables along with helper expressions
2719 // (<source>, <destination>, <destination>=<source> expressions)
2720 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
2721 CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
2722 DestExprs.append(C->destination_exprs().begin(),
2723 C->destination_exprs().end());
2724 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
2725 AssignmentOps.append(C->assignment_ops().begin(),
2726 C->assignment_ops().end());
2728 // Emit code for 'single' region along with 'copyprivate' clauses
2729 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2730 Action.Enter(CGF);
2731 OMPPrivateScope SingleScope(CGF);
2732 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
2733 CGF.EmitOMPPrivateClause(S, SingleScope);
2734 (void)SingleScope.Privatize();
2735 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2738 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2739 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
2740 CopyprivateVars, DestExprs,
2741 SrcExprs, AssignmentOps);
2743 // Emit an implicit barrier at the end (to avoid data race on firstprivate
2744 // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
2745 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
2746 CGM.getOpenMPRuntime().emitBarrierCall(
2747 *this, S.getLocStart(),
2748 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
2752 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
2753 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2754 Action.Enter(CGF);
2755 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2757 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2758 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
2761 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
2762 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2763 Action.Enter(CGF);
2764 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2766 Expr *Hint = nullptr;
2767 if (auto *HintClause = S.getSingleClause<OMPHintClause>())
2768 Hint = HintClause->getHint();
2769 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2770 CGM.getOpenMPRuntime().emitCriticalRegion(*this,
2771 S.getDirectiveName().getAsString(),
2772 CodeGen, S.getLocStart(), Hint);
2775 void CodeGenFunction::EmitOMPParallelForDirective(
2776 const OMPParallelForDirective &S) {
2777 // Emit directive as a combined directive that consists of two implicit
2778 // directives: 'parallel' with 'for' directive.
2779 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2780 OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
2781 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
2782 emitDispatchForLoopBounds);
2784 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
2785 emitEmptyBoundParameters);
2788 void CodeGenFunction::EmitOMPParallelForSimdDirective(
2789 const OMPParallelForSimdDirective &S) {
2790 // Emit directive as a combined directive that consists of two implicit
2791 // directives: 'parallel' with 'for' directive.
2792 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2793 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
2794 emitDispatchForLoopBounds);
2796 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen,
2797 emitEmptyBoundParameters);
2800 void CodeGenFunction::EmitOMPParallelSectionsDirective(
2801 const OMPParallelSectionsDirective &S) {
2802 // Emit directive as a combined directive that consists of two implicit
2803 // directives: 'parallel' with 'sections' directive.
2804 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2805 CGF.EmitSections(S);
2807 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
2808 emitEmptyBoundParameters);
2811 void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
2812 const RegionCodeGenTy &BodyGen,
2813 const TaskGenTy &TaskGen,
2814 OMPTaskDataTy &Data) {
2815 // Emit outlined function for task construct.
2816 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2817 auto *I = CS->getCapturedDecl()->param_begin();
2818 auto *PartId = std::next(I);
2819 auto *TaskT = std::next(I, 4);
2820 // Check if the task is final
2821 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
2822 // If the condition constant folds and can be elided, try to avoid emitting
2823 // the condition and the dead arm of the if/else.
2824 auto *Cond = Clause->getCondition();
2825 bool CondConstant;
2826 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
2827 Data.Final.setInt(CondConstant);
2828 else
2829 Data.Final.setPointer(EvaluateExprAsBool(Cond));
2830 } else {
2831 // By default the task is not final.
2832 Data.Final.setInt(/*IntVal=*/false);
2834 // Check if the task has 'priority' clause.
2835 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
2836 auto *Prio = Clause->getPriority();
2837 Data.Priority.setInt(/*IntVal=*/true);
2838 Data.Priority.setPointer(EmitScalarConversion(
2839 EmitScalarExpr(Prio), Prio->getType(),
2840 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
2841 Prio->getExprLoc()));
2843 // The first function argument for tasks is a thread id, the second one is a
2844 // part id (0 for tied tasks, >=0 for untied task).
2845 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
2846 // Get list of private variables.
2847 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
2848 auto IRef = C->varlist_begin();
2849 for (auto *IInit : C->private_copies()) {
2850 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2851 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2852 Data.PrivateVars.push_back(*IRef);
2853 Data.PrivateCopies.push_back(IInit);
2858 EmittedAsPrivate.clear();
2859 // Get list of firstprivate variables.
2860 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
2861 auto IRef = C->varlist_begin();
2862 auto IElemInitRef = C->inits().begin();
2863 for (auto *IInit : C->private_copies()) {
2864 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2865 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2866 Data.FirstprivateVars.push_back(*IRef);
2867 Data.FirstprivateCopies.push_back(IInit);
2868 Data.FirstprivateInits.push_back(*IElemInitRef);
2874 // Get list of lastprivate variables (for taskloops).
2875 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
2876 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
2877 auto IRef = C->varlist_begin();
2878 auto ID = C->destination_exprs().begin();
2879 for (auto *IInit : C->private_copies()) {
2880 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2881 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2882 Data.LastprivateVars.push_back(*IRef);
2883 Data.LastprivateCopies.push_back(IInit);
2885 LastprivateDstsOrigs.insert(
2886 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
2887 cast<DeclRefExpr>(*IRef)});
2892 // Build list of dependences.
2893 for (const auto *C : S.getClausesOfKind<OMPDependClause>())
2894 for (auto *IRef : C->varlists())
2895 Data.Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
2896 auto &&CodeGen = [&Data, CS, &BodyGen, &LastprivateDstsOrigs](
2897 CodeGenFunction &CGF, PrePostActionTy &Action) {
2898 // Set proper addresses for generated private copies.
2899 OMPPrivateScope Scope(CGF);
2900 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
2901 !Data.LastprivateVars.empty()) {
2902 auto *CopyFn = CGF.Builder.CreateLoad(
2903 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)));
2904 auto *PrivatesPtr = CGF.Builder.CreateLoad(
2905 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)));
2907 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
2908 llvm::SmallVector<llvm::Value *, 16> CallArgs;
2909 CallArgs.push_back(PrivatesPtr);
2910 for (auto *E : Data.PrivateVars) {
2911 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2912 Address PrivatePtr = CGF.CreateMemTemp(
2913 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
2914 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2915 CallArgs.push_back(PrivatePtr.getPointer());
2917 for (auto *E : Data.FirstprivateVars) {
2918 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2919 Address PrivatePtr =
2920 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
2921 ".firstpriv.ptr.addr");
2922 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2923 CallArgs.push_back(PrivatePtr.getPointer());
2925 for (auto *E : Data.LastprivateVars) {
2926 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2927 Address PrivatePtr =
2928 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
2929 ".lastpriv.ptr.addr");
2930 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2931 CallArgs.push_back(PrivatePtr.getPointer());
2933 CGF.EmitRuntimeCall(CopyFn, CallArgs);
2934 for (auto &&Pair : LastprivateDstsOrigs) {
2935 auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
2936 DeclRefExpr DRE(
2937 const_cast<VarDecl *>(OrigVD),
2938 /*RefersToEnclosingVariableOrCapture=*/CGF.CapturedStmtInfo->lookup(
2939 OrigVD) != nullptr,
2940 Pair.second->getType(), VK_LValue, Pair.second->getExprLoc());
2941 Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
2942 return CGF.EmitLValue(&DRE).getAddress();
2945 for (auto &&Pair : PrivatePtrs) {
2946 Address Replacement(CGF.Builder.CreateLoad(Pair.second),
2947 CGF.getContext().getDeclAlign(Pair.first));
2948 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
2951 (void)Scope.Privatize();
2953 Action.Enter(CGF);
2954 BodyGen(CGF);
2955 };
2956 auto *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
2957 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
2958 Data.NumberOfParts);
2959 OMPLexicalScope Scope(*this, S);
2960 TaskGen(*this, OutlinedFn, Data);
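// EmitOMPTaskDirective below builds on the task-based codegen above: it
// collects the captured shareds record, the optional 'if' condition and the
// tied/untied kind, and then lets the runtime emit the actual task creation
// call via emitTaskCall.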
2963 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
2964 // Emit outlined function for task construct.
2965 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2966 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
2967 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
2968 const Expr *IfCond = nullptr;
2969 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
2970 if (C->getNameModifier() == OMPD_unknown ||
2971 C->getNameModifier() == OMPD_task) {
2972 IfCond = C->getCondition();
2973 break;
2974 }
2975 }
2977 OMPTaskDataTy Data;
2978 // Check if we should emit tied or untied task.
2979 Data.Tied = !S.getSingleClause<OMPUntiedClause>();
2980 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
2981 CGF.EmitStmt(CS->getCapturedStmt());
2983 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
2984 IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
2985 const OMPTaskDataTy &Data) {
2986 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getLocStart(), S, OutlinedFn,
2987 SharedsTy, CapturedStruct, IfCond,
2988 Data);
2989 };
2990 EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
2993 void CodeGenFunction::EmitOMPTaskyieldDirective(
2994 const OMPTaskyieldDirective &S) {
2995 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
2998 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
2999 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
3002 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
3003 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
3006 void CodeGenFunction::EmitOMPTaskgroupDirective(
3007 const OMPTaskgroupDirective &S) {
3008 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3009 Action.Enter(CGF);
3010 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3012 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
3013 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
3016 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
3017 CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
3018 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) {
3019 return llvm::makeArrayRef(FlushClause->varlist_begin(),
3020 FlushClause->varlist_end());
3021 }
3022 return llvm::None;
3023 }(), S.getLocStart());
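// EmitOMPDistributeLoop emits the 'distribute' loop itself. For combined
// constructs (e.g. 'distribute parallel for') it uses the combined bound
// variables and the distribute increment, otherwise the plain loop bounds.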
3026 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
3027 const CodeGenLoopTy &CodeGenLoop,
3028 Expr *IncExpr) {
3029 // Emit the loop iteration variable.
3030 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
3031 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
3032 EmitVarDecl(*IVDecl);
3034 // Emit the iteration count variable.
3035 // If it is not a variable, Sema decided to calculate the iteration count on
3036 // each iteration (e.g., it is foldable into a constant).
3037 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
3038 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
3039 // Emit the calculation of the iteration count.
3040 EmitIgnoredExpr(S.getCalcLastIteration());
3043 auto &RT = CGM.getOpenMPRuntime();
3045 bool HasLastprivateClause = false;
3046 // Check pre-condition.
3048 OMPLoopScope PreInitScope(*this, S);
3049 // Skip the entire loop if we don't meet the precondition.
3050 // If the condition constant folds and can be elided, avoid emitting the whole loop.
3053 llvm::BasicBlock *ContBlock = nullptr;
3054 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
3058 auto *ThenBlock = createBasicBlock("omp.precond.then");
3059 ContBlock = createBasicBlock("omp.precond.end");
3060 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
3061 getProfileCount(&S));
3062 EmitBlock(ThenBlock);
3063 incrementProfileCounter(&S);
3066 // Emit 'then' code.
3068 // Emit helper vars inits.
3070 LValue LB = EmitOMPHelperVar(
3071 *this, cast<DeclRefExpr>(
3072 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3073 ? S.getCombinedLowerBoundVariable()
3074 : S.getLowerBoundVariable())));
3075 LValue UB = EmitOMPHelperVar(
3076 *this, cast<DeclRefExpr>(
3077 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3078 ? S.getCombinedUpperBoundVariable()
3079 : S.getUpperBoundVariable())));
3081 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
3083 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
3085 OMPPrivateScope LoopScope(*this);
3086 if (EmitOMPFirstprivateClause(S, LoopScope)) {
3087 // Emit implicit barrier to synchronize threads and avoid data races on
3088 // initialization of firstprivate variables and post-update of
3089 // lastprivate variables.
3090 CGM.getOpenMPRuntime().emitBarrierCall(
3091 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
3092 /*ForceSimpleCall=*/true);
3094 EmitOMPPrivateClause(S, LoopScope);
3095 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
3096 EmitOMPPrivateLoopCounters(S, LoopScope);
3097 (void)LoopScope.Privatize();
3099 // Detect the distribute schedule kind and chunk.
3100 llvm::Value *Chunk = nullptr;
3101 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
3102 if (auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
3103 ScheduleKind = C->getDistScheduleKind();
3104 if (const auto *Ch = C->getChunkSize()) {
3105 Chunk = EmitScalarExpr(Ch);
3106 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
3107 S.getIterationVariable()->getType(),
3111 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
3112 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
3114 // OpenMP [2.10.8, distribute Construct, Description]
3115 // If dist_schedule is specified, kind must be static. If specified,
3116 // iterations are divided into chunks of size chunk_size, chunks are
3117 // assigned to the teams of the league in a round-robin fashion in the
3118 // order of the team number. When no chunk_size is specified, the
3119 // iteration space is divided into chunks that are approximately equal
3120 // in size, and at most one chunk is distributed to each team of the
3121 // league. The size of the chunks is unspecified in this case.
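// For illustration, a directive such as
//   #pragma omp distribute dist_schedule(static, 64)
//   for (int i = 0; i < n; ++i) a[i] = b[i];
// carries a chunk size, so the chunked path hands the iteration space to the
// outer loop below; with dist_schedule(static) or no clause, the non-chunked
// branch emits a single static init around one inner loop.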
3122 if (RT.isStaticNonchunked(ScheduleKind,
3123 /* Chunked */ Chunk != nullptr)) {
3124 RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
3125 IVSize, IVSigned, /* Ordered = */ false,
3126 IL.getAddress(), LB.getAddress(),
3127 UB.getAddress(), ST.getAddress());
3129 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
3130 // UB = min(UB, GlobalUB);
3131 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3132 ? S.getCombinedEnsureUpperBound()
3133 : S.getEnsureUpperBound());
3135 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3136 ? S.getCombinedInit()
3139 Expr *Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3140 ? S.getCombinedCond()
3143 // for distribute alone, codegen
3144 // while (idx <= UB) { BODY; ++idx; }
3145 // when combined with 'for' (e.g. as in 'distribute parallel for')
3146 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
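// For illustration, in a combined 'distribute parallel for' the combined
// bound, condition and increment expressions selected above are used, so each
// team walks its chunk with 'idx += ST' while the nested 'parallel for'
// emits the rest of the pragma for the body.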
3147 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), Cond, IncExpr,
3148 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
3149 CodeGenLoop(CGF, S, LoopExit);
3151 [](CodeGenFunction &) {});
3152 EmitBlock(LoopExit.getBlock());
3153 // Tell the runtime we are done.
3154 RT.emitForStaticFinish(*this, S.getLocStart());
3156 // Emit the outer loop, which requests its work chunk [LB..UB] from
3157 // runtime and runs the inner loop to process it.
3158 const OMPLoopArguments LoopArguments = {
3159 LB.getAddress(), UB.getAddress(), ST.getAddress(), IL.getAddress(),
3161 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
3165 // Emit final copy of the lastprivate variables if IsLastIter != 0.
3166 if (HasLastprivateClause)
3167 EmitOMPLastprivateClauseFinal(
3168 S, /*NoFinals=*/false,
3169 Builder.CreateIsNotNull(
3170 EmitLoadOfScalar(IL, S.getLocStart())));
3173 // We're now done with the loop, so jump to the continuation block.
3175 EmitBranch(ContBlock);
3176 EmitBlock(ContBlock, true);
3181 void CodeGenFunction::EmitOMPDistributeDirective(
3182 const OMPDistributeDirective &S) {
3183 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3185 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
3187 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
3188 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen,
3192 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
3193 const CapturedStmt *S) {
3194 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
3195 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
3196 CGF.CapturedStmtInfo = &CapStmtInfo;
3197 auto *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
3198 Fn->addFnAttr(llvm::Attribute::NoInline);
3202 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
3203 if (!S.getAssociatedStmt()) {
3204 for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
3205 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
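// For illustration, stand-alone ordered directives with depend clauses, e.g.
//   #pragma omp ordered depend(source)
//   #pragma omp ordered depend(sink: i - 1)
// have no associated statement and are handled entirely by the doacross
// runtime calls above.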
3208 auto *C = S.getSingleClause<OMPSIMDClause>();
3209 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
3210 PrePostActionTy &Action) {
3212 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
3213 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3214 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
3215 auto *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
3216 CGF.EmitNounwindRuntimeCall(OutlinedFn, CapturedVars);
3220 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3223 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
3224 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C);
3227 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
3228 QualType SrcType, QualType DestType,
3229 SourceLocation Loc) {
3230 assert(CGF.hasScalarEvaluationKind(DestType) &&
3231 "DestType must have scalar evaluation kind.");
3232 assert(!Val.isAggregate() && "Must be a scalar or complex.");
3233 return Val.isScalar()
3234 ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType,
3236 : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
3240 static CodeGenFunction::ComplexPairTy
3241 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
3242 QualType DestType, SourceLocation Loc) {
3243 assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
3244 "DestType must have complex evaluation kind.");
3245 CodeGenFunction::ComplexPairTy ComplexVal;
3246 if (Val.isScalar()) {
3247 // Convert the input element to the element type of the complex.
3248 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
3249 auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
3250 DestElementType, Loc);
3251 ComplexVal = CodeGenFunction::ComplexPairTy(
3252 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
3254 assert(Val.isComplex() && "Must be a scalar or complex.");
3255 auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
3256 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
3257 ComplexVal.first = CGF.EmitScalarConversion(
3258 Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
3259 ComplexVal.second = CGF.EmitScalarConversion(
3260 Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
3265 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
3266 LValue LVal, RValue RVal) {
3267 if (LVal.isGlobalReg()) {
3268 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
3270 CGF.EmitAtomicStore(RVal, LVal,
3271 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3272 : llvm::AtomicOrdering::Monotonic,
3273 LVal.isVolatile(), /*IsInit=*/false);
3277 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
3278 QualType RValTy, SourceLocation Loc) {
3279 switch (getEvaluationKind(LVal.getType())) {
3281 EmitStoreThroughLValue(RValue::get(convertToScalarValue(
3282 *this, RVal, RValTy, LVal.getType(), Loc)),
3287 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
3291 llvm_unreachable("Must be a scalar or complex.");
3295 static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
3296 const Expr *X, const Expr *V,
3297 SourceLocation Loc) {
3299 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
3300 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
3301 LValue XLValue = CGF.EmitLValue(X);
3302 LValue VLValue = CGF.EmitLValue(V);
3303 RValue Res = XLValue.isGlobalReg()
3304 ? CGF.EmitLoadOfLValue(XLValue, Loc)
3305 : CGF.EmitAtomicLoad(
3307 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3308 : llvm::AtomicOrdering::Monotonic,
3309 XLValue.isVolatile());
3310 // OpenMP, 2.12.6, atomic Construct
3311 // Any atomic construct with a seq_cst clause forces the atomically
3312 // performed operation to include an implicit flush operation without a list.
3315 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3316 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
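// For illustration, with
//   #pragma omp atomic read seq_cst
//   v = x;
// 'x' is loaded atomically, the seq_cst clause forces the flush without a
// list above, and the result is then stored to 'v' with a plain store.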
3319 static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
3320 const Expr *X, const Expr *E,
3321 SourceLocation Loc) {
3323 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
3324 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
3325 // OpenMP, 2.12.6, atomic Construct
3326 // Any atomic construct with a seq_cst clause forces the atomically
3327 // performed operation to include an implicit flush operation without a list.
3330 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3333 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
3335 BinaryOperatorKind BO,
3336 llvm::AtomicOrdering AO,
3337 bool IsXLHSInRHSPart) {
3338 auto &Context = CGF.CGM.getContext();
3339 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
3340 // expression is simple and atomic is allowed for the given type for the target platform.
3342 if (BO == BO_Comma || !Update.isScalar() ||
3343 !Update.getScalarVal()->getType()->isIntegerTy() ||
3344 !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
3345 (Update.getScalarVal()->getType() !=
3346 X.getAddress().getElementType())) ||
3347 !X.getAddress().getElementType()->isIntegerTy() ||
3348 !Context.getTargetInfo().hasBuiltinAtomic(
3349 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
3350 return std::make_pair(false, RValue::get(nullptr));
3352 llvm::AtomicRMWInst::BinOp RMWOp;
3355 RMWOp = llvm::AtomicRMWInst::Add;
3358 if (!IsXLHSInRHSPart)
3359 return std::make_pair(false, RValue::get(nullptr));
3360 RMWOp = llvm::AtomicRMWInst::Sub;
3363 RMWOp = llvm::AtomicRMWInst::And;
3366 RMWOp = llvm::AtomicRMWInst::Or;
3369 RMWOp = llvm::AtomicRMWInst::Xor;
3372 RMWOp = X.getType()->hasSignedIntegerRepresentation()
3373 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
3374 : llvm::AtomicRMWInst::Max)
3375 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
3376 : llvm::AtomicRMWInst::UMax);
3379 RMWOp = X.getType()->hasSignedIntegerRepresentation()
3380 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
3381 : llvm::AtomicRMWInst::Min)
3382 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
3383 : llvm::AtomicRMWInst::UMin);
3386 RMWOp = llvm::AtomicRMWInst::Xchg;
3395 return std::make_pair(false, RValue::get(nullptr));
3413 llvm_unreachable("Unsupported atomic update operation");
3415 auto *UpdateVal = Update.getScalarVal();
3416 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
3417 UpdateVal = CGF.Builder.CreateIntCast(
3418 IC, X.getAddress().getElementType(),
3419 X.getType()->hasSignedIntegerRepresentation());
3421 auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
3422 return std::make_pair(true, RValue::get(Res));
3425 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
3426 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3427 llvm::AtomicOrdering AO, SourceLocation Loc,
3428 const llvm::function_ref<RValue(RValue)> &CommonGen) {
3429 // Update expressions are allowed to have the following forms:
3430 // x binop= expr; -> xrval binop expr;
3431 // x++, ++x -> xrval + 1;
3432 // x--, --x -> xrval - 1;
3433 // x = x binop expr; -> xrval binop expr;
3434 // x = expr Op x; -> expr binop xrval;
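// For illustration, with
//   #pragma omp atomic update
//   x += 1;
// and an integral 'x', emitOMPAtomicRMW can lower the update to a single
// 'atomicrmw add'; when it cannot (e.g. a floating-point 'x'), the code below
// falls back to a compare-and-swap loop via EmitAtomicUpdate.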
3435 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
3437 if (X.isGlobalReg()) {
3438 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
3440 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
3442 // Perform compare-and-swap procedure.
3443 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
3449 static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
3450 const Expr *X, const Expr *E,
3451 const Expr *UE, bool IsXLHSInRHSPart,
3452 SourceLocation Loc) {
3453 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
3454 "Update expr in 'atomic update' must be a binary operator.");
3455 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
3456 // Update expressions are allowed to have the following forms:
3457 // x binop= expr; -> xrval binop expr;
3458 // x++, ++x -> xrval + 1;
3459 // x--, --x -> xrval - 1;
3460 // x = x binop expr; -> xrval binop expr;
3461 // x = expr Op x; -> expr binop xrval;
3462 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
3463 LValue XLValue = CGF.EmitLValue(X);
3464 RValue ExprRValue = CGF.EmitAnyExpr(E);
3465 auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3466 : llvm::AtomicOrdering::Monotonic;
3467 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
3468 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
3469 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
3470 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
3472 [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
3473 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3474 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
3475 return CGF.EmitAnyExpr(UE);
3477 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
3478 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
3479 // OpenMP, 2.12.6, atomic Construct
3480 // Any atomic construct with a seq_cst clause forces the atomically
3481 // performed operation to include an implicit flush operation without a list.
3484 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3487 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
3488 QualType SourceType, QualType ResType,
3489 SourceLocation Loc) {
3490 switch (CGF.getEvaluationKind(ResType)) {
3493 convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
3495 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
3496 return RValue::getComplex(Res.first, Res.second);
3501 llvm_unreachable("Must be a scalar or complex.");
3504 static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
3505 bool IsPostfixUpdate, const Expr *V,
3506 const Expr *X, const Expr *E,
3507 const Expr *UE, bool IsXLHSInRHSPart,
3508 SourceLocation Loc) {
3509 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
3510 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
3512 LValue VLValue = CGF.EmitLValue(V);
3513 LValue XLValue = CGF.EmitLValue(X);
3514 RValue ExprRValue = CGF.EmitAnyExpr(E);
3515 auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3516 : llvm::AtomicOrdering::Monotonic;
3517 QualType NewVValType;
3519 // 'x' is updated with some additional value.
3520 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
3521 "Update expr in 'atomic capture' must be a binary operator.");
3522 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
3523 // Update expressions are allowed to have the following forms:
3524 // x binop= expr; -> xrval binop expr;
3525 // x++, ++x -> xrval + 1;
3526 // x--, --x -> xrval - 1;
3527 // x = x binop expr; -> xrval binop expr;
3528 // x = expr Op x; -> expr binop xrval;
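// For illustration, with
//   #pragma omp atomic capture
//   v = x++;
// IsPostfixUpdate is true and 'v' receives the old value of 'x', whereas for
// 'v = ++x;' it receives the updated value computed below.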
3529 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
3530 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
3531 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
3532 NewVValType = XRValExpr->getType();
3533 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
3534 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
3535 IsPostfixUpdate](RValue XRValue) -> RValue {
3536 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3537 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
3538 RValue Res = CGF.EmitAnyExpr(UE);
3539 NewVVal = IsPostfixUpdate ? XRValue : Res;
3542 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
3543 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
3545 // 'atomicrmw' instruction was generated.
3546 if (IsPostfixUpdate) {
3547 // Use old value from 'atomicrmw'.
3548 NewVVal = Res.second;
3550 // 'atomicrmw' does not provide the new value, so evaluate it using the old 'x' value.
3552 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3553 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
3554 NewVVal = CGF.EmitAnyExpr(UE);
3558 // 'x' is simply rewritten with some 'expr'.
3559 NewVValType = X->getType().getNonReferenceType();
3560 ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
3561 X->getType().getNonReferenceType(), Loc);
3562 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) -> RValue {
3566 // Try to perform atomicrmw xchg, otherwise simple exchange.
3567 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
3568 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
3571 // 'atomicrmw' instruction was generated.
3572 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
3575 // Emit post-update store to 'v' of old/new 'x' value.
3576 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
3577 // OpenMP, 2.12.6, atomic Construct
3578 // Any atomic construct with a seq_cst clause forces the atomically
3579 // performed operation to include an implicit flush operation without a list.
3582 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3585 static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
3586 bool IsSeqCst, bool IsPostfixUpdate,
3587 const Expr *X, const Expr *V, const Expr *E,
3588 const Expr *UE, bool IsXLHSInRHSPart,
3589 SourceLocation Loc) {
3592 EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
3595 EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
3599 EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
3602 EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
3603 IsXLHSInRHSPart, Loc);
3607 case OMPC_num_threads:
3609 case OMPC_firstprivate:
3610 case OMPC_lastprivate:
3611 case OMPC_reduction:
3621 case OMPC_copyprivate:
3623 case OMPC_proc_bind:
3628 case OMPC_threadprivate:
3630 case OMPC_mergeable:
3635 case OMPC_num_teams:
3636 case OMPC_thread_limit:
3638 case OMPC_grainsize:
3640 case OMPC_num_tasks:
3642 case OMPC_dist_schedule:
3643 case OMPC_defaultmap:
3647 case OMPC_use_device_ptr:
3648 case OMPC_is_device_ptr:
3649 llvm_unreachable("Clause is not allowed in 'omp atomic'.");
3653 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
3654 bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
3655 OpenMPClauseKind Kind = OMPC_unknown;
3656 for (auto *C : S.clauses()) {
3657 // Find first clause (skip seq_cst clause, if it is first).
3658 if (C->getClauseKind() != OMPC_seq_cst) {
3659 Kind = C->getClauseKind();
3665 S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
3666 if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
3667 enterFullExpression(EWC);
3669 // Processing for statements under 'atomic capture'.
3670 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
3671 for (const auto *C : Compound->body()) {
3672 if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
3673 enterFullExpression(EWC);
3678 auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF,
3679 PrePostActionTy &) {
3680 CGF.EmitStopPoint(CS);
3681 EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
3682 S.getV(), S.getExpr(), S.getUpdateExpr(),
3683 S.isXLHSInRHSPart(), S.getLocStart());
3685 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
3686 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
3689 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
3690 const OMPExecutableDirective &S,
3691 const RegionCodeGenTy &CodeGen) {
3692 assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
3693 CodeGenModule &CGM = CGF.CGM;
3694 const CapturedStmt &CS = *cast<CapturedStmt>(S.getAssociatedStmt());
3696 llvm::Function *Fn = nullptr;
3697 llvm::Constant *FnID = nullptr;
3699 const Expr *IfCond = nullptr;
3700 // Check for an 'if' clause associated with the target region (at most one is allowed).
3701 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3702 if (C->getNameModifier() == OMPD_unknown ||
3703 C->getNameModifier() == OMPD_target) {
3704 IfCond = C->getCondition();
3709 // Check if we have any device clause associated with the directive.
3710 const Expr *Device = nullptr;
3711 if (auto *C = S.getSingleClause<OMPDeviceClause>()) {
3712 Device = C->getDevice();
3715 // Check if we have an if clause whose conditional always evaluates to false
3716 // or if we do not have any targets specified. If so the target region is not
3717 // an offload entry point.
3718 bool IsOffloadEntry = true;
3721 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
3722 IsOffloadEntry = false;
3724 if (CGM.getLangOpts().OMPTargetTriples.empty())
3725 IsOffloadEntry = false;
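// For illustration, a region such as
//   #pragma omp target if(0)
//   x += 1;
// always runs on the host, and when no offload targets are configured (e.g.
// via -fopenmp-targets) there is nothing to offload either; in both cases the
// outlined function is not registered as an offload entry point.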
3727 assert(CGF.CurFuncDecl && "No parent declaration for target region!");
3728 StringRef ParentName;
3729 // If the parent is a constructor or destructor, use the complete-object
3730 // variant to produce the mangled name of the device outlined kernel.
3731 if (auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
3732 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
3733 else if (auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
3734 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
3737 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl)));
3739 // Emit target region as a standalone region.
3740 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
3741 IsOffloadEntry, CodeGen);
3742 OMPLexicalScope Scope(CGF, S);
3743 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3744 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
3745 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device,
3749 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
3750 PrePostActionTy &Action) {
3751 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
3752 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3753 CGF.EmitOMPPrivateClause(S, PrivateScope);
3754 (void)PrivateScope.Privatize();
3757 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3760 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
3761 StringRef ParentName,
3762 const OMPTargetDirective &S) {
3763 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3764 emitTargetRegion(CGF, S, Action);
3767 llvm::Constant *Addr;
3768 // Emit target region as a standalone region.
3769 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
3770 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
3771 assert(Fn && Addr && "Target device function emission failed.");
3774 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
3775 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3776 emitTargetRegion(CGF, S, Action);
3778 emitCommonOMPTargetDirective(*this, S, CodeGen);
3781 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
3782 const OMPExecutableDirective &S,
3783 OpenMPDirectiveKind InnermostKind,
3784 const RegionCodeGenTy &CodeGen) {
3785 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
3786 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
3787 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
3789 const OMPNumTeamsClause *NT = S.getSingleClause<OMPNumTeamsClause>();
3790 const OMPThreadLimitClause *TL = S.getSingleClause<OMPThreadLimitClause>();
3792 Expr *NumTeams = (NT) ? NT->getNumTeams() : nullptr;
3793 Expr *ThreadLimit = (TL) ? TL->getThreadLimit() : nullptr;
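// For illustration, with '#pragma omp teams num_teams(4) thread_limit(64)'
// both clause expressions are forwarded to the runtime through
// emitNumTeamsClause below before the teams region itself is emitted.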
3795 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
3799 OMPTeamsScope Scope(CGF, S);
3800 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3801 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
3802 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getLocStart(), OutlinedFn,
3806 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
3807 // Emit teams region as a standalone region.
3808 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3809 OMPPrivateScope PrivateScope(CGF);
3810 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3811 CGF.EmitOMPPrivateClause(S, PrivateScope);
3812 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
3813 (void)PrivateScope.Privatize();
3814 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3815 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
3817 emitCommonOMPTeamsDirective(*this, S, OMPD_teams, CodeGen);
3818 emitPostUpdateForReductionClause(
3819 *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
3822 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
3823 const OMPTargetTeamsDirective &S) {
3824 auto *CS = S.getCapturedStmt(OMPD_teams);
3826 auto &&CodeGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
3827 // TODO: Add support for clauses.
3828 CGF.EmitStmt(CS->getCapturedStmt());
3830 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen);
3833 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
3834 CodeGenModule &CGM, StringRef ParentName,
3835 const OMPTargetTeamsDirective &S) {
3836 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3837 emitTargetTeamsRegion(CGF, Action, S);
3840 llvm::Constant *Addr;
3841 // Emit target region as a standalone region.
3842 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
3843 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
3844 assert(Fn && Addr && "Target device function emission failed.");
3847 void CodeGenFunction::EmitOMPTargetTeamsDirective(
3848 const OMPTargetTeamsDirective &S) {
3849 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3850 emitTargetTeamsRegion(CGF, Action, S);
3852 emitCommonOMPTargetDirective(*this, S, CodeGen);
3855 void CodeGenFunction::EmitOMPCancellationPointDirective(
3856 const OMPCancellationPointDirective &S) {
3857 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
3858 S.getCancelRegion());
3861 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
3862 const Expr *IfCond = nullptr;
3863 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3864 if (C->getNameModifier() == OMPD_unknown ||
3865 C->getNameModifier() == OMPD_cancel) {
3866 IfCond = C->getCondition();
3870 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), IfCond,
3871 S.getCancelRegion());
3874 CodeGenFunction::JumpDest
3875 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
3876 if (Kind == OMPD_parallel || Kind == OMPD_task ||
3877 Kind == OMPD_target_parallel)
3879 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
3880 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
3881 Kind == OMPD_distribute_parallel_for ||
3882 Kind == OMPD_target_parallel_for);
3883 return OMPCancelStack.getExitBlock();
3886 void CodeGenFunction::EmitOMPUseDevicePtrClause(
3887 const OMPClause &NC, OMPPrivateScope &PrivateScope,
3888 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
3889 const auto &C = cast<OMPUseDevicePtrClause>(NC);
3890 auto OrigVarIt = C.varlist_begin();
3891 auto InitIt = C.inits().begin();
3892 for (auto PvtVarIt : C.private_copies()) {
3893 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
3894 auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
3895 auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
3897 // In order to identify the right initializer we need to match the
3898 // declaration used by the mapping logic. In some cases we may get
3899 // an OMPCapturedExprDecl that refers to the original declaration.
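// For illustration, in
//   #pragma omp target data map(buf[0:n]) use_device_ptr(buf)
// 'buf' is privatized below so that, inside the region, it holds the device
// address returned by the runtime rather than the host pointer.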
3900 const ValueDecl *MatchingVD = OrigVD;
3901 if (auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
3902 // An OMPCapturedExprDecl is used to privatize fields of the current structure.
3904 auto *ME = cast<MemberExpr>(OED->getInit());
3905 assert(isa<CXXThisExpr>(ME->getBase()) &&
3906 "Base should be the current struct!");
3907 MatchingVD = ME->getMemberDecl();
3910 // If we don't have information about the current list item, move on to the next one.
3912 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
3913 if (InitAddrIt == CaptureDeviceAddrMap.end())
3916 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
3917 // Initialize the temporary initialization variable with the address we
3918 // get from the runtime library. We have to cast the source address
3919 // because it is always a void *. References are materialized in the
3920 // privatization scope, so the initialization here disregards the fact
3921 // that the original variable is a reference.
3923 getContext().getPointerType(OrigVD->getType().getNonReferenceType());
3924 llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
3925 Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
3926 setAddrOfLocalVar(InitVD, InitAddr);
3928 // Emit the private declaration; it will be initialized by the declaration
3929 // we just added to the local declarations map.
3932 // The initialization variable has served its purpose in the emission of
3933 // the previous declaration, so we don't need it anymore.
3934 LocalDeclMap.erase(InitVD);
3936 // Return the address of the private variable.
3937 return GetAddrOfLocalVar(PvtVD);
3939 assert(IsRegistered && "firstprivate var already registered as private");
3940 // Silence the warning about unused variable.
3948 // Generate the instructions for '#pragma omp target data' directive.
3949 void CodeGenFunction::EmitOMPTargetDataDirective(
3950 const OMPTargetDataDirective &S) {
3951 CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
3953 // Create a pre/post action to signal the privatization of the device pointer.
3954 // This action can be replaced by the OpenMP runtime code generation to
3955 // deactivate privatization.
3956 bool PrivatizeDevicePointers = false;
3957 class DevicePointerPrivActionTy : public PrePostActionTy {
3958 bool &PrivatizeDevicePointers;
3961 explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
3962 : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
3963 void Enter(CodeGenFunction &CGF) override {
3964 PrivatizeDevicePointers = true;
3967 DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
3969 auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
3970 CodeGenFunction &CGF, PrePostActionTy &Action) {
3971 auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3973 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3976 // Codegen that selects whether to generate the privatization code or not.
3977 auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
3978 &InnermostCodeGen](CodeGenFunction &CGF,
3979 PrePostActionTy &Action) {
3980 RegionCodeGenTy RCG(InnermostCodeGen);
3981 PrivatizeDevicePointers = false;
3983 // Call the pre-action to change the status of PrivatizeDevicePointers if needed.
3987 if (PrivatizeDevicePointers) {
3988 OMPPrivateScope PrivateScope(CGF);
3989 // Emit all instances of the use_device_ptr clause.
3990 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
3991 CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
3992 Info.CaptureDeviceAddrMap);
3993 (void)PrivateScope.Privatize();
3999 // Forward the provided action to the privatization codegen.
4000 RegionCodeGenTy PrivRCG(PrivCodeGen);
4001 PrivRCG.setAction(Action);
4003 // Although the body of the region is emitted as an inlined directive,
4004 // we don't use an inline scope, as changes to the references inside the
4005 // region are expected to be visible outside, so we do not privatize them.
4006 OMPLexicalScope Scope(CGF, S);
4007 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
4011 RegionCodeGenTy RCG(CodeGen);
4013 // If we don't have target devices, don't bother emitting the data mapping
4015 if (CGM.getLangOpts().OMPTargetTriples.empty()) {
4020 // Check if we have any if clause associated with the directive.
4021 const Expr *IfCond = nullptr;
4022 if (auto *C = S.getSingleClause<OMPIfClause>())
4023 IfCond = C->getCondition();
4025 // Check if we have any device clause associated with the directive.
4026 const Expr *Device = nullptr;
4027 if (auto *C = S.getSingleClause<OMPDeviceClause>())
4028 Device = C->getDevice();
4030 // Set the action to signal privatization of device pointers.
4031 RCG.setAction(PrivAction);
4033 // Emit region code.
4034 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
4038 void CodeGenFunction::EmitOMPTargetEnterDataDirective(
4039 const OMPTargetEnterDataDirective &S) {
4040 // If we don't have target devices, don't bother emitting the data mapping
4042 if (CGM.getLangOpts().OMPTargetTriples.empty())
4045 // Check if we have any if clause associated with the directive.
4046 const Expr *IfCond = nullptr;
4047 if (auto *C = S.getSingleClause<OMPIfClause>())
4048 IfCond = C->getCondition();
4050 // Check if we have any device clause associated with the directive.
4051 const Expr *Device = nullptr;
4052 if (auto *C = S.getSingleClause<OMPDeviceClause>())
4053 Device = C->getDevice();
4055 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
4058 void CodeGenFunction::EmitOMPTargetExitDataDirective(
4059 const OMPTargetExitDataDirective &S) {
4060 // If we don't have target devices, don't bother emitting the data mapping
4062 if (CGM.getLangOpts().OMPTargetTriples.empty())
4065 // Check if we have any if clause associated with the directive.
4066 const Expr *IfCond = nullptr;
4067 if (auto *C = S.getSingleClause<OMPIfClause>())
4068 IfCond = C->getCondition();
4070 // Check if we have any device clause associated with the directive.
4071 const Expr *Device = nullptr;
4072 if (auto *C = S.getSingleClause<OMPDeviceClause>())
4073 Device = C->getDevice();
4075 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
4078 static void emitTargetParallelRegion(CodeGenFunction &CGF,
4079 const OMPTargetParallelDirective &S,
4080 PrePostActionTy &Action) {
4081 // Get the captured statement associated with the 'parallel' region.
4082 auto *CS = S.getCapturedStmt(OMPD_parallel);
4084 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &) {
4085 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
4086 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4087 CGF.EmitOMPPrivateClause(S, PrivateScope);
4088 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4089 (void)PrivateScope.Privatize();
4090 // TODO: Add support for clauses.
4091 CGF.EmitStmt(CS->getCapturedStmt());
4092 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4094 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen,
4095 emitEmptyBoundParameters);
4096 emitPostUpdateForReductionClause(
4097 CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
4100 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
4101 CodeGenModule &CGM, StringRef ParentName,
4102 const OMPTargetParallelDirective &S) {
4103 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4104 emitTargetParallelRegion(CGF, S, Action);
4107 llvm::Constant *Addr;
4108 // Emit target region as a standalone region.
4109 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
4110 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
4111 assert(Fn && Addr && "Target device function emission failed.");
4114 void CodeGenFunction::EmitOMPTargetParallelDirective(
4115 const OMPTargetParallelDirective &S) {
4116 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4117 emitTargetParallelRegion(CGF, S, Action);
4119 emitCommonOMPTargetDirective(*this, S, CodeGen);
4122 void CodeGenFunction::EmitOMPTargetParallelForDirective(
4123 const OMPTargetParallelForDirective &S) {
4124 // TODO: codegen for target parallel for.
4127 /// Map an OpenMP loop helper variable (bound, stride or last-iteration flag) to the corresponding parameter of the outlined function.
4128 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
4129 const ImplicitParamDecl *PVD,
4130 CodeGenFunction::OMPPrivateScope &Privates) {
4131 auto *VDecl = cast<VarDecl>(Helper->getDecl());
4132 Privates.addPrivate(
4133 VDecl, [&CGF, PVD]() -> Address { return CGF.GetAddrOfLocalVar(PVD); });
4136 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
4137 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
4138 // Emit outlined function for task construct.
4139 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
4140 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
4141 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
4142 const Expr *IfCond = nullptr;
4143 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
4144 if (C->getNameModifier() == OMPD_unknown ||
4145 C->getNameModifier() == OMPD_taskloop) {
4146 IfCond = C->getCondition();
4152 // Check if taskloop must be emitted without taskgroup.
4153 Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
4154 // TODO: Check if we should emit tied or untied task.
4156 // Set scheduling for taskloop
4157 if (const auto* Clause = S.getSingleClause<OMPGrainsizeClause>()) {
4159 Data.Schedule.setInt(/*IntVal=*/false);
4160 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
4161 } else if (const auto* Clause = S.getSingleClause<OMPNumTasksClause>()) {
4163 Data.Schedule.setInt(/*IntVal=*/true);
4164 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
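// For illustration, with
//   #pragma omp taskloop grainsize(512)
//   for (int i = 0; i < n; ++i) a[i] *= 2;
// Data.Schedule carries the grainsize expression (IntVal=false), while a
// num_tasks(8) clause would instead carry the task count (IntVal=true).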
4167 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
4169 // for (IV in 0..LastIteration) BODY;
4170 // <Final counter/linear vars updates>;
4174 // Emit: if (PreCond) - begin.
4175 // If the condition constant folds and can be elided, avoid emitting the whole loop.
4178 llvm::BasicBlock *ContBlock = nullptr;
4179 OMPLoopScope PreInitScope(CGF, S);
4180 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
4184 auto *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
4185 ContBlock = CGF.createBasicBlock("taskloop.if.end");
4186 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
4187 CGF.getProfileCount(&S));
4188 CGF.EmitBlock(ThenBlock);
4189 CGF.incrementProfileCounter(&S);
4192 if (isOpenMPSimdDirective(S.getDirectiveKind()))
4193 CGF.EmitOMPSimdInit(S);
4195 OMPPrivateScope LoopScope(CGF);
4196 // Emit helper vars inits.
4197 enum { LowerBound = 5, UpperBound, Stride, LastIter };
4198 auto *I = CS->getCapturedDecl()->param_begin();
4199 auto *LBP = std::next(I, LowerBound);
4200 auto *UBP = std::next(I, UpperBound);
4201 auto *STP = std::next(I, Stride);
4202 auto *LIP = std::next(I, LastIter);
4203 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
4205 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
4207 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
4208 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
4210 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
4211 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
4212 (void)LoopScope.Privatize();
4213 // Emit the loop iteration variable.
4214 const Expr *IVExpr = S.getIterationVariable();
4215 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
4216 CGF.EmitVarDecl(*IVDecl);
4217 CGF.EmitIgnoredExpr(S.getInit());
4219 // Emit the iteration count variable.
4220 // If it is not a variable, Sema decided to calculate the iteration count on
4221 // each iteration (e.g., it is foldable into a constant).
4222 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
4223 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
4224 // Emit the calculation of the iteration count.
4225 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
4228 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
4230 [&S](CodeGenFunction &CGF) {
4231 CGF.EmitOMPLoopBody(S, JumpDest());
4232 CGF.EmitStopPoint(&S);
4234 [](CodeGenFunction &) {});
4235 // Emit: if (PreCond) - end.
4237 CGF.EmitBranch(ContBlock);
4238 CGF.EmitBlock(ContBlock, true);
4240 // Emit final copy of the lastprivate variables if IsLastIter != 0.
4241 if (HasLastprivateClause) {
4242 CGF.EmitOMPLastprivateClauseFinal(
4243 S, isOpenMPSimdDirective(S.getDirectiveKind()),
4244 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
4245 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
4246 (*LIP)->getType(), S.getLocStart())));
4249 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
4250 IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
4251 const OMPTaskDataTy &Data) {
4252 auto &&CodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &) {
4253 OMPLoopScope PreInitScope(CGF, S);
4254 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getLocStart(), S,
4255 OutlinedFn, SharedsTy,
4256 CapturedStruct, IfCond, Data);
4258 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
4261 EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
4264 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
4265 EmitOMPTaskLoopBasedDirective(S);
4268 void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
4269 const OMPTaskLoopSimdDirective &S) {
4270 EmitOMPTaskLoopBasedDirective(S);
4273 // Generate the instructions for '#pragma omp target update' directive.
4274 void CodeGenFunction::EmitOMPTargetUpdateDirective(
4275 const OMPTargetUpdateDirective &S) {
4276 // If we don't have target devices, don't bother emitting the data mapping
4278 if (CGM.getLangOpts().OMPTargetTriples.empty())
4281 // Check if we have any if clause associated with the directive.
4282 const Expr *IfCond = nullptr;
4283 if (auto *C = S.getSingleClause<OMPIfClause>())
4284 IfCond = C->getCondition();
4286 // Check if we have any device clause associated with the directive.
4287 const Expr *Device = nullptr;
4288 if (auto *C = S.getSingleClause<OMPDeviceClause>())
4289 Device = C->getDevice();
4291 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);