//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/DeclOpenMP.h"
#include "llvm/IR/CallSite.h"
using namespace clang;
using namespace CodeGen;
27 /// Lexical scope for OpenMP executable constructs, that handles correct codegen
28 /// for captured expressions.
29 class OMPLexicalScope : public CodeGenFunction::LexicalScope {
30 void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
31 for (const auto *C : S.clauses()) {
32 if (auto *CPI = OMPClauseWithPreInit::get(C)) {
33 if (auto *PreInit = cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
34 for (const auto *I : PreInit->decls()) {
35 if (!I->hasAttr<OMPCaptureNoInitAttr>())
36 CGF.EmitVarDecl(cast<VarDecl>(*I));
38 CodeGenFunction::AutoVarEmission Emission =
39 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
40 CGF.EmitAutoVarCleanups(Emission);
47 CodeGenFunction::OMPPrivateScope InlinedShareds;
49 static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
50 return CGF.LambdaCaptureFields.lookup(VD) ||
51 (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
52 (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
56 OMPLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S,
57 bool AsInlined = false, bool EmitPreInitStmt = true)
58 : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
61 emitPreInitStmt(CGF, S);
63 if (S.hasAssociatedStmt()) {
64 auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
65 for (auto &C : CS->captures()) {
66 if (C.capturesVariable() || C.capturesVariableByCopy()) {
67 auto *VD = C.getCapturedVar();
68 DeclRefExpr DRE(const_cast<VarDecl *>(VD),
69 isCapturedVar(CGF, VD) ||
70 (CGF.CapturedStmtInfo &&
71 InlinedShareds.isGlobalVarCaptured(VD)),
72 VD->getType().getNonReferenceType(), VK_LValue,
74 InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
75 return CGF.EmitLValue(&DRE).getAddress();
79 (void)InlinedShareds.Privatize();
85 /// Lexical scope for OpenMP parallel construct, that handles correct codegen
86 /// for captured expressions.
87 class OMPParallelScope final : public OMPLexicalScope {
88 bool EmitPreInitStmt(const OMPExecutableDirective &S) {
89 OpenMPDirectiveKind Kind = S.getDirectiveKind();
90 return !isOpenMPTargetExecutionDirective(Kind) &&
91 isOpenMPParallelDirective(Kind);
95 OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
96 : OMPLexicalScope(CGF, S,
98 /*EmitPreInitStmt=*/EmitPreInitStmt(S)) {}
101 /// Lexical scope for OpenMP teams construct, that handles correct codegen
102 /// for captured expressions.
103 class OMPTeamsScope final : public OMPLexicalScope {
104 bool EmitPreInitStmt(const OMPExecutableDirective &S) {
105 OpenMPDirectiveKind Kind = S.getDirectiveKind();
106 return !isOpenMPTargetExecutionDirective(Kind) &&
107 isOpenMPTeamsDirective(Kind);
111 OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
112 : OMPLexicalScope(CGF, S,
114 /*EmitPreInitStmt=*/EmitPreInitStmt(S)) {}
117 /// Private scope for OpenMP loop-based directives, that supports capturing
118 /// of used expression from loop statement.
119 class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
120 void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
121 if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
122 if (auto *PreInits = cast_or_null<DeclStmt>(LD->getPreInits())) {
123 for (const auto *I : PreInits->decls())
124 CGF.EmitVarDecl(cast<VarDecl>(*I));
130 OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
131 : CodeGenFunction::RunCleanupsScope(CGF) {
132 emitPreInitStmt(CGF, S);
138 llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
139 auto &C = getContext();
140 llvm::Value *Size = nullptr;
141 auto SizeInChars = C.getTypeSizeInChars(Ty);
142 if (SizeInChars.isZero()) {
143 // getTypeSizeInChars() returns 0 for a VLA.
144 while (auto *VAT = C.getAsVariableArrayType(Ty)) {
145 llvm::Value *ArraySize;
146 std::tie(ArraySize, Ty) = getVLASize(VAT);
147 Size = Size ? Builder.CreateNUWMul(Size, ArraySize) : ArraySize;
149 SizeInChars = C.getTypeSizeInChars(Ty);
150 if (SizeInChars.isZero())
151 return llvm::ConstantInt::get(SizeTy, /*V=*/0);
152 Size = Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
154 Size = CGM.getSize(SizeInChars);
158 void CodeGenFunction::GenerateOpenMPCapturedVars(
159 const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
160 const RecordDecl *RD = S.getCapturedRecordDecl();
161 auto CurField = RD->field_begin();
162 auto CurCap = S.captures().begin();
163 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
164 E = S.capture_init_end();
165 I != E; ++I, ++CurField, ++CurCap) {
166 if (CurField->hasCapturedVLAType()) {
167 auto VAT = CurField->getCapturedVLAType();
168 auto *Val = VLASizeMap[VAT->getSizeExpr()];
169 CapturedVars.push_back(Val);
170 } else if (CurCap->capturesThis())
171 CapturedVars.push_back(CXXThisValue);
172 else if (CurCap->capturesVariableByCopy()) {
174 EmitLoadOfLValue(EmitLValue(*I), SourceLocation()).getScalarVal();
176 // If the field is not a pointer, we need to save the actual value
177 // and load it as a void pointer.
178 if (!CurField->getType()->isAnyPointerType()) {
179 auto &Ctx = getContext();
180 auto DstAddr = CreateMemTemp(
181 Ctx.getUIntPtrType(),
182 Twine(CurCap->getCapturedVar()->getName()) + ".casted");
183 LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
185 auto *SrcAddrVal = EmitScalarConversion(
186 DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
187 Ctx.getPointerType(CurField->getType()), SourceLocation());
189 MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());
191 // Store the value using the source type pointer.
192 EmitStoreThroughLValue(RValue::get(CV), SrcLV);
194 // Load the value using the destination type pointer.
195 CV = EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
197 CapturedVars.push_back(CV);
199 assert(CurCap->capturesVariable() && "Expected capture by reference.");
200 CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
205 static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
206 StringRef Name, LValue AddrLV,
207 bool isReferenceType = false) {
208 ASTContext &Ctx = CGF.getContext();
210 auto *CastedPtr = CGF.EmitScalarConversion(
211 AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
212 Ctx.getPointerType(DstType), SourceLocation());
214 CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
217 // If we are dealing with references we need to return the address of the
218 // reference instead of the reference of the value.
219 if (isReferenceType) {
220 QualType RefType = Ctx.getLValueReferenceType(DstType);
221 auto *RefVal = TmpAddr.getPointer();
222 TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
223 auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
224 CGF.EmitStoreThroughLValue(RValue::get(RefVal), TmpLVal, /*isInit*/ true);
230 static QualType getCanonicalParamType(ASTContext &C, QualType T) {
231 if (T->isLValueReferenceType()) {
232 return C.getLValueReferenceType(
233 getCanonicalParamType(C, T.getNonReferenceType()),
234 /*SpelledAsLValue=*/false);
236 if (T->isPointerType())
237 return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
238 return C.getCanonicalParamType(T);
242 CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
245 "CapturedStmtInfo should be set when generating the captured function");
246 const CapturedDecl *CD = S.getCapturedDecl();
247 const RecordDecl *RD = S.getCapturedRecordDecl();
248 assert(CD->hasBody() && "missing CapturedDecl body");
250 // Build the argument list.
251 ASTContext &Ctx = CGM.getContext();
252 FunctionArgList Args;
253 Args.append(CD->param_begin(),
254 std::next(CD->param_begin(), CD->getContextParamPosition()));
255 auto I = S.captures().begin();
256 for (auto *FD : RD->fields()) {
257 QualType ArgType = FD->getType();
258 IdentifierInfo *II = nullptr;
259 VarDecl *CapVar = nullptr;
261 // If this is a capture by copy and the type is not a pointer, the outlined
262 // function argument type should be uintptr and the value properly casted to
263 // uintptr. This is necessary given that the runtime library is only able to
264 // deal with pointers. We can pass in the same way the VLA type sizes to the
265 // outlined function.
266 if ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
267 I->capturesVariableArrayType())
268 ArgType = Ctx.getUIntPtrType();
270 if (I->capturesVariable() || I->capturesVariableByCopy()) {
271 CapVar = I->getCapturedVar();
272 II = CapVar->getIdentifier();
273 } else if (I->capturesThis())
274 II = &getContext().Idents.get("this");
276 assert(I->capturesVariableArrayType());
277 II = &getContext().Idents.get("vla");
279 if (ArgType->isVariablyModifiedType()) {
281 getCanonicalParamType(getContext(), ArgType.getNonReferenceType());
283 Args.push_back(ImplicitParamDecl::Create(getContext(), nullptr,
284 FD->getLocation(), II, ArgType));
288 std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
291 // Create the function declaration.
292 FunctionType::ExtInfo ExtInfo;
293 const CGFunctionInfo &FuncInfo =
294 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
295 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
297 llvm::Function *F = llvm::Function::Create(
298 FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
299 CapturedStmtInfo->getHelperName(), &CGM.getModule());
300 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
302 F->addFnAttr(llvm::Attribute::NoUnwind);
304 // Generate the function.
305 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
306 CD->getBody()->getLocStart());
307 unsigned Cnt = CD->getContextParamPosition();
308 I = S.captures().begin();
309 for (auto *FD : RD->fields()) {
310 // If we are capturing a pointer by copy we don't need to do anything, just
311 // use the value that we get from the arguments.
312 if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
313 const VarDecl *CurVD = I->getCapturedVar();
314 Address LocalAddr = GetAddrOfLocalVar(Args[Cnt]);
315 // If the variable is a reference we need to materialize it here.
316 if (CurVD->getType()->isReferenceType()) {
317 Address RefAddr = CreateMemTemp(CurVD->getType(), getPointerAlign(),
318 ".materialized_ref");
319 EmitStoreOfScalar(LocalAddr.getPointer(), RefAddr, /*Volatile=*/false,
323 setAddrOfLocalVar(CurVD, LocalAddr);
330 MakeAddrLValue(GetAddrOfLocalVar(Args[Cnt]), Args[Cnt]->getType(),
331 AlignmentSource::Decl);
332 if (FD->hasCapturedVLAType()) {
333 LValue CastedArgLVal =
334 MakeAddrLValue(castValueFromUintptr(*this, FD->getType(),
335 Args[Cnt]->getName(), ArgLVal),
336 FD->getType(), AlignmentSource::Decl);
338 EmitLoadOfLValue(CastedArgLVal, SourceLocation()).getScalarVal();
339 auto VAT = FD->getCapturedVLAType();
340 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
341 } else if (I->capturesVariable()) {
342 auto *Var = I->getCapturedVar();
343 QualType VarTy = Var->getType();
344 Address ArgAddr = ArgLVal.getAddress();
345 if (!VarTy->isReferenceType()) {
346 if (ArgLVal.getType()->isLValueReferenceType()) {
347 ArgAddr = EmitLoadOfReference(
348 ArgAddr, ArgLVal.getType()->castAs<ReferenceType>());
349 } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
350 assert(ArgLVal.getType()->isPointerType());
351 ArgAddr = EmitLoadOfPointer(
352 ArgAddr, ArgLVal.getType()->castAs<PointerType>());
356 Var, Address(ArgAddr.getPointer(), getContext().getDeclAlign(Var)));
357 } else if (I->capturesVariableByCopy()) {
358 assert(!FD->getType()->isAnyPointerType() &&
359 "Not expecting a captured pointer.");
360 auto *Var = I->getCapturedVar();
361 QualType VarTy = Var->getType();
362 setAddrOfLocalVar(Var, castValueFromUintptr(*this, FD->getType(),
363 Args[Cnt]->getName(), ArgLVal,
364 VarTy->isReferenceType()));
366 // If 'this' is captured, load it into CXXThisValue.
367 assert(I->capturesThis());
369 EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation()).getScalarVal();
375 PGO.assignRegionCounters(GlobalDecl(CD), F);
376 CapturedStmtInfo->EmitBody(*this, CD->getBody());
377 FinishFunction(CD->getBodyRBrace());
//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
385 void CodeGenFunction::EmitOMPAggregateAssign(
386 Address DestAddr, Address SrcAddr, QualType OriginalType,
387 const llvm::function_ref<void(Address, Address)> &CopyGen) {
388 // Perform element-by-element initialization.
391 // Drill down to the base element type on both arrays.
392 auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
393 auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
394 SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
396 auto SrcBegin = SrcAddr.getPointer();
397 auto DestBegin = DestAddr.getPointer();
398 // Cast from pointer to array type to pointer to single element.
399 auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
400 // The basic structure here is a while-do loop.
401 auto BodyBB = createBasicBlock("omp.arraycpy.body");
402 auto DoneBB = createBasicBlock("omp.arraycpy.done");
404 Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
405 Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
407 // Enter the loop body, making that address the current address.
408 auto EntryBB = Builder.GetInsertBlock();
411 CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
413 llvm::PHINode *SrcElementPHI =
414 Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
415 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
416 Address SrcElementCurrent =
417 Address(SrcElementPHI,
418 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
420 llvm::PHINode *DestElementPHI =
421 Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
422 DestElementPHI->addIncoming(DestBegin, EntryBB);
423 Address DestElementCurrent =
424 Address(DestElementPHI,
425 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
428 CopyGen(DestElementCurrent, SrcElementCurrent);
430 // Shift the address forward by one element.
431 auto DestElementNext = Builder.CreateConstGEP1_32(
432 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
433 auto SrcElementNext = Builder.CreateConstGEP1_32(
434 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
435 // Check whether we've reached the end.
437 Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
438 Builder.CreateCondBr(Done, DoneBB, BodyBB);
439 DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
440 SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());
443 EmitBlock(DoneBB, /*IsFinished=*/true);
446 /// Check if the combiner is a call to UDR combiner and if it is so return the
447 /// UDR decl used for reduction.
448 static const OMPDeclareReductionDecl *
449 getReductionInit(const Expr *ReductionOp) {
450 if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
451 if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
453 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
454 if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
459 static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
460 const OMPDeclareReductionDecl *DRD,
462 Address Private, Address Original,
464 if (DRD->getInitializer()) {
465 std::pair<llvm::Function *, llvm::Function *> Reduction =
466 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
467 auto *CE = cast<CallExpr>(InitOp);
468 auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
469 const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
470 const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
471 auto *LHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
472 auto *RHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
473 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
474 PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
475 [=]() -> Address { return Private; });
476 PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
477 [=]() -> Address { return Original; });
478 (void)PrivateScope.Privatize();
479 RValue Func = RValue::get(Reduction.second);
480 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
481 CGF.EmitIgnoredExpr(InitOp);
483 llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
484 auto *GV = new llvm::GlobalVariable(
485 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
486 llvm::GlobalValue::PrivateLinkage, Init, ".init");
487 LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
489 switch (CGF.getEvaluationKind(Ty)) {
491 InitRVal = CGF.EmitLoadOfLValue(LV, SourceLocation());
495 RValue::getComplex(CGF.EmitLoadOfComplex(LV, SourceLocation()));
498 InitRVal = RValue::getAggregate(LV.getAddress());
501 OpaqueValueExpr OVE(SourceLocation(), Ty, VK_RValue);
502 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
503 CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
504 /*IsInitializer=*/false);
508 /// \brief Emit initialization of arrays of complex types.
509 /// \param DestAddr Address of the array.
510 /// \param Type Type of array.
511 /// \param Init Initial expression of array.
512 /// \param SrcAddr Address of the original array.
513 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
514 QualType Type, const Expr *Init,
515 Address SrcAddr = Address::invalid()) {
516 auto *DRD = getReductionInit(Init);
517 // Perform element-by-element initialization.
520 // Drill down to the base element type on both arrays.
521 auto ArrayTy = Type->getAsArrayTypeUnsafe();
522 auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
524 CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
527 CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
529 llvm::Value *SrcBegin = nullptr;
531 SrcBegin = SrcAddr.getPointer();
532 auto DestBegin = DestAddr.getPointer();
533 // Cast from pointer to array type to pointer to single element.
534 auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
535 // The basic structure here is a while-do loop.
536 auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
537 auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
539 CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
540 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
542 // Enter the loop body, making that address the current address.
543 auto EntryBB = CGF.Builder.GetInsertBlock();
544 CGF.EmitBlock(BodyBB);
546 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
548 llvm::PHINode *SrcElementPHI = nullptr;
549 Address SrcElementCurrent = Address::invalid();
551 SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
552 "omp.arraycpy.srcElementPast");
553 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
555 Address(SrcElementPHI,
556 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
558 llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
559 DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
560 DestElementPHI->addIncoming(DestBegin, EntryBB);
561 Address DestElementCurrent =
562 Address(DestElementPHI,
563 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
567 CodeGenFunction::RunCleanupsScope InitScope(CGF);
568 if (DRD && (DRD->getInitializer() || !Init)) {
569 emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
570 SrcElementCurrent, ElementTy);
572 CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
573 /*IsInitializer=*/false);
577 // Shift the address forward by one element.
578 auto SrcElementNext = CGF.Builder.CreateConstGEP1_32(
579 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
580 SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
583 // Shift the address forward by one element.
584 auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
585 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
586 // Check whether we've reached the end.
588 CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
589 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
590 DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
593 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
596 void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
597 Address SrcAddr, const VarDecl *DestVD,
598 const VarDecl *SrcVD, const Expr *Copy) {
599 if (OriginalType->isArrayType()) {
600 auto *BO = dyn_cast<BinaryOperator>(Copy);
601 if (BO && BO->getOpcode() == BO_Assign) {
602 // Perform simple memcpy for simple copying.
603 EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
605 // For arrays with complex element types perform element by element
607 EmitOMPAggregateAssign(
608 DestAddr, SrcAddr, OriginalType,
609 [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
610 // Working with the single array element, so have to remap
611 // destination and source variables to corresponding array
613 CodeGenFunction::OMPPrivateScope Remap(*this);
614 Remap.addPrivate(DestVD, [DestElement]() -> Address {
618 SrcVD, [SrcElement]() -> Address { return SrcElement; });
619 (void)Remap.Privatize();
620 EmitIgnoredExpr(Copy);
624 // Remap pseudo source variable to private copy.
625 CodeGenFunction::OMPPrivateScope Remap(*this);
626 Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
627 Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
628 (void)Remap.Privatize();
629 // Emit copying of the whole variable.
630 EmitIgnoredExpr(Copy);
634 bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
635 OMPPrivateScope &PrivateScope) {
636 if (!HaveInsertPoint())
638 bool FirstprivateIsLastprivate = false;
639 llvm::DenseSet<const VarDecl *> Lastprivates;
640 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
641 for (const auto *D : C->varlists())
643 cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
645 llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
646 CGCapturedStmtInfo CapturesInfo(cast<CapturedStmt>(*D.getAssociatedStmt()));
647 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
648 auto IRef = C->varlist_begin();
649 auto InitsRef = C->inits().begin();
650 for (auto IInit : C->private_copies()) {
651 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
652 bool ThisFirstprivateIsLastprivate =
653 Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
654 auto *CapFD = CapturesInfo.lookup(OrigVD);
655 auto *FD = CapturedStmtInfo->lookup(OrigVD);
656 if (!ThisFirstprivateIsLastprivate && FD && (FD == CapFD) &&
657 !FD->getType()->isReferenceType()) {
658 EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
663 FirstprivateIsLastprivate =
664 FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
665 if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
666 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
667 auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
669 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
670 /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
671 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
672 Address OriginalAddr = EmitLValue(&DRE).getAddress();
673 QualType Type = VD->getType();
674 if (Type->isArrayType()) {
675 // Emit VarDecl with copy init for arrays.
676 // Get the address of the original variable captured in current
678 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
679 auto Emission = EmitAutoVarAlloca(*VD);
680 auto *Init = VD->getInit();
681 if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
682 // Perform simple memcpy.
683 EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
686 EmitOMPAggregateAssign(
687 Emission.getAllocatedAddress(), OriginalAddr, Type,
688 [this, VDInit, Init](Address DestElement,
689 Address SrcElement) {
690 // Clean up any temporaries needed by the initialization.
691 RunCleanupsScope InitScope(*this);
692 // Emit initialization for single element.
693 setAddrOfLocalVar(VDInit, SrcElement);
694 EmitAnyExprToMem(Init, DestElement,
695 Init->getType().getQualifiers(),
696 /*IsInitializer*/ false);
697 LocalDeclMap.erase(VDInit);
700 EmitAutoVarCleanups(Emission);
701 return Emission.getAllocatedAddress();
704 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
705 // Emit private VarDecl with copy init.
706 // Remap temp VDInit variable to the address of the original
708 // (for proper handling of captured global variables).
709 setAddrOfLocalVar(VDInit, OriginalAddr);
711 LocalDeclMap.erase(VDInit);
712 return GetAddrOfLocalVar(VD);
715 assert(IsRegistered &&
716 "firstprivate var already registered as private");
717 // Silence the warning about unused variable.
724 return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
727 void CodeGenFunction::EmitOMPPrivateClause(
728 const OMPExecutableDirective &D,
729 CodeGenFunction::OMPPrivateScope &PrivateScope) {
730 if (!HaveInsertPoint())
732 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
733 for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
734 auto IRef = C->varlist_begin();
735 for (auto IInit : C->private_copies()) {
736 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
737 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
738 auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
740 PrivateScope.addPrivate(OrigVD, [&]() -> Address {
741 // Emit private VarDecl with copy init.
743 return GetAddrOfLocalVar(VD);
745 assert(IsRegistered && "private var already registered as private");
746 // Silence the warning about unused variable.
754 bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
755 if (!HaveInsertPoint())
757 // threadprivate_var1 = master_threadprivate_var1;
758 // operator=(threadprivate_var2, master_threadprivate_var2);
760 // __kmpc_barrier(&loc, global_tid);
761 llvm::DenseSet<const VarDecl *> CopiedVars;
762 llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
763 for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
764 auto IRef = C->varlist_begin();
765 auto ISrcRef = C->source_exprs().begin();
766 auto IDestRef = C->destination_exprs().begin();
767 for (auto *AssignOp : C->assignment_ops()) {
768 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
769 QualType Type = VD->getType();
770 if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
771 // Get the address of the master variable. If we are emitting code with
772 // TLS support, the address is passed from the master as field in the
773 // captured declaration.
774 Address MasterAddr = Address::invalid();
775 if (getLangOpts().OpenMPUseTLS &&
776 getContext().getTargetInfo().isTLSSupported()) {
777 assert(CapturedStmtInfo->lookup(VD) &&
778 "Copyin threadprivates should have been captured!");
779 DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
780 VK_LValue, (*IRef)->getExprLoc());
781 MasterAddr = EmitLValue(&DRE).getAddress();
782 LocalDeclMap.erase(VD);
785 Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
786 : CGM.GetAddrOfGlobal(VD),
787 getContext().getDeclAlign(VD));
789 // Get the address of the threadprivate variable.
790 Address PrivateAddr = EmitLValue(*IRef).getAddress();
791 if (CopiedVars.size() == 1) {
792 // At first check if current thread is a master thread. If it is, no
793 // need to copy data.
794 CopyBegin = createBasicBlock("copyin.not.master");
795 CopyEnd = createBasicBlock("copyin.not.master.end");
796 Builder.CreateCondBr(
797 Builder.CreateICmpNE(
798 Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
799 Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy)),
801 EmitBlock(CopyBegin);
803 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
804 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
805 EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
813 // Exit out of copying procedure for non-master thread.
814 EmitBlock(CopyEnd, /*IsFinished=*/true);
820 bool CodeGenFunction::EmitOMPLastprivateClauseInit(
821 const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
822 if (!HaveInsertPoint())
824 bool HasAtLeastOneLastprivate = false;
825 llvm::DenseSet<const VarDecl *> SIMDLCVs;
826 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
827 auto *LoopDirective = cast<OMPLoopDirective>(&D);
828 for (auto *C : LoopDirective->counters()) {
830 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
833 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
834 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
835 HasAtLeastOneLastprivate = true;
836 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()))
838 auto IRef = C->varlist_begin();
839 auto IDestRef = C->destination_exprs().begin();
840 for (auto *IInit : C->private_copies()) {
841 // Keep the address of the original variable for future update at the end
843 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
844 // Taskloops do not require additional initialization, it is done in
845 // runtime support library.
846 if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
847 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
848 PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
850 const_cast<VarDecl *>(OrigVD),
851 /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
853 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
854 return EmitLValue(&DRE).getAddress();
856 // Check if the variable is also a firstprivate: in this case IInit is
857 // not generated. Initialization of this variable will happen in codegen
858 // for 'firstprivate' clause.
859 if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
860 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
861 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
862 // Emit private VarDecl with copy init.
864 return GetAddrOfLocalVar(VD);
866 assert(IsRegistered &&
867 "lastprivate var already registered as private");
875 return HasAtLeastOneLastprivate;
// Emits the final copy-back of 'lastprivate' variables into their original
// storage, optionally guarded by the last-iteration flag.
// \param D          Directive carrying the lastprivate clauses.
// \param NoFinals   When set, loop-counter final updates are suppressed (the
//                   counters are only recorded as already emitted).
// \param IsLastIterCond Runtime flag; when non-null the copy-back is wrapped
//                   in an if-then block on this condition.
// NOTE(review): several original source lines are elided in this chunk
// (early-return body, some closing braces); comments describe visible code.
878 void CodeGenFunction::EmitOMPLastprivateClauseFinal(
879 const OMPExecutableDirective &D, bool NoFinals,
880 llvm::Value *IsLastIterCond) {
881 if (!HaveInsertPoint())
883 // Emit following code:
884 // if (<IsLastIterCond>) {
885 // orig_var1 = private_orig_var1;
887 // orig_varn = private_orig_varn;
// Conditional block is only built when a last-iteration flag was supplied.
889 llvm::BasicBlock *ThenBB = nullptr;
890 llvm::BasicBlock *DoneBB = nullptr;
891 if (IsLastIterCond) {
892 ThenBB = createBasicBlock(".omp.lastprivate.then");
893 DoneBB = createBasicBlock(".omp.lastprivate.done");
894 Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
// Track variables already handled so each is copied back at most once, and
// remember loop counters together with their 'final' update expressions.
897 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
898 llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
899 if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
900 auto IC = LoopDirective->counters().begin();
901 for (auto F : LoopDirective->finals()) {
903 cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
905 AlreadyEmittedVars.insert(D);
907 LoopCountersAndUpdates[D] = F;
// Walk each lastprivate clause; the parallel iterators over varlist,
// source_exprs and destination_exprs stay in lockstep with assignment_ops.
911 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
912 auto IRef = C->varlist_begin();
913 auto ISrcRef = C->source_exprs().begin();
914 auto IDestRef = C->destination_exprs().begin();
915 for (auto *AssignOp : C->assignment_ops()) {
916 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
917 QualType Type = PrivateVD->getType();
918 auto *CanonicalVD = PrivateVD->getCanonicalDecl();
919 if (AlreadyEmittedVars.insert(CanonicalVD).second) {
920 // If lastprivate variable is a loop control variable for loop-based
921 // directive, update its value before copyin back to original
923 if (auto *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
924 EmitIgnoredExpr(FinalExpr);
925 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
926 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
927 // Get the address of the original variable.
928 Address OriginalAddr = GetAddrOfLocalVar(DestVD);
929 // Get the address of the private variable.
930 Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
// References are dereferenced so the copy targets the referenced storage.
931 if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
933 Address(Builder.CreateLoad(PrivateAddr),
934 getNaturalTypeAlignment(RefTy->getPointeeType()));
935 EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
941 if (auto *PostUpdate = C->getPostUpdateExpr())
942 EmitIgnoredExpr(PostUpdate);
945 EmitBlock(DoneBB, /*IsFinished=*/true);
// Rebuilds a pointer chain so that \p Addr can be used where the original
// base variable of type \p BaseTy is expected: for each pointer/reference
// level between BaseTy and the element type \p ElTy a memory temporary is
// created and the levels are chained with stores.
// NOTE(review): some original lines are elided here (loop-body tail,
// conditional around the Ty assignment); comments cover visible code only.
948 static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
949 LValue BaseLV, llvm::Value *Addr) {
950 Address Tmp = Address::invalid();
951 Address TopTmp = Address::invalid();
952 Address MostTopTmp = Address::invalid();
953 BaseTy = BaseTy.getNonReferenceType();
// Peel pointer/reference levels until the element type is reached.
954 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
955 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
956 Tmp = CGF.CreateMemTemp(BaseTy);
957 if (TopTmp.isValid())
958 CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
962 BaseTy = BaseTy->getPointeeType();
964 llvm::Type *Ty = BaseLV.getPointer()->getType();
966 Ty = Tmp.getElementType();
// Address-space-safe cast of the incoming address to the expected type.
967 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
969 CGF.Builder.CreateStore(Addr, Tmp);
972 return Address(Addr, BaseLV.getAlignment());
// Dereferences pointer/reference levels of \p BaseLV until the value of
// element type \p ElTy is reached, then returns an lvalue for that storage
// cast to ElTy's memory representation.
// NOTE(review): the second parameter-list line and an 'else' line are elided
// in this chunk; the visible branches load through pointers and references.
975 static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
977 BaseTy = BaseTy.getNonReferenceType();
978 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
979 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
980 if (auto *PtrTy = BaseTy->getAs<PointerType>())
981 BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
983 BaseLV = CGF.EmitLoadOfReferenceLValue(BaseLV.getAddress(),
984 BaseTy->castAs<ReferenceType>());
986 BaseTy = BaseTy->getPointeeType();
// Result keeps the source alignment/type info but points at ElTy storage.
988 return CGF.MakeAddrLValue(
990 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
991 BaseLV.getPointer(), CGF.ConvertTypeForMem(ElTy)->getPointerTo()),
992 BaseLV.getAlignment()),
993 BaseLV.getType(), BaseLV.getAlignmentSource());
// Emits privatization and initialization for all 'reduction' clause
// variables of directive \p D, registering the private copies (and the
// LHS/RHS helper variables used by the reduction codegen) in \p PrivateScope.
// Handles three variable shapes: array sections, array subscripts, and
// whole variables (array-typed or scalar).
// NOTE(review): a number of original source lines are elided in this chunk
// (closing braces, parts of OpaqueValueMapping expressions, else branches);
// comments below describe only the code that is visible.
996 void CodeGenFunction::EmitOMPReductionClauseInit(
997 const OMPExecutableDirective &D,
998 CodeGenFunction::OMPPrivateScope &PrivateScope) {
999 if (!HaveInsertPoint())
1001 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
// Parallel iterators over the clause's helper expression lists; they are
// advanced in lockstep with the varlist loop below.
1002 auto ILHS = C->lhs_exprs().begin();
1003 auto IRHS = C->rhs_exprs().begin();
1004 auto IPriv = C->privates().begin();
1005 auto IRed = C->reduction_ops().begin();
1006 for (auto IRef : C->varlists()) {
1007 auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
1008 auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
1009 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
// DRD: user-declared reduction ('declare reduction') initializer, if any.
1010 auto *DRD = getReductionInit(*IRed);
// --- Case 1: reduction over an array section (a[lb:len]). ---
1011 if (auto *OASE = dyn_cast<OMPArraySectionExpr>(IRef)) {
// Strip nested sections/subscripts down to the underlying DeclRefExpr.
1012 auto *Base = OASE->getBase()->IgnoreParenImpCasts();
1013 while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
1014 Base = TempOASE->getBase()->IgnoreParenImpCasts();
1015 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1016 Base = TempASE->getBase()->IgnoreParenImpCasts();
1017 auto *DE = cast<DeclRefExpr>(Base);
1018 auto *OrigVD = cast<VarDecl>(DE->getDecl());
1019 auto OASELValueLB = EmitOMPArraySectionExpr(OASE);
1021 EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
1022 auto OriginalBaseLValue = EmitLValue(DE);
1024 loadToBegin(*this, OrigVD->getType(), OASELValueLB.getType(),
1025 OriginalBaseLValue);
1026 // Store the address of the original variable associated with the LHS
1027 // implicit variable.
1028 PrivateScope.addPrivate(LHSVD, [OASELValueLB]() -> Address {
1029 return OASELValueLB.getAddress();
1031 // Emit reduction copy.
1032 bool IsRegistered = PrivateScope.addPrivate(
1033 OrigVD, [this, OrigVD, PrivateVD, BaseLValue, OASELValueLB,
1034 OASELValueUB, OriginalBaseLValue, DRD, IRed]() -> Address {
1035 // Emit VarDecl with copy init for arrays.
1036 // Get the address of the original variable captured in current
// Section length = (UB - LB) + 1 elements; used to size the private VLA.
1038 auto *Size = Builder.CreatePtrDiff(OASELValueUB.getPointer(),
1039 OASELValueLB.getPointer());
1040 Size = Builder.CreateNUWAdd(
1041 Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
1042 CodeGenFunction::OpaqueValueMapping OpaqueMap(
1043 *this, cast<OpaqueValueExpr>(
1045 .getAsVariableArrayType(PrivateVD->getType())
1048 EmitVariablyModifiedType(PrivateVD->getType());
1049 auto Emission = EmitAutoVarAlloca(*PrivateVD);
1050 auto Addr = Emission.getAllocatedAddress();
1051 auto *Init = PrivateVD->getInit();
1052 EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(),
1054 OASELValueLB.getAddress());
1055 EmitAutoVarCleanups(Emission);
1056 // Emit private VarDecl with reduction init.
// Re-point the private buffer so indexing with the original base works.
1057 auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
1058 OASELValueLB.getPointer());
1059 auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
1060 return castToBase(*this, OrigVD->getType(),
1061 OASELValueLB.getType(), OriginalBaseLValue,
1064 assert(IsRegistered && "private var already registered as private");
1065 // Silence the warning about unused variable.
1067 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
1068 return GetAddrOfLocalVar(PrivateVD);
// --- Case 2: reduction over a single array element (a[i]). ---
1070 } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(IRef)) {
1071 auto *Base = ASE->getBase()->IgnoreParenImpCasts();
1072 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1073 Base = TempASE->getBase()->IgnoreParenImpCasts();
1074 auto *DE = cast<DeclRefExpr>(Base);
1075 auto *OrigVD = cast<VarDecl>(DE->getDecl());
1076 auto ASELValue = EmitLValue(ASE);
1077 auto OriginalBaseLValue = EmitLValue(DE);
1078 LValue BaseLValue = loadToBegin(
1079 *this, OrigVD->getType(), ASELValue.getType(), OriginalBaseLValue);
1080 // Store the address of the original variable associated with the LHS
1081 // implicit variable.
1082 PrivateScope.addPrivate(
1083 LHSVD, [ASELValue]() -> Address { return ASELValue.getAddress(); });
1084 // Emit reduction copy.
1085 bool IsRegistered = PrivateScope.addPrivate(
1086 OrigVD, [this, OrigVD, PrivateVD, BaseLValue, ASELValue,
1087 OriginalBaseLValue, DRD, IRed]() -> Address {
1088 // Emit private VarDecl with reduction init.
1089 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1090 auto Addr = Emission.getAllocatedAddress();
// Prefer the user-declared reduction initializer when one applies.
1091 if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1092 emitInitWithReductionInitializer(*this, DRD, *IRed, Addr,
1093 ASELValue.getAddress(),
1094 ASELValue.getType());
1096 EmitAutoVarInit(Emission);
1097 EmitAutoVarCleanups(Emission);
1098 auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
1099 ASELValue.getPointer());
1100 auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
1101 return castToBase(*this, OrigVD->getType(), ASELValue.getType(),
1102 OriginalBaseLValue, Ptr);
1104 assert(IsRegistered && "private var already registered as private");
1105 // Silence the warning about unused variable.
1107 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
1108 return Builder.CreateElementBitCast(
1109 GetAddrOfLocalVar(PrivateVD), ConvertTypeForMem(RHSVD->getType()),
// --- Case 3: reduction over a whole variable. ---
1113 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
1114 QualType Type = PrivateVD->getType();
// 3a: array-typed variable — element-wise aggregate initialization.
1115 if (getContext().getAsArrayType(Type)) {
1116 // Store the address of the original variable associated with the LHS
1117 // implicit variable.
1118 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1119 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1120 IRef->getType(), VK_LValue, IRef->getExprLoc());
1121 Address OriginalAddr = EmitLValue(&DRE).getAddress();
1122 PrivateScope.addPrivate(LHSVD, [this, &OriginalAddr,
1123 LHSVD]() -> Address {
1124 OriginalAddr = Builder.CreateElementBitCast(
1125 OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
1126 return OriginalAddr;
1128 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
// Variably-modified types need their size expression evaluated first.
1129 if (Type->isVariablyModifiedType()) {
1130 CodeGenFunction::OpaqueValueMapping OpaqueMap(
1131 *this, cast<OpaqueValueExpr>(
1133 .getAsVariableArrayType(PrivateVD->getType())
1136 getTypeSize(OrigVD->getType().getNonReferenceType())));
1137 EmitVariablyModifiedType(Type);
1139 auto Emission = EmitAutoVarAlloca(*PrivateVD);
1140 auto Addr = Emission.getAllocatedAddress();
1141 auto *Init = PrivateVD->getInit();
1142 EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(),
1143 DRD ? *IRed : Init, OriginalAddr);
1144 EmitAutoVarCleanups(Emission);
1145 return Emission.getAllocatedAddress();
1147 assert(IsRegistered && "private var already registered as private");
1148 // Silence the warning about unused variable.
1150 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
1151 return Builder.CreateElementBitCast(
1152 GetAddrOfLocalVar(PrivateVD),
1153 ConvertTypeForMem(RHSVD->getType()), "rhs.begin");
// 3b: scalar variable.
1156 // Store the address of the original variable associated with the LHS
1157 // implicit variable.
1158 Address OriginalAddr = Address::invalid();
1159 PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef,
1160 &OriginalAddr]() -> Address {
1161 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1162 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1163 IRef->getType(), VK_LValue, IRef->getExprLoc());
1164 OriginalAddr = EmitLValue(&DRE).getAddress();
1165 return OriginalAddr;
1167 // Emit reduction copy.
1168 bool IsRegistered = PrivateScope.addPrivate(
1169 OrigVD, [this, PrivateVD, OriginalAddr, DRD, IRed]() -> Address {
1170 // Emit private VarDecl with reduction init.
1171 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1172 auto Addr = Emission.getAllocatedAddress();
1173 if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1174 emitInitWithReductionInitializer(*this, DRD, *IRed, Addr,
1176 PrivateVD->getType());
1178 EmitAutoVarInit(Emission);
1179 EmitAutoVarCleanups(Emission);
1182 assert(IsRegistered && "private var already registered as private");
1183 // Silence the warning about unused variable.
1185 PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
1186 return GetAddrOfLocalVar(PrivateVD);
// Collects all reduction clause helper expressions of \p D and, if any
// reductions exist, delegates to the OpenMP runtime to emit the final
// cross-thread combination of the private copies.
// \param ReductionKind Directive kind passed through to the runtime emitter.
1198 void CodeGenFunction::EmitOMPReductionClauseFinal(
1199 const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
1200 if (!HaveInsertPoint())
// Flatten the per-clause expression lists into single vectors for the
// runtime call.
1202 llvm::SmallVector<const Expr *, 8> Privates;
1203 llvm::SmallVector<const Expr *, 8> LHSExprs;
1204 llvm::SmallVector<const Expr *, 8> RHSExprs;
1205 llvm::SmallVector<const Expr *, 8> ReductionOps;
1206 bool HasAtLeastOneReduction = false;
1207 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1208 HasAtLeastOneReduction = true;
1209 Privates.append(C->privates().begin(), C->privates().end());
1210 LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
1211 RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
1212 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
1214 if (HasAtLeastOneReduction) {
// nowait: explicit 'nowait' clause, parallel directives (implicit barrier
// follows anyway), or plain 'simd'.
1215 bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
1216 isOpenMPParallelDirective(D.getDirectiveKind()) ||
1217 D.getDirectiveKind() == OMPD_simd;
1218 bool SimpleReduction = D.getDirectiveKind() == OMPD_simd;
1219 // Emit nowait reduction if nowait clause is present or directive is a
1220 // parallel directive (it always has implicit barrier).
1221 CGM.getOpenMPRuntime().emitReduction(
1222 *this, D.getLocEnd(), Privates, LHSExprs, RHSExprs, ReductionOps,
1223 {WithNowait, SimpleReduction, ReductionKind});
// Emits the post-update expressions of all reduction clauses of \p D.
// \param CondGen Callback producing an optional guard condition; when the
//        first post-update is found and CondGen returns non-null, the
//        updates are wrapped in a conditional block.
// NOTE(review): the block that emits ThenBB appears before the loop body's
// EmitIgnoredExpr; some original lines are elided around it.
1227 static void emitPostUpdateForReductionClause(
1228 CodeGenFunction &CGF, const OMPExecutableDirective &D,
1229 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1230 if (!CGF.HaveInsertPoint())
1232 llvm::BasicBlock *DoneBB = nullptr;
1233 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1234 if (auto *PostUpdate = C->getPostUpdateExpr()) {
1236 if (auto *Cond = CondGen(CGF)) {
1237 // If the first post-update expression is found, emit conditional
1238 // block if it was requested.
1239 auto *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
1240 DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
1241 CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1242 CGF.EmitBlock(ThenBB);
1245 CGF.EmitIgnoredExpr(PostUpdate);
1249 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
// Shared emission path for parallel-style directives: outlines the captured
// region, emits num_threads/proc_bind clause handling, resolves the 'if'
// clause condition, and issues the runtime parallel call with the captured
// variables.
1252 static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
1253 const OMPExecutableDirective &S,
1254 OpenMPDirectiveKind InnermostKind,
1255 const RegionCodeGenTy &CodeGen) {
1256 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
// Outline the region body into a separate function for the runtime.
1257 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
1258 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
1259 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
1260 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
1261 auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
1262 /*IgnoreResultAssign*/ true);
1263 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
1264 CGF, NumThreads, NumThreadsClause->getLocStart());
1266 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
1267 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
1268 CGF.CGM.getOpenMPRuntime().emitProcBindClause(
1269 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
// Pick the condition of an 'if' clause that applies to 'parallel' (either
// unmodified or with the 'parallel' name modifier).
1271 const Expr *IfCond = nullptr;
1272 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
1273 if (C->getNameModifier() == OMPD_unknown ||
1274 C->getNameModifier() == OMPD_parallel) {
1275 IfCond = C->getCondition();
1280 OMPParallelScope Scope(CGF, S);
1281 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
1282 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
1283 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
1284 CapturedVars, IfCond);
// Emits '#pragma omp parallel': builds the region codegen callback (copyin,
// firstprivate, private, reduction handling around the captured body) and
// hands it to the common parallel emission helper.
1287 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
1288 // Emit parallel region as a standalone region.
1289 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1290 OMPPrivateScope PrivateScope(CGF);
1291 bool Copyins = CGF.EmitOMPCopyinClause(S);
1292 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1294 // Emit implicit barrier to synchronize threads and avoid data races on
1295 // propagation master's thread values of threadprivate variables to local
1296 // instances of that variables of all other implicit threads.
// NOTE(review): lines around here are elided; presumably this barrier is
// guarded on Copyins/firstprivates — confirm against the full source.
1297 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1298 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
1299 /*ForceSimpleCall=*/true);
1301 CGF.EmitOMPPrivateClause(S, PrivateScope);
1302 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1303 (void)PrivateScope.Privatize();
1304 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1305 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
1307 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
// Reduction post-updates run unconditionally (no guard condition).
1308 emitPostUpdateForReductionClause(
1309 *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
// Emits one iteration of an OpenMP loop body: counter updates, linear
// variable updates, then the body statement, with a continue-destination
// block so 'continue' jumps to the per-iteration end.
1312 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
1313 JumpDest LoopExit) {
1314 RunCleanupsScope BodyScope(*this);
1315 // Update counters values on current iteration.
1316 for (auto I : D.updates()) {
1319 // Update the linear variables.
1320 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1321 for (auto *U : C->updates())
1325 // On a continue in the body, jump to the end.
1326 auto Continue = getJumpDestInCurrentScope("omp.body.continue");
// Register break/continue targets for the duration of the body emission.
1327 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1329 EmitStmt(D.getBody());
1330 // The end (updates/cleanups).
1331 EmitBlock(Continue.getBlock());
1332 BreakContinueStack.pop_back();
// Emits the generic inner-loop skeleton used by OpenMP loop directives:
// condition block, optional cleanup exit, body (via \p BodyGen), increment
// block (IncExpr plus \p PostIncGen), and back-edge to the condition.
1335 void CodeGenFunction::EmitOMPInnerLoop(
1336 const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
1337 const Expr *IncExpr,
1338 const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
1339 const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
1340 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
1342 // Start the loop with a block that tests the condition.
1343 auto CondBlock = createBasicBlock("omp.inner.for.cond");
1344 EmitBlock(CondBlock);
1345 const SourceRange &R = S.getSourceRange();
// Register the loop with LoopStack so loop metadata/attributes attach here.
1346 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1347 SourceLocToDebugLoc(R.getEnd()));
1349 // If there are any cleanups between here and the loop-exit scope,
1350 // create a block to stage a loop exit along.
1351 auto ExitBlock = LoopExit.getBlock();
1352 if (RequiresCleanup)
1353 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
1355 auto LoopBody = createBasicBlock("omp.inner.for.body");
1358 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
// Staged exit: run cleanups before actually leaving the loop.
1359 if (ExitBlock != LoopExit.getBlock()) {
1360 EmitBlock(ExitBlock);
1361 EmitBranchThroughCleanup(LoopExit);
1364 EmitBlock(LoopBody);
1365 incrementProfileCounter(&S);
1367 // Create a block for the increment.
1368 auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
1369 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1373 // Emit "IV = IV + 1" and a back-edge to the condition block.
1374 EmitBlock(Continue.getBlock());
1375 EmitIgnoredExpr(IncExpr);
1377 BreakContinueStack.pop_back();
1378 EmitBranch(CondBlock);
1380 // Emit the fall-through block.
1381 EmitBlock(LoopExit.getBlock());
// Emits initializers for 'linear' clause variables of loop directive \p D,
// plus pre-computation of any non-constant linear steps.
1384 void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
1385 if (!HaveInsertPoint())
1387 // Emit inits for the linear variables.
1388 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1389 for (auto *Init : C->inits()) {
1390 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
// When the init refers to the original variable, capture its current
// value into the private copy via an explicit DeclRefExpr.
1391 if (auto *Ref = dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
1392 AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
1393 auto *OrigVD = cast<VarDecl>(Ref->getDecl());
1394 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1395 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1396 VD->getInit()->getType(), VK_LValue,
1397 VD->getInit()->getExprLoc());
1398 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
1400 /*capturedByInit=*/false);
1401 EmitAutoVarCleanups(Emission);
1405 // Emit the linear steps for the linear clauses.
1406 // If a step is not constant, it is pre-calculated before the loop.
1407 if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
1408 if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
1409 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
1410 // Emit calculation of the linear step.
1411 EmitIgnoredExpr(CS);
// Emits the final value assignments for 'linear' clause variables after the
// loop, optionally guarded by a condition produced by \p CondGen, followed
// by any clause post-update expressions.
1416 void CodeGenFunction::EmitOMPLinearClauseFinal(
1417 const OMPLoopDirective &D,
1418 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1419 if (!HaveInsertPoint())
1421 llvm::BasicBlock *DoneBB = nullptr;
1422 // Emit the final values of the linear variables.
1423 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1424 auto IC = C->varlist_begin();
1425 for (auto *F : C->finals()) {
1427 if (auto *Cond = CondGen(*this)) {
1428 // If the first post-update expression is found, emit conditional
1429 // block if it was requested.
1430 auto *ThenBB = createBasicBlock(".omp.linear.pu");
1431 DoneBB = createBasicBlock(".omp.linear.pu.done");
1432 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
// Temporarily map the original variable to its own address so the final
// expression writes back to the original storage.
1436 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
1437 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
1438 CapturedStmtInfo->lookup(OrigVD) != nullptr,
1439 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
1440 Address OrigAddr = EmitLValue(&DRE).getAddress();
1441 CodeGenFunction::OMPPrivateScope VarScope(*this);
1442 VarScope.addPrivate(OrigVD, [OrigAddr]() -> Address { return OrigAddr; });
1443 (void)VarScope.Privatize();
1447 if (auto *PostUpdate = C->getPostUpdateExpr())
1448 EmitIgnoredExpr(PostUpdate);
1451 EmitBlock(DoneBB, /*IsFinished=*/true);
// Emits llvm alignment assumptions for every pointer listed in 'aligned'
// clauses of \p D. A clause-level constant alignment is used when given;
// otherwise the target's default SIMD alignment for the pointee type.
1454 static void emitAlignedClause(CodeGenFunction &CGF,
1455 const OMPExecutableDirective &D) {
1456 if (!CGF.HaveInsertPoint())
1458 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
1459 unsigned ClauseAlignment = 0;
1460 if (auto AlignmentExpr = Clause->getAlignment()) {
// The aligned clause argument must fold to an integer constant.
1462 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
1463 ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
1465 for (auto E : Clause->varlists()) {
1466 unsigned Alignment = ClauseAlignment;
1467 if (Alignment == 0) {
1468 // OpenMP [2.8.1, Description]
1469 // If no optional parameter is specified, implementation-defined default
1470 // alignments for SIMD instructions on the target platforms are assumed.
1473 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
1474 E->getType()->getPointeeType()))
1477 assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
1478 "alignment is not power of 2");
1479 if (Alignment != 0) {
1480 llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
1481 CGF.EmitAlignmentAssumption(PtrValue, Alignment);
// Registers private copies of the loop counters of \p S in \p LoopScope.
// Also remaps the private counter back to the original variable's storage
// when the original is local, captured, or global, so uses of either name
// resolve consistently during loop emission.
1487 void CodeGenFunction::EmitOMPPrivateLoopCounters(
1488 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
1489 if (!HaveInsertPoint())
// counters() and private_counters() are parallel lists.
1491 auto I = S.private_counters().begin();
1492 for (auto *E : S.counters()) {
1493 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1494 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
1495 (void)LoopScope.addPrivate(VD, [&]() -> Address {
1496 // Emit var without initialization.
// Only allocate the private counter once per function.
1497 if (!LocalDeclMap.count(PrivateVD)) {
1498 auto VarEmission = EmitAutoVarAlloca(*PrivateVD);
1499 EmitAutoVarCleanups(VarEmission);
1501 DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
1502 /*RefersToEnclosingVariableOrCapture=*/false,
1503 (*I)->getType(), VK_LValue, (*I)->getExprLoc());
1504 return EmitLValue(&DRE).getAddress();
1506 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
1507 VD->hasGlobalStorage()) {
1508 (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
1509 DeclRefExpr DRE(const_cast<VarDecl *>(VD),
1510 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
1511 E->getType(), VK_LValue, E->getExprLoc());
1512 return EmitLValue(&DRE).getAddress();
// Emits the loop pre-condition check of \p S: privatizes the loop counters,
// evaluates their initial values, then branches on \p Cond to
// \p TrueBlock (loop executes at least once) or \p FalseBlock.
1519 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
1520 const Expr *Cond, llvm::BasicBlock *TrueBlock,
1521 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
1522 if (!CGF.HaveInsertPoint())
1525 CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
1526 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
1527 (void)PreCondScope.Privatize();
1528 // Get initial values of real counters.
1529 for (auto I : S.inits()) {
1530 CGF.EmitIgnoredExpr(I);
1533 // Check that loop is executed at least one time.
// TrueCount feeds profile-guided branch weighting.
1534 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
// Privatizes 'linear' clause variables of \p D in \p PrivateScope.
// Variables that are also SIMD loop counters are skipped for registration
// (they are privatized as counters); their private copies are still emitted.
1537 void CodeGenFunction::EmitOMPLinearClause(
1538 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
1539 if (!HaveInsertPoint())
// Collect canonical decls of SIMD loop counters to exclude them below.
1541 llvm::DenseSet<const VarDecl *> SIMDLCVs;
1542 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
1543 auto *LoopDirective = cast<OMPLoopDirective>(&D);
1544 for (auto *C : LoopDirective->counters()) {
1546 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
1549 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1550 auto CurPrivate = C->privates().begin();
1551 for (auto *E : C->varlists()) {
1552 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1554 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
1555 if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
1556 bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
1557 // Emit private VarDecl with copy init.
1558 EmitVarDecl(*PrivateVD);
1559 return GetAddrOfLocalVar(PrivateVD);
1561 assert(IsRegistered && "linear var already registered as private");
1562 // Silence the warning about unused variable.
// Loop-counter case: emit the private copy without registering it.
1565 EmitVarDecl(*PrivateVD);
// Applies 'simdlen'/'safelen' clauses of \p D to the loop metadata:
// both set the vectorize width; 'safelen' additionally forbids marking the
// loop's memory accesses as parallel (loop-carried dependences beyond
// 'safelen' iterations are possible).
1571 static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
1572 const OMPExecutableDirective &D,
1574 if (!CGF.HaveInsertPoint())
1576 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
1577 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
1578 /*ignoreResult=*/true);
1579 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1580 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1581 // In presence of finite 'safelen', it may be unsafe to mark all
1582 // the memory instructions parallel, because loop-carried
1583 // dependences of 'safelen' iterations are possible.
1585 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
1586 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
1587 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
1588 /*ignoreResult=*/true);
1589 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
1590 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
1591 // In presence of finite 'safelen', it may be unsafe to mark all
1592 // the memory instructions parallel, because loop-carried
1593 // dependences of 'safelen' iterations are possible.
1594 CGF.LoopStack.setParallel(false);
// Initializes SIMD loop metadata for \p D: marks the loop parallel (unless
// monotonic scheduling is in effect), enables vectorization, and applies
// simdlen/safelen clause effects.
1598 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
1600 // Walk clauses and process safelen/lastprivate.
1601 LoopStack.setParallel(!IsMonotonic);
1602 LoopStack.setVectorizeEnable(true);
1603 emitSimdlenSafelenClause(*this, D, IsMonotonic);
// Emits final-value updates for the loop counters of a SIMD directive,
// writing the computed final value back to the original variable (or to the
// captured-expression storage for OMPCapturedExprDecl counters), optionally
// guarded by a condition from \p CondGen.
// NOTE(review): some original lines are elided (e.g. the branch selecting
// the CED path); comments describe only the visible code.
1606 void CodeGenFunction::EmitOMPSimdFinal(
1607 const OMPLoopDirective &D,
1608 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
1609 if (!HaveInsertPoint())
1611 llvm::BasicBlock *DoneBB = nullptr;
// counters(), private_counters() and finals() are parallel lists.
1612 auto IC = D.counters().begin();
1613 auto IPC = D.private_counters().begin();
1614 for (auto F : D.finals()) {
1615 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
1616 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
1617 auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
// Only counters visible outside the loop need a final write-back.
1618 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
1619 OrigVD->hasGlobalStorage() || CED) {
1621 if (auto *Cond = CondGen(*this)) {
1622 // If the first post-update expression is found, emit conditional
1623 // block if it was requested.
1624 auto *ThenBB = createBasicBlock(".omp.final.then");
1625 DoneBB = createBasicBlock(".omp.final.done");
1626 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1630 Address OrigAddr = Address::invalid();
1632 OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
1634 DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
1635 /*RefersToEnclosingVariableOrCapture=*/false,
1636 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
1637 OrigAddr = EmitLValue(&DRE).getAddress();
// Map the original counter to OrigAddr so the final expression stores
// into the right location.
1639 OMPPrivateScope VarScope(*this);
1640 VarScope.addPrivate(OrigVD,
1641 [OrigAddr]() -> Address { return OrigAddr; });
1642 (void)VarScope.Privatize();
1649 EmitBlock(DoneBB, /*IsFinished=*/true);
// Emits '#pragma omp simd': precondition check, iteration variable and
// iteration-count setup, clause privatization (linear/private/reduction/
// lastprivate), the vectorizable inner loop, and the final clause
// copy-backs, all emitted as an inlined region.
// NOTE(review): several original lines are elided in this chunk (the
// constant-folded precondition branch, closing braces); comments describe
// only the visible code.
1652 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
1653 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1654 OMPLoopScope PreInitScope(CGF, S);
1656 // for (IV in 0..LastIteration) BODY;
1657 // <Final counter/linear vars updates>;
1661 // Emit: if (PreCond) - begin.
1662 // If the condition constant folds and can be elided, avoid emitting the
1665 llvm::BasicBlock *ContBlock = nullptr;
1666 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
1670 auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
1671 ContBlock = CGF.createBasicBlock("simd.if.end");
1672 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
1673 CGF.getProfileCount(&S));
1674 CGF.EmitBlock(ThenBlock);
1675 CGF.incrementProfileCounter(&S);
1678 // Emit the loop iteration variable.
1679 const Expr *IVExpr = S.getIterationVariable();
1680 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
1681 CGF.EmitVarDecl(*IVDecl);
1682 CGF.EmitIgnoredExpr(S.getInit());
1684 // Emit the iterations count variable.
1685 // If it is not a variable, Sema decided to calculate iterations count on
1686 // each iteration (e.g., it is foldable into a constant).
1687 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
1688 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
1689 // Emit calculation of the iterations count.
1690 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
1693 CGF.EmitOMPSimdInit(S);
1695 emitAlignedClause(CGF, S);
1696 CGF.EmitOMPLinearClauseInit(S);
// Privatize all clause variables before emitting the loop itself.
1698 OMPPrivateScope LoopScope(CGF);
1699 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
1700 CGF.EmitOMPLinearClause(S, LoopScope);
1701 CGF.EmitOMPPrivateClause(S, LoopScope);
1702 CGF.EmitOMPReductionClauseInit(S, LoopScope);
1703 bool HasLastprivateClause =
1704 CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
1705 (void)LoopScope.Privatize();
1706 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
1708 [&S](CodeGenFunction &CGF) {
1709 CGF.EmitOMPLoopBody(S, JumpDest());
1710 CGF.EmitStopPoint(&S);
1712 [](CodeGenFunction &) {});
// Final updates run unconditionally inside a simd region.
1713 CGF.EmitOMPSimdFinal(
1714 S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1715 // Emit final copy of the lastprivate variables at the end of loops.
1716 if (HasLastprivateClause)
1717 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
1718 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
1719 emitPostUpdateForReductionClause(
1720 CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1722 CGF.EmitOMPLinearClauseFinal(
1723 S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
1724 // Emit: if (PreCond) - end.
1726 CGF.EmitBranch(ContBlock);
1727 CGF.EmitBlock(ContBlock, true);
1730 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1731 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
// Emit the outer "dispatch" loop for chunked worksharing: repeatedly obtain
// a chunk [LB, UB] (via runtime __kmpc_dispatch_next for dynamic/ordered
// schedules, or by advancing LB/UB by the stride for static-chunked ones) and
// run the inner loop over that chunk until no chunks remain.
// NOTE(review): this excerpt elides some interior lines; comments below
// describe only the code that is visible here.
1734 void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
1735 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
1736 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1737 auto &RT = CGM.getOpenMPRuntime();
// Iteration-variable width/signedness drive which runtime entry points get
// called (4/8-byte, signed/unsigned variants).
1739 const Expr *IVExpr = S.getIterationVariable();
1740 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1741 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1743 auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
1745 // Start the loop with a block that tests the condition.
1746 auto CondBlock = createBasicBlock("omp.dispatch.cond");
1747 EmitBlock(CondBlock);
1748 const SourceRange &R = S.getSourceRange();
1749 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
1750 SourceLocToDebugLoc(R.getEnd()));
// Compute the "another chunk available?" condition. Static schedules test
// the loop condition directly; dynamic/ordered ones ask the runtime.
1752 llvm::Value *BoolCondVal = nullptr;
1753 if (!DynamicOrOrdered) {
1754 // UB = min(UB, GlobalUB)
1755 EmitIgnoredExpr(S.getEnsureUpperBound());
1757 EmitIgnoredExpr(S.getInit());
1759 BoolCondVal = EvaluateExprAsBool(S.getCond());
1761 BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, IL,
1765 // If there are any cleanups between here and the loop-exit scope,
1766 // create a block to stage a loop exit along.
1767 auto ExitBlock = LoopExit.getBlock();
1768 if (LoopScope.requiresCleanups())
1769 ExitBlock = createBasicBlock("omp.dispatch.cleanup");
1771 auto LoopBody = createBasicBlock("omp.dispatch.body");
1772 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
1773 if (ExitBlock != LoopExit.getBlock()) {
1774 EmitBlock(ExitBlock);
1775 EmitBranchThroughCleanup(LoopExit);
1777 EmitBlock(LoopBody);
1779 // Emit "IV = LB" (in case of static schedule, we have already calculated new
1780 // LB for loop condition and emitted it above).
1781 if (DynamicOrOrdered)
1782 EmitIgnoredExpr(S.getInit());
1784 // Create a block for the increment.
1785 auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
1786 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1788 // Generate !llvm.loop.parallel metadata for loads and stores for loops
1789 // with dynamic/guided scheduling and without ordered clause.
1790 if (!isOpenMPSimdDirective(S.getDirectiveKind()))
1791 LoopStack.setParallel(!IsMonotonic);
1793 EmitOMPSimdInit(S, IsMonotonic);
// Inner loop over the current chunk; the second lambda runs per-iteration
// post-processing (ordered-iteration end for 'ordered' loops).
1795 SourceLocation Loc = S.getLocStart();
1796 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
1797 [&S, LoopExit](CodeGenFunction &CGF) {
1798 CGF.EmitOMPLoopBody(S, LoopExit);
1799 CGF.EmitStopPoint(&S);
1801 [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
1803 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
1804 CGF, Loc, IVSize, IVSigned);
1808 EmitBlock(Continue.getBlock());
1809 BreakContinueStack.pop_back();
1810 if (!DynamicOrOrdered) {
1811 // Emit "LB = LB + Stride", "UB = UB + Stride".
1812 EmitIgnoredExpr(S.getNextLowerBound());
1813 EmitIgnoredExpr(S.getNextUpperBound());
1816 EmitBranch(CondBlock);
1818 // Emit the fall-through block.
1819 EmitBlock(LoopExit.getBlock());
1821 // Tell the runtime we are done.
// Static schedules need an explicit __kmpc_for_static_fini; dynamic
// schedules are finished implicitly by the dispatch protocol.
1822 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
1823 if (!DynamicOrOrdered)
1824 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
// emitExit also wires up the cancellation exit path for this region.
1826 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
// Emit the outer loop for a worksharing 'for' with a schedule that needs one
// (anything except static non-chunked): performs the runtime init call for
// the chosen schedule, then delegates the chunk loop to EmitOMPOuterLoop.
// NOTE(review): this excerpt elides some interior lines; comments cover only
// the visible code.
1829 void CodeGenFunction::EmitOMPForOuterLoop(
1830 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
1831 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
1832 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1833 auto &RT = CGM.getOpenMPRuntime();
1835 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
1836 const bool DynamicOrOrdered =
1837 Ordered || RT.isDynamic(ScheduleKind.Schedule);
1840 !RT.isStaticNonchunked(ScheduleKind.Schedule,
1841 /*Chunked=*/Chunk != nullptr)) &&
1842 "static non-chunked schedule does not need outer loop");
1846 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1847 // When schedule(dynamic,chunk_size) is specified, the iterations are
1848 // distributed to threads in the team in chunks as the threads request them.
1849 // Each thread executes a chunk of iterations, then requests another chunk,
1850 // until no chunks remain to be distributed. Each chunk contains chunk_size
1851 // iterations, except for the last chunk to be distributed, which may have
1852 // fewer iterations. When no chunk_size is specified, it defaults to 1.
1854 // When schedule(guided,chunk_size) is specified, the iterations are assigned
1855 // to threads in the team in chunks as the executing threads request them.
1856 // Each thread executes a chunk of iterations, then requests another chunk,
1857 // until no chunks remain to be assigned. For a chunk_size of 1, the size of
1858 // each chunk is proportional to the number of unassigned iterations divided
1859 // by the number of threads in the team, decreasing to 1. For a chunk_size
1860 // with value k (greater than 1), the size of each chunk is determined in the
1861 // same way, with the restriction that the chunks do not contain fewer than k
1862 // iterations (except for the last chunk to be assigned, which may have fewer
1863 // than k iterations).
1865 // When schedule(auto) is specified, the decision regarding scheduling is
1866 // delegated to the compiler and/or runtime system. The programmer gives the
1867 // implementation the freedom to choose any possible mapping of iterations to
1868 // threads in the team.
1870 // When schedule(runtime) is specified, the decision regarding scheduling is
1871 // deferred until run time, and the schedule and chunk size are taken from the
1872 // run-sched-var ICV. If the ICV is set to auto, the schedule is
1873 // implementation defined
1875 // while(__kmpc_dispatch_next(&LB, &UB)) {
1877 // while (idx <= UB) { BODY; ++idx;
1878 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
1882 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
1883 // When schedule(static, chunk_size) is specified, iterations are divided into
1884 // chunks of size chunk_size, and the chunks are assigned to the threads in
1885 // the team in a round-robin fashion in the order of the thread number.
1887 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
1888 // while (idx <= UB) { BODY; ++idx; } // inner loop
1894 const Expr *IVExpr = S.getIterationVariable();
1895 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1896 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
// Dynamic/ordered schedules use dispatch-init with the global upper bound;
// static-chunked schedules use static-init with the bound addresses.
1898 if (DynamicOrOrdered) {
1899 llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());
1900 RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind, IVSize,
1901 IVSigned, Ordered, UBVal, Chunk);
1903 RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
1904 Ordered, IL, LB, UB, ST, Chunk);
1907 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, Ordered, LB, UB,
// Emit the outer loop for a 'distribute' directive: always static
// (ordered/dynamic are not applicable here), initialized via the
// distribute-specific static-init runtime call, then driven by
// EmitOMPOuterLoop with DynamicOrOrdered/IsMonotonic/Ordered all false.
1911 void CodeGenFunction::EmitOMPDistributeOuterLoop(
1912 OpenMPDistScheduleClauseKind ScheduleKind,
1913 const OMPDistributeDirective &S, OMPPrivateScope &LoopScope,
1914 Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
1916 auto &RT = CGM.getOpenMPRuntime();
1919 // Same behavior as a OMPForOuterLoop, except that schedule cannot be
1923 const Expr *IVExpr = S.getIterationVariable();
1924 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1925 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1927 RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
1928 IVSize, IVSigned, /* Ordered = */ false,
1929 IL, LB, UB, ST, Chunk);
1931 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false,
1932 S, LoopScope, /* Ordered = */ false, LB, UB, ST, IL, Chunk);
// Emit '#pragma omp distribute parallel for' as an inlined region: the
// captured statement is emitted directly under an OMPLoopScope, with a
// cancel-region marker (cancellation not supported here, HasCancel=false).
// NOTE(review): the EmitStmt call line appears elided in this excerpt.
1935 void CodeGenFunction::EmitOMPDistributeParallelForDirective(
1936 const OMPDistributeParallelForDirective &S) {
1937 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1938 CGM.getOpenMPRuntime().emitInlinedDirective(
1939 *this, OMPD_distribute_parallel_for,
1940 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1941 OMPLoopScope PreInitScope(CGF, S);
1942 OMPCancelStackRAII CancelRegion(CGF, OMPD_distribute_parallel_for,
1943 /*HasCancel=*/false);
1945 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit '#pragma omp distribute parallel for simd' as an inlined region over
// the captured statement (no real work distribution performed here).
1949 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
1950 const OMPDistributeParallelForSimdDirective &S) {
1951 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1952 CGM.getOpenMPRuntime().emitInlinedDirective(
1953 *this, OMPD_distribute_parallel_for_simd,
1954 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1955 OMPLoopScope PreInitScope(CGF, S);
1957 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit '#pragma omp distribute simd' as an inlined region over the captured
// statement.
1961 void CodeGenFunction::EmitOMPDistributeSimdDirective(
1962 const OMPDistributeSimdDirective &S) {
1963 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1964 CGM.getOpenMPRuntime().emitInlinedDirective(
1965 *this, OMPD_distribute_simd,
1966 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1967 OMPLoopScope PreInitScope(CGF, S);
1969 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit '#pragma omp target parallel for simd' as an inlined region over the
// captured statement.
1973 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
1974 const OMPTargetParallelForSimdDirective &S) {
1975 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1976 CGM.getOpenMPRuntime().emitInlinedDirective(
1977 *this, OMPD_target_parallel_for_simd,
1978 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1979 OMPLoopScope PreInitScope(CGF, S);
1981 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit '#pragma omp target simd' as an inlined region over the captured
// statement.
1985 void CodeGenFunction::EmitOMPTargetSimdDirective(
1986 const OMPTargetSimdDirective &S) {
1987 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1988 CGM.getOpenMPRuntime().emitInlinedDirective(
1989 *this, OMPD_target_simd, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
1990 OMPLoopScope PreInitScope(CGF, S);
1992 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit '#pragma omp teams distribute' as an inlined region over the captured
// statement.
1996 void CodeGenFunction::EmitOMPTeamsDistributeDirective(
1997 const OMPTeamsDistributeDirective &S) {
1998 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
1999 CGM.getOpenMPRuntime().emitInlinedDirective(
2000 *this, OMPD_teams_distribute,
2001 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2002 OMPLoopScope PreInitScope(CGF, S);
2004 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit '#pragma omp teams distribute simd' as an inlined region over the
// captured statement.
2008 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
2009 const OMPTeamsDistributeSimdDirective &S) {
2010 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2011 CGM.getOpenMPRuntime().emitInlinedDirective(
2012 *this, OMPD_teams_distribute_simd,
2013 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2014 OMPLoopScope PreInitScope(CGF, S);
2016 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit '#pragma omp teams distribute parallel for simd' as an inlined region
// over the captured statement.
2020 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
2021 const OMPTeamsDistributeParallelForSimdDirective &S) {
2022 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2023 CGM.getOpenMPRuntime().emitInlinedDirective(
2024 *this, OMPD_teams_distribute_parallel_for_simd,
2025 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2026 OMPLoopScope PreInitScope(CGF, S);
2028 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit '#pragma omp teams distribute parallel for' as an inlined region over
// the captured statement.
2032 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
2033 const OMPTeamsDistributeParallelForDirective &S) {
2034 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2035 CGM.getOpenMPRuntime().emitInlinedDirective(
2036 *this, OMPD_teams_distribute_parallel_for,
2037 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2038 OMPLoopScope PreInitScope(CGF, S);
2040 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit '#pragma omp target teams distribute' as an inlined region over the
// captured statement. Note: unlike the emitters above, no OMPLexicalScope /
// OMPLoopScope is created here — presumably intentional for target
// directives at this stage, but worth confirming.
2044 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
2045 const OMPTargetTeamsDistributeDirective &S) {
2046 CGM.getOpenMPRuntime().emitInlinedDirective(
2047 *this, OMPD_target_teams_distribute,
2048 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2050 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit '#pragma omp target teams distribute parallel for' as an inlined
// region over the captured statement (no lexical/loop scope set up here).
2054 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
2055 const OMPTargetTeamsDistributeParallelForDirective &S) {
2056 CGM.getOpenMPRuntime().emitInlinedDirective(
2057 *this, OMPD_target_teams_distribute_parallel_for,
2058 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2060 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit '#pragma omp target teams distribute parallel for simd' as an inlined
// region over the captured statement (no lexical/loop scope set up here).
2064 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
2065 const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
2066 CGM.getOpenMPRuntime().emitInlinedDirective(
2067 *this, OMPD_target_teams_distribute_parallel_for_simd,
2068 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2070 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit '#pragma omp target teams distribute simd' as an inlined region over
// the captured statement (no lexical/loop scope set up here).
2074 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective(
2075 const OMPTargetTeamsDistributeSimdDirective &S) {
2076 CGM.getOpenMPRuntime().emitInlinedDirective(
2077 *this, OMPD_target_teams_distribute_simd,
2078 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2080 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2084 /// \brief Emit a helper variable and return corresponding lvalue.
/// \param Helper DeclRefExpr naming a compiler-generated helper VarDecl
///        (e.g. the LB/UB/ST/IL worksharing variables); it is declared
///        (storage emitted) and then re-evaluated as an lvalue.
2085 static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
2086 const DeclRefExpr *Helper) {
2087 auto VDecl = cast<VarDecl>(Helper->getDecl());
2088 CGF.EmitVarDecl(*VDecl);
2089 return CGF.EmitLValue(Helper);
// Bundles a schedule() clause kind with its two optional modifiers
// (e.g. monotonic/nonmonotonic/simd) so they can be passed around together.
2093 struct ScheduleKindModifiersTy {
2094 OpenMPScheduleClauseKind Kind;
2095 OpenMPScheduleClauseModifier M1;
2096 OpenMPScheduleClauseModifier M2;
2097 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
2098 OpenMPScheduleClauseModifier M1,
2099 OpenMPScheduleClauseModifier M2)
2100 : Kind(Kind), M1(M1), M2(M2) {}
// Core codegen for OpenMP worksharing loops ('for', 'for simd', and the
// worksharing part of combined directives). Emits the precondition check,
// privatization scopes, schedule selection, the static-nonchunked fast path
// or the outer dispatch loop, and all clause finalization.
// Returns true if the directive has a lastprivate clause (callers use this
// to decide whether an implicit barrier is still required).
// NOTE(review): several interior lines are elided in this excerpt (e.g. the
// LB/UB/ST/IL LValue declarations around 2154-2160); comments describe only
// the visible code.
2104 bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
2105 // Emit the loop iteration variable.
2106 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
2107 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
2108 EmitVarDecl(*IVDecl);
2110 // Emit the iterations count variable.
2111 // If it is not a variable, Sema decided to calculate iterations count on each
2112 // iteration (e.g., it is foldable into a constant).
2113 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2114 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2115 // Emit calculation of the iterations count.
2116 EmitIgnoredExpr(S.getCalcLastIteration());
2119 auto &RT = CGM.getOpenMPRuntime();
2121 bool HasLastprivateClause;
2122 // Check pre-condition.
2124 OMPLoopScope PreInitScope(*this, S);
2125 // Skip the entire loop if we don't meet the precondition.
2126 // If the condition constant folds and can be elided, avoid emitting the
2129 llvm::BasicBlock *ContBlock = nullptr;
2130 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2134 auto *ThenBlock = createBasicBlock("omp.precond.then");
2135 ContBlock = createBasicBlock("omp.precond.end");
2136 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
2137 getProfileCount(&S));
2138 EmitBlock(ThenBlock);
2139 incrementProfileCounter(&S);
// Doacross (ordered(n)) loops need runtime bookkeeping initialized up front.
2142 bool Ordered = false;
2143 if (auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
2144 if (OrderedClause->getNumForLoops())
2145 RT.emitDoacrossInit(*this, S);
2150 llvm::DenseSet<const Expr *> EmittedFinals;
2151 emitAlignedClause(*this, S);
2152 EmitOMPLinearClauseInit(S);
2153 // Emit helper vars inits.
// The four worksharing helper variables: lower bound, upper bound, stride,
// and the "is last iteration" flag used by lastprivate finalization.
2155 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
2157 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
2159 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
2161 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
2163 // Emit 'then' code.
2165 OMPPrivateScope LoopScope(*this);
2166 if (EmitOMPFirstprivateClause(S, LoopScope)) {
2167 // Emit implicit barrier to synchronize threads and avoid data races on
2168 // initialization of firstprivate variables and post-update of
2169 // lastprivate variables.
2170 CGM.getOpenMPRuntime().emitBarrierCall(
2171 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2172 /*ForceSimpleCall=*/true);
2174 EmitOMPPrivateClause(S, LoopScope);
2175 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
2176 EmitOMPReductionClauseInit(S, LoopScope);
2177 EmitOMPPrivateLoopCounters(S, LoopScope);
2178 EmitOMPLinearClause(S, LoopScope);
2179 (void)LoopScope.Privatize();
2181 // Detect the loop schedule kind and chunk.
2182 llvm::Value *Chunk = nullptr;
2183 OpenMPScheduleTy ScheduleKind;
2184 if (auto *C = S.getSingleClause<OMPScheduleClause>()) {
2185 ScheduleKind.Schedule = C->getScheduleKind();
2186 ScheduleKind.M1 = C->getFirstScheduleModifier();
2187 ScheduleKind.M2 = C->getSecondScheduleModifier();
2188 if (const auto *Ch = C->getChunkSize()) {
2189 Chunk = EmitScalarExpr(Ch);
// The chunk expression is converted to the iteration variable's type so the
// runtime call gets a consistently-typed chunk value.
2190 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
2191 S.getIterationVariable()->getType(),
2195 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2196 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2197 // OpenMP 4.5, 2.7.1 Loop Construct, Description.
2198 // If the static schedule kind is specified or if the ordered clause is
2199 // specified, and if no monotonic modifier is specified, the effect will
2200 // be as if the monotonic modifier was specified.
// Fast path: static non-chunked — one chunk per thread, no outer dispatch
// loop needed.
2201 if (RT.isStaticNonchunked(ScheduleKind.Schedule,
2202 /* Chunked */ Chunk != nullptr) &&
2204 if (isOpenMPSimdDirective(S.getDirectiveKind()))
2205 EmitOMPSimdInit(S, /*IsMonotonic=*/true);
2206 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2207 // When no chunk_size is specified, the iteration space is divided into
2208 // chunks that are approximately equal in size, and at most one chunk is
2209 // distributed to each thread. Note that the size of the chunks is
2210 // unspecified in this case.
2211 RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
2212 IVSize, IVSigned, Ordered,
2213 IL.getAddress(), LB.getAddress(),
2214 UB.getAddress(), ST.getAddress())
2216 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2217 // UB = min(UB, GlobalUB);
2218 EmitIgnoredExpr(S.getEnsureUpperBound());
2220 EmitIgnoredExpr(S.getInit());
2221 // while (idx <= UB) { BODY; ++idx; }
2222 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
2224 [&S, LoopExit](CodeGenFunction &CGF) {
2225 CGF.EmitOMPLoopBody(S, LoopExit);
2226 CGF.EmitStopPoint(&S);
2228 [](CodeGenFunction &) {});
2229 EmitBlock(LoopExit.getBlock());
2230 // Tell the runtime we are done.
2231 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2232 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
2234 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
// Slow path: every other schedule goes through the outer dispatch loop.
// Per OpenMP 4.5, static/ordered/unknown schedules (absent a nonmonotonic
// modifier) behave as monotonic.
2236 const bool IsMonotonic =
2237 Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
2238 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown ||
2239 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
2240 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
2241 // Emit the outer loop, which requests its work chunk [LB..UB] from
2242 // runtime and runs the inner loop to process it.
2243 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
2244 LB.getAddress(), UB.getAddress(), ST.getAddress(),
2245 IL.getAddress(), Chunk);
// Clause finalization: the "IsLastIter != 0" load of IL gates the final
// copies/updates below so only the thread that ran the last iteration
// performs them.
2247 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
2249 [&](CodeGenFunction &CGF) -> llvm::Value * {
2250 return CGF.Builder.CreateIsNotNull(
2251 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2254 EmitOMPReductionClauseFinal(
2255 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind())
2256 ? /*Parallel and Simd*/ OMPD_parallel_for_simd
2257 : /*Parallel only*/ OMPD_parallel);
2258 // Emit post-update of the reduction variables if IsLastIter != 0.
2259 emitPostUpdateForReductionClause(
2260 *this, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2261 return CGF.Builder.CreateIsNotNull(
2262 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2264 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2265 if (HasLastprivateClause)
2266 EmitOMPLastprivateClauseFinal(
2267 S, isOpenMPSimdDirective(S.getDirectiveKind()),
2268 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
2270 EmitOMPLinearClauseFinal(S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2271 return CGF.Builder.CreateIsNotNull(
2272 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2274 // We're now done with the loop, so jump to the continuation block.
2276 EmitBranch(ContBlock);
2277 EmitBlock(ContBlock, true);
2280 return HasLastprivateClause;
// Emit '#pragma omp for': run the worksharing loop inside an inlined region
// with a cancellation region, then emit the implicit end-of-construct
// barrier unless 'nowait' was given (lastprivate forces the barrier anyway,
// to make the final copies visible).
2283 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
2284 bool HasLastprivates = false;
2285 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2286 PrePostActionTy &) {
2287 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
2288 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
2291 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2292 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
2296 // Emit an implicit barrier at the end.
2297 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
2298 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
// Emit '#pragma omp for simd': same worksharing-loop codegen as 'for' but
// emitted as an OMPD_simd inlined region (no cancellation region), with the
// same implicit-barrier rule at the end.
2302 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
2303 bool HasLastprivates = false;
2304 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2305 PrePostActionTy &) {
2306 HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
2309 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2310 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2313 // Emit an implicit barrier at the end.
2314 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
2315 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
// Create a named stack temporary of type \p Ty, optionally store \p Init
// into it, and return it as an lvalue. Used for the .omp.sections.* helper
// variables (lb/ub/st/il/iv) below.
2319 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
2321 llvm::Value *Init = nullptr) {
2322 auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
2324 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
// Emit the body of 'sections' / 'parallel sections': models the section list
// as a static non-chunked worksharing loop over section indices 0..N-1 and
// dispatches to the matching section via a switch on the iteration variable.
// NOTE(review): some interior lines are elided in this excerpt; comments
// cover only the visible code.
2328 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
2329 auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
// CS is null when the associated statement is a single (non-compound)
// statement — handled below as exactly one section.
2330 auto *CS = dyn_cast<CompoundStmt>(Stmt);
2331 bool HasLastprivates = false;
2332 auto &&CodeGen = [&S, Stmt, CS, &HasLastprivates](CodeGenFunction &CGF,
2333 PrePostActionTy &) {
2334 auto &C = CGF.CGM.getContext();
2335 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2336 // Emit helper vars inits.
// LB=0, UB=NumSections-1, ST=1, IL=0 — the helper variables for the
// synthetic worksharing loop over sections.
2337 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
2338 CGF.Builder.getInt32(0));
2339 auto *GlobalUBVal = CS != nullptr ? CGF.Builder.getInt32(CS->size() - 1)
2340 : CGF.Builder.getInt32(0);
2342 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
2343 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
2344 CGF.Builder.getInt32(1));
2345 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
2346 CGF.Builder.getInt32(0));
// The iteration variable and upper bound are wrapped in OpaqueValueExprs so
// synthetic Cond/Inc AST nodes can refer to them during codegen.
2348 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
2349 OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
2350 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
2351 OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
2352 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
2353 // Generate condition for loop.
2354 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
2355 OK_Ordinary, S.getLocStart(), FPOptions());
2356 // Increment for loop counter.
2357 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
2359 auto BodyGen = [Stmt, CS, &S, &IV](CodeGenFunction &CGF) {
2360 // Iterate through all sections and emit a switch construct:
2363 // <SectionStmt[0]>;
2366 // case <NumSection> - 1:
2367 // <SectionStmt[<NumSection> - 1]>;
2370 // .omp.sections.exit:
2371 auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
2372 auto *SwitchStmt = CGF.Builder.CreateSwitch(
2373 CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
2374 CS == nullptr ? 1 : CS->size());
// Compound body: one switch case per child section statement.
2376 unsigned CaseNumber = 0;
2377 for (auto *SubStmt : CS->children()) {
2378 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
2379 CGF.EmitBlock(CaseBB);
2380 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
2381 CGF.EmitStmt(SubStmt);
2382 CGF.EmitBranch(ExitBB);
// Non-compound body: a single section as case 0.
2386 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
2387 CGF.EmitBlock(CaseBB);
2388 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
2390 CGF.EmitBranch(ExitBB);
2392 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2395 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
2396 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
2397 // Emit implicit barrier to synchronize threads and avoid data races on
2398 // initialization of firstprivate variables and post-update of lastprivate
2400 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
2401 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2402 /*ForceSimpleCall=*/true);
2404 CGF.EmitOMPPrivateClause(S, LoopScope);
2405 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
2406 CGF.EmitOMPReductionClauseInit(S, LoopScope);
2407 (void)LoopScope.Privatize();
2409 // Emit static non-chunked loop.
2410 OpenMPScheduleTy ScheduleKind;
2411 ScheduleKind.Schedule = OMPC_SCHEDULE_static;
2412 CGF.CGM.getOpenMPRuntime().emitForStaticInit(
2413 CGF, S.getLocStart(), ScheduleKind, /*IVSize=*/32,
2414 /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(), LB.getAddress(),
2415 UB.getAddress(), ST.getAddress());
2416 // UB = min(UB, GlobalUB);
2417 auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
2418 auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
2419 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
2420 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
// IV = LB
2422 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
2423 // while (idx <= UB) { BODY; ++idx; }
2424 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
2425 [](CodeGenFunction &) {});
2426 // Tell the runtime we are done.
2427 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
2428 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
2430 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
2431 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
2432 // Emit post-update of the reduction variables if IsLastIter != 0.
2433 emitPostUpdateForReductionClause(
2434 CGF, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
2435 return CGF.Builder.CreateIsNotNull(
2436 CGF.EmitLoadOfScalar(IL, S.getLocStart()));
2439 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2440 if (HasLastprivates)
2441 CGF.EmitOMPLastprivateClauseFinal(
2442 S, /*NoFinals=*/false,
2443 CGF.Builder.CreateIsNotNull(
2444 CGF.EmitLoadOfScalar(IL, S.getLocStart())));
// Cancellation is only possible for 'sections' / 'parallel sections'.
2447 bool HasCancel = false;
2448 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
2449 HasCancel = OSD->hasCancel();
2450 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
2451 HasCancel = OPSD->hasCancel();
2452 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
2453 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
2455 // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
2456 // clause. Otherwise the barrier will be generated by the codegen for the
2458 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
2459 // Emit implicit barrier to synchronize threads and avoid data races on
2460 // initialization of firstprivate variables.
2461 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
// Emit '#pragma omp sections': delegates to EmitSections (call line elided
// in this excerpt), then emits the implicit end barrier unless 'nowait'.
2466 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
2468 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2471 // Emit an implicit barrier at the end.
2472 if (!S.getSingleClause<OMPNowaitClause>()) {
2473 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
// Emit '#pragma omp section': the section body is emitted as an inlined
// OMPD_section region (dispatching among sections happens in EmitSections).
2478 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
2479 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2480 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2482 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2483 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
// Emit '#pragma omp single': collects copyprivate variables and their helper
// copy expressions, emits the single region through the runtime, and adds
// the implicit end barrier when neither 'nowait' nor 'copyprivate' makes it
// unnecessary (copyprivate broadcast synchronizes by itself).
2487 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
2488 llvm::SmallVector<const Expr *, 8> CopyprivateVars;
2489 llvm::SmallVector<const Expr *, 8> DestExprs;
2490 llvm::SmallVector<const Expr *, 8> SrcExprs;
2491 llvm::SmallVector<const Expr *, 8> AssignmentOps;
2492 // Check if there are any 'copyprivate' clauses associated with this
2493 // 'single' construct.
2494 // Build a list of copyprivate variables along with helper expressions
2495 // (<source>, <destination>, <destination>=<source> expressions)
2496 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
2497 CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
2498 DestExprs.append(C->destination_exprs().begin(),
2499 C->destination_exprs().end());
2500 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
2501 AssignmentOps.append(C->assignment_ops().begin(),
2502 C->assignment_ops().end());
2504 // Emit code for 'single' region along with 'copyprivate' clauses
2505 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2507 OMPPrivateScope SingleScope(CGF);
2508 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
2509 CGF.EmitOMPPrivateClause(S, SingleScope);
2510 (void)SingleScope.Privatize();
2511 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2514 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2515 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
2516 CopyprivateVars, DestExprs,
2517 SrcExprs, AssignmentOps);
2519 // Emit an implicit barrier at the end (to avoid data race on firstprivate
2520 // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
2521 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
2522 CGM.getOpenMPRuntime().emitBarrierCall(
2523 *this, S.getLocStart(),
2524 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
// Emit '#pragma omp master': the body runs inside a runtime-guarded master
// region (only the master thread executes it; no implied barrier).
2528 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
2529 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2531 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2533 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2534 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
// Emit '#pragma omp critical [(name)] [hint(...)]': the body runs inside a
// runtime critical region keyed by the directive name, passing the optional
// 'hint' clause expression to the runtime.
2537 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
2538 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2540 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2542 Expr *Hint = nullptr;
2543 if (auto *HintClause = S.getSingleClause<OMPHintClause>())
2544 Hint = HintClause->getHint();
2545 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2546 CGM.getOpenMPRuntime().emitCriticalRegion(*this,
2547 S.getDirectiveName().getAsString(),
2548 CodeGen, S.getLocStart(), Hint);
// Emit '#pragma omp parallel for' as parallel + worksharing loop: the
// common parallel-directive helper outlines the region, whose body is the
// worksharing loop (with a cancellation region honoring 'cancel').
2551 void CodeGenFunction::EmitOMPParallelForDirective(
2552 const OMPParallelForDirective &S) {
2553 // Emit directive as a combined directive that consists of two implicit
2554 // directives: 'parallel' with 'for' directive.
2555 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2556 OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
2557 CGF.EmitOMPWorksharingLoop(S);
2559 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
// Emit '#pragma omp parallel for simd' as parallel + worksharing loop
// (emitted with the OMPD_simd inner region kind; no cancellation region —
// 'cancel' is not allowed on simd constructs).
2562 void CodeGenFunction::EmitOMPParallelForSimdDirective(
2563 const OMPParallelForSimdDirective &S) {
2564 // Emit directive as a combined directive that consists of two implicit
2565 // directives: 'parallel' with 'for' directive.
2566 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2567 CGF.EmitOMPWorksharingLoop(S);
2569 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
// Emit '#pragma omp parallel sections' as parallel + sections: the outlined
// parallel body delegates to EmitSections for the section dispatch.
2572 void CodeGenFunction::EmitOMPParallelSectionsDirective(
2573 const OMPParallelSectionsDirective &S) {
2574 // Emit directive as a combined directive that consists of two implicit
2575 // directives: 'parallel' with 'sections' directive.
2576 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2577 CGF.EmitSections(S);
2579 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
// Common lowering for task-based directives (e.g. 'task', taskloop forms):
// collects final/priority/private/firstprivate/lastprivate/depend clause data
// into \p Data, builds the task-body codegen (which remaps privates to the
// addresses supplied by the runtime), emits the outlined task function, and
// finally invokes \p TaskGen to emit the actual runtime task call.
2582 void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
2583 const RegionCodeGenTy &BodyGen,
2584 const TaskGenTy &TaskGen,
2585 OMPTaskDataTy &Data) {
2586 // Emit outlined function for task construct.
2587 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2588 auto *I = CS->getCapturedDecl()->param_begin();
2589 auto *PartId = std::next(I);
// The 5th captured-decl parameter carries the task descriptor; presumably the
// layout is fixed by the runtime's task entry signature — TODO confirm.
2590 auto *TaskT = std::next(I, 4);
2591 // Check if the task is final
2592 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
2593 // If the condition constant folds and can be elided, try to avoid emitting
2594 // the condition and the dead arm of the if/else.
2595 auto *Cond = Clause->getCondition();
2597 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
2598 Data.Final.setInt(CondConstant)
2600 Data.Final.setPointer(EvaluateExprAsBool(Cond));
2602 // By default the task is not final.
2603 Data.Final.setInt(/*IntVal=*/false);
2605 // Check if the task has 'priority' clause.
2606 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
2607 auto *Prio = Clause->getPriority();
2608 Data.Priority.setInt(/*IntVal=*/true);
// Priority is normalized to a signed 32-bit integer for the runtime.
2609 Data.Priority.setPointer(EmitScalarConversion(
2610 EmitScalarExpr(Prio), Prio->getType(),
2611 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
2612 Prio->getExprLoc()));
2614 // The first function argument for tasks is a thread id, the second one is a
2615 // part id (0 for tied tasks, >=0 for untied task).
2616 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
2617 // Get list of private variables.
// EmittedAsPrivate de-duplicates: only the first clause mentioning a variable
// contributes an entry.
2618 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
2619 auto IRef = C->varlist_begin();
2620 for (auto *IInit : C->private_copies()) {
2621 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2622 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2623 Data.PrivateVars.push_back(*IRef);
2624 Data.PrivateCopies.push_back(IInit);
2629 EmittedAsPrivate.clear();
2630 // Get list of firstprivate variables.
2631 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
2632 auto IRef = C->varlist_begin();
2633 auto IElemInitRef = C->inits().begin();
2634 for (auto *IInit : C->private_copies()) {
2635 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2636 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2637 Data.FirstprivateVars.push_back(*IRef);
2638 Data.FirstprivateCopies.push_back(IInit);
2639 Data.FirstprivateInits.push_back(*IElemInitRef);
2645 // Get list of lastprivate variables (for taskloops).
2646 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
2647 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
2648 auto IRef = C->varlist_begin();
2649 auto ID = C->destination_exprs().begin();
2650 for (auto *IInit : C->private_copies()) {
2651 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
2652 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
2653 Data.LastprivateVars.push_back(*IRef);
2654 Data.LastprivateCopies.push_back(IInit);
// Remember destination decl -> original variable so final values can be
// copied back after the task region.
2656 LastprivateDstsOrigs.insert(
2657 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
2658 cast<DeclRefExpr>(*IRef)});
2663 // Build list of dependences.
2664 for (const auto *C : S.getClausesOfKind<OMPDependClause>())
2665 for (auto *IRef : C->varlists())
2666 Data.Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
// Body codegen run inside the outlined task function.
2667 auto &&CodeGen = [&Data, CS, &BodyGen, &LastprivateDstsOrigs](
2668 CodeGenFunction &CGF, PrePostActionTy &Action) {
2669 // Set proper addresses for generated private copies.
2670 OMPPrivateScope Scope(CGF);
2671 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
2672 !Data.LastprivateVars.empty()) {
// Captured-decl param 3 holds a runtime-provided function that, given the
// privates block (param 2) and one out-pointer per private variable, fills
// in each variable's address — presumably the "copy function" of the task;
// TODO confirm against the runtime's task layout.
2673 auto *CopyFn = CGF.Builder.CreateLoad(
2674 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)));
2675 auto *PrivatesPtr = CGF.Builder.CreateLoad(
2676 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)));
2678 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
2679 llvm::SmallVector<llvm::Value *, 16> CallArgs;
2680 CallArgs.push_back(PrivatesPtr);
2681 for (auto *E : Data.PrivateVars) {
2682 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2683 Address PrivatePtr = CGF.CreateMemTemp(
2684 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
2685 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2686 CallArgs.push_back(PrivatePtr.getPointer());
2688 for (auto *E : Data.FirstprivateVars) {
2689 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2690 Address PrivatePtr =
2691 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
2692 ".firstpriv.ptr.addr");
2693 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2694 CallArgs.push_back(PrivatePtr.getPointer());
2696 for (auto *E : Data.LastprivateVars) {
2697 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2698 Address PrivatePtr =
2699 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
2700 ".lastpriv.ptr.addr");
2701 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
2702 CallArgs.push_back(PrivatePtr.getPointer());
2704 CGF.EmitRuntimeCall(CopyFn, CallArgs);
// Map lastprivate destination decls onto the original (captured) variables.
2705 for (auto &&Pair : LastprivateDstsOrigs) {
2706 auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
2708 const_cast<VarDecl *>(OrigVD),
2709 /*RefersToEnclosingVariableOrCapture=*/CGF.CapturedStmtInfo->lookup(
2711 Pair.second->getType(), VK_LValue, Pair.second->getExprLoc());
2712 Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
2713 return CGF.EmitLValue(&DRE).getAddress();
// Redirect every privatized variable to the address the runtime handed back.
2716 for (auto &&Pair : PrivatePtrs) {
2717 Address Replacement(CGF.Builder.CreateLoad(Pair.second),
2718 CGF.getContext().getDeclAlign(Pair.first));
2719 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
2722 (void)Scope.Privatize();
2727 auto *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
2728 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
2729 Data.NumberOfParts);
2730 OMPLexicalScope Scope(*this, S);
2731 TaskGen(*this, OutlinedFn, Data);
// Lower '#pragma omp task': package clause data, then delegate to the shared
// task-based lowering with a TaskGen that emits the runtime task call.
2734 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
2735 // Emit outlined function for task construct.
2736 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2737 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
2738 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
// Honor only an 'if' clause that applies to 'task' (no modifier, or the
// 'task' name modifier).
2739 const Expr *IfCond = nullptr;
2740 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
2741 if (C->getNameModifier() == OMPD_unknown ||
2742 C->getNameModifier() == OMPD_task) {
2743 IfCond = C->getCondition();
2749 // Check if we should emit tied or untied task.
2750 Data.Tied = !S.getSingleClause<OMPUntiedClause>();
2751 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
2752 CGF.EmitStmt(CS->getCapturedStmt());
2754 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
2755 IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
2756 const OMPTaskDataTy &Data) {
2757 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getLocStart(), S, OutlinedFn,
2758 SharedsTy, CapturedStruct, IfCond,
2761 EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
// Lower '#pragma omp taskyield' to a single runtime call.
2764 void CodeGenFunction::EmitOMPTaskyieldDirective(
2765 const OMPTaskyieldDirective &S) {
2766 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
// Lower '#pragma omp barrier' to an explicit runtime barrier call.
2769 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
2770 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
// Lower '#pragma omp taskwait' to a single runtime call.
2773 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
2774 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
// Lower '#pragma omp taskgroup': emit the associated statement inside the
// runtime-managed taskgroup region.
2777 void CodeGenFunction::EmitOMPTaskgroupDirective(
2778 const OMPTaskgroupDirective &S) {
2779 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2781 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2783 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2784 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
// Lower '#pragma omp flush'. The immediately-invoked lambda computes the
// (possibly empty) flush variable list from the optional flush clause.
2787 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
2788 CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
2789 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) {
2790 return llvm::makeArrayRef(FlushClause->varlist_begin(),
2791 FlushClause->varlist_end());
2794 }(), S.getLocStart());
// Lower the loop of a 'distribute' construct: set up the iteration variable
// and bounds helpers, privatize clause variables, then emit either a single
// statically-scheduled chunk (static non-chunked schedule) or a dynamic
// outer loop that requests chunks from the runtime.
2797 void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
2798 // Emit the loop iteration variable.
2799 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
2800 auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
2801 EmitVarDecl(*IVDecl);
2803 // Emit the iterations count variable.
2804 // If it is not a variable, Sema decided to calculate iterations count on each
2805 // iteration (e.g., it is foldable into a constant).
2806 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2807 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2808 // Emit calculation of the iterations count.
2809 EmitIgnoredExpr(S.getCalcLastIteration());
2812 auto &RT = CGM.getOpenMPRuntime();
2814 bool HasLastprivateClause = false;
2815 // Check pre-condition.
2817 OMPLoopScope PreInitScope(*this, S);
2818 // Skip the entire loop if we don't meet the precondition.
2819 // If the condition constant folds and can be elided, avoid emitting the
2822 llvm::BasicBlock *ContBlock = nullptr;
2823 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2827 auto *ThenBlock = createBasicBlock("omp.precond.then");
2828 ContBlock = createBasicBlock("omp.precond.end");
2829 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
2830 getProfileCount(&S));
2831 EmitBlock(ThenBlock);
2832 incrementProfileCounter(&S);
2835 // Emit 'then' code.
2837 // Emit helper vars inits.
// LB/UB/ST/IL helper variables carry the chunk bounds, stride and the
// "is last iteration" flag exchanged with the runtime.
2839 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
2841 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
2843 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
2845 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
2847 OMPPrivateScope LoopScope(*this);
2848 if (EmitOMPFirstprivateClause(S, LoopScope)) {
2849 // Emit implicit barrier to synchronize threads and avoid data races on
2850 // initialization of firstprivate variables and post-update of
2851 // lastprivate variables.
2852 CGM.getOpenMPRuntime().emitBarrierCall(
2853 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
2854 /*ForceSimpleCall=*/true);
2856 EmitOMPPrivateClause(S, LoopScope);
2857 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
2858 EmitOMPPrivateLoopCounters(S, LoopScope);
2859 (void)LoopScope.Privatize();
2861 // Detect the distribute schedule kind and chunk.
2862 llvm::Value *Chunk = nullptr;
2863 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
2864 if (auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
2865 ScheduleKind = C->getDistScheduleKind();
2866 if (const auto *Ch = C->getChunkSize()) {
// The chunk expression is converted to the iteration variable's type.
2867 Chunk = EmitScalarExpr(Ch);
2868 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
2869 S.getIterationVariable()->getType(),
2873 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2874 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2876 // OpenMP [2.10.8, distribute Construct, Description]
2877 // If dist_schedule is specified, kind must be static. If specified,
2878 // iterations are divided into chunks of size chunk_size, chunks are
2879 // assigned to the teams of the league in a round-robin fashion in the
2880 // order of the team number. When no chunk_size is specified, the
2881 // iteration space is divided into chunks that are approximately equal
2882 // in size, and at most one chunk is distributed to each team of the
2883 // league. The size of the chunks is unspecified in this case.
2884 if (RT.isStaticNonchunked(ScheduleKind,
2885 /* Chunked */ Chunk != nullptr)) {
// Static non-chunked: each team gets exactly one chunk, so one static init,
// one inner loop, one static finish.
2886 RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
2887 IVSize, IVSigned, /* Ordered = */ false,
2888 IL.getAddress(), LB.getAddress(),
2889 UB.getAddress(), ST.getAddress());
2891 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2892 // UB = min(UB, GlobalUB);
2893 EmitIgnoredExpr(S.getEnsureUpperBound());
2895 EmitIgnoredExpr(S.getInit());
2896 // while (idx <= UB) { BODY; ++idx; }
2897 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
2899 [&S, LoopExit](CodeGenFunction &CGF) {
2900 CGF.EmitOMPLoopBody(S, LoopExit);
2901 CGF.EmitStopPoint(&S);
2903 [](CodeGenFunction &) {});
2904 EmitBlock(LoopExit.getBlock());
2905 // Tell the runtime we are done.
2906 RT.emitForStaticFinish(*this, S.getLocStart());
2908 // Emit the outer loop, which requests its work chunk [LB..UB] from
2909 // runtime and runs the inner loop to process it.
2910 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope,
2911 LB.getAddress(), UB.getAddress(), ST.getAddress(),
2912 IL.getAddress(), Chunk);
2915 // Emit final copy of the lastprivate variables if IsLastIter != 0.
2916 if (HasLastprivateClause)
2917 EmitOMPLastprivateClauseFinal(
2918 S, /*NoFinals=*/false,
2919 Builder.CreateIsNotNull(
2920 EmitLoadOfScalar(IL, S.getLocStart())));
2923 // We're now done with the loop, so jump to the continuation block.
2925 EmitBranch(ContBlock);
2926 EmitBlock(ContBlock, true);
// Lower '#pragma omp distribute' by running the distribute-loop codegen as an
// inlined runtime region.
2931 void CodeGenFunction::EmitOMPDistributeDirective(
2932 const OMPDistributeDirective &S) {
2933 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2934 CGF.EmitOMPDistributeLoop(S);
2936 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2937 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen,
// Outline the body of an 'ordered' region into its own function so the
// runtime can invoke it; marked NoInline to keep the outlining intact.
2941 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
2942 const CapturedStmt *S) {
// A fresh CodeGenFunction (without a new AST context) drives the outlining.
2943 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
2944 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
2945 CGF.CapturedStmtInfo = &CapStmtInfo;
2946 auto *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
2947 Fn->addFnAttr(llvm::Attribute::NoInline);
// Lower '#pragma omp ordered'. A stand-alone ordered with depend clauses
// becomes doacross runtime calls; otherwise the body is emitted inside the
// runtime-managed ordered region (outlined when capture is needed).
2951 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
2952 if (!S.getAssociatedStmt()) {
2953 for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
2954 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
// The presence of a 'simd' clause controls whether the runtime should emit
// threaded ordering (passed as !C below).
2957 auto *C = S.getSingleClause<OMPSIMDClause>();
2958 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
2959 PrePostActionTy &Action) {
2961 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2962 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
2963 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
2964 auto *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
2965 CGF.EmitNounwindRuntimeCall(OutlinedFn, CapturedVars);
2969 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
2972 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
2973 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C);
// Convert \p Val (scalar or complex) to a scalar of \p DestType. Aggregates
// are rejected by assertion.
2976 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
2977 QualType SrcType, QualType DestType,
2978 SourceLocation Loc) {
2979 assert(CGF.hasScalarEvaluationKind(DestType) &&
2980 "DestType must have scalar evaluation kind.");
2981 assert(!Val.isAggregate() && "Must be a scalar or complex.");
2982 return Val.isScalar()
2983 ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType,
2985 : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
// Convert \p Val (scalar or complex) to a complex value of \p DestType.
// A scalar becomes the real part with a zero imaginary part; a complex value
// has both components converted element-wise.
2989 static CodeGenFunction::ComplexPairTy
2990 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
2991 QualType DestType, SourceLocation Loc) {
2992 assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
2993 "DestType must have complex evaluation kind.");
2994 CodeGenFunction::ComplexPairTy ComplexVal;
2995 if (Val.isScalar()) {
2996 // Convert the input element to the element type of the complex.
2997 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
2998 auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
2999 DestElementType, Loc);
3000 ComplexVal = CodeGenFunction::ComplexPairTy(
3001 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
3003 assert(Val.isComplex() && "Must be a scalar or complex.");
3004 auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
3005 auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
// Convert real and imaginary parts independently.
3006 ComplexVal.first = CGF.EmitScalarConversion(
3007 Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
3008 ComplexVal.second = CGF.EmitScalarConversion(
3009 Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
// Store \p RVal into \p LVal for 'omp atomic write'. Global-register lvalues
// cannot be stored atomically and use a plain register store; everything else
// gets an atomic store with seq_cst or monotonic ordering per \p IsSeqCst.
3014 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
3015 LValue LVal, RValue RVal) {
3016 if (LVal.isGlobalReg()) {
3017 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
3019 CGF.EmitAtomicStore(RVal, LVal,
3020 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3021 : llvm::AtomicOrdering::Monotonic,
3022 LVal.isVolatile(), /*IsInit=*/false);
// Store \p RVal (of type \p RValTy) into \p LVal, converting to the lvalue's
// evaluation kind (scalar or complex). Aggregates are unreachable here.
3026 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
3027 QualType RValTy, SourceLocation Loc) {
3028 switch (getEvaluationKind(LVal.getType())) {
3030 EmitStoreThroughLValue(RValue::get(convertToScalarValue(
3031 *this, RVal, RValTy, LVal.getType(), Loc)),
3036 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
3040 llvm_unreachable("Must be a scalar or complex.");
// Lower 'omp atomic read': v = x. Load 'x' atomically (or via a plain load
// for global-register lvalues), optionally emit the seq_cst flush, then store
// the value into 'v'.
3044 static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
3045 const Expr *X, const Expr *V,
3046 SourceLocation Loc) {
3048 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
3049 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
3050 LValue XLValue = CGF.EmitLValue(X);
3051 LValue VLValue = CGF.EmitLValue(V);
3052 RValue Res = XLValue.isGlobalReg()
3053 ? CGF.EmitLoadOfLValue(XLValue, Loc)
3054 : CGF.EmitAtomicLoad(
3056 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3057 : llvm::AtomicOrdering::Monotonic,
3058 XLValue.isVolatile());
3059 // OpenMP, 2.12.6, atomic Construct
3060 // Any atomic construct with a seq_cst clause forces the atomically
3061 // performed operation to include an implicit flush operation without a
3064 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
3065 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
// Lower 'omp atomic write': x = expr, stored atomically, followed by the
// implicit flush when seq_cst is present.
3068 static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
3069 const Expr *X, const Expr *E,
3070 SourceLocation Loc) {
3072 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
3073 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
3074 // OpenMP, 2.12.6, atomic Construct
3075 // Any atomic construct with a seq_cst clause forces the atomically
3076 // performed operation to include an implicit flush operation without a
3079 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
// Try to lower an atomic update as a single LLVM 'atomicrmw' instruction.
// Returns {true, old value} on success, {false, null} when the operation
// cannot be expressed as atomicrmw and the caller must fall back to a
// compare-and-swap loop.
3082 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
3084 BinaryOperatorKind BO,
3085 llvm::AtomicOrdering AO,
3086 bool IsXLHSInRHSPart) {
3087 auto &Context = CGF.CGM.getContext();
3088 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
3089 // expression is simple and atomic is allowed for the given type for the
// Bail out unless: scalar integer update, simple lvalue, matching storage
// type (or a constant that can be cast below), and the target supports a
// builtin atomic at this size/alignment.
3091 if (BO == BO_Comma || !Update.isScalar() ||
3092 !Update.getScalarVal()->getType()->isIntegerTy() ||
3093 !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
3094 (Update.getScalarVal()->getType() !=
3095 X.getAddress().getElementType())) ||
3096 !X.getAddress().getElementType()->isIntegerTy() ||
3097 !Context.getTargetInfo().hasBuiltinAtomic(
3098 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
3099 return std::make_pair(false, RValue::get(nullptr));
3101 llvm::AtomicRMWInst::BinOp RMWOp;
3104 RMWOp = llvm::AtomicRMWInst::Add;
// Subtraction is only atomicrmw-able as 'x = x - expr'; 'x = expr - x'
// is not commutative and needs the CAS fallback.
3107 if (!IsXLHSInRHSPart)
3108 return std::make_pair(false, RValue::get(nullptr));
3109 RMWOp = llvm::AtomicRMWInst::Sub;
3112 RMWOp = llvm::AtomicRMWInst::And;
3115 RMWOp = llvm::AtomicRMWInst::Or;
3118 RMWOp = llvm::AtomicRMWInst::Xor;
// Relational forms pick min/max; the choice flips with operand order and
// signedness of 'x'.
3121 RMWOp = X.getType()->hasSignedIntegerRepresentation()
3122 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
3123 : llvm::AtomicRMWInst::Max)
3124 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
3125 : llvm::AtomicRMWInst::UMax);
3128 RMWOp = X.getType()->hasSignedIntegerRepresentation()
3129 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
3130 : llvm::AtomicRMWInst::Min)
3131 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
3132 : llvm::AtomicRMWInst::UMin);
3135 RMWOp = llvm::AtomicRMWInst::Xchg;
3144 return std::make_pair(false, RValue::get(nullptr));
3162 llvm_unreachable("Unsupported atomic update operation");
3164 auto *UpdateVal = Update.getScalarVal();
// Constants may differ in width from the storage type; cast them to match.
3165 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
3166 UpdateVal = CGF.Builder.CreateIntCast(
3167 IC, X.getAddress().getElementType(),
3168 X.getType()->hasSignedIntegerRepresentation());
3170 auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
3171 return std::make_pair(true, RValue::get(Res));
// Emit an atomic update of \p X. First try a single atomicrmw; if that fails,
// fall back to a plain store for global-register lvalues or to an atomic
// compare-and-swap update loop driven by \p CommonGen.
3174 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
3175 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3176 llvm::AtomicOrdering AO, SourceLocation Loc,
3177 const llvm::function_ref<RValue(RValue)> &CommonGen) {
3178 // Update expressions are allowed to have the following forms:
3179 // x binop= expr; -> xrval + expr;
3180 // x++, ++x -> xrval + 1;
3181 // x--, --x -> xrval - 1;
3182 // x = x binop expr; -> xrval binop expr
3183 // x = expr Op x; - > expr binop xrval;
3184 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
3186 if (X.isGlobalReg()) {
3187 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
3189 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
3191 // Perform compare-and-swap procedure.
3192 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
// Lower 'omp atomic update': x = x binop expr (in any of the allowed forms).
// The update expression \p UE contains opaque placeholders for 'x' and 'expr'
// which are bound via OpaqueValueMapping inside the generator lambda.
3198 static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
3199 const Expr *X, const Expr *E,
3200 const Expr *UE, bool IsXLHSInRHSPart,
3201 SourceLocation Loc) {
3202 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
3203 "Update expr in 'atomic update' must be a binary operator.");
3204 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
3205 // Update expressions are allowed to have the following forms:
3206 // x binop= expr; -> xrval + expr;
3207 // x++, ++x -> xrval + 1;
3208 // x--, --x -> xrval - 1;
3209 // x = x binop expr; -> xrval binop expr
3210 // x = expr Op x; - > expr binop xrval;
3211 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
3212 LValue XLValue = CGF.EmitLValue(X);
3213 RValue ExprRValue = CGF.EmitAnyExpr(E);
3214 auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3215 : llvm::AtomicOrdering::Monotonic;
// Which side of the binary operator stands for 'x' depends on the original
// statement's operand order.
3216 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
3217 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
3218 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
3219 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
3221 [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
3222 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3223 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
3224 return CGF.EmitAnyExpr(UE);
3226 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
3227 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
3228 // OpenMP, 2.12.6, atomic Construct
3229 // Any atomic construct with a seq_cst clause forces the atomically
3230 // performed operation to include an implicit flush operation without a
3233 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
// Convert \p Value to \p ResType, dispatching on the result's evaluation kind
// (scalar or complex); aggregates are unreachable.
3236 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
3237 QualType SourceType, QualType ResType,
3238 SourceLocation Loc) {
3239 switch (CGF.getEvaluationKind(ResType)) {
3242 convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
3244 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
3245 return RValue::getComplex(Res.first, Res.second);
3250 llvm_unreachable("Must be a scalar or complex.");
// Lower 'omp atomic capture': atomically update (or overwrite) 'x' and store
// the old or new value of 'x' into 'v', depending on \p IsPostfixUpdate.
// With an update expression the generator records the captured value as a
// side effect; without one, 'x' is exchanged with 'expr'.
3253 static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
3254 bool IsPostfixUpdate, const Expr *V,
3255 const Expr *X, const Expr *E,
3256 const Expr *UE, bool IsXLHSInRHSPart,
3257 SourceLocation Loc) {
3258 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
3259 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
3261 LValue VLValue = CGF.EmitLValue(V);
3262 LValue XLValue = CGF.EmitLValue(X);
3263 RValue ExprRValue = CGF.EmitAnyExpr(E);
3264 auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
3265 : llvm::AtomicOrdering::Monotonic;
3266 QualType NewVValType;
3268 // 'x' is updated with some additional value.
3269 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
3270 "Update expr in 'atomic capture' must be a binary operator.");
3271 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
3272 // Update expressions are allowed to have the following forms:
3273 // x binop= expr; -> xrval + expr;
3274 // x++, ++x -> xrval + 1;
3275 // x--, --x -> xrval - 1;
3276 // x = x binop expr; -> xrval binop expr
3277 // x = expr Op x; - > expr binop xrval;
3278 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
3279 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
3280 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
3281 NewVValType = XRValExpr->getType();
3282 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
// The generator also captures old/new 'x' into NewVVal as a side effect:
// postfix capture keeps the pre-update value, otherwise the updated one.
3283 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
3284 IsPostfixUpdate](RValue XRValue) -> RValue {
3285 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3286 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
3287 RValue Res = CGF.EmitAnyExpr(UE);
3288 NewVVal = IsPostfixUpdate ? XRValue : Res;
3291 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
3292 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
3294 // 'atomicrmw' instruction was generated.
3295 if (IsPostfixUpdate) {
3296 // Use old value from 'atomicrmw'.
3297 NewVVal = Res.second;
3299 // 'atomicrmw' does not provide new value, so evaluate it using old
3301 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
3302 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
3303 NewVVal = CGF.EmitAnyExpr(UE);
3307 // 'x' is simply rewritten with some 'expr'.
3308 NewVValType = X->getType().getNonReferenceType();
3309 ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
3310 X->getType().getNonReferenceType(), Loc);
3311 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) -> RValue {
3315 // Try to perform atomicrmw xchg, otherwise simple exchange.
3316 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
3317 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
3320 // 'atomicrmw' instruction was generated.
3321 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
3324 // Emit post-update store to 'v' of old/new 'x' value.
3325 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
3326 // OpenMP, 2.12.6, atomic Construct
3327 // Any atomic construct with a seq_cst clause forces the atomically
3328 // performed operation to include an implicit flush operation without a
3331 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
// Dispatch an 'omp atomic' construct to the read/write/update/capture
// emitter based on the clause kind. All other clause kinds are invalid on
// 'atomic' and trap via llvm_unreachable.
3334 static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
3335 bool IsSeqCst, bool IsPostfixUpdate,
3336 const Expr *X, const Expr *V, const Expr *E,
3337 const Expr *UE, bool IsXLHSInRHSPart,
3338 SourceLocation Loc) {
3341 EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
3344 EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
3348 EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
3351 EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
3352 IsXLHSInRHSPart, Loc);
3356 case OMPC_num_threads:
3358 case OMPC_firstprivate:
3359 case OMPC_lastprivate:
3360 case OMPC_reduction:
3370 case OMPC_copyprivate:
3372 case OMPC_proc_bind:
3377 case OMPC_threadprivate:
3379 case OMPC_mergeable:
3384 case OMPC_num_teams:
3385 case OMPC_thread_limit:
3387 case OMPC_grainsize:
3389 case OMPC_num_tasks:
3391 case OMPC_dist_schedule:
3392 case OMPC_defaultmap:
3396 case OMPC_use_device_ptr:
3397 case OMPC_is_device_ptr:
3398 llvm_unreachable("Clause is not allowed in 'omp atomic'.");
// Lower '#pragma omp atomic': determine the operation kind from the first
// non-seq_cst clause, register cleanups for full expressions in the body,
// then emit the atomic operation as an inlined region.
3402 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
3403 bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
3404 OpenMPClauseKind Kind = OMPC_unknown;
3405 for (auto *C : S.clauses()) {
3406 // Find first clause (skip seq_cst clause, if it is first).
3407 if (C->getClauseKind() != OMPC_seq_cst) {
3408 Kind = C->getClauseKind();
3414 S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
3415 if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
3416 enterFullExpression(EWC);
3418 // Processing for statements under 'atomic capture'.
// The capture form may be a compound statement; enter each contained full
// expression so its cleanups are handled correctly.
3419 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
3420 for (const auto *C : Compound->body()) {
3421 if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
3422 enterFullExpression(EWC);
3427 auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF,
3428 PrePostActionTy &) {
3429 CGF.EmitStopPoint(CS);
3430 EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
3431 S.getV(), S.getExpr(), S.getUpdateExpr(),
3432 S.isXLHSInRHSPart(), S.getLocStart());
3434 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
3435 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
// Common lowering for target execution directives: evaluate if/device
// clauses, decide whether the region is an offload entry point, emit the
// outlined target function (mangled after the enclosing function), and emit
// the host-side target call with the captured variables.
3438 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
3439 const OMPExecutableDirective &S,
3440 const RegionCodeGenTy &CodeGen) {
3441 assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
3442 CodeGenModule &CGM = CGF.CGM;
3443 const CapturedStmt &CS = *cast<CapturedStmt>(S.getAssociatedStmt());
3445 llvm::Function *Fn = nullptr;
3446 llvm::Constant *FnID = nullptr;
3448 const Expr *IfCond = nullptr;
3449 // Check for the at most one if clause associated with the target region.
3450 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3451 if (C->getNameModifier() == OMPD_unknown ||
3452 C->getNameModifier() == OMPD_target) {
3453 IfCond = C->getCondition();
3458 // Check if we have any device clause associated with the directive.
3459 const Expr *Device = nullptr;
3460 if (auto *C = S.getSingleClause<OMPDeviceClause>()) {
3461 Device = C->getDevice();
3464 // Check if we have an if clause whose conditional always evaluates to false
3465 // or if we do not have any targets specified. If so the target region is not
3466 // an offload entry point.
3467 bool IsOffloadEntry = true;
3470 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
3471 IsOffloadEntry = false;
3473 if (CGM.getLangOpts().OMPTargetTriples.empty())
3474 IsOffloadEntry = false;
3476 assert(CGF.CurFuncDecl && "No parent declaration for target region!");
3477 StringRef ParentName;
3478 // In case we have Ctors/Dtors we use the complete type variant to produce
3479 // the mangling of the device outlined kernel.
3480 if (auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
3481 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
3482 else if (auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
3483 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
3486 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl)));
3488 // Emit target region as a standalone region.
3489 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
3490 IsOffloadEntry, CodeGen);
3491 OMPLexicalScope Scope(CGF, S);
3492 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3493 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
3494 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device,
// Body codegen for a 'target' region: privatize firstprivate/private clause
// variables, then emit the captured statement.
3498 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
3499 PrePostActionTy &Action) {
3500 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
3501 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3502 CGF.EmitOMPPrivateClause(S, PrivateScope);
3503 (void)PrivateScope.Privatize();
3506 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
// Emit the device-side function for a 'target' directive as an offload entry
// (used when compiling for the target device).
3509 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
3510 StringRef ParentName,
3511 const OMPTargetDirective &S) {
3512 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3513 emitTargetRegion(CGF, S, Action);
3516 llvm::Constant *Addr;
3517 // Emit target region as a standalone region.
3518 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
3519 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
3520 assert(Fn && Addr && "Target device function emission failed.");
3523 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
3524 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3525 emitTargetRegion(CGF, S, Action);
3527 emitCommonOMPTargetDirective(*this, S, CodeGen);
3530 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
3531 const OMPExecutableDirective &S,
3532 OpenMPDirectiveKind InnermostKind,
3533 const RegionCodeGenTy &CodeGen) {
3534 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
3535 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
3536 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
3538 const OMPNumTeamsClause *NT = S.getSingleClause<OMPNumTeamsClause>();
3539 const OMPThreadLimitClause *TL = S.getSingleClause<OMPThreadLimitClause>();
3541 Expr *NumTeams = (NT) ? NT->getNumTeams() : nullptr;
3542 Expr *ThreadLimit = (TL) ? TL->getThreadLimit() : nullptr;
3544 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
3548 OMPTeamsScope Scope(CGF, S);
3549 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
3550 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
3551 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getLocStart(), OutlinedFn,
3555 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
3556 // Emit teams region as a standalone region.
3557 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3558 OMPPrivateScope PrivateScope(CGF);
3559 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3560 CGF.EmitOMPPrivateClause(S, PrivateScope);
3561 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
3562 (void)PrivateScope.Privatize();
3563 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3564 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
3566 emitCommonOMPTeamsDirective(*this, S, OMPD_teams, CodeGen);
3567 emitPostUpdateForReductionClause(
3568 *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
3571 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
3572 const OMPTargetTeamsDirective &S) {
3573 auto *CS = S.getCapturedStmt(OMPD_teams);
3575 auto &&CodeGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
3576 // TODO: Add support for clauses.
3577 CGF.EmitStmt(CS->getCapturedStmt());
3579 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen);
3582 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
3583 CodeGenModule &CGM, StringRef ParentName,
3584 const OMPTargetTeamsDirective &S) {
3585 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3586 emitTargetTeamsRegion(CGF, Action, S);
3589 llvm::Constant *Addr;
3590 // Emit target region as a standalone region.
3591 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
3592 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
3593 assert(Fn && Addr && "Target device function emission failed.");
3596 void CodeGenFunction::EmitOMPTargetTeamsDirective(
3597 const OMPTargetTeamsDirective &S) {
3598 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3599 emitTargetTeamsRegion(CGF, Action, S);
3601 emitCommonOMPTargetDirective(*this, S, CodeGen);
3604 void CodeGenFunction::EmitOMPCancellationPointDirective(
3605 const OMPCancellationPointDirective &S) {
3606 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
3607 S.getCancelRegion());
3610 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
3611 const Expr *IfCond = nullptr;
3612 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3613 if (C->getNameModifier() == OMPD_unknown ||
3614 C->getNameModifier() == OMPD_cancel) {
3615 IfCond = C->getCondition();
3619 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), IfCond,
3620 S.getCancelRegion());
3623 CodeGenFunction::JumpDest
3624 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
3625 if (Kind == OMPD_parallel || Kind == OMPD_task ||
3626 Kind == OMPD_target_parallel)
3628 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
3629 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
3630 Kind == OMPD_distribute_parallel_for ||
3631 Kind == OMPD_target_parallel_for);
3632 return OMPCancelStack.getExitBlock();
3635 void CodeGenFunction::EmitOMPUseDevicePtrClause(
3636 const OMPClause &NC, OMPPrivateScope &PrivateScope,
3637 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
3638 const auto &C = cast<OMPUseDevicePtrClause>(NC);
3639 auto OrigVarIt = C.varlist_begin();
3640 auto InitIt = C.inits().begin();
3641 for (auto PvtVarIt : C.private_copies()) {
3642 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
3643 auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
3644 auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
3646 // In order to identify the right initializer we need to match the
3647 // declaration used by the mapping logic. In some cases we may get
3648 // OMPCapturedExprDecl that refers to the original declaration.
3649 const ValueDecl *MatchingVD = OrigVD;
3650 if (auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
3651 // OMPCapturedExprDecl are used to privative fields of the current
3653 auto *ME = cast<MemberExpr>(OED->getInit());
3654 assert(isa<CXXThisExpr>(ME->getBase()) &&
3655 "Base should be the current struct!");
3656 MatchingVD = ME->getMemberDecl();
3659 // If we don't have information about the current list item, move on to
3661 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
3662 if (InitAddrIt == CaptureDeviceAddrMap.end())
3665 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
3666 // Initialize the temporary initialization variable with the address we
3667 // get from the runtime library. We have to cast the source address
3668 // because it is always a void *. References are materialized in the
3669 // privatization scope, so the initialization here disregards the fact
3670 // the original variable is a reference.
3672 getContext().getPointerType(OrigVD->getType().getNonReferenceType());
3673 llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
3674 Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
3675 setAddrOfLocalVar(InitVD, InitAddr);
3677 // Emit private declaration, it will be initialized by the value we
3678 // declaration we just added to the local declarations map.
3681 // The initialization variables reached its purpose in the emission
3682 // ofthe previous declaration, so we don't need it anymore.
3683 LocalDeclMap.erase(InitVD);
3685 // Return the address of the private variable.
3686 return GetAddrOfLocalVar(PvtVD);
3688 assert(IsRegistered && "firstprivate var already registered as private");
3689 // Silence the warning about unused variable.
3697 // Generate the instructions for '#pragma omp target data' directive.
3698 void CodeGenFunction::EmitOMPTargetDataDirective(
3699 const OMPTargetDataDirective &S) {
3700 CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
3702 // Create a pre/post action to signal the privatization of the device pointer.
3703 // This action can be replaced by the OpenMP runtime code generation to
3704 // deactivate privatization.
3705 bool PrivatizeDevicePointers = false;
3706 class DevicePointerPrivActionTy : public PrePostActionTy {
3707 bool &PrivatizeDevicePointers;
3710 explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
3711 : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
3712 void Enter(CodeGenFunction &CGF) override {
3713 PrivatizeDevicePointers = true;
3716 DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
3718 auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
3719 CodeGenFunction &CGF, PrePostActionTy &Action) {
3720 auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3722 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
3725 // Codegen that selects wheather to generate the privatization code or not.
3726 auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
3727 &InnermostCodeGen](CodeGenFunction &CGF,
3728 PrePostActionTy &Action) {
3729 RegionCodeGenTy RCG(InnermostCodeGen);
3730 PrivatizeDevicePointers = false;
3732 // Call the pre-action to change the status of PrivatizeDevicePointers if
3736 if (PrivatizeDevicePointers) {
3737 OMPPrivateScope PrivateScope(CGF);
3738 // Emit all instances of the use_device_ptr clause.
3739 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
3740 CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
3741 Info.CaptureDeviceAddrMap);
3742 (void)PrivateScope.Privatize();
3748 // Forward the provided action to the privatization codegen.
3749 RegionCodeGenTy PrivRCG(PrivCodeGen);
3750 PrivRCG.setAction(Action);
3752 // Notwithstanding the body of the region is emitted as inlined directive,
3753 // we don't use an inline scope as changes in the references inside the
3754 // region are expected to be visible outside, so we do not privative them.
3755 OMPLexicalScope Scope(CGF, S);
3756 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
3760 RegionCodeGenTy RCG(CodeGen);
3762 // If we don't have target devices, don't bother emitting the data mapping
3764 if (CGM.getLangOpts().OMPTargetTriples.empty()) {
3769 // Check if we have any if clause associated with the directive.
3770 const Expr *IfCond = nullptr;
3771 if (auto *C = S.getSingleClause<OMPIfClause>())
3772 IfCond = C->getCondition();
3774 // Check if we have any device clause associated with the directive.
3775 const Expr *Device = nullptr;
3776 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3777 Device = C->getDevice();
3779 // Set the action to signal privatization of device pointers.
3780 RCG.setAction(PrivAction);
3782 // Emit region code.
3783 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
3787 void CodeGenFunction::EmitOMPTargetEnterDataDirective(
3788 const OMPTargetEnterDataDirective &S) {
3789 // If we don't have target devices, don't bother emitting the data mapping
3791 if (CGM.getLangOpts().OMPTargetTriples.empty())
3794 // Check if we have any if clause associated with the directive.
3795 const Expr *IfCond = nullptr;
3796 if (auto *C = S.getSingleClause<OMPIfClause>())
3797 IfCond = C->getCondition();
3799 // Check if we have any device clause associated with the directive.
3800 const Expr *Device = nullptr;
3801 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3802 Device = C->getDevice();
3804 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
3807 void CodeGenFunction::EmitOMPTargetExitDataDirective(
3808 const OMPTargetExitDataDirective &S) {
3809 // If we don't have target devices, don't bother emitting the data mapping
3811 if (CGM.getLangOpts().OMPTargetTriples.empty())
3814 // Check if we have any if clause associated with the directive.
3815 const Expr *IfCond = nullptr;
3816 if (auto *C = S.getSingleClause<OMPIfClause>())
3817 IfCond = C->getCondition();
3819 // Check if we have any device clause associated with the directive.
3820 const Expr *Device = nullptr;
3821 if (auto *C = S.getSingleClause<OMPDeviceClause>())
3822 Device = C->getDevice();
3824 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
3827 static void emitTargetParallelRegion(CodeGenFunction &CGF,
3828 const OMPTargetParallelDirective &S,
3829 PrePostActionTy &Action) {
3830 // Get the captured statement associated with the 'parallel' region.
3831 auto *CS = S.getCapturedStmt(OMPD_parallel);
3833 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &) {
3834 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
3835 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3836 CGF.EmitOMPPrivateClause(S, PrivateScope);
3837 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
3838 (void)PrivateScope.Privatize();
3839 // TODO: Add support for clauses.
3840 CGF.EmitStmt(CS->getCapturedStmt());
3841 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
3843 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen);
3844 emitPostUpdateForReductionClause(
3845 CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
3848 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
3849 CodeGenModule &CGM, StringRef ParentName,
3850 const OMPTargetParallelDirective &S) {
3851 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3852 emitTargetParallelRegion(CGF, S, Action);
3855 llvm::Constant *Addr;
3856 // Emit target region as a standalone region.
3857 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
3858 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
3859 assert(Fn && Addr && "Target device function emission failed.");
3862 void CodeGenFunction::EmitOMPTargetParallelDirective(
3863 const OMPTargetParallelDirective &S) {
3864 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3865 emitTargetParallelRegion(CGF, S, Action);
3867 emitCommonOMPTargetDirective(*this, S, CodeGen);
3870 void CodeGenFunction::EmitOMPTargetParallelForDirective(
3871 const OMPTargetParallelForDirective &S) {
3872 // TODO: codegen for target parallel for.
3875 /// Emit a helper variable and return corresponding lvalue.
3876 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
3877 const ImplicitParamDecl *PVD,
3878 CodeGenFunction::OMPPrivateScope &Privates) {
3879 auto *VDecl = cast<VarDecl>(Helper->getDecl());
3880 Privates.addPrivate(
3881 VDecl, [&CGF, PVD]() -> Address { return CGF.GetAddrOfLocalVar(PVD); });
3884 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
3885 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
3886 // Emit outlined function for task construct.
3887 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
3888 auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
3889 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
3890 const Expr *IfCond = nullptr;
3891 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
3892 if (C->getNameModifier() == OMPD_unknown ||
3893 C->getNameModifier() == OMPD_taskloop) {
3894 IfCond = C->getCondition();
3900 // Check if taskloop must be emitted without taskgroup.
3901 Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
3902 // TODO: Check if we should emit tied or untied task.
3904 // Set scheduling for taskloop
3905 if (const auto* Clause = S.getSingleClause<OMPGrainsizeClause>()) {
3907 Data.Schedule.setInt(/*IntVal=*/false);
3908 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
3909 } else if (const auto* Clause = S.getSingleClause<OMPNumTasksClause>()) {
3911 Data.Schedule.setInt(/*IntVal=*/true);
3912 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
3915 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
3917 // for (IV in 0..LastIteration) BODY;
3918 // <Final counter/linear vars updates>;
3922 // Emit: if (PreCond) - begin.
3923 // If the condition constant folds and can be elided, avoid emitting the
3926 llvm::BasicBlock *ContBlock = nullptr;
3927 OMPLoopScope PreInitScope(CGF, S);
3928 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
3932 auto *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
3933 ContBlock = CGF.createBasicBlock("taskloop.if.end");
3934 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
3935 CGF.getProfileCount(&S));
3936 CGF.EmitBlock(ThenBlock);
3937 CGF.incrementProfileCounter(&S);
3940 if (isOpenMPSimdDirective(S.getDirectiveKind()))
3941 CGF.EmitOMPSimdInit(S);
3943 OMPPrivateScope LoopScope(CGF);
3944 // Emit helper vars inits.
3945 enum { LowerBound = 5, UpperBound, Stride, LastIter };
3946 auto *I = CS->getCapturedDecl()->param_begin();
3947 auto *LBP = std::next(I, LowerBound);
3948 auto *UBP = std::next(I, UpperBound);
3949 auto *STP = std::next(I, Stride);
3950 auto *LIP = std::next(I, LastIter);
3951 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
3953 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
3955 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
3956 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
3958 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
3959 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
3960 (void)LoopScope.Privatize();
3961 // Emit the loop iteration variable.
3962 const Expr *IVExpr = S.getIterationVariable();
3963 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
3964 CGF.EmitVarDecl(*IVDecl);
3965 CGF.EmitIgnoredExpr(S.getInit());
3967 // Emit the iterations count variable.
3968 // If it is not a variable, Sema decided to calculate iterations count on
3969 // each iteration (e.g., it is foldable into a constant).
3970 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
3971 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
3972 // Emit calculation of the iterations count.
3973 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
3976 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
3978 [&S](CodeGenFunction &CGF) {
3979 CGF.EmitOMPLoopBody(S, JumpDest());
3980 CGF.EmitStopPoint(&S);
3982 [](CodeGenFunction &) {});
3983 // Emit: if (PreCond) - end.
3985 CGF.EmitBranch(ContBlock);
3986 CGF.EmitBlock(ContBlock, true);
3988 // Emit final copy of the lastprivate variables if IsLastIter != 0.
3989 if (HasLastprivateClause) {
3990 CGF.EmitOMPLastprivateClauseFinal(
3991 S, isOpenMPSimdDirective(S.getDirectiveKind()),
3992 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
3993 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
3994 (*LIP)->getType(), S.getLocStart())));
3997 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
3998 IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
3999 const OMPTaskDataTy &Data) {
4000 auto &&CodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &) {
4001 OMPLoopScope PreInitScope(CGF, S);
4002 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getLocStart(), S,
4003 OutlinedFn, SharedsTy,
4004 CapturedStruct, IfCond, Data);
4006 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
4009 EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
4012 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
4013 EmitOMPTaskLoopBasedDirective(S);
4016 void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
4017 const OMPTaskLoopSimdDirective &S) {
4018 EmitOMPTaskLoopBasedDirective(S);
4021 // Generate the instructions for '#pragma omp target update' directive.
4022 void CodeGenFunction::EmitOMPTargetUpdateDirective(
4023 const OMPTargetUpdateDirective &S) {
4024 // If we don't have target devices, don't bother emitting the data mapping
4026 if (CGM.getLangOpts().OMPTargetTriples.empty())
4029 // Check if we have any if clause associated with the directive.
4030 const Expr *IfCond = nullptr;
4031 if (auto *C = S.getSingleClause<OMPIfClause>())
4032 IfCond = C->getCondition();
4034 // Check if we have any device clause associated with the directive.
4035 const Expr *Device = nullptr;
4036 if (auto *C = S.getSingleClause<OMPDeviceClause>())
4037 Device = C->getDevice();
4039 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);