//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace CodeGen;
//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}
void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;
  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
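    // For example (illustrative, not from the original comment): statements
    // after an unconditional 'return;' can be dropped entirely, but
    //   return; lbl: x = 1;
    // cannot, because a 'goto lbl;' elsewhere may still reach 'lbl'.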
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, since they may
      // be in scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }
    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);
  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }
  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");
#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");
    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }
  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:     EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:  EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:     EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:    EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;
  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    llvm_unreachable("Dispatch directive not supported yet.");
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  }
}
bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}
/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
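/// For instance (an illustrative sketch of the GNU statement-expression
/// extension, not part of the original comment), the last sub-statement's
/// value becomes the value of the whole expression:
///   int x = ({ int tmp = f(); tmp + 1; });  // x gets tmp + 1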
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}
Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {
  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();
  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here.  They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression.  Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
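      // e.g. (illustrative): in '({ ...; lbl: 42; })' the label 'lbl' is
      // emitted first, and the statement expression still yields 42.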
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr.  Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}
void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}
void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}
CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}
void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}
/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}
void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa: emit eha.scope.begin if this label is a side entry into a scope.
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    if (A->getKind() == attr::NoMerge) {
      nomerge = true;
    }
    if (A->getKind() == attr::MustTail) {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    }
  }
  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore<const CallExpr *> save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}
void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}
void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
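  // e.g. (illustrative, C++23):
  //   if consteval { /* constant evaluation only */ }
  //   else         { /* the only branch that can run at runtime */ }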
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());
  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }
  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t Count = getProfileCount(S.getThen());
  if (!Count && CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, LH);
  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
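  // e.g. (illustrative): in 'while (int x = next()) use(x);' the variable
  // 'x' is destroyed and re-created on every iteration of the loop.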
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C != nullptr;
  bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));
  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }
  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}
void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C;
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));
  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}
void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  Expr::EvalResult Result;
  bool CondIsConstInt =
      !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));
  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block.  Otherwise, if there is no condition variable, we can
  // form the continue block now.  If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");
    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();
  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));
  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);
  // Create a block for the increment.  In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}
namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace
/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
/// codegen it as 'tail call ...; ret void;'.
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
                                     const CGFunctionInfo *CurFnInfo) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return;
    }
  } else {
    return;
  }
  if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
      (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
    auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
    CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
    Builder.CreateRetVoid();
    Builder.ClearInsertionPoint();
  }
}
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }
  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);
  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
      if (auto *CE = dyn_cast<CallExpr>(RV))
        makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}
void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}
void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}
void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}
/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
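/// For example (GNU case-range extension; illustrative, not from the original
/// comment):
///   case 1 ... 40:   // small range: added as individual switch cases
///   case 1 ... 1000: // large range: emitted as an explicit "if" range check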
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;
  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
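    // Concretely (illustrative): Total = 5 and NCases = 3 give Weight = 1 and
    // Rem = 2, so the first two cases get 1 + 1 = 2 and the third gets 1.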
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }
  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, ie, the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
                                   ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S, Attrs);
    return;
  }

  llvm::ConstantInt *CaseVal =
      Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
  if (SwitchLikelihood)
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive. It also causes
  // deep recursion which can run into stack depth limitations. Handle
  // sequential non-range case statements specially.
  //
  // TODO When the next case has a likelihood attribute the code returns to the
  // recursive algorithm. Maybe improve this case if it becomes common practice
  // to use a lot of attributes.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
        Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, CurCase);
    }
    // Since this loop is only executed when the CaseStmt has no attributes
    // use a hard-coded value.
    if (SwitchLikelihood)
      SwitchLikelihood->push_back(Stmt::LH_None);

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }
  // Generate a stop point for debug info if the case statement is
  // followed by a default statement. A fallthrough case before a
  // default case gets its own branch target.
  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
    EmitStopPoint(CurCase);

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}
void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
                                      ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided. This situation only happens when we've
  // constant-folded the switch.
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  if (SwitchLikelihood)
    SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}
/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit.  Basically,
/// on a switch (5) we want to find these statements:
///    case 5:
///      printf(...);    <--
///      ++i;            <--
///      break;
///
/// and add them to the ResultStmts vector.  If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels.  If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                                            SmallVectorImpl<const Stmt*> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (!S)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for, then
  // we're in business.  Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }
  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (!Case && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a switch statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    bool StartedInLiveCode = FoundCase;
    unsigned StartSize = ResultStmts.size();

    // If we've not found the case yet, scan through looking for it.
    if (Case) {
      // Keep track of whether we see a skipped declaration.  The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means that either 1) that the statement doesn't
          // have the case and is skippable, or 2) does contain the case value
          // and also contains the break to exit the switch.  In the later case,
          // we just verify the rest of the statements are elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case started to include statements.  Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = nullptr;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }

      if (!FoundCase)
        return CSFC_Success;

      assert(!HadSkippedDecl && "fallthrough after skipping decl");
    }
    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    bool AnyDecls = false;
    for (; I != E; ++I) {
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);

      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmt, keep adding them afterwards.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion. We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    // If we're about to fall out of a scope without hitting a 'break;', we
    // can't perform the optimization if there were any decls in that scope
    // (we'd lose their end-of-lifetime).
    if (AnyDecls) {
      // If the entire compound statement was live, there's one more thing we
      // can try before giving up: emit the whole thing as a single statement.
      // We can do that unless the statement contains a 'break;'.
      // FIXME: Such a break must be at the end of a construct within this one.
      // We could emit this by just ignoring the BreakStmts entirely.
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
        ResultStmts.resize(StartSize);
        ResultStmts.push_back(S);
      } else {
        return CSFC_Failure;
      }
    }

    return CSFC_FallThrough;
  }
  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc. If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement. Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great. Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}
/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant. See the comment above CollectStatementsForCase
/// for more details.
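///
/// For example, given 'switch (2) { case 1: a(); case 2: b(); break; }' and a
/// ConstantCondValue of 2, ResultStmts ends up holding just the call to b().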
static bool FindCaseStatementsForValue(const SwitchStmt &S,
                                       const llvm::APSInt &ConstantCondValue,
                                       SmallVectorImpl<const Stmt*> &ResultStmts,
                                       ASTContext &C,
                                       const SwitchCase *&ResultCase) {
  // First step, find the switch case that is being branched to. We can do this
  // efficiently by scanning the SwitchCase list.
  const SwitchCase *Case = S.getSwitchCaseList();
  const DefaultStmt *DefaultCase = nullptr;

  for (; Case; Case = Case->getNextSwitchCase()) {
    // It's either a default or case. Just remember the default statement in
    // case we're not jumping to any numbered cases.
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
      DefaultCase = DS;
      continue;
    }

    // Check to see if this case is the one we're looking for.
    const CaseStmt *CS = cast<CaseStmt>(Case);
    // Don't handle case ranges yet.
    if (CS->getRHS()) return false;

    // If we found our case, remember it as 'case'.
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
      break;
  }

  // If we didn't find a matching case, we use a default if it exists, or we
  // elide the whole switch body!
  if (!Case) {
    // It is safe to elide the body of the switch if it doesn't contain labels
    // etc. If it is safe, return successfully with an empty ResultStmts list.
    if (!DefaultCase)
      return !CodeGenFunction::ContainsLabel(&S);
    Case = DefaultCase;
  }

  // Ok, we know which case is being jumped to, try to collect all the
  // statements that follow it. This can fail for a variety of reasons. Also,
  // check to see that the recursive walk actually found our case statement.
  // Insane cases like this can fail to find it in the recursive walk since we
  // don't handle every stmt kind:
  // switch (4) {
  //   while (1) {
  //     case 4: ...
  bool FoundCase = false;
  ResultCase = Case;
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
                                  ResultStmts) != CSFC_Failure &&
         FoundCase;
}
static Optional<SmallVector<uint64_t, 16>>
getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
  // Are there enough branches to weight them?
  if (Likelihoods.size() <= 1)
    return None;

  uint64_t NumUnlikely = 0;
  uint64_t NumNone = 0;
  uint64_t NumLikely = 0;
  for (const auto LH : Likelihoods) {
    switch (LH) {
    case Stmt::LH_Unlikely:
      ++NumUnlikely;
      break;
    case Stmt::LH_None:
      ++NumNone;
      break;
    case Stmt::LH_Likely:
      ++NumLikely;
      break;
    }
  }

  // Is there a likelihood attribute used?
  if (NumUnlikely == 0 && NumLikely == 0)
    return None;

  // When multiple cases share the same code they can be combined during
  // optimization. In that case the weights of the branch will be the sum of
  // the individual weights. Make sure the combined sum of all neutral cases
  // doesn't exceed the value of a single likely attribute.
  // The additions both avoid divisions by 0 and make sure the weights of None
  // don't exceed the weight of Likely.
  const uint64_t Likely = INT32_MAX / (NumLikely + 2);
  const uint64_t None = Likely / (NumNone + 1);
  const uint64_t Unlikely = 0;
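
  // Illustrative arithmetic for the constants above: with one likely case and
  // three neutral destinations (NumLikely = 1, NumNone = 3), Likely =
  // INT32_MAX / 3 = 715827882 and None = Likely / 4 = 178956970, so the sum
  // of all three neutral weights still stays below a single Likely weight.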
  SmallVector<uint64_t, 16> Result;
  Result.reserve(Likelihoods.size());
  for (const auto LH : Likelihoods) {
    switch (LH) {
    case Stmt::LH_Unlikely:
      Result.push_back(Unlikely);
      break;
    case Stmt::LH_None:
      Result.push_back(None);
      break;
    case Stmt::LH_Likely:
      Result.push_back(Likely);
      break;
    }
  }

  return Result;
}
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
  // Handle nested switch statements.
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
  SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;

  // See if we can constant fold the condition of the switch and therefore only
  // emit the live case statement (if any) of the switch.
  llvm::APSInt ConstantCondValue;
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
    SmallVector<const Stmt*, 4> CaseStmts;
    const SwitchCase *Case = nullptr;
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
                                   getContext(), Case)) {
      if (Case)
        incrementProfileCounter(Case);
      RunCleanupsScope ExecutedScope(*this);

      if (S.getInit())
        EmitStmt(S.getInit());

      // Emit the condition variable if needed inside the entire cleanup scope
      // used by this special case for constant folded switches.
      if (S.getConditionVariable())
        EmitDecl(*S.getConditionVariable());

      // At this point, we are no longer "within" a switch instance, so
      // we can temporarily enforce this to ensure that any embedded case
      // statements are not emitted.
      SwitchInsn = nullptr;

      // Okay, we can dead code eliminate everything except this case. Emit the
      // specified series of statements and we're good.
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
        EmitStmt(CaseStmts[i]);
      incrementProfileCounter(&S);

      // Now we want to restore the saved switch instance so that nested
      // switches continue to function properly.
      SwitchInsn = SavedSwitchInsn;

      return;
    }
  }
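
  // Note: a switch such as 'switch (1) { case 1: foo(); break; }' is handled
  // entirely by the constant-folding path above, emitting only the call to
  // foo(); everything below is the general lowering to a switch instruction.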
  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");

  RunCleanupsScope ConditionScope(*this);

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());
  llvm::Value *CondV = EmitScalarExpr(S.getCond());

  // Create basic block to hold stuff that comes after switch
  // statement. We also need to create a default block now so that
  // explicit case ranges tests can have a place to jump to on
  // failure.
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
  if (PGO.haveRegionCounts()) {
    // Walk the SwitchCase list to find how many there are.
    uint64_t DefaultCount = 0;
    unsigned NumCases = 0;
    for (const SwitchCase *Case = S.getSwitchCaseList();
         Case;
         Case = Case->getNextSwitchCase()) {
      if (isa<DefaultStmt>(Case))
        DefaultCount = getProfileCount(Case);
      NumCases += 1;
    }
    SwitchWeights = new SmallVector<uint64_t, 16>();
    SwitchWeights->reserve(NumCases);
    // The default needs to be first. We store the edge count, so we already
    // know the right weight.
    SwitchWeights->push_back(DefaultCount);
  } else if (CGM.getCodeGenOpts().OptimizationLevel) {
    SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
    // Initialize the default case.
    SwitchLikelihood->push_back(Stmt::LH_None);
  }

  CaseRangeBlock = DefaultBlock;

  // Clear the insertion point to indicate we are in unreachable code.
  Builder.ClearInsertionPoint();

  // All break statements jump to NextBlock. If BreakContinueStack is non-empty
  // then reuse last ContinueBlock.
  JumpDest OuterContinue;
  if (!BreakContinueStack.empty())
    OuterContinue = BreakContinueStack.back().ContinueBlock;

  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));

  // Emit switch body.
  EmitStmt(S.getBody());

  BreakContinueStack.pop_back();

  // Update the default block in case explicit case range tests have
  // been chained on top.
  SwitchInsn->setDefaultDest(CaseRangeBlock);

  // If a default was never emitted:
  if (!DefaultBlock->getParent()) {
    // If we have cleanups, emit the default block so that there's a
    // place to jump through the cleanups from.
    if (ConditionScope.requiresCleanups()) {
      EmitBlock(DefaultBlock);

    // Otherwise, just forward the default block to the switch end.
    } else {
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
      delete DefaultBlock;
    }
  }

  ConditionScope.ForceCleanup();

  // Emit continuation.
  EmitBlock(SwitchExit.getBlock(), true);
  incrementProfileCounter(&S);

  // If the switch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the switch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  auto *Call = dyn_cast<CallExpr>(S.getCond());
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
                              MDHelper.createUnpredictable());
    }
  }

  if (SwitchWeights) {
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
           "switch weights do not match switch cases");
    // If there's only one jump destination there's no sense weighting it.
    if (SwitchWeights->size() > 1)
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
                              createProfileWeights(*SwitchWeights));
    delete SwitchWeights;
  } else if (SwitchLikelihood) {
    assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
           "switch likelihoods do not match switch cases");
    Optional<SmallVector<uint64_t, 16>> LHW =
        getLikelihoodWeights(*SwitchLikelihood);
    if (LHW) {
      llvm::MDBuilder MDHelper(CGM.getLLVMContext());
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
                              createProfileWeights(*LHW));
    }
    delete SwitchLikelihood;
  }
  SwitchInsn = SavedSwitchInsn;
  SwitchWeights = SavedSwitchWeights;
  SwitchLikelihood = SavedSwitchLikelihood;
  CaseRangeBlock = SavedCRBlock;
}
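
// SimplifyConstraint - Lower a GCC inline-asm constraint string into the form
// LLVM expects: modifiers tracked separately ('=', '+', '*', '?', '!') are
// dropped, alternative separators (',') become '|', 'g' expands to "imr", and
// symbolic operand names ('[foo]') are resolved to operand indices. For
// example, on typical targets the multi-alternative constraint "=g,r" becomes
// "imr|r".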
static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
                   SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
  std::string Result;

  while (*Constraint) {
    switch (*Constraint) {
    default:
      Result += Target.convertConstraint(Constraint);
      break;
    // Ignore these
    case '*':
    case '?':
    case '!':
    case '=': // Will see this and the following in mult-alt constraints.
    case '+':
      break;
    case '#': // Ignore the rest of the constraint alternative.
      while (Constraint[1] && Constraint[1] != ',')
        Constraint++;
      break;
    case '&':
    case '%':
      Result += *Constraint;
      while (Constraint[1] && Constraint[1] == *Constraint)
        Constraint++;
      break;
    case ',':
      Result += "|";
      break;
    case 'g':
      Result += "imr";
      break;
    case '[': {
      assert(OutCons &&
             "Must pass output names to constraints with a symbolic name");
      unsigned Index;
      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
      assert(result && "Could not resolve symbolic name"); (void)result;
      Result += llvm::utostr(Index);
      break;
    }
    }

    Constraint++;
  }

  return Result;
}
/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
/// as using a particular register add that as a constraint that will be used
/// in this asm stmt.
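///
/// For example, with 'register int x asm("eax");' bound to an operand, the
/// simplified constraint is rewritten to "{eax}" (or "&{eax}" for an
/// early-clobber output), pinning the operand to that register.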
static std::string
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
                       const TargetInfo &Target, CodeGenModule &CGM,
                       const AsmStmt &Stmt, const bool EarlyClobber,
                       std::string *GCCReg = nullptr) {
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
  if (!AsmDeclRef)
    return Constraint;
  const ValueDecl &Value = *AsmDeclRef->getDecl();
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
  if (!Variable)
    return Constraint;
  if (Variable->getStorageClass() != SC_Register)
    return Constraint;
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
  if (!Attr)
    return Constraint;
  StringRef Register = Attr->getLabel();
  assert(Target.isValidGCCRegisterName(Register));
  // We're using validateOutputConstraint here because we only care if
  // this is a register constraint.
  TargetInfo::ConstraintInfo Info(Constraint, "");
  if (Target.validateOutputConstraint(Info) && !Info.allowsRegister()) {
    CGM.ErrorUnsupported(&Stmt, "__asm__");
    return Constraint;
  }
  // Canonicalize the register here before returning it.
  Register = Target.getNormalizedGCCRegisterName(Register);
  if (GCCReg != nullptr)
    *GCCReg = Register.str();
  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
}
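
// Emit an lvalue asm operand. Operands that may live in a register are loaded
// and passed by value (small scalarizable aggregates are loaded through a
// same-width integer pointer); everything else is passed indirectly, which is
// recorded by appending '*' to the constraint string.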
std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
    const TargetInfo::ConstraintInfo &Info, LValue InputValue,
    QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
  if (Info.allowsRegister() || !Info.allowsMemory()) {
    if (CodeGenFunction::hasScalarEvaluationKind(InputType))
      return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};

    llvm::Type *Ty = ConvertType(InputType);
    uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
    if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
        getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
      Ty = llvm::IntegerType::get(getLLVMContext(), Size);
      Ty = llvm::PointerType::getUnqual(Ty);

      return {Builder.CreateLoad(
                  Builder.CreateBitCast(InputValue.getAddress(*this), Ty)),
              nullptr};
    }
  }

  Address Addr = InputValue.getAddress(*this);
  ConstraintStr += '*';
  return {Addr.getPointer(), Addr.getElementType()};
}
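
// Emit an expression asm input operand. Immediate-only constraints are
// evaluated to integer constants when possible, scalar register operands are
// emitted by value, and everything else is emitted as an lvalue and handed to
// EmitAsmInputLValue above.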
std::pair<llvm::Value *, llvm::Type *>
CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
                              const Expr *InputExpr,
                              std::string &ConstraintStr) {
  // If this can't be a register or memory, i.e., has to be a constant
  // (immediate or symbolic), try to emit it as such.
  if (!Info.allowsRegister() && !Info.allowsMemory()) {
    if (Info.requiresImmediateConstant()) {
      Expr::EvalResult EVResult;
      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);

      llvm::APSInt IntResult;
      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
                                          getContext()))
        return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
    }

    Expr::EvalResult Result;
    if (InputExpr->EvaluateAsInt(Result, getContext()))
      return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
              nullptr};
  }

  if (Info.allowsRegister() || !Info.allowsMemory())
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
      return {EmitScalarExpr(InputExpr), nullptr};
  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
    return {EmitScalarExpr(InputExpr), nullptr};
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
  LValue Dest = EmitLValue(InputExpr);
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
                            InputExpr->getExprLoc());
}
/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
/// asm call instruction. The !srcloc MDNode contains a list of constant
/// integers which are the source locations of the start of each line in the
/// asm.
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
                                      CodeGenFunction &CGF) {
  SmallVector<llvm::Metadata *, 8> Locs;
  // Add the location of the first line to the MDNode.
  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
      CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
  StringRef StrVal = Str->getString();
  if (!StrVal.empty()) {
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();
    unsigned StartToken = 0;
    unsigned ByteOffset = 0;

    // Add the location of the start of each subsequent line of the asm to the
    // MDNode.
    for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
      if (StrVal[i] != '\n') continue;
      SourceLocation LineLoc = Str->getLocationOfByte(
          i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
      Locs.push_back(llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
    }
  }

  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
}
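
// Apply the attributes and metadata common to every emitted asm call:
// nounwind (unless "unwind" is clobbered), nomerge, readonly/readnone where
// the statement allows it, elementtype for indirect operands, !srcloc, and
// convergent where the language requires it; then unpack the call's results
// into RegResults.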
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
                              bool HasUnwindClobber, bool ReadOnly,
                              bool ReadNone, bool NoMerge, const AsmStmt &S,
                              const std::vector<llvm::Type *> &ResultRegTypes,
                              const std::vector<llvm::Type *> &ArgElemTypes,
                              CodeGenFunction &CGF,
                              std::vector<llvm::Value *> &RegResults) {
  if (!HasUnwindClobber)
    Result.addFnAttr(llvm::Attribute::NoUnwind);

  if (NoMerge)
    Result.addFnAttr(llvm::Attribute::NoMerge);
  // Attach readnone and readonly attributes.
  if (!HasSideEffect) {
    if (ReadNone)
      Result.addFnAttr(llvm::Attribute::ReadNone);
    else if (ReadOnly)
      Result.addFnAttr(llvm::Attribute::ReadOnly);
  }

  // Add elementtype attribute for indirect constraints.
  for (auto Pair : llvm::enumerate(ArgElemTypes)) {
    if (Pair.value()) {
      auto Attr = llvm::Attribute::get(
          CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
      Result.addParamAttr(Pair.index(), Attr);
    }
  }

  // Slap the source location of the inline asm into a !srcloc metadata on the
  // call.
  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
    Result.setMetadata("srcloc",
                       getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
  else {
    // At least put the line number on MS inline asm blobs.
    llvm::Constant *Loc =
        llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
    Result.setMetadata("srcloc",
                       llvm::MDNode::get(CGF.getLLVMContext(),
                                         llvm::ConstantAsMetadata::get(Loc)));
  }

  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
    // convergent (meaning, they may call an intrinsically convergent op, such
    // as bar.sync, and so can't have certain optimizations applied around
    // them).
    Result.addFnAttr(llvm::Attribute::Convergent);
  // Extract all of the register value results from the asm.
  if (ResultRegTypes.size() == 1) {
    RegResults.push_back(&Result);
  } else {
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
      RegResults.push_back(Tmp);
    }
  }
}
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
  // Assemble the final asm string.
  std::string AsmString = S.generateAsmString(getContext());

  // Get all the output and input constraints together.
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;

  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getOutputName(i);
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
    assert(IsValid && "Failed to parse output constraint");
    OutputConstraintInfos.push_back(Info);
  }

  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getInputName(i);
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
    bool IsValid =
        getTarget().validateInputConstraint(OutputConstraintInfos, Info);
    assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
    InputConstraintInfos.push_back(Info);
  }
  std::string Constraints;

  std::vector<LValue> ResultRegDests;
  std::vector<QualType> ResultRegQualTys;
  std::vector<llvm::Type *> ResultRegTypes;
  std::vector<llvm::Type *> ResultTruncRegTypes;
  std::vector<llvm::Type *> ArgTypes;
  std::vector<llvm::Type *> ArgElemTypes;
  std::vector<llvm::Value*> Args;
  llvm::BitVector ResultTypeRequiresCast;

  // Keep track of inout constraints.
  std::string InOutConstraints;
  std::vector<llvm::Value*> InOutArgs;
  std::vector<llvm::Type*> InOutArgTypes;
  std::vector<llvm::Type*> InOutArgElemTypes;

  // Keep track of out constraints for tied input operand.
  std::vector<std::string> OutputConstraints;

  // Keep track of defined physregs.
  llvm::SmallSet<std::string, 8> PhysRegOutputs;

  // An inline asm can be marked readonly if it meets the following conditions:
  //  - it doesn't have any sideeffects
  //  - it doesn't clobber memory
  //  - it doesn't return a value by-reference
  // It can be marked readnone if it doesn't have any input memory constraints
  // in addition to meeting the conditions listed above.
  bool ReadOnly = true, ReadNone = true;
  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];

    // Simplify the output constraint.
    std::string OutputConstraint(S.getOutputConstraint(i));
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
                                          getTarget(), &OutputConstraintInfos);

    const Expr *OutExpr = S.getOutputExpr(i);
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());

    std::string GCCReg;
    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
                                              getTarget(), CGM, S,
                                              Info.earlyClobber(),
                                              &GCCReg);
    // Give an error on multiple outputs to same physreg.
    if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
      CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);

    OutputConstraints.push_back(OutputConstraint);
    LValue Dest = EmitLValue(OutExpr);
    if (!Constraints.empty())
      Constraints += ',';

    // If this is a register output, then make the inline asm return it
    // by-value. If this is a memory result, return the value by-reference.
    QualType QTy = OutExpr->getType();
    const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
                                     hasAggregateEvaluationKind(QTy);
    if (!Info.allowsMemory() && IsScalarOrAggregate) {

      Constraints += "=" + OutputConstraint;
      ResultRegQualTys.push_back(QTy);
      ResultRegDests.push_back(Dest);

      llvm::Type *Ty = ConvertTypeForMem(QTy);
      const bool RequiresCast = Info.allowsRegister() &&
          (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
           Ty->isAggregateType());

      ResultTruncRegTypes.push_back(Ty);
      ResultTypeRequiresCast.push_back(RequiresCast);

      if (RequiresCast) {
        unsigned Size = getContext().getTypeSize(QTy);
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
      }
      ResultRegTypes.push_back(Ty);
      // If this output is tied to an input, and if the input is larger, then
      // we need to set the actual result type of the inline asm node to be the
      // same as the input type.
      if (Info.hasMatchingInput()) {
        unsigned InputNo;
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
            break;
        }
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");

        QualType InputTy = S.getInputExpr(InputNo)->getType();
        QualType OutputType = OutExpr->getType();

        uint64_t InputSize = getContext().getTypeSize(InputTy);
        if (getContext().getTypeSize(OutputType) < InputSize) {
          // Form the asm to return the value as a larger integer or fp type.
          ResultRegTypes.back() = ConvertType(InputTy);
        }
      }
      if (llvm::Type* AdjTy =
              getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                   ResultRegTypes.back()))
        ResultRegTypes.back() = AdjTy;
      else {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::err_asm_invalid_type_in_input)
            << OutExpr->getType() << OutputConstraint;
      }

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
    } else {
      Address DestAddr = Dest.getAddress(*this);
      // Matrix types in memory are represented by arrays, but accessed through
      // vector pointers, with the alignment specified on the access operation.
      // For inline assembly, update pointer arguments to use vector pointers.
      // Otherwise there will be a mis-match if the matrix is also an
      // input-argument which is represented as vector.
      if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
        DestAddr = Builder.CreateElementBitCast(
            DestAddr, ConvertType(OutExpr->getType()));

      ArgTypes.push_back(DestAddr.getType());
      ArgElemTypes.push_back(DestAddr.getElementType());
      Args.push_back(DestAddr.getPointer());
      Constraints += "=*";
      Constraints += OutputConstraint;
      ReadOnly = ReadNone = false;
    }
    if (Info.isReadWrite()) {
      InOutConstraints += ',';

      const Expr *InputExpr = S.getOutputExpr(i);
      llvm::Value *Arg;
      llvm::Type *ArgElemType;
      std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
          Info, Dest, InputExpr->getType(), InOutConstraints,
          InputExpr->getExprLoc());

      if (llvm::Type* AdjTy =
              getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                   Arg->getType()))
        Arg = Builder.CreateBitCast(Arg, AdjTy);

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
      // Only tie earlyclobber physregs.
      if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
        InOutConstraints += llvm::utostr(i);
      else
        InOutConstraints += OutputConstraint;

      InOutArgTypes.push_back(Arg->getType());
      InOutArgElemTypes.push_back(ArgElemType);
      InOutArgs.push_back(Arg);
    }
  }
  // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
  // to the return value slot. Only do this when returning in registers.
  if (isa<MSAsmStmt>(&S)) {
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
    if (RetAI.isDirect() || RetAI.isExtend()) {
      // Make a fake lvalue for the return value slot.
      LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
      CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
          *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
          ResultRegDests, AsmString, S.getNumOutputs());
      SawAsmBlock = true;
    }
  }
  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    const Expr *InputExpr = S.getInputExpr(i);

    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];

    if (Info.allowsMemory())
      ReadNone = false;

    if (!Constraints.empty())
      Constraints += ',';

    // Simplify the input constraint.
    std::string InputConstraint(S.getInputConstraint(i));
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
                                         &OutputConstraintInfos);

    InputConstraint = AddVariableConstraints(
        InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
        getTarget(), CGM, S, false /* No EarlyClobber */);

    std::string ReplaceConstraint (InputConstraint);
    llvm::Value *Arg;
    llvm::Type *ArgElemType;
    std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);

    // If this input argument is tied to a larger output result, extend the
    // input to be the same size as the output. The LLVM backend wants to see
    // the input and output of a matching constraint be the same size. Note
    // that GCC does not define what the top bits are here. We use zext because
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
    if (Info.hasTiedOperand()) {
      unsigned Output = Info.getTiedOperand();
      QualType OutputType = S.getOutputExpr(Output)->getType();
      QualType InputTy = InputExpr->getType();

      if (getContext().getTypeSize(OutputType) >
          getContext().getTypeSize(InputTy)) {
        // Use ptrtoint as appropriate so that we can do our extension.
        if (isa<llvm::PointerType>(Arg->getType()))
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
        llvm::Type *OutputTy = ConvertType(OutputType);
        if (isa<llvm::IntegerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, OutputTy);
        else if (isa<llvm::PointerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
        else {
          assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
          Arg = Builder.CreateFPExt(Arg, OutputTy);
        }
      }
      // Deal with the tied operands' constraint code in adjustInlineAsmType.
      ReplaceConstraint = OutputConstraints[Output];
    }
    if (llvm::Type* AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
                                                 Arg->getType()))
      Arg = Builder.CreateBitCast(Arg, AdjTy);
    else
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
          << InputExpr->getType() << InputConstraint;

    // Update largest vector width for any vector types.
    if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());

    ArgTypes.push_back(Arg->getType());
    ArgElemTypes.push_back(ArgElemType);
    Args.push_back(Arg);
    Constraints += InputConstraint;
  }
  // Append the "input" part of inout constraints.
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
    ArgTypes.push_back(InOutArgTypes[i]);
    ArgElemTypes.push_back(InOutArgElemTypes[i]);
    Args.push_back(InOutArgs[i]);
  }
  Constraints += InOutConstraints;
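
  // At this point the constraint string has the shape
  // "<outputs>,<inputs>,<inout inputs>"; e.g. for
  // 'asm("..." : "+r"(x) : "r"(y))' it is now "=r,r,0", where "0" ties the
  // inout input back to output operand 0.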
  SmallVector<llvm::BasicBlock *, 16> Transfer;
  llvm::BasicBlock *Fallthrough = nullptr;
  bool IsGCCAsmGoto = false;
  if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
    IsGCCAsmGoto = GS->isAsmGoto();
    if (IsGCCAsmGoto) {
      for (const auto *E : GS->labels()) {
        JumpDest Dest = getJumpDestForLabel(E->getLabel());
        Transfer.push_back(Dest.getBlock());
        llvm::BlockAddress *BA =
            llvm::BlockAddress::get(CurFn, Dest.getBlock());
        Args.push_back(BA);
        ArgTypes.push_back(BA->getType());
        ArgElemTypes.push_back(nullptr);
        if (!Constraints.empty())
          Constraints += ',';
        Constraints += 'i';
      }
      Fallthrough = createBasicBlock("asm.fallthrough");
    }
  }
  bool HasUnwindClobber = false;

  // Clobbers
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
    StringRef Clobber = S.getClobber(i);

    if (Clobber == "memory")
      ReadOnly = ReadNone = false;
    else if (Clobber == "unwind") {
      HasUnwindClobber = true;
      continue;
    } else if (Clobber != "cc") {
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
      if (CGM.getCodeGenOpts().StackClashProtector &&
          getTarget().isSPRegName(Clobber)) {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::warn_stack_clash_protection_inline_asm);
      }
    }

    if (isa<MSAsmStmt>(&S)) {
      if (Clobber == "eax" || Clobber == "edx") {
        if (Constraints.find("=&A") != std::string::npos)
          continue;
        std::string::size_type position1 =
            Constraints.find("={" + Clobber.str() + "}");
        if (position1 != std::string::npos) {
          Constraints.insert(position1 + 1, "&");
          continue;
        }
        std::string::size_type position2 = Constraints.find("=A");
        if (position2 != std::string::npos) {
          Constraints.insert(position2 + 1, "&");
          continue;
        }
      }
    }
    if (!Constraints.empty())
      Constraints += ',';

    Constraints += "~{";
    Constraints += Clobber;
    Constraints += '}';
  }

  assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
         "unwind clobber can't be used with asm goto");
  // Add machine specific clobbers
  std::string MachineClobbers = getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    if (!Constraints.empty())
      Constraints += ',';
    Constraints += MachineClobbers;
  }

  llvm::Type *ResultType;
  if (ResultRegTypes.empty())
    ResultType = VoidTy;
  else if (ResultRegTypes.size() == 1)
    ResultType = ResultRegTypes[0];
  else
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(ResultType, ArgTypes, false);

  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;

  llvm::InlineAsm::AsmDialect GnuAsmDialect =
      CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
          ? llvm::InlineAsm::AD_ATT
          : llvm::InlineAsm::AD_Intel;
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
      llvm::InlineAsm::AD_Intel : GnuAsmDialect;

  llvm::InlineAsm *IA = llvm::InlineAsm::get(
      FTy, AsmString, Constraints, HasSideEffect,
      /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
  std::vector<llvm::Value*> RegResults;
  if (IsGCCAsmGoto) {
    llvm::CallBrInst *Result =
        Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
    EmitBlock(Fallthrough);
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
                      ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
                      ResultRegTypes, ArgElemTypes, *this, RegResults);
  } else if (HasUnwindClobber) {
    llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
    UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
                      InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
                      *this, RegResults);
  } else {
    llvm::CallInst *Result =
        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
                      ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
                      ResultRegTypes, ArgElemTypes, *this, RegResults);
  }
  assert(RegResults.size() == ResultRegTypes.size());
  assert(RegResults.size() == ResultTruncRegTypes.size());
  assert(RegResults.size() == ResultRegDests.size());
  // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
  // in which case its size may grow.
  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
    llvm::Value *Tmp = RegResults[i];
    llvm::Type *TruncTy = ResultTruncRegTypes[i];

    // If the result type of the LLVM IR asm doesn't match the result type of
    // the expression, do the conversion.
    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {

      // Truncate the integer result to the right size, note that TruncTy can be
      // a pointer.
      if (TruncTy->isFloatingPointTy())
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
        Tmp = Builder.CreateTrunc(Tmp,
            llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
        uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
        Tmp = Builder.CreatePtrToInt(Tmp,
            llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
      } else if (TruncTy->isIntegerTy()) {
        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
      } else if (TruncTy->isVectorTy()) {
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
      }
    }

    LValue Dest = ResultRegDests[i];
    // ResultTypeRequiresCast elements correspond to the first
    // ResultTypeRequiresCast.size() elements of RegResults.
    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
      unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
      Address A = Builder.CreateBitCast(Dest.getAddress(*this),
                                        ResultRegTypes[i]->getPointerTo());
      if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
        Builder.CreateStore(Tmp, A);
        continue;
      }

      QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
      if (Ty.isNull()) {
        const Expr *OutExpr = S.getOutputExpr(i);
        CGM.Error(
            OutExpr->getExprLoc(),
            "impossible constraint in asm: can't store value into a register");
        return;
      }
      Dest = MakeAddrLValue(A, Ty);
    }
    EmitStoreThroughLValue(RValue::get(Tmp), Dest);
  }
}
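
/// Allocate a temporary of the CapturedStmt's implicit record type and
/// initialize one field per capture; VLA bound fields are captured by value
/// via EmitLambdaVLACapture.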
LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  QualType RecordTy = getContext().getRecordType(RD);

  // Initialize the captured struct.
  LValue SlotLV =
      MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);

  RecordDecl::field_iterator CurField = RD->field_begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField) {
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
    } else {
      EmitInitializerForField(*CurField, LV, *I);
    }
  }

  return SlotLV;
}
/// Generate an outlined function for the body of a CapturedStmt, store any
/// captured variables into the captured struct, and call the outlined function.
llvm::Function *
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
  LValue CapStruct = InitCapturedStruct(S);

  // Emit the CapturedDecl
  CodeGenFunction CGF(CGM, true);
  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
  delete CGF.CapturedStmtInfo;

  // Emit call to the helper function.
  EmitCallOrInvoke(F, CapStruct.getPointer(*this));

  return F;
}
Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
  LValue CapStruct = InitCapturedStruct(S);
  return CapStruct.getAddress(*this);
}
/// Creates the outlined function for a CapturedStmt.
llvm::Function *
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
  assert(CapturedStmtInfo &&
         "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  SourceLocation Loc = S.getBeginLoc();
  assert(CD->hasBody() && "missing CapturedDecl body");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(), CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
                CD->getBody()->getBeginLoc());
  // Set the context parameter in CapturedStmtInfo.
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));

  // Initialize variable-length arrays.
  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
                                           Ctx.getTagDeclType(RD));
  for (auto *FD : RD->fields()) {
    if (FD->hasCapturedVLAType()) {
      auto *ExprArg =
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
              .getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
    }
  }

  // If 'this' is captured, load it into CXXThisValue.
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
    LValue ThisLValue = EmitLValueForField(Base, FD);
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
  }

  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}