//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}
void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;
  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple; they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }
    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }
  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");
#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }
  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:     EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:  EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:     EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:    EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    break;
  }
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  }
}
bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}
/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
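///
/// For example (an illustrative sketch, not part of the original comment), in
/// the GNU statement expression
///   int y = ({ int x = compute(); x + 1; });
/// the last sub-statement 'x + 1' is the captured result when GetLast is true
/// ('compute' is a placeholder name).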
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {
  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");
  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
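      // Illustrative sketch (not from the original comment): in a statement
      // expression such as
      //   ({ work(); done: result; })
      // the label 'done' is emitted first and 'result' supplies the value
      // ('work', 'done', and 'result' are placeholder names).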
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EmitStopPoint(ExprResult);

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}
void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}
void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}
void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}
CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}
void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}
/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}
void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  for (const auto *A : S.getAttrs())
    if (A->getKind() == attr::NoMerge) {
      nomerge = true;
      break;
    }
  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}
void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}
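
// Indirect gotos come from GCC's labels-as-values extension, e.g.
// (illustrative) 'void *addr = &&done; ... goto *addr;', where each
// possible target feeds the PHI node in the shared indirect-goto block.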
void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t Count = getProfileCount(S.getThen());
  if (!Count && CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, LH);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    // There is no need to emit line number for an unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    EmitBranch(ContBlock);
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
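  // Illustrative example (not from the original comment): in
  //   while (T *p = next()) use(p);
  // the condition variable 'p' is constructed and destroyed on every
  // iteration ('T', 'next', and 'use' are placeholder names).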
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  bool LoopMustProgress = false;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal)) {
    if (C->isOne()) {
      EmitBoolCondBranch = false;
      FnIsMustProgress = false;
    }
  } else if (LanguageRequiresProgress())
    LoopMustProgress = true;

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()), LoopMustProgress);

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights = createProfileOrBranchWeightsForLoop(
        S.getCond(), getProfileCount(S.getBody()), S.getBody());
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}
void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  bool LoopMustProgress = false;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal)) {
    if (C->isZero())
      EmitBoolCondBranch = false;
    else if (C->isOne())
      FnIsMustProgress = false;
  } else if (LanguageRequiresProgress())
    LoopMustProgress = true;

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()), LoopMustProgress);

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}
void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  bool LoopMustProgress = false;
  Expr::EvalResult Result;
  if (LanguageRequiresProgress()) {
    if (!S.getCond()) {
      FnIsMustProgress = false;
    } else if (!S.getCond()->EvaluateAsInt(Result, getContext())) {
      LoopMustProgress = true;
    }
  }

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()), LoopMustProgress);

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block. Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that block will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights = createProfileOrBranchWeightsForLoop(
        S.getCond(), getProfileCount(S.getBody()), S.getBody());

    if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
      if (C->isOne())
        FnIsMustProgress = false;

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights = createProfileOrBranchWeightsForLoop(
      S.getCond(), getProfileCount(S.getBody()), S.getBody());
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}
namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
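///
/// Illustrative examples (not from the original comment), where g() is some
/// function returning void:
///   void f(void) { return g(); }  // operand present in a void function
///   int h(void) { return; }       // operand missing in a non-void function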
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}
void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}
void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}
void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}
/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
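///
/// For example (illustrative): with the GNU range extension, 'case 1 ... 8:'
/// is small enough to become individual switch cases, while 'case 1 ... 1000:'
/// is instead emitted as an unsigned 'sub' plus 'icmp ule' range check chained
/// off the switch's default destination.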
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range, we
    // need to update the weight for the default, ie, the first case, to include
    // the case range's weight.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Weights = createBranchWeights(LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
                                   ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S, Attrs);
    return;
  }

  llvm::ConstantInt *CaseVal =
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
  if (SwitchLikelihood)
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive. It also causes
  // deep recursion which can run into stack depth limitations. Handle
  // sequential non-range case statements specially.
  //
  // TODO When the next case has a likelihood attribute the code returns to the
  // recursive algorithm. Maybe improve this case if it becomes common practice
  // to use a lot of attributes.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, CurCase);
    }
    // Since this loop is only executed when the CaseStmt has no attributes
    // use a hard-coded value.
    if (SwitchLikelihood)
      SwitchLikelihood->push_back(Stmt::LH_None);

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}
void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
                                      ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided. This situation only happens when we've
  // constant-folded the switch.
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  if (SwitchLikelihood)
    SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}
/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit. Basically,
/// on a switch (5) we want to find these statements:
///    case 5:
///      printf(...);    <--
///      ++i;            <--
///      break;
///
/// and add them to the ResultStmts vector. If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough. If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels. If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                              SmallVectorImpl<const Stmt*> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (!S)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for, then
  // we're in business. Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (!Case && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a switch statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    bool StartedInLiveCode = FoundCase;
    unsigned StartSize = ResultStmts.size();

    // If we've not found the case yet, scan through looking for it.
    if (Case) {
      // Keep track of whether we see a skipped declaration. The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means that either 1) that the statement doesn't
          // have the case and is skippable, or 2) does contain the case value
          // and also contains the break to exit the switch. In the latter case,
          // we just verify the rest of the statements are elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case started to include statements. Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = nullptr;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }

      if (!FoundCase)
        return CSFC_Success;

      assert(!HadSkippedDecl && "fallthrough after skipping decl");
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    bool AnyDecls = false;
    for (; I != E; ++I) {
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);

      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmt, keep adding them afterwards.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion. We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    // If we're about to fall out of a scope without hitting a 'break;', we
    // can't perform the optimization if there were any decls in that scope
    // (we'd lose their end-of-lifetime).
    if (AnyDecls) {
      // If the entire compound statement was live, there's one more thing we
      // can try before giving up: emit the whole thing as a single statement.
      // We can do that unless the statement contains a 'break;'.
      // FIXME: Such a break must be at the end of a construct within this one.
      // We could emit this by just ignoring the BreakStmts entirely.
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
        ResultStmts.resize(StartSize);
        ResultStmts.push_back(S);
      } else {
        return CSFC_Failure;
      }
    }

    return CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc. If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement. Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great. Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}
/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant. See the comment above CollectStatementsForCase
/// for more details.
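///
/// For example (illustrative): for
///   switch (2) { case 1: a(); case 2: b(); break; }
/// this finds 'case 2' and collects just the call to b().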
1660 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1661 const llvm::APSInt &ConstantCondValue,
1662 SmallVectorImpl<const Stmt*> &ResultStmts,
1664 const SwitchCase *&ResultCase) {
1665 // First step, find the switch case that is being branched to. We can do this
1666 // efficiently by scanning the SwitchCase list.
1667 const SwitchCase *Case = S.getSwitchCaseList();
1668 const DefaultStmt *DefaultCase = nullptr;
1670 for (; Case; Case = Case->getNextSwitchCase()) {
1671 // It's either a default or case. Just remember the default statement in
1672 // case we're not jumping to any numbered cases.
1673 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1674 DefaultCase = DS;
1675 continue;
1676 }
1678 // Check to see if this case is the one we're looking for.
1679 const CaseStmt *CS = cast<CaseStmt>(Case);
1680 // Don't handle case ranges yet.
1681 if (CS->getRHS()) return false;
1683 // If we found our case, remember it as 'case'.
1684 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1685 break;
1686 }
1688 // If we didn't find a matching case, we use a default if it exists, or we
1689 // elide the whole switch body!
1690 if (!Case) {
1691 // It is safe to elide the body of the switch if it doesn't contain labels
1692 // etc. If it is safe, return successfully with an empty ResultStmts list.
1693 if (!DefaultCase)
1694 return !CodeGenFunction::ContainsLabel(&S);
1695 Case = DefaultCase;
1696 }
1698 // Ok, we know which case is being jumped to, try to collect all the
1699 // statements that follow it. This can fail for a variety of reasons. Also,
1700 // check to see that the recursive walk actually found our case statement.
1701 // Insane cases like this can fail to find it in the recursive walk since we
1702 // don't handle every stmt kind:
1703 // switch (4) {
1704 //   while (1) {
1705 //     case 4: ...
1706 bool FoundCase = false;
1707 ResultCase = Case;
1708 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1709 ResultStmts) != CSFC_Failure &&
1710 FoundCase;
1711 }
1713 static Optional<SmallVector<uint64_t, 16>>
1714 getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1715 // Are there enough branches to weight them?
1716 if (Likelihoods.size() <= 1)
1717 return None;
1719 uint64_t NumUnlikely = 0;
1720 uint64_t NumNone = 0;
1721 uint64_t NumLikely = 0;
1722 for (const auto LH : Likelihoods) {
1723 switch (LH) {
1724 case Stmt::LH_Unlikely:
1725 ++NumUnlikely;
1726 break;
1727 case Stmt::LH_None:
1728 ++NumNone;
1729 break;
1730 case Stmt::LH_Likely:
1731 ++NumLikely;
1732 break;
1733 }
1734 }
1736 // Is there a likelihood attribute used?
1737 if (NumUnlikely == 0 && NumLikely == 0)
1738 return None;
1740 // When multiple cases share the same code they can be combined during
1741 // optimization. In that case the weights of the branch will be the sum of
1742 // the individual weights. Make sure the combined sum of all neutral cases
1743 // doesn't exceed the value of a single likely attribute.
1744 // The additions both avoid divisions by 0 and make sure the weights of None
1745 // don't exceed the weight of Likely.
1746 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
1747 const uint64_t None = Likely / (NumNone + 1);
1748 const uint64_t Unlikely = 0;
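// Worked example (editorial sketch): for likelihoods {LH_Likely, LH_None,
// LH_None}, NumLikely = 1 and NumNone = 2, so Likely = INT32_MAX / 3 =
// 715827882 and None = Likely / 3 = 238609294. The two None weights sum to
// 477218588, which stays below the single Likely weight as intended.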
1750 SmallVector<uint64_t, 16> Result;
1751 Result.reserve(Likelihoods.size());
1752 for (const auto LH : Likelihoods) {
1753 switch (LH) {
1754 case Stmt::LH_Unlikely:
1755 Result.push_back(Unlikely);
1756 break;
1757 case Stmt::LH_None:
1758 Result.push_back(None);
1759 break;
1760 case Stmt::LH_Likely:
1761 Result.push_back(Likely);
1762 break;
1763 }
1764 }
1766 return Result;
1767 }
1769 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1770 // Handle nested switch statements.
1771 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1772 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1773 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
1774 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1776 // See if we can constant fold the condition of the switch and therefore only
1777 // emit the live case statement (if any) of the switch.
1778 llvm::APSInt ConstantCondValue;
1779 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1780 SmallVector<const Stmt*, 4> CaseStmts;
1781 const SwitchCase *Case = nullptr;
1782 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1783 getContext(), Case)) {
1784 if (Case)
1785 incrementProfileCounter(Case);
1786 RunCleanupsScope ExecutedScope(*this);
1788 if (S.getInit())
1789 EmitStmt(S.getInit());
1791 // Emit the condition variable if needed inside the entire cleanup scope
1792 // used by this special case for constant folded switches.
1793 if (S.getConditionVariable())
1794 EmitDecl(*S.getConditionVariable());
1796 // At this point, we are no longer "within" a switch instance, so
1797 // we can temporarily enforce this to ensure that any embedded case
1798 // statements are not emitted.
1799 SwitchInsn = nullptr;
1801 // Okay, we can dead code eliminate everything except this case. Emit the
1802 // specified series of statements and we're good.
1803 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1804 EmitStmt(CaseStmts[i]);
1805 incrementProfileCounter(&S);
1807 // Now we want to restore the saved switch instance so that nested
1808 // switches continue to function properly.
1809 SwitchInsn = SavedSwitchInsn;
1811 return;
1812 }
1813 }
1815 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1817 RunCleanupsScope ConditionScope(*this);
1819 if (S.getInit())
1820 EmitStmt(S.getInit());
1822 if (S.getConditionVariable())
1823 EmitDecl(*S.getConditionVariable());
1824 llvm::Value *CondV = EmitScalarExpr(S.getCond());
1826 // Create basic block to hold stuff that comes after switch
1827 // statement. We also need to create a default block now so that
1828 // explicit case range tests can have a place to jump to on
1829 // failure.
1830 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1831 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
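// For reference (editorial sketch; block names are illustrative): a statement
//   switch (x) { case 0: f(); break; default: g(); }
// lowers to a terminator of the form
//   switch i32 %x, label %sw.default [ i32 0, label %sw.bb ]
// where %sw.default is the DefaultBlock created above.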
1832 if (PGO.haveRegionCounts()) {
1833 // Walk the SwitchCase list to find how many there are.
1834 uint64_t DefaultCount = 0;
1835 unsigned NumCases = 0;
1836 for (const SwitchCase *Case = S.getSwitchCaseList();
1837 Case;
1838 Case = Case->getNextSwitchCase()) {
1839 if (isa<DefaultStmt>(Case))
1840 DefaultCount = getProfileCount(Case);
1841 NumCases += 1;
1842 }
1843 SwitchWeights = new SmallVector<uint64_t, 16>();
1844 SwitchWeights->reserve(NumCases);
1845 // The default needs to be first. We store the edge count, so we already
1846 // know the right weight.
1847 SwitchWeights->push_back(DefaultCount);
1848 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
1849 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
1850 // Initialize the default case.
1851 SwitchLikelihood->push_back(Stmt::LH_None);
1852 }
1854 CaseRangeBlock = DefaultBlock;
1856 // Clear the insertion point to indicate we are in unreachable code.
1857 Builder.ClearInsertionPoint();
1859 // All break statements jump to NextBlock. If BreakContinueStack is non-empty
1860 // then reuse the last ContinueBlock.
1861 JumpDest OuterContinue;
1862 if (!BreakContinueStack.empty())
1863 OuterContinue = BreakContinueStack.back().ContinueBlock;
1865 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1867 // Emit switch body.
1868 EmitStmt(S.getBody());
1870 BreakContinueStack.pop_back();
1872 // Update the default block in case explicit case range tests have
1873 // been chained on top.
1874 SwitchInsn->setDefaultDest(CaseRangeBlock);
1876 // If a default was never emitted:
1877 if (!DefaultBlock->getParent()) {
1878 // If we have cleanups, emit the default block so that there's a
1879 // place to jump through the cleanups from.
1880 if (ConditionScope.requiresCleanups()) {
1881 EmitBlock(DefaultBlock);
1883 // Otherwise, just forward the default block to the switch end.
1884 } else {
1885 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1886 delete DefaultBlock;
1887 }
1888 }
1890 ConditionScope.ForceCleanup();
1892 // Emit continuation.
1893 EmitBlock(SwitchExit.getBlock(), true);
1894 incrementProfileCounter(&S);
1896 // If the switch has a condition wrapped by __builtin_unpredictable,
1897 // create metadata that specifies that the switch is unpredictable.
1898 // Don't bother if not optimizing because that metadata would not be used.
1899 auto *Call = dyn_cast<CallExpr>(S.getCond());
1900 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1901 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1902 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1903 llvm::MDBuilder MDHelper(getLLVMContext());
1904 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1905 MDHelper.createUnpredictable());
1906 }
1907 }
1909 if (SwitchWeights) {
1910 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1911 "switch weights do not match switch cases");
1912 // If there's only one jump destination there's no sense weighting it.
1913 if (SwitchWeights->size() > 1)
1914 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1915 createProfileWeights(*SwitchWeights));
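// (Editorial note: the attached metadata has the form
//   !prof !{!"branch_weights", i32 <default>, i32 <case 0>, ...}
// with the default edge weight first, matching the push order above.)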
1916 delete SwitchWeights;
1917 } else if (SwitchLikelihood) {
1918 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
1919 "switch likelihoods do not match switch cases");
1920 Optional<SmallVector<uint64_t, 16>> LHW =
1921 getLikelihoodWeights(*SwitchLikelihood);
1922 if (LHW) {
1923 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
1924 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1925 createProfileWeights(*LHW));
1926 }
1927 delete SwitchLikelihood;
1928 }
1929 SwitchInsn = SavedSwitchInsn;
1930 SwitchWeights = SavedSwitchWeights;
1931 SwitchLikelihood = SavedSwitchLikelihood;
1932 CaseRangeBlock = SavedCRBlock;
1933 }
1935 static std::string
1936 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
1937 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
1938 std::string Result;
1940 while (*Constraint) {
1941 switch (*Constraint) {
1942 default:
1943 Result += Target.convertConstraint(Constraint);
1944 break;
1945 // Ignore these
1946 case '*':
1947 case '?':
1948 case '!':
1949 case '=': // Will see this and the following in multi-alternative constraints.
1950 case '+':
1951 break;
1952 case '#': // Ignore the rest of the constraint alternative.
1953 while (Constraint[1] && Constraint[1] != ',')
1954 Constraint++;
1955 break;
1956 case '&':
1957 case '%':
1958 Result += *Constraint;
1959 while (Constraint[1] && Constraint[1] == *Constraint)
1960 Constraint++;
1961 break;
1962 case ',':
1963 Result += "|";
1964 break;
1965 case 'g':
1966 Result += "imr";
1967 break;
1968 case '[': {
1969 assert(OutCons &&
1970 "Must pass output names to constraints with a symbolic name");
1972 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
1973 assert(result && "Could not resolve symbolic name"); (void)result;
1974 Result += llvm::utostr(Index);
1975 break;
1976 }
1977 }
1979 Constraint++;
1980 }
1982 return Result;
1983 }
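// Editorial example: for the constraint string "g,r" this produces "imr|r"
// -- 'g' expands to "imr" and ',' becomes LLVM's alternative separator '|';
// other letters go through convertConstraint, so the exact result is
// target-dependent.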
1985 /// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
1986 /// as using a particular register, add that register as a constraint that will
1987 /// be used in this asm stmt.
1988 static std::string
1989 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
1990 const TargetInfo &Target, CodeGenModule &CGM,
1991 const AsmStmt &Stmt, const bool EarlyClobber,
1992 std::string *GCCReg = nullptr) {
1993 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
1994 if (!AsmDeclRef)
1995 return Constraint;
1996 const ValueDecl &Value = *AsmDeclRef->getDecl();
1997 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
1998 if (!Variable)
1999 return Constraint;
2000 if (Variable->getStorageClass() != SC_Register)
2001 return Constraint;
2002 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2003 if (!Attr)
2004 return Constraint;
2005 StringRef Register = Attr->getLabel();
2006 assert(Target.isValidGCCRegisterName(Register));
2007 // We're using validateOutputConstraint here because we only care if
2008 // this is a register constraint.
2009 TargetInfo::ConstraintInfo Info(Constraint, "");
2010 if (Target.validateOutputConstraint(Info) &&
2011 !Info.allowsRegister()) {
2012 CGM.ErrorUnsupported(&Stmt, "__asm__");
2013 return Constraint;
2014 }
2015 // Canonicalize the register here before returning it.
2016 Register = Target.getNormalizedGCCRegisterName(Register);
2017 if (GCCReg != nullptr)
2018 *GCCReg = Register.str();
2019 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2020 }
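// Editorial example (x86 register names assumed): given
//   register int rv asm("ebx");
//   asm("..." : "=r"(rv));
// the "r" constraint is rewritten to "{ebx}" ("&{ebx}" for earlyclobber),
// pinning the operand to the named register.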
2022 llvm::Value*
2023 CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
2024 LValue InputValue, QualType InputType,
2025 std::string &ConstraintStr,
2026 SourceLocation Loc) {
2027 llvm::Value *Arg;
2028 if (Info.allowsRegister() || !Info.allowsMemory()) {
2029 if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
2030 Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
2031 } else {
2032 llvm::Type *Ty = ConvertType(InputType);
2033 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2034 if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
2035 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2036 Ty = llvm::PointerType::getUnqual(Ty);
2038 Arg = Builder.CreateLoad(
2039 Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
2040 } else {
2041 Arg = InputValue.getPointer(*this);
2042 ConstraintStr += '*';
2043 }
2044 }
2045 } else {
2046 Arg = InputValue.getPointer(*this);
2047 ConstraintStr += '*';
2048 }
2050 return Arg;
2051 }
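// (Editorial note: the '*' marks the operand as indirect in LLVM IR, so a
// memory operand such as "m"(x) becomes constraint "*m" and the argument
// passed is the address of x rather than its value.)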
2053 llvm::Value* CodeGenFunction::EmitAsmInput(
2054 const TargetInfo::ConstraintInfo &Info,
2055 const Expr *InputExpr,
2056 std::string &ConstraintStr) {
2057 // If this can't be a register or memory, i.e., has to be a constant
2058 // (immediate or symbolic), try to emit it as such.
2059 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2060 if (Info.requiresImmediateConstant()) {
2061 Expr::EvalResult EVResult;
2062 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2064 llvm::APSInt IntResult;
2065 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2066 getContext()))
2067 return llvm::ConstantInt::get(getLLVMContext(), IntResult);
2068 }
2070 Expr::EvalResult Result;
2071 if (InputExpr->EvaluateAsInt(Result, getContext()))
2072 return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
2073 }
2075 if (Info.allowsRegister() || !Info.allowsMemory())
2076 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2077 return EmitScalarExpr(InputExpr);
2078 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2079 return EmitScalarExpr(InputExpr);
2080 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2081 LValue Dest = EmitLValue(InputExpr);
2082 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2083 InputExpr->getExprLoc());
2084 }
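// Editorial example (hypothetical snippet): in asm("foo %0" :: "n"(42)),
// the "n" constraint allows neither a register nor memory, so the operand
// is emitted as the immediate i32 42 instead of a run-time load.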
2086 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2087 /// asm call instruction. The !srcloc MDNode contains a list of constant
2088 /// integers which are the source locations of the start of each line in the
2089 /// asm string.
2090 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2091 CodeGenFunction &CGF) {
2092 SmallVector<llvm::Metadata *, 8> Locs;
2093 // Add the location of the first line to the MDNode.
2094 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2095 CGF.Int32Ty, Str->getBeginLoc().getRawEncoding())));
2096 StringRef StrVal = Str->getString();
2097 if (!StrVal.empty()) {
2098 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2099 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2100 unsigned StartToken = 0;
2101 unsigned ByteOffset = 0;
2103 // Add the location of the start of each subsequent line of the asm to the
2104 // MDNode.
2105 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2106 if (StrVal[i] != '\n') continue;
2107 SourceLocation LineLoc = Str->getLocationOfByte(
2108 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2109 Locs.push_back(llvm::ConstantAsMetadata::get(
2110 llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
2111 }
2112 }
2114 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2115 }
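// (Editorial sketch: for a two-line asm string this yields metadata like
//   !srcloc !{i32 <loc of line 1>, i32 <loc of line 2>}
// which lets diagnostics point at the exact asm line that failed.)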
2117 static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2118 bool ReadOnly, bool ReadNone, bool NoMerge,
2119 const AsmStmt &S,
2120 const std::vector<llvm::Type *> &ResultRegTypes,
2121 CodeGenFunction &CGF,
2122 std::vector<llvm::Value *> &RegResults) {
2123 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2124 llvm::Attribute::NoUnwind);
2125 if (NoMerge)
2126 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2127 llvm::Attribute::NoMerge);
2128 // Attach readnone and readonly attributes.
2129 if (!HasSideEffect) {
2130 if (ReadNone)
2131 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2132 llvm::Attribute::ReadNone);
2133 else if (ReadOnly)
2134 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2135 llvm::Attribute::ReadOnly);
2136 }
2138 // Slap the source location of the inline asm into a !srcloc metadata on the
2139 // call.
2140 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2141 Result.setMetadata("srcloc",
2142 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2143 else {
2144 // At least put the line number on MS inline asm blobs.
2145 llvm::Constant *Loc = llvm::ConstantInt::get(CGF.Int32Ty,
2146 S.getAsmLoc().getRawEncoding());
2147 Result.setMetadata("srcloc",
2148 llvm::MDNode::get(CGF.getLLVMContext(),
2149 llvm::ConstantAsMetadata::get(Loc)));
2150 }
2152 if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2153 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2154 // convergent (meaning, they may call an intrinsically convergent op, such
2155 // as bar.sync, and so can't have certain optimizations applied around
2156 // them).
2157 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2158 llvm::Attribute::Convergent);
2159 // Extract all of the register value results from the asm.
2160 if (ResultRegTypes.size() == 1) {
2161 RegResults.push_back(&Result);
2162 } else {
2163 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2164 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2165 RegResults.push_back(Tmp);
2166 }
2167 }
2168 }
2170 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2171 // Assemble the final asm string.
2172 std::string AsmString = S.generateAsmString(getContext());
2174 // Get all the output and input constraints together.
2175 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2176 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2178 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2179 StringRef Name;
2180 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2181 Name = GAS->getOutputName(i);
2182 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2183 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2184 assert(IsValid && "Failed to parse output constraint");
2185 OutputConstraintInfos.push_back(Info);
2186 }
2188 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2189 StringRef Name;
2190 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2191 Name = GAS->getInputName(i);
2192 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2193 bool IsValid =
2194 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2195 assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
2196 InputConstraintInfos.push_back(Info);
2197 }
2199 std::string Constraints;
2201 std::vector<LValue> ResultRegDests;
2202 std::vector<QualType> ResultRegQualTys;
2203 std::vector<llvm::Type *> ResultRegTypes;
2204 std::vector<llvm::Type *> ResultTruncRegTypes;
2205 std::vector<llvm::Type *> ArgTypes;
2206 std::vector<llvm::Value*> Args;
2207 llvm::BitVector ResultTypeRequiresCast;
2209 // Keep track of inout constraints.
2210 std::string InOutConstraints;
2211 std::vector<llvm::Value*> InOutArgs;
2212 std::vector<llvm::Type*> InOutArgTypes;
2214 // Keep track of out constraints for tied input operand.
2215 std::vector<std::string> OutputConstraints;
2217 // Keep track of defined physregs.
2218 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2220 // An inline asm can be marked readonly if it meets the following conditions:
2221 // - it doesn't have any side effects
2222 // - it doesn't clobber memory
2223 // - it doesn't return a value by-reference
2224 // It can be marked readnone if it doesn't have any input memory constraints
2225 // in addition to meeting the conditions listed above.
2226 bool ReadOnly = true, ReadNone = true;
2228 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2229 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2231 // Simplify the output constraint.
2232 std::string OutputConstraint(S.getOutputConstraint(i));
2233 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2234 getTarget(), &OutputConstraintInfos);
2236 const Expr *OutExpr = S.getOutputExpr(i);
2237 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2239 std::string GCCReg;
2240 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2241 getTarget(), CGM, S,
2242 Info.earlyClobber(),
2243 &GCCReg);
2244 // Give an error on multiple outputs to same physreg.
2245 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2246 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2248 OutputConstraints.push_back(OutputConstraint);
2249 LValue Dest = EmitLValue(OutExpr);
2250 if (!Constraints.empty())
2251 Constraints += ',';
2253 // If this is a register output, then make the inline asm return it
2254 // by-value. If this is a memory result, return the value by-reference.
2255 bool isScalarizableAggregate =
2256 hasAggregateEvaluationKind(OutExpr->getType());
2257 if (!Info.allowsMemory() && (hasScalarEvaluationKind(OutExpr->getType()) ||
2258 isScalarizableAggregate)) {
2259 Constraints += "=" + OutputConstraint;
2260 ResultRegQualTys.push_back(OutExpr->getType());
2261 ResultRegDests.push_back(Dest);
2262 ResultTruncRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
2263 if (Info.allowsRegister() && isScalarizableAggregate) {
2264 ResultTypeRequiresCast.push_back(true);
2265 unsigned Size = getContext().getTypeSize(OutExpr->getType());
2266 llvm::Type *ConvTy = llvm::IntegerType::get(getLLVMContext(), Size);
2267 ResultRegTypes.push_back(ConvTy);
2268 } else {
2269 ResultTypeRequiresCast.push_back(false);
2270 ResultRegTypes.push_back(ResultTruncRegTypes.back());
2271 }
2272 // If this output is tied to an input, and if the input is larger, then
2273 // we need to set the actual result type of the inline asm node to be the
2274 // same as the input type.
2275 if (Info.hasMatchingInput()) {
2276 unsigned InputNo;
2277 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2278 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2279 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2280 break;
2281 }
2282 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2284 QualType InputTy = S.getInputExpr(InputNo)->getType();
2285 QualType OutputType = OutExpr->getType();
2287 uint64_t InputSize = getContext().getTypeSize(InputTy);
2288 if (getContext().getTypeSize(OutputType) < InputSize) {
2289 // Form the asm to return the value as a larger integer or fp type.
2290 ResultRegTypes.back() = ConvertType(InputTy);
2291 }
2292 }
2293 if (llvm::Type* AdjTy =
2294 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2295 ResultRegTypes.back()))
2296 ResultRegTypes.back() = AdjTy;
2297 else {
2298 CGM.getDiags().Report(S.getAsmLoc(),
2299 diag::err_asm_invalid_type_in_input)
2300 << OutExpr->getType() << OutputConstraint;
2301 }
2303 // Update largest vector width for any vector types.
2304 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2305 LargestVectorWidth =
2306 std::max((uint64_t)LargestVectorWidth,
2307 VT->getPrimitiveSizeInBits().getKnownMinSize());
2309 llvm::Type *DestAddrTy = Dest.getAddress(*this).getType();
2310 llvm::Value *DestPtr = Dest.getPointer(*this);
2311 // Matrix types in memory are represented by arrays, but accessed through
2312 // vector pointers, with the alignment specified on the access operation.
2313 // For inline assembly, update pointer arguments to use vector pointers.
2315 // Otherwise there will be a mismatch if the matrix is also an
2315 // input-argument which is represented as vector.
2316 if (isa<MatrixType>(OutExpr->getType().getCanonicalType())) {
2317 DestAddrTy = llvm::PointerType::get(
2318 ConvertType(OutExpr->getType()),
2319 cast<llvm::PointerType>(DestAddrTy)->getAddressSpace());
2320 DestPtr = Builder.CreateBitCast(DestPtr, DestAddrTy);
2321 }
2322 ArgTypes.push_back(DestAddrTy);
2323 Args.push_back(DestPtr);
2324 Constraints += "=*";
2325 Constraints += OutputConstraint;
2326 ReadOnly = ReadNone = false;
2327 }
2329 if (Info.isReadWrite()) {
2330 InOutConstraints += ',';
2332 const Expr *InputExpr = S.getOutputExpr(i);
2333 llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
2334 InOutConstraints,
2335 InputExpr->getExprLoc());
2337 if (llvm::Type* AdjTy =
2338 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2339 Arg->getType()))
2340 Arg = Builder.CreateBitCast(Arg, AdjTy);
2342 // Update largest vector width for any vector types.
2343 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2344 LargestVectorWidth =
2345 std::max((uint64_t)LargestVectorWidth,
2346 VT->getPrimitiveSizeInBits().getKnownMinSize());
2347 // Only tie earlyclobber physregs.
2348 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2349 InOutConstraints += llvm::utostr(i);
2350 else
2351 InOutConstraints += OutputConstraint;
2353 InOutArgTypes.push_back(Arg->getType());
2354 InOutArgs.push_back(Arg);
2355 }
2356 }
2358 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2359 // to the return value slot. Only do this when returning in registers.
2360 if (isa<MSAsmStmt>(&S)) {
2361 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2362 if (RetAI.isDirect() || RetAI.isExtend()) {
2363 // Make a fake lvalue for the return value slot.
2364 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2365 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2366 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2367 ResultRegDests, AsmString, S.getNumOutputs());
2368 SawAsmBlock = true;
2369 }
2370 }
2372 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2373 const Expr *InputExpr = S.getInputExpr(i);
2375 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2377 if (Info.allowsMemory())
2378 ReadNone = false;
2380 if (!Constraints.empty())
2381 Constraints += ',';
2383 // Simplify the input constraint.
2384 std::string InputConstraint(S.getInputConstraint(i));
2385 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2386 &OutputConstraintInfos);
2388 InputConstraint = AddVariableConstraints(
2389 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2390 getTarget(), CGM, S, false /* No EarlyClobber */);
2392 std::string ReplaceConstraint(InputConstraint);
2393 llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
2395 // If this input argument is tied to a larger output result, extend the
2396 // input to be the same size as the output. The LLVM backend wants to see
2397 // the input and output of a matching constraint be the same size. Note
2398 // that GCC does not define what the top bits are here. We use zext because
2399 // that is usually cheaper, but LLVM IR should really get an anyext someday.
2400 if (Info.hasTiedOperand()) {
2401 unsigned Output = Info.getTiedOperand();
2402 QualType OutputType = S.getOutputExpr(Output)->getType();
2403 QualType InputTy = InputExpr->getType();
2405 if (getContext().getTypeSize(OutputType) >
2406 getContext().getTypeSize(InputTy)) {
2407 // Use ptrtoint as appropriate so that we can do our extension.
2408 if (isa<llvm::PointerType>(Arg->getType()))
2409 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2410 llvm::Type *OutputTy = ConvertType(OutputType);
2411 if (isa<llvm::IntegerType>(OutputTy))
2412 Arg = Builder.CreateZExt(Arg, OutputTy);
2413 else if (isa<llvm::PointerType>(OutputTy))
2414 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2415 else {
2416 assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
2417 Arg = Builder.CreateFPExt(Arg, OutputTy);
2418 }
2419 }
2420 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2421 ReplaceConstraint = OutputConstraints[Output];
2423 if (llvm::Type* AdjTy =
2424 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2425 Arg->getType()))
2426 Arg = Builder.CreateBitCast(Arg, AdjTy);
2427 else
2428 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2429 << InputExpr->getType() << InputConstraint;
2431 // Update largest vector width for any vector types.
2432 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2433 LargestVectorWidth =
2434 std::max((uint64_t)LargestVectorWidth,
2435 VT->getPrimitiveSizeInBits().getKnownMinSize());
2437 ArgTypes.push_back(Arg->getType());
2438 Args.push_back(Arg);
2439 Constraints += InputConstraint;
2440 }
2442 // Labels
2443 SmallVector<llvm::BasicBlock *, 16> Transfer;
2444 llvm::BasicBlock *Fallthrough = nullptr;
2445 bool IsGCCAsmGoto = false;
2446 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2447 IsGCCAsmGoto = GS->isAsmGoto();
2448 if (IsGCCAsmGoto) {
2449 for (const auto *E : GS->labels()) {
2450 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2451 Transfer.push_back(Dest.getBlock());
2452 llvm::BlockAddress *BA =
2453 llvm::BlockAddress::get(CurFn, Dest.getBlock());
2454 Args.push_back(BA);
2455 ArgTypes.push_back(BA->getType());
2456 if (!Constraints.empty())
2457 Constraints += ',';
2458 Constraints += 'X';
2459 }
2460 Fallthrough = createBasicBlock("asm.fallthrough");
2464 // Append the "input" part of inout constraints last.
2465 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2466 ArgTypes.push_back(InOutArgTypes[i]);
2467 Args.push_back(InOutArgs[i]);
2468 }
2469 Constraints += InOutConstraints;
2471 // Clobbers
2472 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2473 StringRef Clobber = S.getClobber(i);
2475 if (Clobber == "memory")
2476 ReadOnly = ReadNone = false;
2477 else if (Clobber != "cc") {
2478 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2479 if (CGM.getCodeGenOpts().StackClashProtector &&
2480 getTarget().isSPRegName(Clobber)) {
2481 CGM.getDiags().Report(S.getAsmLoc(),
2482 diag::warn_stack_clash_protection_inline_asm);
2483 }
2484 }
2486 if (!Constraints.empty())
2487 Constraints += ',';
2489 Constraints += "~{";
2490 Constraints += Clobber;
2491 Constraints += '}';
2492 }
2494 // Add machine specific clobbers
2495 std::string MachineClobbers = getTarget().getClobbers();
2496 if (!MachineClobbers.empty()) {
2497 if (!Constraints.empty())
2498 Constraints += ',';
2499 Constraints += MachineClobbers;
2500 }
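// Editorial example (x86 machine clobbers assumed): for
//   asm volatile("add %1, %0" : "+r"(x) : "r"(y) : "cc");
// the final constraint string is
//   "=r,r,0,~{cc},~{dirflag},~{fpsr},~{flags}"
// -- outputs first, then inputs, then the tied inout "0", then clobbers.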
2502 llvm::Type *ResultType;
2503 if (ResultRegTypes.empty())
2504 ResultType = VoidTy;
2505 else if (ResultRegTypes.size() == 1)
2506 ResultType = ResultRegTypes[0];
2507 else
2508 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2510 llvm::FunctionType *FTy =
2511 llvm::FunctionType::get(ResultType, ArgTypes, false);
2513 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2514 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2515 llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
2516 llvm::InlineAsm *IA =
2517 llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
2518 /* IsAlignStack */ false, AsmDialect);
2519 std::vector<llvm::Value*> RegResults;
2520 if (IsGCCAsmGoto) {
2521 llvm::CallBrInst *Result =
2522 Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
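// (Editorial sketch: the result is a 'callbr' terminator such as
//   callbr void asm sideeffect "...", "X"(i8* blockaddress(@fn, %target))
//       to label %asm.fallthrough [label %target]
// so control may transfer to any listed label or fall through.)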
2523 EmitBlock(Fallthrough);
2524 UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
2525 ReadNone, InNoMergeAttributedStmt, S, ResultRegTypes,
2526 *this, RegResults);
2527 } else {
2528 llvm::CallInst *Result =
2529 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2530 UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
2531 ReadNone, InNoMergeAttributedStmt, S, ResultRegTypes,
2532 *this, RegResults);
2533 }
2535 assert(RegResults.size() == ResultRegTypes.size());
2536 assert(RegResults.size() == ResultTruncRegTypes.size());
2537 assert(RegResults.size() == ResultRegDests.size());
2538 // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2539 // in which case its size may grow.
2540 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2541 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2542 llvm::Value *Tmp = RegResults[i];
2544 // If the result type of the LLVM IR asm doesn't match the result type of
2545 // the expression, do the conversion.
2546 if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2547 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2549 // Truncate the integer result to the right size; note that TruncTy can be
2550 // a pointer.
2551 if (TruncTy->isFloatingPointTy())
2552 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2553 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2554 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2555 Tmp = Builder.CreateTrunc(Tmp,
2556 llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2557 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2558 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2559 uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2560 Tmp = Builder.CreatePtrToInt(Tmp,
2561 llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2562 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2563 } else if (TruncTy->isIntegerTy()) {
2564 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2565 } else if (TruncTy->isVectorTy()) {
2566 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2567 }
2568 }
2570 LValue Dest = ResultRegDests[i];
2571 // ResultTypeRequiresCast elements correspond to the first
2572 // ResultTypeRequiresCast.size() elements of RegResults.
2573 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2574 unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
2575 Address A = Builder.CreateBitCast(Dest.getAddress(*this),
2576 ResultRegTypes[i]->getPointerTo());
2577 QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
2578 if (Ty.isNull()) {
2579 const Expr *OutExpr = S.getOutputExpr(i);
2580 CGM.Error(
2581 OutExpr->getExprLoc(),
2582 "impossible constraint in asm: can't store value into a register");
2585 Dest = MakeAddrLValue(A, Ty);
2586 }
2587 EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2588 }
2589 }
2591 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2592 const RecordDecl *RD = S.getCapturedRecordDecl();
2593 QualType RecordTy = getContext().getRecordType(RD);
2595 // Initialize the captured struct.
2596 LValue SlotLV =
2597 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2599 RecordDecl::field_iterator CurField = RD->field_begin();
2600 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2601 E = S.capture_init_end();
2602 I != E; ++I, ++CurField) {
2603 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2604 if (CurField->hasCapturedVLAType()) {
2605 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
2606 } else {
2607 EmitInitializerForField(*CurField, LV, *I);
2608 }
2609 }
2611 return SlotLV;
2612 }
2614 /// Generate an outlined function for the body of a CapturedStmt, store any
2615 /// captured variables into the captured struct, and call the outlined function.
2616 llvm::Function *
2617 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2618 LValue CapStruct = InitCapturedStruct(S);
2620 // Emit the CapturedDecl
2621 CodeGenFunction CGF(CGM, true);
2622 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2623 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2624 delete CGF.CapturedStmtInfo;
2626 // Emit call to the helper function.
2627 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2629 return F;
2630 }
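// Editorial example: for a captured region such as '#pragma omp parallel',
// the body is outlined into an internal helper (named "__captured_stmt" by
// default) roughly of the form
//   define internal void @__captured_stmt(%struct.anon* %context)
// and the captures travel through the agg.captured temporary built above.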
2632 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2633 LValue CapStruct = InitCapturedStruct(S);
2634 return CapStruct.getAddress(*this);
2635 }
2637 /// Creates the outlined function for a CapturedStmt.
2638 llvm::Function *
2639 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2640 assert(CapturedStmtInfo &&
2641 "CapturedStmtInfo should be set when generating the captured function");
2642 const CapturedDecl *CD = S.getCapturedDecl();
2643 const RecordDecl *RD = S.getCapturedRecordDecl();
2644 SourceLocation Loc = S.getBeginLoc();
2645 assert(CD->hasBody() && "missing CapturedDecl body");
2647 // Build the argument list.
2648 ASTContext &Ctx = CGM.getContext();
2649 FunctionArgList Args;
2650 Args.append(CD->param_begin(), CD->param_end());
2652 // Create the function declaration.
2653 const CGFunctionInfo &FuncInfo =
2654 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2655 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2657 llvm::Function *F =
2658 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2659 CapturedStmtInfo->getHelperName(), &CGM.getModule());
2660 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2661 if (CD->isNothrow())
2662 F->addFnAttr(llvm::Attribute::NoUnwind);
2664 // Generate the function.
2665 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2666 CD->getBody()->getBeginLoc());
2667 // Set the context parameter in CapturedStmtInfo.
2668 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2669 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2671 // Initialize variable-length arrays.
2672 LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2673 Ctx.getTagDeclType(RD));
2674 for (auto *FD : RD->fields()) {
2675 if (FD->hasCapturedVLAType()) {
2676 auto *ExprArg =
2677 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2678 .getScalarVal();
2679 auto VAT = FD->getCapturedVLAType();
2680 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2681 }
2682 }
2684 // If 'this' is captured, load it into CXXThisValue.
2685 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2686 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2687 LValue ThisLValue = EmitLValueForField(Base, FD);
2688 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2689 }
2691 PGO.assignRegionCounters(GlobalDecl(CD), F);
2692 CapturedStmtInfo->EmitBody(*this, CD->getBody());
2693 FinishFunction(CD->getBodyRBrace());
2695 return F;
2696 }