1 //===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This contains code to emit Stmt nodes as LLVM code.
12 //===----------------------------------------------------------------------===//
14 #include "CodeGenFunction.h"
15 #include "CGDebugInfo.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/StmtVisitor.h"
19 #include "clang/Basic/PrettyStackTrace.h"
20 #include "clang/Basic/TargetInfo.h"
21 #include "clang/Sema/LoopHint.h"
22 #include "clang/Sema/SemaDiagnostic.h"
23 #include "llvm/ADT/StringExtras.h"
24 #include "llvm/IR/CallSite.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/IR/InlineAsm.h"
27 #include "llvm/IR/Intrinsics.h"
28 using namespace clang;
29 using namespace CodeGen;
31 //===----------------------------------------------------------------------===//
32 //                              Statement Emission
33 //===----------------------------------------------------------------------===//
35 void CodeGenFunction::EmitStopPoint(const Stmt *S) {
36 if (CGDebugInfo *DI = getDebugInfo()) {
37 SourceLocation Loc;
38 Loc = S->getLocStart();
39 DI->EmitLocation(Builder, Loc);
45 void CodeGenFunction::EmitStmt(const Stmt *S) {
46 assert(S && "Null statement?");
47 PGO.setCurrentStmt(S);
49 // These statements have their own debug info handling.
50 if (EmitSimpleStmt(S))
53 // Check if we are generating unreachable code.
54 if (!HaveInsertPoint()) {
55 // If so, and the statement doesn't contain a label, then we do not need to
56 // generate actual code. This is safe because (1) the current point is
57 // unreachable, so we don't need to execute the code, and (2) we've already
58 // handled the statements which update internal data structures (like the
59 // local variable map) which could be used by subsequent statements.
60 if (!ContainsLabel(S)) {
61 // Verify that any decl statements were handled as simple; they may be in
62 // scope of subsequent reachable statements.
63 assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
67 // Otherwise, make a new block to hold the code.
71 // Generate a stoppoint if we are emitting debug info.
74 switch (S->getStmtClass()) {
75 case Stmt::NoStmtClass:
76 case Stmt::CXXCatchStmtClass:
77 case Stmt::SEHExceptStmtClass:
78 case Stmt::SEHFinallyStmtClass:
79 case Stmt::MSDependentExistsStmtClass:
80 llvm_unreachable("invalid statement class to emit generically");
81 case Stmt::NullStmtClass:
82 case Stmt::CompoundStmtClass:
83 case Stmt::DeclStmtClass:
84 case Stmt::LabelStmtClass:
85 case Stmt::AttributedStmtClass:
86 case Stmt::GotoStmtClass:
87 case Stmt::BreakStmtClass:
88 case Stmt::ContinueStmtClass:
89 case Stmt::DefaultStmtClass:
90 case Stmt::CaseStmtClass:
91 case Stmt::SEHLeaveStmtClass:
92 llvm_unreachable("should have emitted these statements as simple");
94 #define STMT(Type, Base)
95 #define ABSTRACT_STMT(Op)
96 #define EXPR(Type, Base) \
97 case Stmt::Type##Class:
98 #include "clang/AST/StmtNodes.inc"
100 // Remember the block we came in on.
101 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
102 assert(incoming && "expression emission must have an insertion point");
104 EmitIgnoredExpr(cast<Expr>(S));
106 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
107 assert(outgoing && "expression emission cleared block!");
109 // The expression emitters assume (reasonably!) that the insertion
110 // point is always set. To maintain that, the call-emission code
111 // for noreturn functions has to enter a new block with no
112 // predecessors. We want to kill that block and mark the current
113 // insertion point unreachable in the common case of a call like
114 // "exit();". Since expression emission doesn't otherwise create
115 // blocks with no predecessors, we can just test for that.
116 // However, we must be careful not to do this to our incoming
117 // block, because *statement* emission does sometimes create
118 // reachable blocks which will have no predecessors until later in
119 // the function. This occurs with, e.g., labels that are not
120 // reachable by fallthrough.
121 if (incoming != outgoing && outgoing->use_empty()) {
122 outgoing->eraseFromParent();
123 Builder.ClearInsertionPoint();
128 case Stmt::IndirectGotoStmtClass:
129 EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
131 case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
132 case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break;
133 case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break;
134 case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break;
136 case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
138 case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
139 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
140 case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
141 case Stmt::CapturedStmtClass: {
142 const CapturedStmt *CS = cast<CapturedStmt>(S);
143 EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
146 case Stmt::ObjCAtTryStmtClass:
147 EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
149 case Stmt::ObjCAtCatchStmtClass:
151 "@catch statements should be handled by EmitObjCAtTryStmt");
152 case Stmt::ObjCAtFinallyStmtClass:
154 "@finally statements should be handled by EmitObjCAtTryStmt");
155 case Stmt::ObjCAtThrowStmtClass:
156 EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
158 case Stmt::ObjCAtSynchronizedStmtClass:
159 EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
161 case Stmt::ObjCForCollectionStmtClass:
162 EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
164 case Stmt::ObjCAutoreleasePoolStmtClass:
165 EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
168 case Stmt::CXXTryStmtClass:
169 EmitCXXTryStmt(cast<CXXTryStmt>(*S));
171 case Stmt::CXXForRangeStmtClass:
172 EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
174 case Stmt::SEHTryStmtClass:
175 EmitSEHTryStmt(cast<SEHTryStmt>(*S));
177 case Stmt::OMPParallelDirectiveClass:
178 EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
180 case Stmt::OMPSimdDirectiveClass:
181 EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
183 case Stmt::OMPForDirectiveClass:
184 EmitOMPForDirective(cast<OMPForDirective>(*S));
186 case Stmt::OMPForSimdDirectiveClass:
187 EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
189 case Stmt::OMPSectionsDirectiveClass:
190 EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
192 case Stmt::OMPSectionDirectiveClass:
193 EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
195 case Stmt::OMPSingleDirectiveClass:
196 EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
198 case Stmt::OMPMasterDirectiveClass:
199 EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
201 case Stmt::OMPCriticalDirectiveClass:
202 EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
204 case Stmt::OMPParallelForDirectiveClass:
205 EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
207 case Stmt::OMPParallelForSimdDirectiveClass:
208 EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
210 case Stmt::OMPParallelSectionsDirectiveClass:
211 EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
213 case Stmt::OMPTaskDirectiveClass:
214 EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
216 case Stmt::OMPTaskyieldDirectiveClass:
217 EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
219 case Stmt::OMPBarrierDirectiveClass:
220 EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
222 case Stmt::OMPTaskwaitDirectiveClass:
223 EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
225 case Stmt::OMPFlushDirectiveClass:
226 EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
228 case Stmt::OMPOrderedDirectiveClass:
229 EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
231 case Stmt::OMPAtomicDirectiveClass:
232 EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
234 case Stmt::OMPTargetDirectiveClass:
235 EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
237 case Stmt::OMPTeamsDirectiveClass:
238 EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
243 bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
244 switch (S->getStmtClass()) {
245 default: return false;
246 case Stmt::NullStmtClass: break;
247 case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
248 case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
249 case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
250 case Stmt::AttributedStmtClass:
251 EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
252 case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
253 case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
254 case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
255 case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
256 case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
257 case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
263 /// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
264 /// this captures the expression result of the last sub-statement and returns it
265 /// (for use by the statement expression extension).
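///
/// For example (illustrative, not from the surrounding code), with the GNU
/// statement-expression extension
///   int x = ({ foo(); 42; });
/// the value of the last sub-statement (42) is the value of the whole
/// expression, which is what GetLast requests.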
266 llvm::Value* CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
267 AggValueSlot AggSlot) {
268 PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
269 "LLVM IR generation of compound statement ('{}')");
271 // Keep track of the current cleanup stack depth, including debug scopes.
272 LexicalScope Scope(*this, S.getSourceRange());
274 return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
278 CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
280 AggValueSlot AggSlot) {
282 for (CompoundStmt::const_body_iterator I = S.body_begin(),
283 E = S.body_end()-GetLast; I != E; ++I)
286 llvm::Value *RetAlloca = nullptr;
288 // We have to special case labels here. They are statements, but when put
289 // at the end of a statement expression, they yield the value of their
290 // subexpression. Handle this by walking through all labels we encounter,
291 // emitting them before we evaluate the subexpr.
292 const Stmt *LastStmt = S.body_back();
293 while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
294 EmitLabel(LS->getDecl());
295 LastStmt = LS->getSubStmt();
300 QualType ExprTy = cast<Expr>(LastStmt)->getType();
301 if (hasAggregateEvaluationKind(ExprTy)) {
302 EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
304 // We can't return an RValue here because there might be cleanups at
305 // the end of the StmtExpr. Because of that, we have to emit the result
306 // here into a temporary alloca.
307 RetAlloca = CreateMemTemp(ExprTy);
308 EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
317 void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
318 llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
320 // If there is a cleanup stack, then it isn't worth trying to
321 // simplify this block (we would need to remove it from the scope map
322 // and cleanup entry).
323 if (!EHStack.empty())
326 // Can only simplify direct branches.
327 if (!BI || !BI->isUnconditional())
330 // Can only simplify empty blocks.
331 if (BI != BB->begin())
334 BB->replaceAllUsesWith(BI->getSuccessor(0));
335 BI->eraseFromParent();
336 BB->eraseFromParent();
339 void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
340 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
342 // Fall out of the current block (if necessary).
345 if (IsFinished && BB->use_empty()) {
350 // Place the block after the current block, if possible, or else at
351 // the end of the function.
352 if (CurBB && CurBB->getParent())
353 CurFn->getBasicBlockList().insertAfter(CurBB, BB);
355 CurFn->getBasicBlockList().push_back(BB);
356 Builder.SetInsertPoint(BB);
359 void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
360 // Emit a branch from the current block to the target one if this
361 // was a real block. If this was just a fall-through block after a
362 // terminator, don't emit it.
363 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
365 if (!CurBB || CurBB->getTerminator()) {
366 // If there is no insert point or the previous block is already
367 // terminated, don't touch it.
369 // Otherwise, create a fall-through branch.
370 Builder.CreateBr(Target);
373 Builder.ClearInsertionPoint();
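// Emit a block that is already referenced (e.g. by a previously emitted
// branch): place it after the block containing one of its instruction users
// when possible, otherwise at the end of the function, and make it the
// current insertion point.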
376 void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
377 bool inserted = false;
378 for (llvm::User *u : block->users()) {
379 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
380 CurFn->getBasicBlockList().insertAfter(insn->getParent(), block);
387 CurFn->getBasicBlockList().push_back(block);
389 Builder.SetInsertPoint(block);
392 CodeGenFunction::JumpDest
393 CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
394 JumpDest &Dest = LabelMap[D];
395 if (Dest.isValid()) return Dest;
397 // Create, but don't insert, the new block.
398 Dest = JumpDest(createBasicBlock(D->getName()),
399 EHScopeStack::stable_iterator::invalid(),
400 NextCleanupDestIndex++);
404 void CodeGenFunction::EmitLabel(const LabelDecl *D) {
405 // Add this label to the current lexical scope if we're within any
406 // normal cleanups. Jumps "in" to this label --- when permitted by
407 // the language --- may need to be routed around such cleanups.
408 if (EHStack.hasNormalCleanups() && CurLexicalScope)
409 CurLexicalScope->addLabel(D);
411 JumpDest &Dest = LabelMap[D];
413 // If we didn't need a forward reference to this label, just go
414 // ahead and create a destination at the current scope.
415 if (!Dest.isValid()) {
416 Dest = getJumpDestInCurrentScope(D->getName());
418 // Otherwise, we need to give this label a target depth and remove
419 // it from the branch-fixups list.
421 assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
422 Dest.setScopeDepth(EHStack.stable_begin());
423 ResolveBranchFixups(Dest.getBlock());
426 EmitBlock(Dest.getBlock());
427 incrementProfileCounter(D->getStmt());
430 /// Change the cleanup scope of the labels in this lexical scope to
431 /// match the scope of the enclosing context.
432 void CodeGenFunction::LexicalScope::rescopeLabels() {
433 assert(!Labels.empty());
434 EHScopeStack::stable_iterator innermostScope
435 = CGF.EHStack.getInnermostNormalCleanup();
437 // Change the scope depth of all the labels.
438 for (SmallVectorImpl<const LabelDecl*>::const_iterator
439 i = Labels.begin(), e = Labels.end(); i != e; ++i) {
440 assert(CGF.LabelMap.count(*i));
441 JumpDest &dest = CGF.LabelMap.find(*i)->second;
442 assert(dest.getScopeDepth().isValid());
443 assert(innermostScope.encloses(dest.getScopeDepth()));
444 dest.setScopeDepth(innermostScope);
447 // Reparent the labels if the new scope also has cleanups.
448 if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
449 ParentScope->Labels.append(Labels.begin(), Labels.end());
454 void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
455 EmitLabel(S.getDecl());
456 EmitStmt(S.getSubStmt());
459 void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
460 const Stmt *SubStmt = S.getSubStmt();
461 switch (SubStmt->getStmtClass()) {
462 case Stmt::DoStmtClass:
463 EmitDoStmt(cast<DoStmt>(*SubStmt), S.getAttrs());
465 case Stmt::ForStmtClass:
466 EmitForStmt(cast<ForStmt>(*SubStmt), S.getAttrs());
468 case Stmt::WhileStmtClass:
469 EmitWhileStmt(cast<WhileStmt>(*SubStmt), S.getAttrs());
471 case Stmt::CXXForRangeStmtClass:
472 EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*SubStmt), S.getAttrs());
479 void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
480 // If this code is reachable then emit a stop point (if generating
481 // debug info). We have to do this ourselves because we are on the
482 // "simple" statement path.
483 if (HaveInsertPoint())
486 EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
490 void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
491 if (const LabelDecl *Target = S.getConstantTarget()) {
492 EmitBranchThroughCleanup(getJumpDestForLabel(Target));
496 // Ensure that we have an i8* for our PHI node.
497 llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
499 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
501 // Get the basic block for the indirect goto.
502 llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
504 // The first instruction in the block has to be the PHI for the switch dest,
505 // add an entry for this branch.
506 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
508 EmitBranch(IndGotoBB);
511 void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
512 // C99 6.8.4.1: The first substatement is executed if the expression compares
513 // unequal to 0. The condition must be a scalar type.
514 LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
516 if (S.getConditionVariable())
517 EmitAutoVarDecl(*S.getConditionVariable());
519 // If the condition constant folds and can be elided, try to avoid emitting
520 // the condition and the dead arm of the if/else.
522 if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
523 // Figure out which block (then or else) is executed.
524 const Stmt *Executed = S.getThen();
525 const Stmt *Skipped = S.getElse();
526 if (!CondConstant) // Condition false?
527 std::swap(Executed, Skipped);
529 // If the skipped block has no labels in it, just emit the executed block.
530 // This avoids emitting dead code and simplifies the CFG substantially.
531 if (!ContainsLabel(Skipped)) {
533 incrementProfileCounter(&S);
535 RunCleanupsScope ExecutedScope(*this);
542 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
543 // the conditional branch.
544 llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
545 llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
546 llvm::BasicBlock *ElseBlock = ContBlock;
548 ElseBlock = createBasicBlock("if.else");
550 EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
551 getProfileCount(S.getThen()));
553 // Emit the 'then' code.
554 EmitBlock(ThenBlock);
555 incrementProfileCounter(&S);
557 RunCleanupsScope ThenScope(*this);
558 EmitStmt(S.getThen());
560 EmitBranch(ContBlock);
562 // Emit the 'else' code if present.
563 if (const Stmt *Else = S.getElse()) {
565 // There is no need to emit line number for an unconditional branch.
566 auto NL = ApplyDebugLocation::CreateEmpty(*this);
567 EmitBlock(ElseBlock);
570 RunCleanupsScope ElseScope(*this);
574 // There is no need to emit line number for an unconditional branch.
575 auto NL = ApplyDebugLocation::CreateEmpty(*this);
576 EmitBranch(ContBlock);
580 // Emit the continuation block for code after the if.
581 EmitBlock(ContBlock, true);
584 void CodeGenFunction::EmitCondBrHints(llvm::LLVMContext &Context,
585 llvm::BranchInst *CondBr,
586 ArrayRef<const Attr *> Attrs) {
587 // Return if there are no hints.
591 // Add vectorize and unroll hints to the metadata on the conditional branch.
593 // FIXME: Should this really start with a size of 1?
594 SmallVector<llvm::Metadata *, 2> Metadata(1);
595 for (const auto *Attr : Attrs) {
596 const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(Attr);
598 // Skip non-loop-hint attributes.
602 LoopHintAttr::OptionType Option = LH->getOption();
603 LoopHintAttr::LoopHintState State = LH->getState();
604 const char *MetadataName;
606 case LoopHintAttr::Vectorize:
607 case LoopHintAttr::VectorizeWidth:
608 MetadataName = "llvm.loop.vectorize.width";
610 case LoopHintAttr::Interleave:
611 case LoopHintAttr::InterleaveCount:
612 MetadataName = "llvm.loop.interleave.count";
614 case LoopHintAttr::Unroll:
615 // With the unroll loop hint, a non-zero value indicates full unrolling.
616 MetadataName = State == LoopHintAttr::Disable ? "llvm.loop.unroll.disable"
617 : "llvm.loop.unroll.full";
619 case LoopHintAttr::UnrollCount:
620 MetadataName = "llvm.loop.unroll.count";
624 Expr *ValueExpr = LH->getValue();
627 llvm::APSInt ValueAPS =
628 ValueExpr->EvaluateKnownConstInt(CGM.getContext());
629 ValueInt = static_cast<int>(ValueAPS.getSExtValue());
632 llvm::Constant *Value;
633 llvm::MDString *Name;
635 case LoopHintAttr::Vectorize:
636 case LoopHintAttr::Interleave:
637 if (State != LoopHintAttr::Disable) {
638 // FIXME: In the future I will modify the behavior of the metadata
639 // so we can enable/disable vectorization and interleaving separately.
640 Name = llvm::MDString::get(Context, "llvm.loop.vectorize.enable");
641 Value = Builder.getTrue();
644 // Vectorization/interleaving is disabled, set width/count to 1.
647 case LoopHintAttr::VectorizeWidth:
648 case LoopHintAttr::InterleaveCount:
649 case LoopHintAttr::UnrollCount:
650 Name = llvm::MDString::get(Context, MetadataName);
651 Value = llvm::ConstantInt::get(Int32Ty, ValueInt);
653 case LoopHintAttr::Unroll:
654 Name = llvm::MDString::get(Context, MetadataName);
659 SmallVector<llvm::Metadata *, 2> OpValues;
660 OpValues.push_back(Name);
662 OpValues.push_back(llvm::ConstantAsMetadata::get(Value));
664 // Set or overwrite metadata indicated by Name.
665 Metadata.push_back(llvm::MDNode::get(Context, OpValues));
668 // FIXME: This condition is never false. Should it be an assert?
669 if (!Metadata.empty()) {
670 // Add llvm.loop MDNode to CondBr.
671 llvm::MDNode *LoopID = llvm::MDNode::get(Context, Metadata);
672 LoopID->replaceOperandWith(0, LoopID); // First op points to itself.
674 CondBr->setMetadata("llvm.loop", LoopID);
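// As a rough illustration, "#pragma clang loop vectorize_width(4)" is
// expected to annotate the loop's conditional branch with metadata along
// the lines of:
//   br i1 %cond, ..., !llvm.loop !0
//   !0 = !{!0, !1}                               ; first operand is the node itself
//   !1 = !{!"llvm.loop.vectorize.width", i32 4}
// (exact node numbering depends on the module).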
678 void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
679 ArrayRef<const Attr *> WhileAttrs) {
680 // Emit the header for the loop, which will also become
681 // the continue target.
682 JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
683 EmitBlock(LoopHeader.getBlock());
685 LoopStack.push(LoopHeader.getBlock());
687 // Create an exit block for when the condition fails, which will
688 // also become the break target.
689 JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
691 // Store the blocks to use for break and continue.
692 BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
694 // C++ [stmt.while]p2:
695 // When the condition of a while statement is a declaration, the
696 // scope of the variable that is declared extends from its point
697 // of declaration (3.3.2) to the end of the while statement.
699 // The object created in a condition is destroyed and created
700 // with each iteration of the loop.
701 RunCleanupsScope ConditionScope(*this);
703 if (S.getConditionVariable())
704 EmitAutoVarDecl(*S.getConditionVariable());
706 // Evaluate the conditional in the while header. C99 6.8.5.1: The
707 // evaluation of the controlling expression takes place before each
708 // execution of the loop body.
709 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
711 // while(1) is common, avoid extra exit blocks. Be sure
712 // to correctly handle break/continue though.
713 bool EmitBoolCondBranch = true;
714 if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
716 EmitBoolCondBranch = false;
718 // As long as the condition is true, go to the loop body.
719 llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
720 if (EmitBoolCondBranch) {
721 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
722 if (ConditionScope.requiresCleanups())
723 ExitBlock = createBasicBlock("while.exit");
724 llvm::BranchInst *CondBr = Builder.CreateCondBr(
725 BoolCondVal, LoopBody, ExitBlock,
726 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
728 if (ExitBlock != LoopExit.getBlock()) {
729 EmitBlock(ExitBlock);
730 EmitBranchThroughCleanup(LoopExit);
733 // Attach metadata to loop body conditional branch.
734 EmitCondBrHints(LoopBody->getContext(), CondBr, WhileAttrs);
737 // Emit the loop body. We have to emit this in a cleanup scope
738 // because it might be a singleton DeclStmt.
740 RunCleanupsScope BodyScope(*this);
742 incrementProfileCounter(&S);
743 EmitStmt(S.getBody());
746 BreakContinueStack.pop_back();
748 // Immediately force cleanup.
749 ConditionScope.ForceCleanup();
752 // Branch to the loop header again.
753 EmitBranch(LoopHeader.getBlock());
757 // Emit the exit block.
758 EmitBlock(LoopExit.getBlock(), true);
760 // If we skipped emitting the conditional branch, the LoopHeader block is
761 // typically just an unconditional branch; try to erase it.
762 if (!EmitBoolCondBranch)
763 SimplifyForwardingBlocks(LoopHeader.getBlock());
766 void CodeGenFunction::EmitDoStmt(const DoStmt &S,
767 ArrayRef<const Attr *> DoAttrs) {
768 JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
769 JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
771 uint64_t ParentCount = getCurrentProfileCount();
773 // Store the blocks to use for break and continue.
774 BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
776 // Emit the body of the loop.
777 llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
779 LoopStack.push(LoopBody);
781 EmitBlockWithFallThrough(LoopBody, &S);
783 RunCleanupsScope BodyScope(*this);
784 EmitStmt(S.getBody());
787 EmitBlock(LoopCond.getBlock());
789 // C99 6.8.5.2: "The evaluation of the controlling expression takes place
790 // after each execution of the loop body."
792 // Evaluate the condition in the do.cond block.
793 // C99 6.8.5p2/p4: The first substatement is executed if the expression
794 // compares unequal to 0. The condition must be a scalar type.
795 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
797 BreakContinueStack.pop_back();
799 // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
800 // to correctly handle break/continue though.
801 bool EmitBoolCondBranch = true;
802 if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
804 EmitBoolCondBranch = false;
806 // As long as the condition is true, iterate the loop.
807 if (EmitBoolCondBranch) {
808 uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
809 llvm::BranchInst *CondBr = Builder.CreateCondBr(
810 BoolCondVal, LoopBody, LoopExit.getBlock(),
811 createProfileWeightsForLoop(S.getCond(), BackedgeCount));
813 // Attach metadata to loop body conditional branch.
814 EmitCondBrHints(LoopBody->getContext(), CondBr, DoAttrs);
819 // Emit the exit block.
820 EmitBlock(LoopExit.getBlock());
822 // If we skipped emitting the conditional branch, the DoCond block is
823 // typically just an unconditional branch; try to erase it.
824 if (!EmitBoolCondBranch)
825 SimplifyForwardingBlocks(LoopCond.getBlock());
828 void CodeGenFunction::EmitForStmt(const ForStmt &S,
829 ArrayRef<const Attr *> ForAttrs) {
830 JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
832 LexicalScope ForScope(*this, S.getSourceRange());
834 // Evaluate the first part before the loop.
836 EmitStmt(S.getInit());
838 // Start the loop with a block that tests the condition.
839 // If there's an increment, the continue scope will be overwritten
841 JumpDest Continue = getJumpDestInCurrentScope("for.cond");
842 llvm::BasicBlock *CondBlock = Continue.getBlock();
843 EmitBlock(CondBlock);
845 LoopStack.push(CondBlock);
847 // If the for loop doesn't have an increment we can just use the
848 // condition as the continue block. Otherwise we'll need to create
849 // a block for it (in the current scope, i.e. in the scope of the
850 // condition), and that will become our continue block.
852 Continue = getJumpDestInCurrentScope("for.inc");
854 // Store the blocks to use for break and continue.
855 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
857 // Create a cleanup scope for the condition variable cleanups.
858 LexicalScope ConditionScope(*this, S.getSourceRange());
861 // If the for statement has a condition scope, emit the local variable declaration.
863 if (S.getConditionVariable()) {
864 EmitAutoVarDecl(*S.getConditionVariable());
867 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
868 // If there are any cleanups between here and the loop-exit scope,
869 // create a block to stage a loop exit along.
870 if (ForScope.requiresCleanups())
871 ExitBlock = createBasicBlock("for.cond.cleanup");
873 // As long as the condition is true, iterate the loop.
874 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
876 // C99 6.8.5p2/p4: The first substatement is executed if the expression
877 // compares unequal to 0. The condition must be a scalar type.
878 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
879 llvm::BranchInst *CondBr = Builder.CreateCondBr(
880 BoolCondVal, ForBody, ExitBlock,
881 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
883 // Attach metadata to loop body conditional branch.
884 EmitCondBrHints(ForBody->getContext(), CondBr, ForAttrs);
886 if (ExitBlock != LoopExit.getBlock()) {
887 EmitBlock(ExitBlock);
888 EmitBranchThroughCleanup(LoopExit);
893 // Treat it as a non-zero constant. Don't even create a new block for the
894 // body, just fall into it.
896 incrementProfileCounter(&S);
899 // Create a separate cleanup scope for the body, in case it is not
900 // a compound statement.
901 RunCleanupsScope BodyScope(*this);
902 EmitStmt(S.getBody());
905 // If there is an increment, emit it next.
907 EmitBlock(Continue.getBlock());
908 EmitStmt(S.getInc());
911 BreakContinueStack.pop_back();
913 ConditionScope.ForceCleanup();
916 EmitBranch(CondBlock);
918 ForScope.ForceCleanup();
922 // Emit the fall-through block.
923 EmitBlock(LoopExit.getBlock(), true);
927 CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
928 ArrayRef<const Attr *> ForAttrs) {
929 JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
931 LexicalScope ForScope(*this, S.getSourceRange());
933 // Evaluate the first pieces before the loop.
934 EmitStmt(S.getRangeStmt());
935 EmitStmt(S.getBeginEndStmt());
937 // Start the loop with a block that tests the condition.
938 // If there's an increment, the continue scope will be overwritten later.
940 llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
941 EmitBlock(CondBlock);
943 LoopStack.push(CondBlock);
945 // If there are any cleanups between here and the loop-exit scope,
946 // create a block to stage a loop exit along.
947 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
948 if (ForScope.requiresCleanups())
949 ExitBlock = createBasicBlock("for.cond.cleanup");
951 // The loop body, consisting of the specified body and the loop variable.
952 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
954 // The body is executed if the expression, contextually converted
955 // to bool, is true.
956 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
957 llvm::BranchInst *CondBr = Builder.CreateCondBr(
958 BoolCondVal, ForBody, ExitBlock,
959 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
961 // Attach metadata to loop body conditional branch.
962 EmitCondBrHints(ForBody->getContext(), CondBr, ForAttrs);
964 if (ExitBlock != LoopExit.getBlock()) {
965 EmitBlock(ExitBlock);
966 EmitBranchThroughCleanup(LoopExit);
970 incrementProfileCounter(&S);
972 // Create a block for the increment. In case of a 'continue', we jump there.
973 JumpDest Continue = getJumpDestInCurrentScope("for.inc");
975 // Store the blocks to use for break and continue.
976 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
979 // Create a separate cleanup scope for the loop variable and body.
980 LexicalScope BodyScope(*this, S.getSourceRange());
981 EmitStmt(S.getLoopVarStmt());
982 EmitStmt(S.getBody());
986 // If there is an increment, emit it next.
987 EmitBlock(Continue.getBlock());
988 EmitStmt(S.getInc());
990 BreakContinueStack.pop_back();
992 EmitBranch(CondBlock);
994 ForScope.ForceCleanup();
998 // Emit the fall-through block.
999 EmitBlock(LoopExit.getBlock(), true);
1002 void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1003 if (RV.isScalar()) {
1004 Builder.CreateStore(RV.getScalarVal(), ReturnValue);
1005 } else if (RV.isAggregate()) {
1006 EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
1008 EmitStoreOfComplex(RV.getComplexVal(),
1009 MakeNaturalAlignAddrLValue(ReturnValue, Ty),
1012 EmitBranchThroughCleanup(ReturnBlock);
1015 /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1016 /// if the function returns void, or may be missing one if the function returns
1017 /// non-void. Fun stuff :).
1018 void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1019 // Returning from an outlined SEH helper is UB, and we already warn on it.
1020 if (IsOutlinedSEHHelper) {
1021 Builder.CreateUnreachable();
1022 Builder.ClearInsertionPoint();
1025 // Emit the result value, even if unused, to evaluate the side effects.
1026 const Expr *RV = S.getRetValue();
1028 // Treat block literals in a return expression as if they appeared
1029 // in their own scope. This permits a small, easily-implemented
1030 // exception to our over-conservative rules about not jumping to
1031 // statements following block literals with non-trivial cleanups.
1032 RunCleanupsScope cleanupScope(*this);
1033 if (const ExprWithCleanups *cleanups =
1034 dyn_cast_or_null<ExprWithCleanups>(RV)) {
1035 enterFullExpression(cleanups);
1036 RV = cleanups->getSubExpr();
1039 // FIXME: Clean this up by using an LValue for ReturnTemp,
1040 // EmitStoreThroughLValue, and EmitAnyExpr.
1041 if (getLangOpts().ElideConstructors &&
1042 S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
1043 // Apply the named return value optimization for this return statement,
1044 // which means doing nothing: the appropriate result has already been
1045 // constructed into the NRVO variable.
1047 // If there is an NRVO flag for this variable, set it to 1 to indicate
1048 // that the cleanup code should not destroy the variable.
1049 if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1050 Builder.CreateStore(Builder.getTrue(), NRVOFlag);
1051 } else if (!ReturnValue || (RV && RV->getType()->isVoidType())) {
1052 // Make sure not to return anything, but evaluate the expression
1053 // for side effects.
1057 // Do nothing (return value is left uninitialized)
1058 } else if (FnRetTy->isReferenceType()) {
1059 // If this function returns a reference, take the address of the expression
1060 // rather than the value.
1061 RValue Result = EmitReferenceBindingToExpr(RV);
1062 Builder.CreateStore(Result.getScalarVal(), ReturnValue);
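// e.g. for "int &f() { static int g; return g; }" this stores the address of
// g into the return slot rather than a loaded value.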
1064 switch (getEvaluationKind(RV->getType())) {
1066 Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
1069 EmitComplexExprIntoLValue(RV,
1070 MakeNaturalAlignAddrLValue(ReturnValue, RV->getType()),
1073 case TEK_Aggregate: {
1074 CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
1075 EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment,
1077 AggValueSlot::IsDestructed,
1078 AggValueSlot::DoesNotNeedGCBarriers,
1079 AggValueSlot::IsNotAliased));
1086 if (!RV || RV->isEvaluatable(getContext()))
1087 ++NumSimpleReturnExprs;
1089 cleanupScope.ForceCleanup();
1090 EmitBranchThroughCleanup(ReturnBlock);
1093 void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1094 // As long as debug info is modeled with instructions, we have to ensure we
1095 // have a place to insert here and write the stop point here.
1096 if (HaveInsertPoint())
1099 for (const auto *I : S.decls())
1103 void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1104 assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1106 // If this code is reachable then emit a stop point (if generating
1107 // debug info). We have to do this ourselves because we are on the
1108 // "simple" statement path.
1109 if (HaveInsertPoint())
1112 EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
1115 void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1116 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1118 // If this code is reachable then emit a stop point (if generating
1119 // debug info). We have to do this ourselves because we are on the
1120 // "simple" statement path.
1121 if (HaveInsertPoint())
1124 EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1127 /// EmitCaseStmtRange - If the case statement range is not too big, add
1128 /// multiple cases to the switch instruction, one for each value within
1129 /// the range. If the range is too big, emit an "if" condition check instead.
1130 void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
1131 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1133 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1134 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1136 // Emit the code for this case. We do this first to make sure it is
1137 // properly chained from our predecessor before generating the
1138 // switch machinery to enter this block.
1139 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1140 EmitBlockWithFallThrough(CaseDest, &S);
1141 EmitStmt(S.getSubStmt());
1143 // If range is empty, do nothing.
1144 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1147 llvm::APInt Range = RHS - LHS;
1148 // FIXME: parameters such as this should not be hardcoded.
1149 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1150 // Range is small enough to add multiple switch instruction cases.
1151 uint64_t Total = getProfileCount(&S);
1152 unsigned NCases = Range.getZExtValue() + 1;
1153 // We only have one region counter for the entire set of cases here, so we
1154 // need to divide the weights evenly between the generated cases, ensuring
1155 // that the total weight is preserved. E.g., a weight of 5 over three cases
1156 // will be distributed as weights of 2, 2, and 1.
1157 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1158 for (unsigned I = 0; I != NCases; ++I) {
1160 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1163 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1169 // The range is too big. Emit "if" condition into a new block,
1170 // making sure to save and restore the current insertion point.
1171 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1173 // Push this test onto the chain of range checks (which terminates
1174 // in the default basic block). The switch's default will be changed
1175 // to the top of this chain after switch emission is complete.
1176 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1177 CaseRangeBlock = createBasicBlock("sw.caserange");
1179 CurFn->getBasicBlockList().push_back(CaseRangeBlock);
1180 Builder.SetInsertPoint(CaseRangeBlock);
1182 // Emit range check.
1184 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1186 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1188 llvm::MDNode *Weights = nullptr;
1189 if (SwitchWeights) {
1190 uint64_t ThisCount = getProfileCount(&S);
1191 uint64_t DefaultCount = (*SwitchWeights)[0];
1192 Weights = createProfileWeights(ThisCount, DefaultCount);
1194 // Since we're chaining the switch default through each large case range, we
1195 // need to update the weight for the default, i.e., the first case, to include
1196 // the case weight of the range.
1197 (*SwitchWeights)[0] += ThisCount;
1199 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
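// For a GNU case range such as "case 10 ... 200:" this emits, roughly:
//   %diff     = sub <switch condition>, 10
//   %inbounds = icmp ule %diff, 190
//   br i1 %inbounds, label %sw.bb, label <previous range check / default>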
1201 // Restore the appropriate insertion point.
1203 Builder.SetInsertPoint(RestoreBB);
1205 Builder.ClearInsertionPoint();
1208 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
1209 // If there is no enclosing switch instance that we're aware of, then this
1210 // case statement and its block can be elided. This situation only happens
1211 // when we've constant-folded the switch, are emitting the constant case,
1212 // and part of the constant case includes another case statement. For
1213 // instance: switch (4) { case 4: do { case 5: } while (1); }
1215 EmitStmt(S.getSubStmt());
1219 // Handle case ranges.
1221 EmitCaseStmtRange(S);
1225 llvm::ConstantInt *CaseVal =
1226 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1228 // If the body of the case is just a 'break', try to not emit an empty block.
1229 // If we're profiling or we're not optimizing, leave the block in for better
1230 // debug and coverage analysis.
1231 if (!CGM.getCodeGenOpts().ProfileInstrGenerate &&
1232 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1233 isa<BreakStmt>(S.getSubStmt())) {
1234 JumpDest Block = BreakContinueStack.back().BreakBlock;
1236 // Only do this optimization if there are no cleanups that need emitting.
1237 if (isObviouslyBranchWithoutCleanups(Block)) {
1239 SwitchWeights->push_back(getProfileCount(&S));
1240 SwitchInsn->addCase(CaseVal, Block.getBlock());
1242 // If there was a fallthrough into this case, make sure to redirect it to
1243 // the end of the switch as well.
1244 if (Builder.GetInsertBlock()) {
1245 Builder.CreateBr(Block.getBlock());
1246 Builder.ClearInsertionPoint();
1252 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1253 EmitBlockWithFallThrough(CaseDest, &S);
1255 SwitchWeights->push_back(getProfileCount(&S));
1256 SwitchInsn->addCase(CaseVal, CaseDest);
1258 // Recursively emitting the statement is acceptable, but is not wonderful for
1259 // code where we have many case statements nested together, i.e.:
1260 //  case 1:
1261 //    case 2:
1262 //      case 3: etc.
1263 // Handling this recursively will create a new block for each case statement
1264 // that falls through to the next case, which is IR-intensive. It also causes
1265 // deep recursion which can run into stack depth limitations. Handle
1266 // sequential non-range case statements specially.
1267 const CaseStmt *CurCase = &S;
1268 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1270 // Otherwise, iteratively add consecutive cases to this switch stmt.
1271 while (NextCase && NextCase->getRHS() == nullptr) {
1273 llvm::ConstantInt *CaseVal =
1274 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1277 SwitchWeights->push_back(getProfileCount(NextCase));
1278 if (CGM.getCodeGenOpts().ProfileInstrGenerate) {
1279 CaseDest = createBasicBlock("sw.bb");
1280 EmitBlockWithFallThrough(CaseDest, &S);
1283 SwitchInsn->addCase(CaseVal, CaseDest);
1284 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1287 // Normal default recursion for non-cases.
1288 EmitStmt(CurCase->getSubStmt());
1291 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
1292 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1293 assert(DefaultBlock->empty() &&
1294 "EmitDefaultStmt: Default block already defined?");
1296 EmitBlockWithFallThrough(DefaultBlock, &S);
1298 EmitStmt(S.getSubStmt());
1301 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
1302 /// constant value that is being switched on, see if we can dead code eliminate
1303 /// the body of the switch to a simple series of statements to emit. Basically,
1304 /// on a switch (5) we want to find these statements:
1306 /// printf(...); <--
1310 /// and add them to the ResultStmts vector. If it is unsafe to do this
1311 /// transformation (for example, one of the elided statements contains a label
1312 /// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1313 /// should include statements after it (e.g. the printf() line is a substmt of
1314 /// the case) then return CSFC_FallThrough. If we handled it and found a break
1315 /// statement, then return CSFC_Success.
1317 /// If Case is non-null, then we are looking for the specified case, checking
1318 /// that nothing we jump over contains labels. If Case is null, then we found
1319 /// the case and are looking for the break.
1321 /// If the recursive walk actually finds our Case, then we set FoundCase to
1324 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1325 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1326 const SwitchCase *Case,
1328 SmallVectorImpl<const Stmt*> &ResultStmts) {
1329 // If this is a null statement, just succeed.
1331 return Case ? CSFC_Success : CSFC_FallThrough;
1333 // If this is the switchcase (case 4: or default) that we're looking for, then
1334 // we're in business. Just add the substatement.
1335 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1338 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1342 // Otherwise, this is some other case or default statement, just ignore it.
1343 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1347 // If we are in the live part of the code and we found our break statement,
1348 // return a success!
1349 if (!Case && isa<BreakStmt>(S))
1350 return CSFC_Success;
1353 // If this is a compound statement, then it might contain the SwitchCase,
1354 // the break, or neither.
1354 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1355 // Handle this as two cases: we might be looking for the SwitchCase (if so
1356 // the skipped statements must be skippable) or we might already have it.
1357 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1359 // Keep track of whether we see a skipped declaration. The code could be
1360 // using the declaration even if it is skipped, so we can't optimize out
1361 // the decl if the kept statements might refer to it.
1362 bool HadSkippedDecl = false;
1364 // If we're looking for the case, just see if we can skip each of the
1366 for (; Case && I != E; ++I) {
1367 HadSkippedDecl |= isa<DeclStmt>(*I);
1369 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1370 case CSFC_Failure: return CSFC_Failure;
1372 // A successful result means that either 1) the statement doesn't
1373 // have the case and is skippable, or 2) it does contain the case value
1374 // and also contains the break to exit the switch. In the latter case,
1375 // we just verify the rest of the statements are elidable.
1377 // If we found the case and skipped declarations, we can't do the
1380 return CSFC_Failure;
1382 for (++I; I != E; ++I)
1383 if (CodeGenFunction::ContainsLabel(*I, true))
1384 return CSFC_Failure;
1385 return CSFC_Success;
1388 case CSFC_FallThrough:
1389 // If we have a fallthrough condition, then we must have found the
1390 // case and started to include statements. Consider the rest of the
1391 // statements in the compound statement as candidates for inclusion.
1392 assert(FoundCase && "Didn't find case but returned fallthrough?");
1393 // We recursively found Case, so we're not looking for it anymore.
1396 // If we found the case and skipped declarations, we can't do the
1399 return CSFC_Failure;
1405 // If we have statements in our range, then we know that the statements are
1406 // live and need to be added to the set of statements we're tracking.
1407 for (; I != E; ++I) {
1408 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1409 case CSFC_Failure: return CSFC_Failure;
1410 case CSFC_FallThrough:
1411 // A fallthrough result means that the statement was simple and was just
1412 // included in ResultStmts; keep adding statements afterwards.
1415 // A successful result means that we found the break statement and
1416 // stopped statement inclusion. We just ensure that any leftover stmts
1417 // are skippable and return success ourselves.
1418 for (++I; I != E; ++I)
1419 if (CodeGenFunction::ContainsLabel(*I, true))
1420 return CSFC_Failure;
1421 return CSFC_Success;
1425 return Case ? CSFC_Success : CSFC_FallThrough;
1428 // Okay, this is some other statement that we don't handle explicitly, like a
1429 // for statement or increment etc. If we are skipping over this statement,
1430 // just verify it doesn't have labels, which would make it invalid to elide.
1432 if (CodeGenFunction::ContainsLabel(S, true))
1433 return CSFC_Failure;
1434 return CSFC_Success;
1437 // Otherwise, we want to include this statement. Everything is cool with that
1438 // so long as it doesn't contain a break out of the switch we're in.
1439 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1441 // Otherwise, everything is great. Include the statement and tell the caller
1442 // that we fall through and include the next statement as well.
1443 ResultStmts.push_back(S);
1444 return CSFC_FallThrough;
1447 /// FindCaseStatementsForValue - Find the case statement being jumped to and
1448 /// then invoke CollectStatementsForCase to find the list of statements to emit
1449 /// for a switch on constant. See the comment above CollectStatementsForCase
1450 /// for more details.
1451 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1452 const llvm::APSInt &ConstantCondValue,
1453 SmallVectorImpl<const Stmt*> &ResultStmts,
1455 const SwitchCase *&ResultCase) {
1456 // First step, find the switch case that is being branched to. We can do this
1457 // efficiently by scanning the SwitchCase list.
1458 const SwitchCase *Case = S.getSwitchCaseList();
1459 const DefaultStmt *DefaultCase = nullptr;
1461 for (; Case; Case = Case->getNextSwitchCase()) {
1462 // It's either a default or case. Just remember the default statement in
1463 // case we're not jumping to any numbered cases.
1464 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1469 // Check to see if this case is the one we're looking for.
1470 const CaseStmt *CS = cast<CaseStmt>(Case);
1471 // Don't handle case ranges yet.
1472 if (CS->getRHS()) return false;
1474 // If we found our case, remember it as 'case'.
1475 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1479 // If we didn't find a matching case, we use a default if it exists, or we
1480 // elide the whole switch body!
1482 // It is safe to elide the body of the switch if it doesn't contain labels
1483 // etc. If it is safe, return successfully with an empty ResultStmts list.
1485 return !CodeGenFunction::ContainsLabel(&S);
1489 // Ok, we know which case is being jumped to, try to collect all the
1490 // statements that follow it. This can fail for a variety of reasons. Also,
1491 // check to see that the recursive walk actually found our case statement.
1492 // Insane cases like this can fail to find it in the recursive walk since we
1493 // don't handle every stmt kind:
1497 bool FoundCase = false;
1499 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1500 ResultStmts) != CSFC_Failure &&
1504 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1505 // Handle nested switch statements.
1506 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1507 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1508 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1510 // See if we can constant fold the condition of the switch and therefore only
1511 // emit the live case statement (if any) of the switch.
1512 llvm::APSInt ConstantCondValue;
1513 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1514 SmallVector<const Stmt*, 4> CaseStmts;
1515 const SwitchCase *Case = nullptr;
1516 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1517 getContext(), Case)) {
1519 incrementProfileCounter(Case);
1520 RunCleanupsScope ExecutedScope(*this);
1522 // Emit the condition variable if needed inside the entire cleanup scope
1523 // used by this special case for constant folded switches.
1524 if (S.getConditionVariable())
1525 EmitAutoVarDecl(*S.getConditionVariable());
1527 // At this point, we are no longer "within" a switch instance, so
1528 // we can temporarily enforce this to ensure that any embedded case
1529 // statements are not emitted.
1530 SwitchInsn = nullptr;
1532 // Okay, we can dead code eliminate everything except this case. Emit the
1533 // specified series of statements and we're good.
1534 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1535 EmitStmt(CaseStmts[i]);
1536 incrementProfileCounter(&S);
1538 // Now we want to restore the saved switch instance so that nested
1539 // switches continue to function properly
1540 SwitchInsn = SavedSwitchInsn;
1546 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1548 RunCleanupsScope ConditionScope(*this);
1549 if (S.getConditionVariable())
1550 EmitAutoVarDecl(*S.getConditionVariable());
1551 llvm::Value *CondV = EmitScalarExpr(S.getCond());
1553 // Create basic block to hold stuff that comes after switch
1554 // statement. We also need to create a default block now so that
1555 // explicit case range tests can have a place to jump to on
1556 // failure.
1557 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1558 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1559 if (PGO.haveRegionCounts()) {
1560 // Walk the SwitchCase list to find how many there are.
1561 uint64_t DefaultCount = 0;
1562 unsigned NumCases = 0;
1563 for (const SwitchCase *Case = S.getSwitchCaseList();
1565 Case = Case->getNextSwitchCase()) {
1566 if (isa<DefaultStmt>(Case))
1567 DefaultCount = getProfileCount(Case);
1570 SwitchWeights = new SmallVector<uint64_t, 16>();
1571 SwitchWeights->reserve(NumCases);
1572 // The default needs to be first. We store the edge count, so we already
1573 // know the right weight.
1574 SwitchWeights->push_back(DefaultCount);
1576 CaseRangeBlock = DefaultBlock;
1578 // Clear the insertion point to indicate we are in unreachable code.
1579 Builder.ClearInsertionPoint();
1581 // All break statements jump to NextBlock. If BreakContinueStack is non-empty
1582 // then reuse last ContinueBlock.
1583 JumpDest OuterContinue;
1584 if (!BreakContinueStack.empty())
1585 OuterContinue = BreakContinueStack.back().ContinueBlock;
1587 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1589 // Emit switch body.
1590 EmitStmt(S.getBody());
1592 BreakContinueStack.pop_back();
1594 // Update the default block in case explicit case range tests have
1595 // been chained on top.
1596 SwitchInsn->setDefaultDest(CaseRangeBlock);
1598 // If a default was never emitted:
1599 if (!DefaultBlock->getParent()) {
1600 // If we have cleanups, emit the default block so that there's a
1601 // place to jump through the cleanups from.
1602 if (ConditionScope.requiresCleanups()) {
1603 EmitBlock(DefaultBlock);
1605 // Otherwise, just forward the default block to the switch end.
1607 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1608 delete DefaultBlock;
1612 ConditionScope.ForceCleanup();
1614 // Emit continuation.
1615 EmitBlock(SwitchExit.getBlock(), true);
1616 incrementProfileCounter(&S);
1618 if (SwitchWeights) {
1619 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1620 "switch weights do not match switch cases");
1621 // If there's only one jump destination there's no sense weighting it.
1622 if (SwitchWeights->size() > 1)
1623 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1624 createProfileWeights(*SwitchWeights));
1625 delete SwitchWeights;
1627 SwitchInsn = SavedSwitchInsn;
1628 SwitchWeights = SavedSwitchWeights;
1629 CaseRangeBlock = SavedCRBlock;
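/// SimplifyConstraint - Rewrite a GCC inline-asm constraint string into the
/// form LLVM expects: target-specific constraints are converted via
/// convertConstraint, modifiers such as '=' are dropped, '#' alternatives are
/// skipped, repeated '&'/'%' characters are collapsed, and symbolic operand
/// names ("[foo]") are resolved to their operand index.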
1632 static std::string
1633 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
1634 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
1637 while (*Constraint) {
1638 switch (*Constraint) {
1640 Result += Target.convertConstraint(Constraint);
1646 case '=': // Will see this and the following in multi-alt constraints.
1649 case '#': // Ignore the rest of the constraint alternative.
1650 while (Constraint[1] && Constraint[1] != ',')
1655 Result += *Constraint;
1656 while (Constraint[1] && Constraint[1] == *Constraint)
1667 "Must pass output names to constraints with a symbolic name");
1669 bool result = Target.resolveSymbolicName(Constraint,
1671 OutCons->size(), Index);
1672 assert(result && "Could not resolve symbolic name"); (void)result;
1673 Result += llvm::utostr(Index);
1684 /// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
1685 /// as using a particular register, add that register as a constraint that
1686 /// will be used in this asm stmt.
1688 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
1689 const TargetInfo &Target, CodeGenModule &CGM,
1690 const AsmStmt &Stmt, const bool EarlyClobber) {
1691 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
1694 const ValueDecl &Value = *AsmDeclRef->getDecl();
1695 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
1698 if (Variable->getStorageClass() != SC_Register)
1700 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
1703 StringRef Register = Attr->getLabel();
1704 assert(Target.isValidGCCRegisterName(Register));
1705 // We're using validateOutputConstraint here because we only care if
1706 // this is a register constraint.
1707 TargetInfo::ConstraintInfo Info(Constraint, "");
1708 if (Target.validateOutputConstraint(Info) &&
1709 !Info.allowsRegister()) {
1710 CGM.ErrorUnsupported(&Stmt, "__asm__");
1713 // Canonicalize the register here before returning it.
1714 Register = Target.getNormalizedGCCRegisterName(Register);
1715 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
1716 }
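// Editor's note (illustrative, not from the original source): for a variable
// bound to a specific register, e.g.
//   register unsigned long sp asm("rsp");
//   __asm__("" : "=r"(sp));
// the "r" constraint on that operand is rewritten by AddVariableConstraints to
// "{rsp}" (or "&{rsp}" for an early-clobber operand), forcing LLVM to use
// exactly that register; the spelling of the register name is whatever
// Target.getNormalizedGCCRegisterName() returns for the asm label.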
1718 llvm::Value*
1719 CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
1720 LValue InputValue, QualType InputType,
1721 std::string &ConstraintStr,
1722 SourceLocation Loc) {
1723 llvm::Value *Arg;
1724 if (Info.allowsRegister() || !Info.allowsMemory()) {
1725 if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
1726 Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
1727 } else {
1728 llvm::Type *Ty = ConvertType(InputType);
1729 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
1730 if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
1731 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
1732 Ty = llvm::PointerType::getUnqual(Ty);
1734 Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
1735 Ty));
1736 } else {
1737 Arg = InputValue.getAddress();
1738 ConstraintStr += '*';
1739 }
1740 }
1741 } else {
1742 Arg = InputValue.getAddress();
1743 ConstraintStr += '*';
1744 }
1746 return Arg;
1747 }
1749 llvm::Value* CodeGenFunction::EmitAsmInput(
1750 const TargetInfo::ConstraintInfo &Info,
1751 const Expr *InputExpr,
1752 std::string &ConstraintStr) {
1753 if (Info.allowsRegister() || !Info.allowsMemory())
1754 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
1755 return EmitScalarExpr(InputExpr);
1757 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
1758 LValue Dest = EmitLValue(InputExpr);
1759 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
1760 InputExpr->getExprLoc());
1761 }
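// Editor's note (illustrative sketch, not from the original source): the two
// helpers above decide between by-value and by-address operands. Roughly:
//   int x;  struct S s;
//   __asm__("" :: "r"(x));   // scalar, register-able: pass the loaded value
//   __asm__("" :: "m"(s));   // memory-only/aggregate: pass &s and prepend '*'
//                            // to the constraint, i.e. "*m" (indirect operand)
// Small power-of-two-sized aggregates that still want a register are loaded
// through a bitcast to an iN pointer, as EmitAsmInputLValue shows above.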
1763 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
1764 /// asm call instruction. The !srcloc MDNode contains a list of constant
1765 /// integers which are the source locations of the start of each line in the
1766 /// asm string.
1767 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
1768 CodeGenFunction &CGF) {
1769 SmallVector<llvm::Metadata *, 8> Locs;
1770 // Add the location of the first line to the MDNode.
1771 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
1772 CGF.Int32Ty, Str->getLocStart().getRawEncoding())));
1773 StringRef StrVal = Str->getString();
1774 if (!StrVal.empty()) {
1775 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
1776 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
1778 // Add the location of the start of each subsequent line of the asm to the
1779 // MDNode.
1780 for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
1781 if (StrVal[i] != '\n') continue;
1782 SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
1783 CGF.getTarget());
1784 Locs.push_back(llvm::ConstantAsMetadata::get(
1785 llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
1786 }
1787 }
1789 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
1790 }
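// Editor's note (illustrative, not from the original source): for a multi-line
// asm string such as
//   __asm__("movl %0, %%eax\n\t"
//           "addl $1, %%eax" : : "r"(v));
// the resulting metadata holds one i32 per line, each the raw SourceLocation
// encoding of that line's start, e.g.
//   !srcloc !{i32 <loc of line 1>, i32 <loc of line 2>}
// so backend diagnostics can point at the correct line of the asm block.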
1792 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
1793 // Assemble the final asm string.
1794 std::string AsmString = S.generateAsmString(getContext());
1796 // Get all the output and input constraints together.
1797 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
1798 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
1800 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1801 StringRef Name;
1802 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1803 Name = GAS->getOutputName(i);
1804 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
1805 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
1806 assert(IsValid && "Failed to parse output constraint");
1807 OutputConstraintInfos.push_back(Info);
1808 }
1810 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1811 StringRef Name;
1812 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1813 Name = GAS->getInputName(i);
1814 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
1815 bool IsValid =
1816 getTarget().validateInputConstraint(OutputConstraintInfos.data(),
1817 S.getNumOutputs(), Info);
1818 assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
1819 InputConstraintInfos.push_back(Info);
1820 }
1822 std::string Constraints;
1824 std::vector<LValue> ResultRegDests;
1825 std::vector<QualType> ResultRegQualTys;
1826 std::vector<llvm::Type *> ResultRegTypes;
1827 std::vector<llvm::Type *> ResultTruncRegTypes;
1828 std::vector<llvm::Type *> ArgTypes;
1829 std::vector<llvm::Value*> Args;
1831 // Keep track of inout constraints.
1832 std::string InOutConstraints;
1833 std::vector<llvm::Value*> InOutArgs;
1834 std::vector<llvm::Type*> InOutArgTypes;
1836 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1837 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
1839 // Simplify the output constraint.
1840 std::string OutputConstraint(S.getOutputConstraint(i));
1841 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
1842 getTarget());
1844 const Expr *OutExpr = S.getOutputExpr(i);
1845 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
1847 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
1848 getTarget(), CGM, S,
1849 Info.earlyClobber());
1851 LValue Dest = EmitLValue(OutExpr);
1852 if (!Constraints.empty())
1853 Constraints += ',';
1855 // If this is a register output, then make the inline asm return it
1856 // by-value. If this is a memory result, return the value by-reference.
1857 if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) {
1858 Constraints += "=" + OutputConstraint;
1859 ResultRegQualTys.push_back(OutExpr->getType());
1860 ResultRegDests.push_back(Dest);
1861 ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
1862 ResultTruncRegTypes.push_back(ResultRegTypes.back());
1864 // If this output is tied to an input, and if the input is larger, then
1865 // we need to set the actual result type of the inline asm node to be the
1866 // same as the input type.
1867 if (Info.hasMatchingInput()) {
1868 unsigned InputNo;
1869 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
1870 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
1871 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
1872 break;
1873 }
1874 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
1876 QualType InputTy = S.getInputExpr(InputNo)->getType();
1877 QualType OutputType = OutExpr->getType();
1879 uint64_t InputSize = getContext().getTypeSize(InputTy);
1880 if (getContext().getTypeSize(OutputType) < InputSize) {
1881 // Form the asm to return the value as a larger integer or fp type.
1882 ResultRegTypes.back() = ConvertType(InputTy);
1883 }
1884 }
1885 if (llvm::Type* AdjTy =
1886 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
1887 ResultRegTypes.back()))
1888 ResultRegTypes.back() = AdjTy;
1889 else
1890 CGM.getDiags().Report(S.getAsmLoc(),
1891 diag::err_asm_invalid_type_in_input)
1892 << OutExpr->getType() << OutputConstraint;
1894 } else {
1895 ArgTypes.push_back(Dest.getAddress()->getType());
1896 Args.push_back(Dest.getAddress());
1897 Constraints += "=*";
1898 Constraints += OutputConstraint;
1899 }
1901 if (Info.isReadWrite()) {
1902 InOutConstraints += ',';
1904 const Expr *InputExpr = S.getOutputExpr(i);
1905 llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
1906 InOutConstraints,
1907 InputExpr->getExprLoc());
1909 if (llvm::Type* AdjTy =
1910 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
1911 Arg->getType()))
1912 Arg = Builder.CreateBitCast(Arg, AdjTy);
1914 if (Info.allowsRegister())
1915 InOutConstraints += llvm::utostr(i);
1916 else
1917 InOutConstraints += OutputConstraint;
1919 InOutArgTypes.push_back(Arg->getType());
1920 InOutArgs.push_back(Arg);
1921 }
1922 }
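// Editor's note (illustrative, not from the original source): a read-write
// operand such as "+r"(x) is handled by the loop above as an output "=r" plus
// a hidden input. The hidden input's constraint is the output's index when a
// register is allowed (e.g. "0"), otherwise the original constraint string,
// and it is queued in InOutArgs/InOutConstraints so it can be appended after
// the explicit inputs further down.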
1924 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
1925 // to the return value slot. Only do this when returning in registers.
1926 if (isa<MSAsmStmt>(&S)) {
1927 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
1928 if (RetAI.isDirect() || RetAI.isExtend()) {
1929 // Make a fake lvalue for the return value slot.
1930 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
1931 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
1932 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
1933 ResultRegDests, AsmString, S.getNumOutputs());
1935 }
1936 }
1938 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1939 const Expr *InputExpr = S.getInputExpr(i);
1941 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
1943 if (!Constraints.empty())
1944 Constraints += ',';
1946 // Simplify the input constraint.
1947 std::string InputConstraint(S.getInputConstraint(i));
1948 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
1949 &OutputConstraintInfos);
1951 InputConstraint = AddVariableConstraints(
1952 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
1953 getTarget(), CGM, S, false /* No EarlyClobber */);
1955 llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
1957 // If this input argument is tied to a larger output result, extend the
1958 // input to be the same size as the output. The LLVM backend wants to see
1959 // the input and output of a matching constraint be the same size. Note
1960 // that GCC does not define what the top bits are here. We use zext because
1961 // that is usually cheaper, but LLVM IR should really get an anyext someday.
1962 if (Info.hasTiedOperand()) {
1963 unsigned Output = Info.getTiedOperand();
1964 QualType OutputType = S.getOutputExpr(Output)->getType();
1965 QualType InputTy = InputExpr->getType();
1967 if (getContext().getTypeSize(OutputType) >
1968 getContext().getTypeSize(InputTy)) {
1969 // Use ptrtoint as appropriate so that we can do our extension.
1970 if (isa<llvm::PointerType>(Arg->getType()))
1971 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
1972 llvm::Type *OutputTy = ConvertType(OutputType);
1973 if (isa<llvm::IntegerType>(OutputTy))
1974 Arg = Builder.CreateZExt(Arg, OutputTy);
1975 else if (isa<llvm::PointerType>(OutputTy))
1976 Arg = Builder.CreateZExt(Arg, IntPtrTy);
1977 else {
1978 assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
1979 Arg = Builder.CreateFPExt(Arg, OutputTy);
1980 }
1981 }
1982 }
1983 if (llvm::Type* AdjTy =
1984 getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
1985 Arg->getType()))
1986 Arg = Builder.CreateBitCast(Arg, AdjTy);
1987 else
1988 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
1989 << InputExpr->getType() << InputConstraint;
1991 ArgTypes.push_back(Arg->getType());
1992 Args.push_back(Arg);
1993 Constraints += InputConstraint;
1994 }
1996 // Append the "input" part of inout constraints last.
1997 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
1998 ArgTypes.push_back(InOutArgTypes[i]);
1999 Args.push_back(InOutArgs[i]);
2000 }
2001 Constraints += InOutConstraints;
2004 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2005 StringRef Clobber = S.getClobber(i);
2007 if (Clobber != "memory" && Clobber != "cc")
2008 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2010 if (!Constraints.empty())
2011 Constraints += ',';
2013 Constraints += "~{";
2014 Constraints += Clobber;
2015 Constraints += "}";
2016 }
2018 // Add machine specific clobbers
2019 std::string MachineClobbers = getTarget().getClobbers();
2020 if (!MachineClobbers.empty()) {
2021 if (!Constraints.empty())
2022 Constraints += ',';
2023 Constraints += MachineClobbers;
2024 }
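// Editor's note (a sketch, not from the original source): by this point the
// full LLVM constraint string has been assembled. For example, on an
// x86-like target
//   __asm__("foo %1, %0" : "=r"(out) : "r"(in) : "cc");
// would produce something close to
//   "=r,r,~{cc},~{dirflag},~{fpsr},~{flags}"
// i.e. outputs first, then inputs, then any inout copies, then user clobbers
// ("memory" and "cc" are kept as-is, register clobbers are normalized), and
// finally the target's own clobbers from getClobbers().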
2026 llvm::Type *ResultType;
2027 if (ResultRegTypes.empty())
2028 ResultType = VoidTy;
2029 else if (ResultRegTypes.size() == 1)
2030 ResultType = ResultRegTypes[0];
2031 else
2032 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2034 llvm::FunctionType *FTy =
2035 llvm::FunctionType::get(ResultType, ArgTypes, false);
2037 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2038 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2039 llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
2040 llvm::InlineAsm *IA =
2041 llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
2042 /* IsAlignStack */ false, AsmDialect);
2043 llvm::CallInst *Result = Builder.CreateCall(IA, Args);
2044 Result->addAttribute(llvm::AttributeSet::FunctionIndex,
2045 llvm::Attribute::NoUnwind);
2047 // Slap the source location of the inline asm into a !srcloc metadata on the
2048 // call.
2049 if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) {
2050 Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
2053 // At least put the line number on MS inline asm blobs.
2054 auto Loc = llvm::ConstantInt::get(Int32Ty, S.getAsmLoc().getRawEncoding());
2055 Result->setMetadata("srcloc",
2056 llvm::MDNode::get(getLLVMContext(),
2057 llvm::ConstantAsMetadata::get(Loc)));
2058 }
2060 // Extract all of the register value results from the asm.
2061 std::vector<llvm::Value*> RegResults;
2062 if (ResultRegTypes.size() == 1) {
2063 RegResults.push_back(Result);
2064 } else {
2065 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2066 llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
2067 RegResults.push_back(Tmp);
2068 }
2069 }
2071 assert(RegResults.size() == ResultRegTypes.size());
2072 assert(RegResults.size() == ResultTruncRegTypes.size());
2073 assert(RegResults.size() == ResultRegDests.size());
2074 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2075 llvm::Value *Tmp = RegResults[i];
2077 // If the result type of the LLVM IR asm doesn't match the result type of
2078 // the expression, do the conversion.
2079 if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2080 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2082 // Truncate the integer result to the right size, note that TruncTy can be
2083 // a pointer.
2084 if (TruncTy->isFloatingPointTy())
2085 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2086 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2087 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2088 Tmp = Builder.CreateTrunc(Tmp,
2089 llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2090 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2091 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2092 uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2093 Tmp = Builder.CreatePtrToInt(Tmp,
2094 llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2095 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2096 } else if (TruncTy->isIntegerTy()) {
2097 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2098 } else if (TruncTy->isVectorTy()) {
2099 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2100 }
2101 }
2103 EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
2104 }
2105 }
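// Editor's note (illustrative, not from the original source): with a single
// register output the asm call itself is the result; with several, the call
// returns a struct and each field is pulled out with extractvalue before the
// stores above, roughly:
//   %r = call { i32, i32 } asm "...", "=r,=r,..."()
//   %a = extractvalue { i32, i32 } %r, 0
//   %b = extractvalue { i32, i32 } %r, 1
// followed by any trunc/bitcast needed to get back to the declared C type.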
2107 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2108 const RecordDecl *RD = S.getCapturedRecordDecl();
2109 QualType RecordTy = getContext().getRecordType(RD);
2111 // Initialize the captured struct.
2112 LValue SlotLV = MakeNaturalAlignAddrLValue(
2113 CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2115 RecordDecl::field_iterator CurField = RD->field_begin();
2116 for (CapturedStmt::capture_init_iterator I = S.capture_init_begin(),
2117 E = S.capture_init_end();
2118 I != E; ++I, ++CurField) {
2119 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2120 if (CurField->hasCapturedVLAType()) {
2121 auto VAT = CurField->getCapturedVLAType();
2122 EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2123 } else {
2124 EmitInitializerForField(*CurField, LV, *I, None);
2125 }
2126 }
2128 return SlotLV;
2129 }
2131 /// Generate an outlined function for the body of a CapturedStmt, store any
2132 /// captured variables into the captured struct, and call the outlined function.
2133 llvm::Function *
2134 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2135 LValue CapStruct = InitCapturedStruct(S);
2137 // Emit the CapturedDecl
2138 CodeGenFunction CGF(CGM, true);
2139 CGF.CapturedStmtInfo = new CGCapturedStmtInfo(S, K);
2140 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2141 delete CGF.CapturedStmtInfo;
2143 // Emit call to the helper function.
2144 EmitCallOrInvoke(F, CapStruct.getAddress());
2146 return F;
2147 }
2149 llvm::Value *
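// Editor's note (illustrative, not from the original source): a captured
// region is lowered to an "agg.captured" struct holding the captured values
// plus a call to the outlined helper, roughly:
//   %agg.captured = alloca %struct.anon
//   ; ...store captured variables / VLA sizes into the struct...
//   call void @__captured_stmt(%struct.anon* %agg.captured)
// The helper's actual name comes from CapturedStmtInfo->getHelperName(), so
// "__captured_stmt" here is only a placeholder.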
2150 CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2151 LValue CapStruct = InitCapturedStruct(S);
2152 return CapStruct.getAddress();
2153 }
2155 /// Creates the outlined function for a CapturedStmt.
2156 llvm::Function *
2157 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2158 assert(CapturedStmtInfo &&
2159 "CapturedStmtInfo should be set when generating the captured function");
2160 const CapturedDecl *CD = S.getCapturedDecl();
2161 const RecordDecl *RD = S.getCapturedRecordDecl();
2162 SourceLocation Loc = S.getLocStart();
2163 assert(CD->hasBody() && "missing CapturedDecl body");
2165 // Build the argument list.
2166 ASTContext &Ctx = CGM.getContext();
2167 FunctionArgList Args;
2168 Args.append(CD->param_begin(), CD->param_end());
2170 // Create the function declaration.
2171 FunctionType::ExtInfo ExtInfo;
2172 const CGFunctionInfo &FuncInfo =
2173 CGM.getTypes().arrangeFreeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
2174 /*IsVariadic=*/false);
2175 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2177 llvm::Function *F =
2178 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2179 CapturedStmtInfo->getHelperName(), &CGM.getModule());
2180 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2181 if (CD->isNothrow())
2182 F->addFnAttr(llvm::Attribute::NoUnwind);
2184 // Generate the function.
2185 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args,
2186 CD->getLocation(),
2187 CD->getBody()->getLocStart());
2188 // Set the context parameter in CapturedStmtInfo.
2189 llvm::Value *DeclPtr = LocalDeclMap[CD->getContextParam()];
2190 assert(DeclPtr && "missing context parameter for CapturedStmt");
2191 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2193 // Initialize variable-length arrays.
2194 LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2195 Ctx.getTagDeclType(RD));
2196 for (auto *FD : RD->fields()) {
2197 if (FD->hasCapturedVLAType()) {
2198 auto *ExprArg = EmitLoadOfLValue(EmitLValueForField(Base, FD),
2199 S.getLocStart()).getScalarVal();
2200 auto VAT = FD->getCapturedVLAType();
2201 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2202 }
2203 }
2205 // If 'this' is captured, load it into CXXThisValue.
2206 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2207 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2208 LValue ThisLValue = EmitLValueForField(Base, FD);
2209 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2210 }
2212 PGO.assignRegionCounters(CD, F);
2213 CapturedStmtInfo->EmitBody(*this, CD->getBody());
2214 FinishFunction(CD->getBodyRBrace());
2216 return F;
2217 }