1 //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This is the internal per-function state used for llvm translation.
12 //===----------------------------------------------------------------------===//
14 #ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
15 #define CLANG_CODEGEN_CODEGENFUNCTION_H
17 #include "clang/AST/Type.h"
18 #include "clang/AST/ExprCXX.h"
19 #include "clang/AST/ExprObjC.h"
20 #include "clang/AST/CharUnits.h"
21 #include "clang/Basic/ABI.h"
22 #include "clang/Basic/TargetInfo.h"
23 #include "llvm/ADT/DenseMap.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/Support/ValueHandle.h"
26 #include "CodeGenModule.h"
27 #include "CGBuilder.h"
// Forward declarations for AST, Objective-C, and CodeGen types that are
// only referenced by pointer/reference in this header.
45 class CXXDestructorDecl;
49 class EnumConstantDecl;
51 class FunctionProtoType;
53 class ObjCContainerDecl;
54 class ObjCInterfaceDecl;
57 class ObjCImplementationDecl;
58 class ObjCPropertyImplDecl;
60 class TargetCodeGenInfo;
62 class ObjCForCollectionStmt;
64 class ObjCAtThrowStmt;
65 class ObjCAtSynchronizedStmt;
75 class BlockFieldFlags;
77 /// A branch fixup. These are required when emitting a goto to a
78 /// label which hasn't been emitted yet. The goto is optimistically
79 /// emitted as a branch to the basic block for the label, and (if it
80 /// occurs in a scope with non-trivial cleanups) a fixup is added to
81 /// the innermost cleanup. When a (normal) cleanup is popped, any
82 /// unresolved fixups in that scope are threaded through the cleanup.
84 /// The block containing the terminator which needs to be modified
85 /// into a switch if this fixup is resolved into the current scope.
86 /// If null, InitialBranch points directly to the destination.
87 llvm::BasicBlock *OptimisticBranchBlock;
89 /// The ultimate destination of the branch.
91 /// This can be set to null to indicate that this fixup was
92 /// successfully resolved.
93 llvm::BasicBlock *Destination;
95 /// The destination index value.
96 unsigned DestinationIndex;
98 /// The initial branch of the fixup.
99 llvm::BranchInst *InitialBranch;
/// Trivially-invariant values: needsSaving is always false, and
/// save/restore are the identity operation.
102 template <class T> struct InvariantValue {
104 typedef T saved_type;
105 static bool needsSaving(type value) { return false; }
106 static saved_type save(CodeGenFunction &CGF, type value) { return value; }
107 static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
110 /// A metaprogramming class for ensuring that a value will dominate an
111 /// arbitrary position in a function.
112 template <class T> struct DominatingValue : InvariantValue<T> {};
// A pointer might need saving only when it points at an llvm::Value
// that is an Instruction (Constants and BasicBlocks are excluded).
114 template <class T, bool mightBeInstruction =
115 llvm::is_base_of<llvm::Value, T>::value &&
116 !llvm::is_base_of<llvm::Constant, T>::value &&
117 !llvm::is_base_of<llvm::BasicBlock, T>::value>
118 struct DominatingPointer;
119 template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
120 // template <class T> struct DominatingPointer<T,true> at end of file
122 template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
// Cleanup-kind enumerators: combinations of the EH/normal cleanup bits
// with the InactiveCleanup flag (0x4). An inactive cleanup can later be
// activated via ActivateCleanupBlock (declared below).
127 NormalAndEHCleanup = EHCleanup | NormalCleanup,
129 InactiveCleanup = 0x4,
130 InactiveEHCleanup = EHCleanup | InactiveCleanup,
131 InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
132 InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
135 /// A stack of scopes which respond to exceptions, including cleanups
136 /// and catch blocks.
139 /// A saved depth on the scope stack. This is necessary because
140 /// pushing scopes onto the stack invalidates iterators.
141 class stable_iterator {
142 friend class EHScopeStack;
144 /// Offset from StartOfData to EndOfBuffer.
/// Larger offsets denote more recently pushed (inner) scopes; the
/// outermost depth, stable_end(), has offset 0 (see stable_begin()).
147 stable_iterator(ptrdiff_t Size) : Size(Size) {}
150 static stable_iterator invalid() { return stable_iterator(-1); }
151 stable_iterator() : Size(-1) {}
153 bool isValid() const { return Size >= 0; }
155 /// Returns true if this scope encloses I.
156 /// Returns false if I is invalid.
157 /// This scope must be valid.
158 bool encloses(stable_iterator I) const { return Size <= I.Size; }
160 /// Returns true if this scope strictly encloses I: that is,
161 /// if it encloses I and is not I.
162 /// Returns false is I is invalid.
163 /// This scope must be valid.
164 bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
166 friend bool operator==(stable_iterator A, stable_iterator B) {
167 return A.Size == B.Size;
169 friend bool operator!=(stable_iterator A, stable_iterator B) {
170 return A.Size != B.Size;
174 /// Information for lazily generating a cleanup. Subclasses must be
175 /// POD-like: cleanups will not be destructed, and they will be
176 /// allocated on the cleanup stack and freely copied and moved
179 /// Cleanup implementations should generally be declared in an
180 /// anonymous namespace.
183 // Anchor the construction vtable. We use the destructor because
184 // gcc gives an obnoxious warning if there are virtual methods
185 // with an accessible non-virtual destructor. Unfortunately,
186 // declaring this destructor makes it non-trivial, but there
187 // doesn't seem to be any other way around this warning.
189 // This destructor will never be called.
192 /// Emit the cleanup. For normal cleanups, this is run in the
193 /// same EH context as when the cleanup was pushed, i.e. the
194 /// immediately-enclosing context of the cleanup scope. For
195 /// EH cleanups, this is run in a terminate context.
197 /// \param IsForEHCleanup true if this is for an EH cleanup, false
198 /// if for a normal cleanup.
199 virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
202 /// UnconditionalCleanupN stores its N parameters and just passes
203 /// them to the real cleanup function.
204 template <class T, class A0>
205 class UnconditionalCleanup1 : public Cleanup {
208 UnconditionalCleanup1(A0 a0) : a0(a0) {}
209 void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
210 T::Emit(CGF, IsForEHCleanup, a0);
214 template <class T, class A0, class A1>
215 class UnconditionalCleanup2 : public Cleanup {
218 UnconditionalCleanup2(A0 a0, A1 a1) : a0(a0), a1(a1) {}
219 void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
220 T::Emit(CGF, IsForEHCleanup, a0, a1);
224 /// ConditionalCleanupN stores the saved form of its N parameters,
225 /// then restores them and performs the cleanup.
226 template <class T, class A0>
227 class ConditionalCleanup1 : public Cleanup {
228 typedef typename DominatingValue<A0>::saved_type A0_saved;
231 void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
// Rehydrate the saved argument at the emission point, then forward
// to the real cleanup exactly like the unconditional variant.
232 A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
233 T::Emit(CGF, IsForEHCleanup, a0);
237 ConditionalCleanup1(A0_saved a0)
241 template <class T, class A0, class A1>
242 class ConditionalCleanup2 : public Cleanup {
243 typedef typename DominatingValue<A0>::saved_type A0_saved;
244 typedef typename DominatingValue<A1>::saved_type A1_saved;
248 void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
249 A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
250 A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
251 T::Emit(CGF, IsForEHCleanup, a0, a1);
255 ConditionalCleanup2(A0_saved a0, A1_saved a1)
256 : a0_saved(a0), a1_saved(a1) {}
260 // The implementation for this class is in CGException.h and
261 // CGException.cpp; the definition is here because it's used as a
262 // member of CodeGenFunction.
264 /// The start of the scope-stack buffer, i.e. the allocated pointer
265 /// for the buffer. All of these pointers are either simultaneously
266 /// null or simultaneously valid.
269 /// The end of the buffer.
272 /// The first valid entry in the buffer.
275 /// The innermost normal cleanup on the stack.
276 stable_iterator InnermostNormalCleanup;
278 /// The innermost EH cleanup on the stack.
279 stable_iterator InnermostEHCleanup;
281 /// The number of catches on the stack.
284 /// The current EH destination index. Reset to FirstEHDestIndex
285 /// whenever the last EH cleanup is popped.
286 unsigned NextEHDestIndex;
287 enum { FirstEHDestIndex = 1 };
289 /// The current set of branch fixups. A branch fixup is a jump to
290 /// an as-yet unemitted label, i.e. a label for which we don't yet
291 /// know the EH stack depth. Whenever we pop a cleanup, we have
292 /// to thread all the current branch fixups through it.
294 /// Fixups are recorded as the Use of the respective branch or
295 /// switch statement. The use points to the final destination.
296 /// When popping out of a cleanup, these uses are threaded through
297 /// the cleanup and adjusted to point to the new cleanup.
299 /// Note that branches are allowed to jump into protected scopes
300 /// in certain situations; e.g. the following code is legal:
301 /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
306 llvm::SmallVector<BranchFixup, 8> BranchFixups;
308 char *allocate(size_t Size);
310 void *pushCleanup(CleanupKind K, size_t DataSize);
// Starts with an empty (null) buffer; both innermost-cleanup markers
// begin at stable_end(), i.e. "no cleanup".
313 EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
314 InnermostNormalCleanup(stable_end()),
315 InnermostEHCleanup(stable_end()),
316 CatchDepth(0), NextEHDestIndex(FirstEHDestIndex) {}
317 ~EHScopeStack() { delete[] StartOfBuffer; }
319 // Variadic templates would make this not terrible.
// Each arity variant below obtains sizeof(T) bytes from the private
// pushCleanup(Kind, size) and placement-news a T into that storage.
321 /// Push a lazily-created cleanup on the stack.
323 void pushCleanup(CleanupKind Kind) {
324 void *Buffer = pushCleanup(Kind, sizeof(T));
325 Cleanup *Obj = new(Buffer) T();
329 /// Push a lazily-created cleanup on the stack.
330 template <class T, class A0>
331 void pushCleanup(CleanupKind Kind, A0 a0) {
332 void *Buffer = pushCleanup(Kind, sizeof(T));
333 Cleanup *Obj = new(Buffer) T(a0);
337 /// Push a lazily-created cleanup on the stack.
338 template <class T, class A0, class A1>
339 void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
340 void *Buffer = pushCleanup(Kind, sizeof(T));
341 Cleanup *Obj = new(Buffer) T(a0, a1);
345 /// Push a lazily-created cleanup on the stack.
346 template <class T, class A0, class A1, class A2>
347 void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
348 void *Buffer = pushCleanup(Kind, sizeof(T));
349 Cleanup *Obj = new(Buffer) T(a0, a1, a2);
353 /// Push a lazily-created cleanup on the stack.
354 template <class T, class A0, class A1, class A2, class A3>
355 void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
356 void *Buffer = pushCleanup(Kind, sizeof(T));
357 Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
361 /// Push a lazily-created cleanup on the stack.
362 template <class T, class A0, class A1, class A2, class A3, class A4>
363 void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
364 void *Buffer = pushCleanup(Kind, sizeof(T));
365 Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
369 // Feel free to add more variants of the following:
371 /// Push a cleanup with non-constant storage requirements on the
372 /// stack. The cleanup type must provide an additional static method:
373 /// static size_t getExtraSize(size_t);
374 /// The argument to this method will be the value N, which will also
375 /// be passed as the first argument to the constructor.
377 /// The data stored in the extra storage must obey the same
378 /// restrictions as normal cleanup member data.
380 /// The pointer returned from this method is valid until the cleanup
381 /// stack is modified.
382 template <class T, class A0, class A1, class A2>
383 T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
384 void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
385 return new (Buffer) T(N, a0, a1, a2);
388 /// Pops a cleanup scope off the stack. This should only be called
389 /// by CodeGenFunction::PopCleanupBlock.
392 /// Push a set of catch handlers on the stack. The catch is
393 /// uninitialized and will need to have the given number of handlers
395 class EHCatchScope *pushCatch(unsigned NumHandlers);
397 /// Pops a catch scope off the stack.
400 /// Push an exceptions filter on the stack.
401 class EHFilterScope *pushFilter(unsigned NumFilters);
403 /// Pops an exceptions filter off the stack.
406 /// Push a terminate handler on the stack.
407 void pushTerminate();
409 /// Pops a terminate handler off the stack.
412 /// Determines whether the exception-scopes stack is empty.
413 bool empty() const { return StartOfData == EndOfBuffer; }
// A landing pad is needed if there is any catch scope or EH cleanup.
415 bool requiresLandingPad() const {
416 return (CatchDepth || hasEHCleanups());
419 /// Determines whether there are any normal cleanups on the stack.
420 bool hasNormalCleanups() const {
421 return InnermostNormalCleanup != stable_end();
424 /// Returns the innermost normal cleanup on the stack, or
425 /// stable_end() if there are no normal cleanups.
426 stable_iterator getInnermostNormalCleanup() const {
427 return InnermostNormalCleanup;
429 stable_iterator getInnermostActiveNormalCleanup() const; // CGException.h
431 /// Determines whether there are any EH cleanups on the stack.
432 bool hasEHCleanups() const {
433 return InnermostEHCleanup != stable_end();
436 /// Returns the innermost EH cleanup on the stack, or stable_end()
437 /// if there are no EH cleanups.
438 stable_iterator getInnermostEHCleanup() const {
439 return InnermostEHCleanup;
441 stable_iterator getInnermostActiveEHCleanup() const; // CGException.h
443 /// An unstable reference to a scope-stack depth. Invalidated by
444 /// pushes but not pops.
447 /// Returns an iterator pointing to the innermost EH scope.
448 iterator begin() const;
450 /// Returns an iterator pointing to the outermost EH scope.
451 iterator end() const;
453 /// Create a stable reference to the top of the EH stack. The
454 /// returned reference is valid until that scope is popped off the
456 stable_iterator stable_begin() const {
457 return stable_iterator(EndOfBuffer - StartOfData);
460 /// Create a stable reference to the bottom of the EH stack.
461 static stable_iterator stable_end() {
462 return stable_iterator(0);
465 /// Translates an iterator into a stable_iterator.
466 stable_iterator stabilize(iterator it) const;
468 /// Finds the nearest cleanup enclosing the given iterator.
469 /// Returns stable_iterator::invalid() if there are no such cleanups.
470 stable_iterator getEnclosingEHCleanup(iterator it) const;
472 /// Turn a stable reference to a scope depth into an unstable pointer
474 iterator find(stable_iterator save) const;
476 /// Removes the cleanup pointed to by the given stable_iterator.
477 void removeCleanup(stable_iterator save);
479 /// Add a branch fixup to the current cleanup scope.
480 BranchFixup &addBranchFixup() {
481 assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
482 BranchFixups.push_back(BranchFixup());
483 return BranchFixups.back();
486 unsigned getNumBranchFixups() const { return BranchFixups.size(); }
487 BranchFixup &getBranchFixup(unsigned I) {
488 assert(I < getNumBranchFixups());
489 return BranchFixups[I];
492 /// Pops lazily-removed fixups from the end of the list. This
493 /// should only be called by procedures which have just popped a
494 /// cleanup or resolved one or more fixups.
495 void popNullFixups();
497 /// Clears the branch-fixups list. This should only be called by
498 /// ResolveAllBranchFixups.
499 void clearFixups() { BranchFixups.clear(); }
501 /// Gets the next EH destination index.
502 unsigned getNextEHDestIndex() { return NextEHDestIndex++; }
505 /// CodeGenFunction - This class organizes the per-function state that is used
506 /// while generating LLVM code.
507 class CodeGenFunction : public CodeGenTypeCache {
// Noncopyable: copy operations are declared but deliberately never defined.
508 CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
509 void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
511 friend class CGCXXABI;
513 /// A jump destination is an abstract label, branching to which may
514 /// require a jump out through normal cleanups.
516 JumpDest() : Block(0), ScopeDepth(), Index(0) {}
517 JumpDest(llvm::BasicBlock *Block,
518 EHScopeStack::stable_iterator Depth,
520 : Block(Block), ScopeDepth(Depth), Index(Index) {}
522 bool isValid() const { return Block != 0; }
523 llvm::BasicBlock *getBlock() const { return Block; }
524 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
525 unsigned getDestIndex() const { return Index; }
528 llvm::BasicBlock *Block;
529 EHScopeStack::stable_iterator ScopeDepth;
533 /// An unwind destination is an abstract label, branching to which
534 /// may require a jump out through EH cleanups.
// UnwindDest is structurally identical to JumpDest; it differs only in
// which kind of cleanups (EH rather than normal) a branch must thread.
536 UnwindDest() : Block(0), ScopeDepth(), Index(0) {}
537 UnwindDest(llvm::BasicBlock *Block,
538 EHScopeStack::stable_iterator Depth,
540 : Block(Block), ScopeDepth(Depth), Index(Index) {}
542 bool isValid() const { return Block != 0; }
543 llvm::BasicBlock *getBlock() const { return Block; }
544 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
545 unsigned getDestIndex() const { return Index; }
548 llvm::BasicBlock *Block;
549 EHScopeStack::stable_iterator ScopeDepth;
553 CodeGenModule &CGM; // Per-module state.
554 const TargetInfo &Target;
556 typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
559 /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
560 /// This excludes BlockDecls.
561 const Decl *CurFuncDecl;
562 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
563 const Decl *CurCodeDecl;
564 const CGFunctionInfo *CurFnInfo;
566 llvm::Function *CurFn;
568 /// CurGD - The GlobalDecl for the current function being compiled.
571 /// ReturnBlock - Unified return block.
572 JumpDest ReturnBlock;
574 /// ReturnValue - The temporary alloca to hold the return value. This is null
575 /// iff the function has no return value.
576 llvm::Value *ReturnValue;
578 /// RethrowBlock - Unified rethrow block.
579 UnwindDest RethrowBlock;
581 /// AllocaInsertPoint - This is an instruction in the entry block before which
582 /// we prefer to insert allocas.
583 llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
587 const CodeGen::CGBlockInfo *BlockInfo;
588 llvm::Value *BlockPointer;
590 /// \brief A mapping from NRVO variables to the flags used to indicate
591 /// when the NRVO has been applied to this variable.
592 llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
594 EHScopeStack EHStack;
596 /// i32s containing the indexes of the cleanup destinations.
597 llvm::AllocaInst *NormalCleanupDest;
598 llvm::AllocaInst *EHCleanupDest;
600 unsigned NextCleanupDestIndex;
602 /// The exception slot. All landing pads write the current
603 /// exception pointer into this alloca.
604 llvm::Value *ExceptionSlot;
606 /// Emits a landing pad for the current EH stack.
607 llvm::BasicBlock *EmitLandingPad();
609 llvm::BasicBlock *getInvokeDestImpl();
611 /// Set up the last cleanup that was pushed as a conditional
612 /// full-expression cleanup.
613 void initFullExprCleanup();
// Saves a value via its DominatingValue traits so it can be restored
// at the (possibly conditional) point where the cleanup is emitted.
616 typename DominatingValue<T>::saved_type saveValueInCond(T value) {
617 return DominatingValue<T>::save(*this, value);
621 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
623 llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
625 // A struct holding information about a finally block's IR
626 // generation. For now, doesn't actually hold anything.
630 FinallyInfo EnterFinallyBlock(const Stmt *Stmt,
631 llvm::Constant *BeginCatchFn,
632 llvm::Constant *EndCatchFn,
633 llvm::Constant *RethrowFn);
634 void ExitFinallyBlock(FinallyInfo &FinallyInfo);
636 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
637 /// current full-expression. Safe against the possibility that
638 /// we're currently inside a conditionally-evaluated expression.
639 template <class T, class A0>
640 void pushFullExprCleanup(CleanupKind kind, A0 a0) {
641 // If we're not in a conditional branch, or if none of the
642 // arguments requires saving, then use the unconditional cleanup.
643 if (!isInConditionalBranch()) {
644 typedef EHScopeStack::UnconditionalCleanup1<T, A0> CleanupType;
645 return EHStack.pushCleanup<CleanupType>(kind, a0);
// In a conditional context: save the argument so it dominates the
// point where the cleanup is eventually emitted, then mark the
// cleanup as conditional via initFullExprCleanup().
648 typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
650 typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
651 EHStack.pushCleanup<CleanupType>(kind, a0_saved);
652 initFullExprCleanup();
655 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
656 /// current full-expression. Safe against the possibility that
657 /// we're currently inside a conditionally-evaluated expression.
658 template <class T, class A0, class A1>
659 void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
660 // If we're not in a conditional branch, or if none of the
661 // arguments requires saving, then use the unconditional cleanup.
662 if (!isInConditionalBranch()) {
663 typedef EHScopeStack::UnconditionalCleanup2<T, A0, A1> CleanupType;
664 return EHStack.pushCleanup<CleanupType>(kind, a0, a1);
667 typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
668 typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
670 typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
671 EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
672 initFullExprCleanup();
675 /// PushDestructorCleanup - Push a cleanup to call the
676 /// complete-object destructor of an object of the given type at the
677 /// given address. Does nothing if T is not a C++ class type with a
678 /// non-trivial destructor.
679 void PushDestructorCleanup(QualType T, llvm::Value *Addr);
681 /// PushDestructorCleanup - Push a cleanup to call the
682 /// complete-object variant of the given destructor on the object at
683 /// the given address.
684 void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
687 /// PopCleanupBlock - Will pop the cleanup entry on the stack and
688 /// process all branch fixups.
689 void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
691 /// DeactivateCleanupBlock - Deactivates the given cleanup block.
692 /// The block cannot be reactivated. Pops it if it's the top of the
694 void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
696 /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
697 /// Cannot be used to resurrect a deactivated cleanup.
698 void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
700 /// \brief Enters a new scope for capturing cleanups, all of which
701 /// will be executed once the scope is exited.
702 class RunCleanupsScope {
703 CodeGenFunction& CGF;
// Snapshot of the EH stack top at scope entry; everything pushed
// above this depth is popped when the scope exits (or ForceCleanup).
704 EHScopeStack::stable_iterator CleanupStackDepth;
705 bool OldDidCallStackSave;
708 RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
709 RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
712 /// \brief Enter a new cleanup scope.
713 explicit RunCleanupsScope(CodeGenFunction &CGF)
714 : CGF(CGF), PerformCleanup(true)
716 CleanupStackDepth = CGF.EHStack.stable_begin();
717 OldDidCallStackSave = CGF.DidCallStackSave;
718 CGF.DidCallStackSave = false;
721 /// \brief Exit this cleanup scope, emitting any accumulated
723 ~RunCleanupsScope() {
724 if (PerformCleanup) {
725 CGF.DidCallStackSave = OldDidCallStackSave;
726 CGF.PopCleanupBlocks(CleanupStackDepth);
730 /// \brief Determine whether this scope requires any cleanups.
731 bool requiresCleanups() const {
732 return CGF.EHStack.stable_begin() != CleanupStackDepth;
735 /// \brief Force the emission of cleanups now, instead of waiting
736 /// until this object is destroyed.
737 void ForceCleanup() {
738 assert(PerformCleanup && "Already forced cleanup");
739 CGF.DidCallStackSave = OldDidCallStackSave;
740 CGF.PopCleanupBlocks(CleanupStackDepth);
741 PerformCleanup = false;
746 /// PopCleanupBlocks - Takes the old cleanup stack size and emits
747 /// the cleanup blocks that have been added.
748 void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
750 void ResolveBranchFixups(llvm::BasicBlock *Target);
752 /// The given basic block lies in the current EH scope, but may be a
753 /// target of a potentially scope-crossing jump; get a stable handle
754 /// to which we can perform this jump later.
755 JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
756 return JumpDest(Target,
757 EHStack.getInnermostNormalCleanup(),
758 NextCleanupDestIndex++);
761 /// The given basic block lies in the current EH scope, but may be a
762 /// target of a potentially scope-crossing jump; get a stable handle
763 /// to which we can perform this jump later.
764 JumpDest getJumpDestInCurrentScope(llvm::StringRef Name = llvm::StringRef()) {
765 return getJumpDestInCurrentScope(createBasicBlock(Name));
768 /// EmitBranchThroughCleanup - Emit a branch from the current insert
769 /// block through the normal cleanup handling code (if any) and then
771 void EmitBranchThroughCleanup(JumpDest Dest);
773 /// EmitBranchThroughEHCleanup - Emit a branch from the current
774 /// insert block through the EH cleanup handling code (if any) and
775 /// then on to \arg Dest.
776 void EmitBranchThroughEHCleanup(UnwindDest Dest);
778 /// getRethrowDest - Returns the unified outermost-scope rethrow
780 UnwindDest getRethrowDest();
782 /// An object to manage conditionally-evaluated expressions.
783 class ConditionalEvaluation {
// Insert block current at construction time (see the constructor).
784 llvm::BasicBlock *StartBB;
787 ConditionalEvaluation(CodeGenFunction &CGF)
788 : StartBB(CGF.Builder.GetInsertBlock()) {}
// begin/end maintain CGF.OutermostConditional: only the outermost
// nested ConditionalEvaluation installs (and later clears) itself.
790 void begin(CodeGenFunction &CGF) {
791 assert(CGF.OutermostConditional != this);
792 if (!CGF.OutermostConditional)
793 CGF.OutermostConditional = this;
796 void end(CodeGenFunction &CGF) {
797 assert(CGF.OutermostConditional != 0);
798 if (CGF.OutermostConditional == this)
799 CGF.OutermostConditional = 0;
802 /// Returns a block which will be executed prior to each
803 /// evaluation of the conditional code.
804 llvm::BasicBlock *getStartingBlock() const {
809 /// isInConditionalBranch - Return true if we're currently emitting
810 /// one branch or the other of a conditional expression.
811 bool isInConditionalBranch() const { return OutermostConditional != 0; }
813 /// An RAII object to record that we're evaluating a statement
815 class StmtExprEvaluation {
816 CodeGenFunction &CGF;
818 /// We have to save the outermost conditional: cleanups in a
819 /// statement expression aren't conditional just because the
821 ConditionalEvaluation *SavedOutermostConditional;
824 StmtExprEvaluation(CodeGenFunction &CGF)
825 : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
826 CGF.OutermostConditional = 0;
829 ~StmtExprEvaluation() {
830 CGF.OutermostConditional = SavedOutermostConditional;
831 CGF.EnsureInsertPoint();
835 /// An object which temporarily prevents a value from being
836 /// destroyed by aggressive peephole optimizations that assume that
837 /// all uses of a value have been realized in the IR.
838 class PeepholeProtection {
839 llvm::Instruction *Inst;
840 friend class CodeGenFunction;
843 PeepholeProtection() : Inst(0) {}
846 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
847 class OpaqueValueMapping {
848 CodeGenFunction &CGF;
849 const OpaqueValueExpr *OpaqueValue;
851 CodeGenFunction::PeepholeProtection Protection;
854 static bool shouldBindAsLValue(const Expr *expr) {
855 return expr->isGLValue() || expr->getType()->isRecordType();
858 /// Build the opaque value mapping for the given conditional
859 /// operator if it's the GNU ?: extension. This is a common
860 /// enough pattern that the convenience operator is really
863 OpaqueValueMapping(CodeGenFunction &CGF,
864 const AbstractConditionalOperator *op) : CGF(CGF) {
865 if (isa<ConditionalOperator>(op)) {
871 const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
872 init(e->getOpaqueValue(), e->getCommon());
875 OpaqueValueMapping(CodeGenFunction &CGF,
876 const OpaqueValueExpr *opaqueValue,
878 : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(true) {
879 assert(opaqueValue && "no opaque value expression!");
880 assert(shouldBindAsLValue(opaqueValue));
884 OpaqueValueMapping(CodeGenFunction &CGF,
885 const OpaqueValueExpr *opaqueValue,
887 : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(false) {
888 assert(opaqueValue && "no opaque value expression!");
889 assert(!shouldBindAsLValue(opaqueValue));
894 assert(OpaqueValue && "mapping already popped!");
// The destructor pops the mapping only if it hasn't already been
// popped (a popped mapping nulls out OpaqueValue).
899 ~OpaqueValueMapping() {
900 if (OpaqueValue) popImpl();
906 CGF.OpaqueLValues.erase(OpaqueValue);
908 CGF.OpaqueRValues.erase(OpaqueValue);
909 CGF.unprotectFromPeepholes(Protection);
913 void init(const OpaqueValueExpr *ov, const Expr *e) {
915 BoundLValue = shouldBindAsLValue(ov);
916 assert(BoundLValue == shouldBindAsLValue(e)
917 && "inconsistent expression value kinds!");
919 initLValue(CGF.EmitLValue(e));
921 initRValue(CGF.EmitAnyExpr(e));
924 void initLValue(const LValue &lv) {
925 CGF.OpaqueLValues.insert(std::make_pair(OpaqueValue, lv));
928 void initRValue(const RValue &rv) {
929 // Work around an extremely aggressive peephole optimization in
930 // EmitScalarConversion which assumes that all other uses of a
932 Protection = CGF.protectFromPeepholes(rv);
933 CGF.OpaqueRValues.insert(std::make_pair(OpaqueValue, rv));
937 /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
938 /// number that holds the value.
939 unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
941 /// BuildBlockByrefAddress - Computes address location of the
942 /// variable which is declared as __block.
943 llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
946 CGDebugInfo *DebugInfo;
948 /// IndirectBranch - The first time an indirect goto is seen we create a block
949 /// with an indirect branch. Every time we see the address of a label taken,
950 /// we add the label to the indirect goto. Every subsequent indirect goto is
951 /// codegen'd as a jump to the IndirectBranch's basic block.
952 llvm::IndirectBrInst *IndirectBranch;
954 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
956 typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
957 DeclMapTy LocalDeclMap;
959 /// LabelMap - This keeps track of the LLVM basic block for each C label.
960 llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
962 // BreakContinueStack - This keeps track of where break and continue
963 // statements should jump to.
964 struct BreakContinue {
965 BreakContinue(JumpDest Break, JumpDest Continue)
966 : BreakBlock(Break), ContinueBlock(Continue) {}
969 JumpDest ContinueBlock;
971 llvm::SmallVector<BreakContinue, 8> BreakContinueStack;
973 /// SwitchInsn - This is nearest current switch instruction. It is null if
974 /// current context is not in a switch.
975 llvm::SwitchInst *SwitchInsn;
977 /// CaseRangeBlock - This block holds if condition check for last case
978 /// statement range in current switch instruction.
979 llvm::BasicBlock *CaseRangeBlock;
981 /// OpaqueLValues - Keeps track of the current set of opaque value
983 llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
984 llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
986 // VLASizeMap - This keeps track of the associated size for each VLA type.
987 // We track this by the size expression rather than the type itself because
988 // in certain situations, like a const qualifier applied to an VLA typedef,
989 // multiple VLA types can share the same size expression.
990 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
991 // enter/leave scopes.
992 llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
994 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
995 /// calling llvm.stacksave for multiple VLAs in the same scope.
996 bool DidCallStackSave;
998 /// A block containing a single 'unreachable' instruction. Created
999 /// lazily by getUnreachableBlock().
1000 llvm::BasicBlock *UnreachableBlock;
1002 /// CXXThisDecl - When generating code for a C++ member function,
1003 /// this will hold the implicit 'this' declaration.
1004 ImplicitParamDecl *CXXThisDecl;
1005 llvm::Value *CXXThisValue;
1007 /// CXXVTTDecl - When generating code for a base object constructor or
1008 /// base object destructor with virtual bases, this will hold the implicit
1010 ImplicitParamDecl *CXXVTTDecl;
1011 llvm::Value *CXXVTTValue;
1013 /// OutermostConditional - Points to the outermost active
1014 /// conditional control. This is used so that we know if a
1015 /// temporary should be destroyed conditionally.
1016 ConditionalEvaluation *OutermostConditional;
1019 /// ByrefValueInfoMap - For each __block variable, contains a pair of the LLVM
1020 /// type as well as the field number that contains the actual data.
1021 llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *,
1022 unsigned> > ByRefValueInfo;
1024 llvm::BasicBlock *TerminateLandingPad;
1025 llvm::BasicBlock *TerminateHandler;
1026 llvm::BasicBlock *TrapBB;
1029 CodeGenFunction(CodeGenModule &cgm);
1031 CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1032 ASTContext &getContext() const;
1033 CGDebugInfo *getDebugInfo() { return DebugInfo; }
1035 const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
1037 /// Returns a pointer to the function's exception object slot, which
1038 /// is assigned in every landing pad.
1039 llvm::Value *getExceptionSlot();
1041 llvm::Value *getNormalCleanupDestSlot();
1042 llvm::Value *getEHCleanupDestSlot();
1044 llvm::BasicBlock *getUnreachableBlock() {
1045 if (!UnreachableBlock) {
1046 UnreachableBlock = createBasicBlock("unreachable");
1047 new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1049 return UnreachableBlock;
1052 llvm::BasicBlock *getInvokeDest() {
1053 if (!EHStack.requiresLandingPad()) return 0;
1054 return getInvokeDestImpl();
1057 llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1059 //===--------------------------------------------------------------------===//
1061 //===--------------------------------------------------------------------===//
1063 void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1065 void StartObjCMethod(const ObjCMethodDecl *MD,
1066 const ObjCContainerDecl *CD);
1068 /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1069 void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1070 const ObjCPropertyImplDecl *PID);
1071 void GenerateObjCGetterBody(ObjCIvarDecl *Ivar, bool IsAtomic, bool IsStrong);
1072 void GenerateObjCAtomicSetterBody(ObjCMethodDecl *OMD,
1073 ObjCIvarDecl *Ivar);
1075 void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1076 ObjCMethodDecl *MD, bool ctor);
1078 /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1079 /// for the given property.
1080 void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1081 const ObjCPropertyImplDecl *PID);
1082 bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
1083 bool IvarTypeWithAggrGCObjects(QualType Ty);
1085 //===--------------------------------------------------------------------===//
1087 //===--------------------------------------------------------------------===//
1089 llvm::Value *EmitBlockLiteral(const BlockExpr *);
1090 llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
1091 const CGBlockInfo &Info,
1092 const llvm::StructType *,
1093 llvm::Constant *BlockVarLayout);
1095 llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1096 const CGBlockInfo &Info,
1097 const Decl *OuterFuncDecl,
1098 const DeclMapTy &ldm);
1100 llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1101 llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1103 llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *,
1104 BlockFieldFlags flags,
1106 llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
1107 BlockFieldFlags flags,
1110 void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
1112 llvm::Value *LoadBlockStruct() {
1113 assert(BlockPointer && "no block pointer set!");
1114 return BlockPointer;
1117 void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
1118 void AllocateBlockDecl(const BlockDeclRefExpr *E);
1119 llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
1120 return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
1122 llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
1123 const llvm::Type *BuildByRefType(const VarDecl *var);
1125 void GenerateCode(GlobalDecl GD, llvm::Function *Fn);
1126 void StartFunction(GlobalDecl GD, QualType RetTy,
1128 const FunctionArgList &Args,
1129 SourceLocation StartLoc);
1131 void EmitConstructorBody(FunctionArgList &Args);
1132 void EmitDestructorBody(FunctionArgList &Args);
1133 void EmitFunctionBody(FunctionArgList &Args);
1135 /// EmitReturnBlock - Emit the unified return block, trying to avoid its
1136 /// emission when possible.
1137 void EmitReturnBlock();
1139 /// FinishFunction - Complete IR generation of the current function. It is
1140 /// legal to call this function even if there is no current insertion point.
1141 void FinishFunction(SourceLocation EndLoc=SourceLocation());
1143 /// GenerateThunk - Generate a thunk for the given method.
1144 void GenerateThunk(llvm::Function *Fn, GlobalDecl GD, const ThunkInfo &Thunk);
1146 void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1147 FunctionArgList &Args);
1149 /// InitializeVTablePointer - Initialize the vtable pointer of the given
1152 void InitializeVTablePointer(BaseSubobject Base,
1153 const CXXRecordDecl *NearestVBase,
1154 uint64_t OffsetFromNearestVBase,
1155 llvm::Constant *VTable,
1156 const CXXRecordDecl *VTableClass);
1158 typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1159 void InitializeVTablePointers(BaseSubobject Base,
1160 const CXXRecordDecl *NearestVBase,
1161 uint64_t OffsetFromNearestVBase,
1162 bool BaseIsNonVirtualPrimaryBase,
1163 llvm::Constant *VTable,
1164 const CXXRecordDecl *VTableClass,
1165 VisitedVirtualBasesSetTy& VBases);
1167 void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1169 /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1171 llvm::Value *GetVTablePtr(llvm::Value *This, const llvm::Type *Ty);
1173 /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1174 /// given phase of destruction for a destructor. The end result
1175 /// should call destructors on members and base classes in reverse
1176 /// order of their construction.
1177 void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1179 /// ShouldInstrumentFunction - Return true if the current function should be
1180 /// instrumented with __cyg_profile_func_* calls
1181 bool ShouldInstrumentFunction();
1183 /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
1184 /// instrumentation function with the current function and the call site, if
1185 /// function instrumentation is enabled.
1186 void EmitFunctionInstrumentation(const char *Fn);
1188 /// EmitMCountInstrumentation - Emit call to .mcount.
1189 void EmitMCountInstrumentation();
1191 /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1192 /// arguments for the given function. This is also responsible for naming the
1193 /// LLVM function arguments.
1194 void EmitFunctionProlog(const CGFunctionInfo &FI,
1196 const FunctionArgList &Args);
1198 /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1199 /// given temporary.
1200 void EmitFunctionEpilog(const CGFunctionInfo &FI);
1202 /// EmitStartEHSpec - Emit the start of the exception spec.
1203 void EmitStartEHSpec(const Decl *D);
1205 /// EmitEndEHSpec - Emit the end of the exception spec.
1206 void EmitEndEHSpec(const Decl *D);
1208 /// getTerminateLandingPad - Return a landing pad that just calls terminate.
1209 llvm::BasicBlock *getTerminateLandingPad();
1211 /// getTerminateHandler - Return a handler (not a landing pad, just
1212 /// a catch handler) that just calls terminate. This is used when
1213 /// a terminate scope encloses a try.
1214 llvm::BasicBlock *getTerminateHandler();
1216 const llvm::Type *ConvertTypeForMem(QualType T);
1217 const llvm::Type *ConvertType(QualType T);
1218 const llvm::Type *ConvertType(const TypeDecl *T) {
1219 return ConvertType(getContext().getTypeDeclType(T));
1222 /// LoadObjCSelf - Load the value of self. This function is only valid while
1223 /// generating code for an Objective-C method.
1224 llvm::Value *LoadObjCSelf();
1226 /// TypeOfSelfObject - Return type of object that this self represents.
1227 QualType TypeOfSelfObject();
1229 /// hasAggregateLLVMType - Return true if the specified AST type will map into
1230 /// an aggregate LLVM type or is void.
1231 static bool hasAggregateLLVMType(QualType T);
1233 /// createBasicBlock - Create an LLVM basic block.
1234 llvm::BasicBlock *createBasicBlock(llvm::StringRef name = "",
1235 llvm::Function *parent = 0,
1236 llvm::BasicBlock *before = 0) {
1238 return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
1240 return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
1244 /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
1246 JumpDest getJumpDestForLabel(const LabelDecl *S);
1248 /// SimplifyForwardingBlocks - If the given basic block is only a branch to
1249 /// another basic block, simplify it. This assumes that no other code could
1250 /// potentially reference the basic block.
1251 void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
1253 /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
1254 /// adding a fall-through branch from the current insert block if
1255 /// necessary. It is legal to call this function even if there is no current
1256 /// insertion point.
1258 /// IsFinished - If true, indicates that the caller has finished emitting
1259 /// branches to the given block and does not expect to emit code into it. This
1260 /// means the block can be ignored if it is unreachable.
1261 void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
1263 /// EmitBranch - Emit a branch to the specified basic block from the current
1264 /// insert block, taking care to avoid creation of branches from dummy
1265 /// blocks. It is legal to call this function even if there is no current
1266 /// insertion point.
1268 /// This function clears the current insertion point. The caller should follow
1269 /// calls to this function with calls to Emit*Block prior to generation new
1271 void EmitBranch(llvm::BasicBlock *Block);
1273 /// HaveInsertPoint - True if an insertion point is defined. If not, this
1274 /// indicates that the current code being emitted is unreachable.
1275 bool HaveInsertPoint() const {
1276 return Builder.GetInsertBlock() != 0;
1279 /// EnsureInsertPoint - Ensure that an insertion point is defined so that
1280 /// emitted IR has a place to go. Note that by definition, if this function
1281 /// creates a block then that block is unreachable; callers may do better to
1282 /// detect when no insertion point is defined and simply skip IR generation.
1283 void EnsureInsertPoint() {
1284 if (!HaveInsertPoint())
1285 EmitBlock(createBasicBlock());
1288 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1289 /// specified stmt yet.
1290 void ErrorUnsupported(const Stmt *S, const char *Type,
1291 bool OmitOnError=false);
1293 //===--------------------------------------------------------------------===//
1295 //===--------------------------------------------------------------------===//
1297 LValue MakeAddrLValue(llvm::Value *V, QualType T, unsigned Alignment = 0) {
1298 return LValue::MakeAddr(V, T, Alignment, getContext(),
1299 CGM.getTBAAInfo(T));
1302 /// CreateTempAlloca - This creates a alloca and inserts it into the entry
1303 /// block. The caller is responsible for setting an appropriate alignment on
1305 llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
1306 const llvm::Twine &Name = "tmp");
1308 /// InitTempAlloca - Provide an initial value for the given alloca.
1309 void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
1311 /// CreateIRTemp - Create a temporary IR object of the given type, with
1312 /// appropriate alignment. This routine should only be used when an temporary
1313 /// value needs to be stored into an alloca (for example, to avoid explicit
1314 /// PHI construction), but the type is the IR type, not the type appropriate
1315 /// for storing in memory.
1316 llvm::AllocaInst *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp");
1318 /// CreateMemTemp - Create a temporary memory object of the given type, with
1319 /// appropriate alignment.
1320 llvm::AllocaInst *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
1322 /// CreateAggTemp - Create a temporary memory object for the given
1324 AggValueSlot CreateAggTemp(QualType T, const llvm::Twine &Name = "tmp") {
1325 return AggValueSlot::forAddr(CreateMemTemp(T, Name), false, false);
1328 /// Emit a cast to void* in the appropriate address space.
1329 llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
1331 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
1332 /// expression and compare the result against zero, returning an Int1Ty value.
1333 llvm::Value *EvaluateExprAsBool(const Expr *E);
1335 /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
1336 void EmitIgnoredExpr(const Expr *E);
1338 /// EmitAnyExpr - Emit code to compute the specified expression which can have
1339 /// any type. The result is returned as an RValue struct. If this is an
1340 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
1341 /// the result should be returned.
1343 /// \param IgnoreResult - True if the resulting value isn't used.
1344 RValue EmitAnyExpr(const Expr *E,
1345 AggValueSlot AggSlot = AggValueSlot::ignored(),
1346 bool IgnoreResult = false);
1348 // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
1349 // or the value of the expression, depending on how va_list is defined.
1350 llvm::Value *EmitVAListRef(const Expr *E);
1352 /// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will
1353 /// always be accessible even if no aggregate location is provided.
1354 RValue EmitAnyExprToTemp(const Expr *E);
1356 /// EmitsAnyExprToMem - Emits the code necessary to evaluate an
1357 /// arbitrary expression into the given memory location.
1358 void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
1359 bool IsLocationVolatile,
1360 bool IsInitializer);
1362 /// EmitAggregateCopy - Emit an aggrate copy.
1364 /// \param isVolatile - True iff either the source or the destination is
1366 void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1367 QualType EltTy, bool isVolatile=false);
1369 /// StartBlock - Start new block named N. If insert block is a dummy block
1371 void StartBlock(const char *N);
1373 /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
1374 llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD) {
1375 return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
1378 /// GetAddrOfLocalVar - Return the address of a local variable.
1379 llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
1380 llvm::Value *Res = LocalDeclMap[VD];
1381 assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
1385 /// getOpaqueLValueMapping - Given an opaque value expression (which
1386 /// must be mapped to an l-value), return its mapping.
1387 const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
1388 assert(OpaqueValueMapping::shouldBindAsLValue(e));
1390 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
1391 it = OpaqueLValues.find(e);
1392 assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
1396 /// getOpaqueRValueMapping - Given an opaque value expression (which
1397 /// must be mapped to an r-value), return its mapping.
1398 const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
1399 assert(!OpaqueValueMapping::shouldBindAsLValue(e));
1401 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
1402 it = OpaqueRValues.find(e);
1403 assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
1407 /// getAccessedFieldNo - Given an encoded value and a result number, return
1408 /// the input field number being accessed.
1409 static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
1411 llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
1412 llvm::BasicBlock *GetIndirectGotoBlock();
1414 /// EmitNullInitialization - Generate code to set a value of the given type to
1415 /// null, If the type contains data member pointers, they will be initialized
1416 /// to -1 in accordance with the Itanium C++ ABI.
1417 void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
1419 // EmitVAArg - Generate code to get an argument from the passed in pointer
1420 // and update it accordingly. The return value is a pointer to the argument.
1421 // FIXME: We should be able to get rid of this method and use the va_arg
1422 // instruction in LLVM instead once it works well enough.
1423 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
1425 /// EmitVLASize - Generate code for any VLA size expressions that might occur
1426 /// in a variably modified type. If Ty is a VLA, will return the value that
1427 /// corresponds to the size in bytes of the VLA type. Will return 0 otherwise.
1429 /// This function can be called with a null (unreachable) insert point.
1430 llvm::Value *EmitVLASize(QualType Ty);
1432 // GetVLASize - Returns an LLVM value that corresponds to the size in bytes
1433 // of a variable length array type.
1434 llvm::Value *GetVLASize(const VariableArrayType *);
1436 /// LoadCXXThis - Load the value of 'this'. This function is only valid while
1437 /// generating code for an C++ member function.
1438 llvm::Value *LoadCXXThis() {
1439 assert(CXXThisValue && "no 'this' value for this function");
1440 return CXXThisValue;
1443 /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors have
1445 llvm::Value *LoadCXXVTT() {
1446 assert(CXXVTTValue && "no VTT value for this function");
1450 /// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
1451 /// complete class to the given direct base.
1453 GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
1454 const CXXRecordDecl *Derived,
1455 const CXXRecordDecl *Base,
1456 bool BaseIsVirtual);
1458 /// GetAddressOfBaseClass - This function will add the necessary delta to the
1459 /// load of 'this' and returns address of the base class.
1460 llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
1461 const CXXRecordDecl *Derived,
1462 CastExpr::path_const_iterator PathBegin,
1463 CastExpr::path_const_iterator PathEnd,
1464 bool NullCheckValue);
1466 llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
1467 const CXXRecordDecl *Derived,
1468 CastExpr::path_const_iterator PathBegin,
1469 CastExpr::path_const_iterator PathEnd,
1470 bool NullCheckValue);
1472 llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
1473 const CXXRecordDecl *ClassDecl,
1474 const CXXRecordDecl *BaseClassDecl);
1476 void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
1477 CXXCtorType CtorType,
1478 const FunctionArgList &Args);
1479 void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
1480 bool ForVirtualBase, llvm::Value *This,
1481 CallExpr::const_arg_iterator ArgBeg,
1482 CallExpr::const_arg_iterator ArgEnd);
1484 void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
1485 llvm::Value *This, llvm::Value *Src,
1486 CallExpr::const_arg_iterator ArgBeg,
1487 CallExpr::const_arg_iterator ArgEnd);
1489 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1490 const ConstantArrayType *ArrayTy,
1491 llvm::Value *ArrayPtr,
1492 CallExpr::const_arg_iterator ArgBeg,
1493 CallExpr::const_arg_iterator ArgEnd,
1494 bool ZeroInitialization = false);
1496 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1497 llvm::Value *NumElements,
1498 llvm::Value *ArrayPtr,
1499 CallExpr::const_arg_iterator ArgBeg,
1500 CallExpr::const_arg_iterator ArgEnd,
1501 bool ZeroInitialization = false);
1503 void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
1504 const ArrayType *Array,
1507 void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
1508 llvm::Value *NumElements,
1511 llvm::Function *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
1512 const ArrayType *Array,
1515 void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
1516 bool ForVirtualBase, llvm::Value *This);
1518 void EmitNewArrayInitializer(const CXXNewExpr *E, llvm::Value *NewPtr,
1519 llvm::Value *NumElements);
1521 void EmitCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
1523 llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
1524 void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
1526 void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
1529 llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
1530 llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
1532 void EmitCheck(llvm::Value *, unsigned Size);
1534 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
1535 bool isInc, bool isPre);
1536 ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1537 bool isInc, bool isPre);
1538 //===--------------------------------------------------------------------===//
1539 // Declaration Emission
1540 //===--------------------------------------------------------------------===//
1542 /// EmitDecl - Emit a declaration.
1544 /// This function can be called with a null (unreachable) insert point.
1545 void EmitDecl(const Decl &D);
1547 /// EmitVarDecl - Emit a local variable declaration.
1549 /// This function can be called with a null (unreachable) insert point.
1550 void EmitVarDecl(const VarDecl &D);
1552 typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
1553 llvm::Value *Address);
1555 /// EmitAutoVarDecl - Emit an auto variable declaration.
1557 /// This function can be called with a null (unreachable) insert point.
1558 void EmitAutoVarDecl(const VarDecl &D, SpecialInitFn *SpecialInit = 0);
1560 void EmitStaticVarDecl(const VarDecl &D,
1561 llvm::GlobalValue::LinkageTypes Linkage);
1563 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
1564 void EmitParmDecl(const VarDecl &D, llvm::Value *Arg);
1566 /// protectFromPeepholes - Protect a value that we're intending to
1567 /// store to the side, but which will probably be used later, from
1568 /// aggressive peepholing optimizations that might delete it.
1570 /// Pass the result to unprotectFromPeepholes to declare that
1571 /// protection is no longer required.
1573 /// There's no particular reason why this shouldn't apply to
1574 /// l-values, it's just that no existing peepholes work on pointers.
1575 PeepholeProtection protectFromPeepholes(RValue rvalue);
1576 void unprotectFromPeepholes(PeepholeProtection protection);
1578 //===--------------------------------------------------------------------===//
1579 // Statement Emission
1580 //===--------------------------------------------------------------------===//
1582 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
1583 void EmitStopPoint(const Stmt *S);
1585 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
1586 /// this function even if there is no current insertion point.
1588 /// This function may clear the current insertion point; callers should use
1589 /// EnsureInsertPoint if they wish to subsequently generate code without first
1590 /// calling EmitBlock, EmitBranch, or EmitStmt.
1591 void EmitStmt(const Stmt *S);
1593 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
1594 /// necessarily require an insertion point or debug information; typically
1595 /// because the statement amounts to a jump or a container of other
1598 /// \return True if the statement was handled.
1599 bool EmitSimpleStmt(const Stmt *S);
1601 RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
1602 AggValueSlot AVS = AggValueSlot::ignored());
1604 /// EmitLabel - Emit the block for the given label. It is legal to call this
1605 /// function even if there is no current insertion point.
1606 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
1608 void EmitLabelStmt(const LabelStmt &S);
1609 void EmitGotoStmt(const GotoStmt &S);
1610 void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
1611 void EmitIfStmt(const IfStmt &S);
1612 void EmitWhileStmt(const WhileStmt &S);
1613 void EmitDoStmt(const DoStmt &S);
1614 void EmitForStmt(const ForStmt &S);
1615 void EmitReturnStmt(const ReturnStmt &S);
1616 void EmitDeclStmt(const DeclStmt &S);
1617 void EmitBreakStmt(const BreakStmt &S);
1618 void EmitContinueStmt(const ContinueStmt &S);
1619 void EmitSwitchStmt(const SwitchStmt &S);
1620 void EmitDefaultStmt(const DefaultStmt &S);
1621 void EmitCaseStmt(const CaseStmt &S);
1622 void EmitCaseStmtRange(const CaseStmt &S);
1623 void EmitAsmStmt(const AsmStmt &S);
1625 void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
1626 void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
1627 void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
1628 void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
1630 llvm::Constant *getUnwindResumeOrRethrowFn();
1631 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1632 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1634 void EmitCXXTryStmt(const CXXTryStmt &S);
1636 //===--------------------------------------------------------------------===//
1637 // LValue Expression Emission
1638 //===--------------------------------------------------------------------===//
1640 /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
1641 RValue GetUndefRValue(QualType Ty);
1643 /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
1644 /// and issue an ErrorUnsupported style diagnostic (using the
1646 RValue EmitUnsupportedRValue(const Expr *E,
1649 /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
1650 /// an ErrorUnsupported style diagnostic (using the provided Name).
1651 LValue EmitUnsupportedLValue(const Expr *E,
1654 /// EmitLValue - Emit code to compute a designator that specifies the location
1655 /// of the expression.
1657 /// This can return one of two things: a simple address or a bitfield
1658 /// reference. In either case, the LLVM Value* in the LValue structure is
1659 /// guaranteed to be an LLVM pointer type.
1661 /// If this returns a bitfield reference, nothing about the pointee type of
1662 /// the LLVM value is known: For example, it may not be a pointer to an
1665 /// If this returns a normal address, and if the lvalue's C type is fixed
1666 /// size, this method guarantees that the returned pointer type will point to
1667 /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
1668 /// variable length type, this is not possible.
1670 LValue EmitLValue(const Expr *E);
1672 /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
1673 /// checking code to guard against undefined behavior. This is only
1674 /// suitable when we know that the address will be used to access the
1676 LValue EmitCheckedLValue(const Expr *E);
1678 /// EmitToMemory - Change a scalar value from its value
1679 /// representation to its in-memory representation.
1680 llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
1682 /// EmitFromMemory - Change a scalar value from its memory
1683 /// representation to its value representation.
1684 llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
1686 /// EmitLoadOfScalar - Load a scalar value from an address, taking
1687 /// care to appropriately convert from the memory representation to
1688 /// the LLVM value representation.
1689 llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
1690 unsigned Alignment, QualType Ty,
1691 llvm::MDNode *TBAAInfo = 0);
1693 /// EmitStoreOfScalar - Store a scalar value to an address, taking
1694 /// care to appropriately convert from the memory representation to
1695 /// the LLVM value representation.
1696 void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
1697 bool Volatile, unsigned Alignment, QualType Ty,
1698 llvm::MDNode *TBAAInfo = 0);
1700 /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
1701 /// this method emits the address of the lvalue, then loads the result as an
1702 /// rvalue, returning the rvalue.
1703 RValue EmitLoadOfLValue(LValue V, QualType LVType);
1704 RValue EmitLoadOfExtVectorElementLValue(LValue V, QualType LVType);
1705 RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType);
1706 RValue EmitLoadOfPropertyRefLValue(LValue LV,
1707 ReturnValueSlot Return = ReturnValueSlot());
1709 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
1710 /// lvalue, where both are guaranteed to the have the same type, and that type
1712 void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty);
1713 void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst,
1715 void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst);
1717 /// EmitStoreThroughLValue - Store Src into Dst with same constraints as
1718 /// EmitStoreThroughLValue.
1720 /// \param Result [out] - If non-null, this will be set to a Value* for the
1721 /// bit-field contents after the store, appropriate for use as the result of
1722 /// an assignment to the bit-field.
1723 void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty,
1724 llvm::Value **Result=0);
1726 /// Emit an l-value for an assignment (simple or compound) of complex type.
1727 LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
// --- LValue emission -------------------------------------------------------
// Each method below computes an l-value (address plus qualifiers) for one
// specific expression kind; implementations live in the CGExpr*.cpp files.
1728 LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
1730 // Note: only available for agg return types
1731 LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
1732 LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
1733 // Note: only available for agg return types
1734 LValue EmitCallExprLValue(const CallExpr *E);
1735 // Note: only available for agg return types
1736 LValue EmitVAArgExprLValue(const VAArgExpr *E);
1737 LValue EmitDeclRefLValue(const DeclRefExpr *E);
1738 LValue EmitStringLiteralLValue(const StringLiteral *E);
1739 LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
1740 LValue EmitPredefinedLValue(const PredefinedExpr *E);
1741 LValue EmitUnaryOpLValue(const UnaryOperator *E);
1742 LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
1743 LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
1744 LValue EmitMemberExpr(const MemberExpr *E);
1745 LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
1746 LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
1747 LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
1748 LValue EmitCastLValue(const CastExpr *E);
1749 LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
1750 LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
// EmitIvarOffset - presumably computes the byte offset of Ivar within
// Interface via the ObjC runtime; confirm against CGObjC*.cpp.
1752 llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
1753 const ObjCIvarDecl *Ivar);
// Member-access helpers: given a base address, produce an l-value for the
// named field/ivar with the given CVR qualifiers applied to the result.
1754 LValue EmitLValueForAnonRecordField(llvm::Value* Base,
1755 const IndirectFieldDecl* Field,
1756 unsigned CVRQualifiers);
1757 LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
1758 unsigned CVRQualifiers);
1760 /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
1761 /// if the Field is a reference, this will return the address of the reference
1762 /// and not the address of the value stored in the reference.
1763 LValue EmitLValueForFieldInitialization(llvm::Value* Base,
1764 const FieldDecl* Field,
1765 unsigned CVRQualifiers);
1767 LValue EmitLValueForIvar(QualType ObjectTy,
1768 llvm::Value* Base, const ObjCIvarDecl *Ivar,
1769 unsigned CVRQualifiers);
// Like EmitLValueForField, but for bit-field members.
1771 LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
1772 unsigned CVRQualifiers);
1774 LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E);
// C++-specific l-value emitters.
1776 LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
1777 LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
1778 LValue EmitExprWithCleanupsLValue(const ExprWithCleanups *E);
1779 LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
// Objective-C-specific l-value emitters.
1781 LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
1782 LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
1783 LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);
1784 LValue EmitStmtExprLValue(const StmtExpr *E);
1785 LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
1786 LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
// NOTE(review): name suggests this emits debug info for a DeclRefExpr whose
// referent has the constant initializer Init — confirm in the implementation.
1787 void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
1789 //===--------------------------------------------------------------------===//
1790 // Scalar Expression Emission
1791 //===--------------------------------------------------------------------===//
1793 /// EmitCall - Generate a call of the given function, expecting the given
1794 /// result type, and using the given argument list which specifies both the
1795 /// LLVM arguments and the types they were derived from.
1797 /// \param TargetDecl - If given, the decl of the function in a direct call;
1798 /// used to set attributes on the call (noreturn, etc.).
/// \param callOrInvoke - If non-null, receives the emitted call/invoke
/// instruction so the caller can inspect or annotate it.
1799 RValue EmitCall(const CGFunctionInfo &FnInfo,
1800 llvm::Value *Callee,
1801 ReturnValueSlot ReturnValue,
1802 const CallArgList &Args,
1803 const Decl *TargetDecl = 0,
1804 llvm::Instruction **callOrInvoke = 0);
// Overload that derives the CGFunctionInfo from the function type and an
// argument range taken directly from a CallExpr.
1806 RValue EmitCall(QualType FnType, llvm::Value *Callee,
1807 ReturnValueSlot ReturnValue,
1808 CallExpr::const_arg_iterator ArgBeg,
1809 CallExpr::const_arg_iterator ArgEnd,
1810 const Decl *TargetDecl = 0);
1811 RValue EmitCallExpr(const CallExpr *E,
1812 ReturnValueSlot ReturnValue = ReturnValueSlot());
// Emits either a call or an invoke instruction for Callee over the given
// argument array (presumably depending on whether EH cleanups are active —
// confirm in CGCall.cpp); returns the resulting call site.
1814 llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
1815 llvm::Value * const *ArgBegin,
1816 llvm::Value * const *ArgEnd,
1817 const llvm::Twine &Name = "");
// Virtual-call helpers: build the callee Value (vtable load) for a virtual
// method or destructor invocation on the object pointer This.
1819 llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
1820 const llvm::Type *Ty);
1821 llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
1822 llvm::Value *This, const llvm::Type *Ty);
// Apple-kext variants of the virtual-call builders (kext vtables use a
// different dispatch scheme).
1823 llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
1824 NestedNameSpecifier *Qual,
1825 const llvm::Type *Ty);
1827 llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
1829 const CXXRecordDecl *RD);
// C++ member-call emitters.
1831 RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
1832 llvm::Value *Callee,
1833 ReturnValueSlot ReturnValue,
1836 CallExpr::const_arg_iterator ArgBeg,
1837 CallExpr::const_arg_iterator ArgEnd);
1838 RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
1839 ReturnValueSlot ReturnValue);
1840 RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
1841 ReturnValueSlot ReturnValue);
1843 RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
1844 const CXXMethodDecl *MD,
1845 ReturnValueSlot ReturnValue);
// Builtin and block-call emitters.
1848 RValue EmitBuiltinExpr(const FunctionDecl *FD,
1849 unsigned BuiltinID, const CallExpr *E);
1851 RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
1853 /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
1854 /// is unhandled by the current target.
1855 llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
// Per-target builtin emitters (ARM/NEON, x86, PPC), dispatched to by
// EmitTargetBuiltinExpr.
1857 llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
1858 llvm::Value *EmitNeonCall(llvm::Function *F,
1859 llvm::SmallVectorImpl<llvm::Value*> &O,
1861 unsigned shift = 0, bool rightshift = false);
1862 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
1863 llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty,
1864 bool negateForRightShift);
1866 llvm::Value *BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops);
1867 llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
1868 llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
// Objective-C expression emitters.
1870 llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
1871 llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
1872 llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
1873 RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
1874 ReturnValueSlot Return = ReturnValueSlot());
1876 /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
1877 /// expression. Will emit a temporary variable if E is not an LValue.
1878 RValue EmitReferenceBindingToExpr(const Expr* E,
1879 const NamedDecl *InitializedDecl);
1881 //===--------------------------------------------------------------------===//
1882 // Expression Emission
1883 //===--------------------------------------------------------------------===//
1885 // Expressions are broken into three classes: scalar, complex, aggregate.
1887 /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
1888 /// scalar type, returning the result.
1889 llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
1891 /// EmitScalarConversion - Emit a conversion from the specified type to the
1892 /// specified destination type, both of which are LLVM scalar types.
1893 llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
1896 /// EmitComplexToScalarConversion - Emit a conversion from the specified
1897 /// complex type to the specified destination type, where the destination type
1898 /// is an LLVM scalar type.
1899 llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
1903 /// EmitAggExpr - Emit the computation of the specified expression
1904 /// of aggregate type. The result is computed into the given slot,
1905 /// which may be null to indicate that the value is not needed.
1906 void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
1908 /// EmitAggExprToLValue - Emit the computation of the specified expression of
1909 /// aggregate type into a temporary LValue.
1910 LValue EmitAggExprToLValue(const Expr *E);
1912 /// EmitGCMemmoveCollectable - Emit special API for structs with object
// (i.e. a GC-aware memmove for aggregates containing collectable pointers).
1914 void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1917 /// EmitComplexExpr - Emit the computation of the specified expression of
1918 /// complex type, returning the result.
1919 ComplexPairTy EmitComplexExpr(const Expr *E,
1920 bool IgnoreReal = false,
1921 bool IgnoreImag = false);
1923 /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
1924 /// of complex type, storing into the specified Value*.
1925 void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
1926 bool DestIsVolatile);
1928 /// StoreComplexToAddr - Store a complex number into the specified address.
1929 void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
1930 bool DestIsVolatile);
1931 /// LoadComplexFromAddr - Load a complex number from the specified address.
1932 ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
1934 /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
1935 /// a static local variable.
// Separator is used when mangling the global's name; Linkage selects the
// LLVM linkage of the created global.
1936 llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
1937 const char *Separator,
1938 llvm::GlobalValue::LinkageTypes Linkage);
1940 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
1941 /// global variable that has already been created for it. If the initializer
1942 /// has a different type than GV does, this may free GV and return a different
1943 /// one. Otherwise it just returns GV.
1944 llvm::GlobalVariable *
1945 AddInitializerToStaticVarDecl(const VarDecl &D,
1946 llvm::GlobalVariable *GV);
1949 /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
1950 /// variable with global storage.
1951 void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr);
1953 /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
1954 /// with the C++ runtime so that its destructor will be called at exit.
1955 void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
1956 llvm::Constant *DeclPtr);
1958 /// Emit code in this function to perform a guarded variable
1959 /// initialization. Guarded initializations are used when it's not
1960 /// possible to prove that an initialization will be done exactly
1961 /// once, e.g. with a static local variable or a static data member
1962 /// of a class template.
1963 void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr);
1965 /// GenerateCXXGlobalInitFunc - Generates code for initializing global
// variables: fills Fn with calls to the per-variable init functions in Decls.
1967 void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
1968 llvm::Constant **Decls,
1971 /// GenerateCXXGlobalDtorFunc - Generates code for destroying global
// variables: Fn invokes each (dtor, object) pair in DtorsAndObjects.
1973 void GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
1974 const std::vector<std::pair<llvm::WeakVH,
1975 llvm::Constant*> > &DtorsAndObjects);
// Emits the body of the initializer function Fn for a single global
// variable D stored at Addr.
1977 void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D,
1978 llvm::GlobalVariable *Addr);
1980 void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
1982 void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
1985 RValue EmitExprWithCleanups(const ExprWithCleanups *E,
1986 AggValueSlot Slot =AggValueSlot::ignored());
1988 void EmitCXXThrowExpr(const CXXThrowExpr *E);
1990 //===--------------------------------------------------------------------===//
1992 //===--------------------------------------------------------------------===//
1994 /// ContainsLabel - Return true if the statement contains a label in it. If
1995 /// this statement is not executed normally, it not containing a label means
1996 /// that we can just remove the code.
1997 static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
1999 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2000 /// to a constant, or if it does but contains a label, return 0. If it
2001 /// constant folds to 'true' and does not contain a label, return 1, if it
2002 /// constant folds to 'false' and does not contain a label, return -1.
2003 int ConstantFoldsToSimpleInteger(const Expr *Cond);
2005 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
2006 /// if statement) to the specified blocks. Based on the condition, this might
2007 /// try to simplify the codegen of the conditional based on the branch.
2008 void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
2009 llvm::BasicBlock *FalseBlock);
2011 /// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
2012 /// generate a branch around the created basic block as necessary.
2013 llvm::BasicBlock *getTrapBB();
2015 /// EmitCallArg - Emit a single call argument.
2016 RValue EmitCallArg(const Expr *E, QualType ArgType);
2018 /// EmitDelegateCallArg - We are performing a delegate call; that
2019 /// is, the current function is delegating to another one. Produce
2020 /// a r-value suitable for passing the given parameter.
2021 RValue EmitDelegateCallArg(const VarDecl *Param);
// Emits a return of the given r-value RV of type Ty from the current function.
2024 void EmitReturnOfRValue(RValue RV, QualType Ty);
2026 /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
2027 /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
2029 /// \param AI - The first function argument of the expansion.
2030 /// \return The argument following the last expanded function
// argument.
2032 llvm::Function::arg_iterator
2033 ExpandTypeFromArgs(QualType Ty, LValue Dst,
2034 llvm::Function::arg_iterator AI);
2036 /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
2037 /// Ty, into individual arguments on the provided vector \arg Args. See
2038 /// ABIArgInfo::Expand.
2039 void ExpandTypeToArgs(QualType Ty, RValue Src,
2040 llvm::SmallVector<llvm::Value*, 16> &Args);
// Inline-asm helpers: emit one asm operand under the given constraint,
// appending the constraint text to ConstraintStr.
2042 llvm::Value* EmitAsmInput(const AsmStmt &S,
2043 const TargetInfo::ConstraintInfo &Info,
2044 const Expr *InputExpr, std::string &ConstraintStr);
// Variant taking an already-emitted l-value operand of the given type.
2046 llvm::Value* EmitAsmInputLValue(const AsmStmt &S,
2047 const TargetInfo::ConstraintInfo &Info,
2048 LValue InputValue, QualType InputType,
2049 std::string &ConstraintStr);
2051 /// EmitCallArgs - Emit call arguments for a function.
2052 /// The CallArgTypeInfo parameter is used for iterating over the known
2053 /// argument types of the function being called.
///
/// T must expose arg_type_begin()/arg_type_end() (yielding QualType) and
/// isVariadic(); a null CallArgTypeInfo means no prototype information is
/// available and every argument is emitted with its own expression type.
2054 template<typename T>
2055 void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
2056 CallExpr::const_arg_iterator ArgBeg,
2057 CallExpr::const_arg_iterator ArgEnd) {
2058 CallExpr::const_arg_iterator Arg = ArgBeg;
2060 // First, use the argument types that the type info knows about
2061 if (CallArgTypeInfo) {
2062 for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
2063 E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
2064 assert(Arg != ArgEnd && "Running over edge of argument list!");
2065 QualType ArgType = *I;
// Sanity-check that the actual argument's type matches the declared
// parameter type.  Pointers to variable arrays get a pass: if the actual
// pointee is a VLA with no size expression, fall back to the declared
// parameter type so the canonical-type assert below doesn't fire.
2067 QualType ActualArgType = Arg->getType();
2068 if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
2069 QualType ActualBaseType =
2070 ActualArgType->getAs<PointerType>()->getPointeeType();
2071 QualType ArgBaseType =
2072 ArgType->getAs<PointerType>()->getPointeeType();
2073 if (ArgBaseType->isVariableArrayType()) {
2074 if (const VariableArrayType *VAT =
2075 getContext().getAsVariableArrayType(ActualBaseType)) {
2076 if (!VAT->getSizeExpr())
2077 ActualArgType = ArgType;
// Compare canonical types, stripping any reference from the declared
// parameter type first.
2081 assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
2083 getContext().getCanonicalType(ActualArgType).getTypePtr() &&
2084 "type mismatch in call argument!");
// Emit the argument with its declared (prototype) type.
2086 Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
2090 // Either we've emitted all the call args, or we have a call to a
2091 // variadic function.
2092 assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
2093 "Extra arguments in non-variadic function!");
2097 // If we still have any arguments, emit them using the type of the argument.
2098 for (; Arg != ArgEnd; ++Arg) {
2099 QualType ArgType = Arg->getType();
2100 Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
/// getTargetHooks - Access the target-specific codegen hooks owned by the
/// CodeGenModule (CGM).
2105 const TargetCodeGenInfo &getTargetHooks() const {
2106 return CGM.getTargetCodeGenInfo();
// Emits metadata for declarations; see the implementation for what is
// attached and when.
2109 void EmitDeclMetadata();
2112 /// Helper class with most of the code for saving a value for a
2113 /// conditional expression cleanup.
2114 struct DominatingLLVMValue {
// Saved representation: the value (or an alloca holding it) plus one bit
// recording whether the pointer is an alloca that must be reloaded.
2115 typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
2117 /// Answer whether the given value needs extra work to be saved.
2118 static bool needsSaving(llvm::Value *value) {
2119 // If it's not an instruction, we don't need to save.
2120 if (!isa<llvm::Instruction>(value)) return false;
2122 // If it's an instruction in the entry block, we don't need to save.
2123 llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
2124 return (block != &block->getParent()->getEntryBlock());
2127 /// Try to save the given value.
2128 static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
2129 if (!needsSaving(value)) return saved_type(value, false);
2131 // Otherwise we need an alloca.
2132 llvm::Value *alloca =
2133 CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
2134 CGF.Builder.CreateStore(value, alloca);
2136 return saved_type(alloca, true);
/// Recover the saved value: either the original value directly (flag bit
/// clear) or a load from the spill alloca (flag bit set).
2139 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
2140 if (!value.getInt()) return value.getPointer();
2141 return CGF.Builder.CreateLoad(value.getPointer());
2145 /// A partial specialization of DominatingValue for llvm::Values that
2146 /// might be llvm::Instructions.
2147 template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
// Restore via the base class, then cast the generic llvm::Value* back to
// the statically known pointer type T*.
2149 static type restore(CodeGenFunction &CGF, saved_type value) {
2150 return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
2154 /// A specialization of DominatingValue for RValue.
2155 template <> struct DominatingValue<RValue> {
2156 typedef RValue type;
// Kind records how the RValue was saved: as a literal value or as an
// address to reload from, for each of the scalar/aggregate/complex classes.
2158 enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
2159 AggregateAddress, ComplexAddress };
2163 saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
2166 static bool needsSaving(RValue value);
2167 static saved_type save(CodeGenFunction &CGF, RValue value);
2168 RValue restore(CodeGenFunction &CGF);
2170 // implementations in CGExprCXX.cpp
// Static interface required by DominatingValue users; each forwards to the
// nested saved_type above.
2173 static bool needsSaving(type value) {
2174 return saved_type::needsSaving(value);
2176 static saved_type save(CodeGenFunction &CGF, type value) {
2177 return saved_type::save(CGF, value);
2179 static type restore(CodeGenFunction &CGF, saved_type value) {
2180 return value.restore(CGF);
2184 } // end namespace CodeGen
2185 } // end namespace clang