//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Itanium C++ ABI.  The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
//  http://www.codesourcery.com/public/cxx-abi/abi.html
//  http://www.codesourcery.com/public/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
//
//===----------------------------------------------------------------------===//
22 #include "CGCleanup.h"
23 #include "CGRecordLayout.h"
24 #include "CGVTables.h"
25 #include "CodeGenFunction.h"
26 #include "CodeGenModule.h"
27 #include "TargetInfo.h"
28 #include "clang/CodeGen/ConstantInitBuilder.h"
29 #include "clang/AST/Mangle.h"
30 #include "clang/AST/Type.h"
31 #include "clang/AST/StmtCXX.h"
32 #include "llvm/IR/CallSite.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
38 using namespace clang;
39 using namespace CodeGen;
42 class ItaniumCXXABI : public CodeGen::CGCXXABI {
43 /// VTables - All the vtables which have been defined.
44 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
47 bool UseARMMethodPtrABI;
48 bool UseARMGuardVarABI;
49 bool Use32BitVTableOffsetABI;
51 ItaniumMangleContext &getMangleContext() {
52 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
56 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
57 bool UseARMMethodPtrABI = false,
58 bool UseARMGuardVarABI = false) :
59 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
60 UseARMGuardVarABI(UseARMGuardVarABI),
61 Use32BitVTableOffsetABI(false) { }
63 bool classifyReturnType(CGFunctionInfo &FI) const override;
65 bool passClassIndirect(const CXXRecordDecl *RD) const {
66 // Clang <= 4 used the pre-C++11 rule, which ignores move operations.
67 // The PS4 platform ABI follows the behavior of Clang 3.2.
68 if (CGM.getCodeGenOpts().getClangABICompat() <=
69 CodeGenOptions::ClangABI::Ver4 ||
70 CGM.getTriple().getOS() == llvm::Triple::PS4)
71 return RD->hasNonTrivialDestructor() ||
72 RD->hasNonTrivialCopyConstructor();
73 return !canCopyArgument(RD);
76 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
77 // If C++ prohibits us from making a copy, pass by address.
78 if (passClassIndirect(RD))
83 bool isThisCompleteObject(GlobalDecl GD) const override {
84 // The Itanium ABI has separate complete-object vs. base-object
85 // variants of both constructors and destructors.
86 if (isa<CXXDestructorDecl>(GD.getDecl())) {
87 switch (GD.getDtorType()) {
96 llvm_unreachable("emitting dtor comdat as function?");
98 llvm_unreachable("bad dtor kind");
100 if (isa<CXXConstructorDecl>(GD.getDecl())) {
101 switch (GD.getCtorType()) {
108 case Ctor_CopyingClosure:
109 case Ctor_DefaultClosure:
110 llvm_unreachable("closure ctors in Itanium ABI?");
113 llvm_unreachable("emitting ctor comdat as function?");
115 llvm_unreachable("bad dtor kind");
122 bool isZeroInitializable(const MemberPointerType *MPT) override;
124 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
127 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
130 llvm::Value *&ThisPtrForCall,
131 llvm::Value *MemFnPtr,
132 const MemberPointerType *MPT) override;
135 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
138 const MemberPointerType *MPT) override;
140 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
142 llvm::Value *Src) override;
143 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
144 llvm::Constant *Src) override;
146 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
148 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
149 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
150 CharUnits offset) override;
151 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
152 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
153 CharUnits ThisAdjustment);
155 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
156 llvm::Value *L, llvm::Value *R,
157 const MemberPointerType *MPT,
158 bool Inequality) override;
160 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
162 const MemberPointerType *MPT) override;
164 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
165 Address Ptr, QualType ElementType,
166 const CXXDestructorDecl *Dtor) override;
168 /// Itanium says that an _Unwind_Exception has to be "double-word"
169 /// aligned (and thus the end of it is also so-aligned), meaning 16
170 /// bytes. Of course, that was written for the actual Itanium,
171 /// which is a 64-bit platform. Classically, the ABI doesn't really
172 /// specify the alignment on other platforms, but in practice
173 /// libUnwind declares the struct with __attribute__((aligned)), so
174 /// we assume that alignment here. (It's generally 16 bytes, but
175 /// some targets overwrite it.)
176 CharUnits getAlignmentOfExnObject() {
177 auto align = CGM.getContext().getTargetDefaultAlignForAttributeAligned();
178 return CGM.getContext().toCharUnitsFromBits(align);
181 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
182 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
184 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
187 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
188 llvm::Value *Exn) override;
190 void EmitFundamentalRTTIDescriptor(QualType Type, bool DLLExport);
191 void EmitFundamentalRTTIDescriptors(bool DLLExport);
192 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
194 getAddrOfCXXCatchHandlerType(QualType Ty,
195 QualType CatchHandlerType) override {
196 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
199 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
200 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
201 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
203 llvm::Type *StdTypeInfoPtrTy) override;
205 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
206 QualType SrcRecordTy) override;
208 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
209 QualType SrcRecordTy, QualType DestTy,
210 QualType DestRecordTy,
211 llvm::BasicBlock *CastEnd) override;
213 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
214 QualType SrcRecordTy,
215 QualType DestTy) override;
217 bool EmitBadCastCall(CodeGenFunction &CGF) override;
220 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
221 const CXXRecordDecl *ClassDecl,
222 const CXXRecordDecl *BaseClassDecl) override;
224 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
227 buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
228 SmallVectorImpl<CanQualType> &ArgTys) override;
230 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
231 CXXDtorType DT) const override {
232 // Itanium does not emit any destructor variant as an inline thunk.
233 // Delegating may occur as an optimization, but all variants are either
234 // emitted with external linkage or as linkonce if they are inline and used.
238 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
240 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
241 FunctionArgList &Params) override;
243 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
246 addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
247 CXXCtorType Type, bool ForVirtualBase,
248 bool Delegating, CallArgList &Args) override;
250 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
251 CXXDtorType Type, bool ForVirtualBase,
252 bool Delegating, Address This) override;
254 void emitVTableDefinitions(CodeGenVTables &CGVT,
255 const CXXRecordDecl *RD) override;
257 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
258 CodeGenFunction::VPtr Vptr) override;
260 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
265 getVTableAddressPoint(BaseSubobject Base,
266 const CXXRecordDecl *VTableClass) override;
268 llvm::Value *getVTableAddressPointInStructor(
269 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
270 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
272 llvm::Value *getVTableAddressPointInStructorWithVTT(
273 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
274 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
277 getVTableAddressPointForConstExpr(BaseSubobject Base,
278 const CXXRecordDecl *VTableClass) override;
280 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
281 CharUnits VPtrOffset) override;
283 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
284 Address This, llvm::Type *Ty,
285 SourceLocation Loc) override;
287 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
288 const CXXDestructorDecl *Dtor,
289 CXXDtorType DtorType,
291 const CXXMemberCallExpr *CE) override;
293 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
295 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
297 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
298 bool ReturnAdjustment) override {
299 // Allow inlining of thunks by emitting them with available_externally
300 // linkage together with vtables when needed.
301 if (ForVTable && !Thunk->hasLocalLinkage())
302 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
304 // Propagate dllexport storage, to enable the linker to generate import
305 // thunks as necessary (e.g. when a parent class has a key function and a
306 // child class doesn't, and the construction vtable for the parent in the
307 // child needs to reference the parent's thunks).
308 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
309 if (MD->hasAttr<DLLExportAttr>())
310 Thunk->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
313 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
314 const ThisAdjustment &TA) override;
316 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
317 const ReturnAdjustment &RA) override;
319 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
320 FunctionArgList &Args) const override {
321 assert(!Args.empty() && "expected the arglist to not be empty!");
322 return Args.size() - 1;
325 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
326 StringRef GetDeletedVirtualCallName() override
327 { return "__cxa_deleted_virtual"; }
329 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
330 Address InitializeArrayCookie(CodeGenFunction &CGF,
332 llvm::Value *NumElements,
333 const CXXNewExpr *expr,
334 QualType ElementType) override;
335 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
337 CharUnits cookieSize) override;
339 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
340 llvm::GlobalVariable *DeclPtr,
341 bool PerformInit) override;
342 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
343 llvm::Constant *dtor, llvm::Constant *addr) override;
345 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
347 void EmitThreadLocalInitFuncs(
349 ArrayRef<const VarDecl *> CXXThreadLocals,
350 ArrayRef<llvm::Function *> CXXThreadLocalInits,
351 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
353 bool usesThreadWrapperFunction() const override { return true; }
354 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
355 QualType LValType) override;
357 bool NeedsVTTParameter(GlobalDecl GD) override;
359 /**************************** RTTI Uniqueness ******************************/
362 /// Returns true if the ABI requires RTTI type_info objects to be unique
363 /// across a program.
364 virtual bool shouldRTTIBeUnique() const { return true; }
367 /// What sort of unique-RTTI behavior should we use?
368 enum RTTIUniquenessKind {
369 /// We are guaranteeing, or need to guarantee, that the RTTI string
373 /// We are not guaranteeing uniqueness for the RTTI string, so we
374 /// can demote to hidden visibility but must use string comparisons.
377 /// We are not guaranteeing uniqueness for the RTTI string, so we
378 /// have to use string comparisons, but we also have to emit it with
379 /// non-hidden visibility.
383 /// Return the required visibility status for the given type and linkage in
386 classifyRTTIUniqueness(QualType CanTy,
387 llvm::GlobalValue::LinkageTypes Linkage) const;
388 friend class ItaniumRTTIBuilder;
390 void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
392 std::pair<llvm::Value *, const CXXRecordDecl *>
393 LoadVTablePtr(CodeGenFunction &CGF, Address This,
394 const CXXRecordDecl *RD) override;
397 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
398 const auto &VtableLayout =
399 CGM.getItaniumVTableContext().getVTableLayout(RD);
401 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
403 if (!VtableComponent.isUsedFunctionPointerKind())
406 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
407 if (!Method->getCanonicalDecl()->isInlined())
410 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
411 auto *Entry = CGM.GetGlobalValue(Name);
412 // This checks if virtual inline function has already been emitted.
413 // Note that it is possible that this inline function would be emitted
414 // after trying to emit vtable speculatively. Because of this we do
415 // an extra pass after emitting all deferred vtables to find and emit
416 // these vtables opportunistically.
417 if (!Entry || Entry->isDeclaration())
423 bool isVTableHidden(const CXXRecordDecl *RD) const {
424 const auto &VtableLayout =
425 CGM.getItaniumVTableContext().getVTableLayout(RD);
427 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
428 if (VtableComponent.isRTTIKind()) {
429 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
430 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
432 } else if (VtableComponent.isUsedFunctionPointerKind()) {
433 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
434 if (Method->getVisibility() == Visibility::HiddenVisibility &&
435 !Method->isDefined())
443 class ARMCXXABI : public ItaniumCXXABI {
445 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
446 ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
447 /* UseARMGuardVarABI = */ true) {}
449 bool HasThisReturn(GlobalDecl GD) const override {
450 return (isa<CXXConstructorDecl>(GD.getDecl()) || (
451 isa<CXXDestructorDecl>(GD.getDecl()) &&
452 GD.getDtorType() != Dtor_Deleting));
455 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
456 QualType ResTy) override;
458 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
459 Address InitializeArrayCookie(CodeGenFunction &CGF,
461 llvm::Value *NumElements,
462 const CXXNewExpr *expr,
463 QualType ElementType) override;
464 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
465 CharUnits cookieSize) override;
468 class iOS64CXXABI : public ARMCXXABI {
470 iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
471 Use32BitVTableOffsetABI = true;
474 // ARM64 libraries are prepared for non-unique RTTI.
475 bool shouldRTTIBeUnique() const override { return false; }
478 class WebAssemblyCXXABI final : public ItaniumCXXABI {
480 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
481 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
482 /*UseARMGuardVarABI=*/true) {}
485 bool HasThisReturn(GlobalDecl GD) const override {
486 return isa<CXXConstructorDecl>(GD.getDecl()) ||
487 (isa<CXXDestructorDecl>(GD.getDecl()) &&
488 GD.getDtorType() != Dtor_Deleting);
490 bool canCallMismatchedFunctionType() const override { return false; }
494 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
495 switch (CGM.getTarget().getCXXABI().getKind()) {
496 // For IR-generation purposes, there's no significant difference
497 // between the ARM and iOS ABIs.
498 case TargetCXXABI::GenericARM:
499 case TargetCXXABI::iOS:
500 case TargetCXXABI::WatchOS:
501 return new ARMCXXABI(CGM);
503 case TargetCXXABI::iOS64:
504 return new iOS64CXXABI(CGM);
506 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
507 // include the other 32-bit ARM oddities: constructor/destructor return values
508 // and array cookies.
509 case TargetCXXABI::GenericAArch64:
510 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
511 /* UseARMGuardVarABI = */ true);
513 case TargetCXXABI::GenericMIPS:
514 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true);
516 case TargetCXXABI::WebAssembly:
517 return new WebAssemblyCXXABI(CGM);
519 case TargetCXXABI::GenericItanium:
520 if (CGM.getContext().getTargetInfo().getTriple().getArch()
521 == llvm::Triple::le32) {
522 // For PNaCl, use ARM-style method pointers so that PNaCl code
523 // does not assume anything about the alignment of function
525 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
526 /* UseARMGuardVarABI = */ false);
528 return new ItaniumCXXABI(CGM);
530 case TargetCXXABI::Microsoft:
531 llvm_unreachable("Microsoft ABI is not Itanium-based");
533 llvm_unreachable("bad ABI kind");
537 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
538 if (MPT->isMemberDataPointer())
539 return CGM.PtrDiffTy;
540 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
/// In the Itanium and ARM ABIs, method pointers have the form:
///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
///
/// In the Itanium ABI:
///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
///  - the this-adjustment is (memptr.adj)
///  - the virtual offset is (memptr.ptr - 1)
///
/// In the ARM ABI:
///  - method pointers are virtual if (memptr.adj & 1) is nonzero
///  - the this-adjustment is (memptr.adj >> 1)
///  - the virtual offset is (memptr.ptr)
/// ARM uses 'adj' for the virtual flag because Thumb functions
/// may be only single-byte aligned.
///
/// If the member is virtual, the adjusted 'this' pointer points
/// to a vtable pointer from which the virtual offset is applied.
///
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
563 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
564 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
565 llvm::Value *&ThisPtrForCall,
566 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
567 CGBuilderTy &Builder = CGF.Builder;
569 const FunctionProtoType *FPT =
570 MPT->getPointeeType()->getAs<FunctionProtoType>();
571 const CXXRecordDecl *RD =
572 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
574 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
575 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
577 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
579 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
580 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
581 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
583 // Extract memptr.adj, which is in the second field.
584 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
586 // Compute the true adjustment.
587 llvm::Value *Adj = RawAdj;
588 if (UseARMMethodPtrABI)
589 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
591 // Apply the adjustment and cast back to the original struct type
593 llvm::Value *This = ThisAddr.getPointer();
594 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
595 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
596 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
597 ThisPtrForCall = This;
599 // Load the function pointer.
600 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
602 // If the LSB in the function pointer is 1, the function pointer points to
603 // a virtual function.
604 llvm::Value *IsVirtual;
605 if (UseARMMethodPtrABI)
606 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
608 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
609 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
610 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
612 // In the virtual path, the adjustment left 'This' pointing to the
613 // vtable of the correct base subobject. The "function pointer" is an
614 // offset within the vtable (+1 for the virtual flag on non-ARM).
615 CGF.EmitBlock(FnVirtual);
617 // Cast the adjusted this to a pointer to vtable pointer and load.
618 llvm::Type *VTableTy = Builder.getInt8PtrTy();
619 CharUnits VTablePtrAlign =
620 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
621 CGF.getPointerAlign());
622 llvm::Value *VTable =
623 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
626 // On ARM64, to reserve extra space in virtual member function pointers,
627 // we only pay attention to the low 32 bits of the offset.
628 llvm::Value *VTableOffset = FnAsInt;
629 if (!UseARMMethodPtrABI)
630 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
631 if (Use32BitVTableOffsetABI) {
632 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
633 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
635 VTable = Builder.CreateGEP(VTable, VTableOffset);
637 // Load the virtual function to call.
638 VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
639 llvm::Value *VirtualFn =
640 Builder.CreateAlignedLoad(VTable, CGF.getPointerAlign(),
642 CGF.EmitBranch(FnEnd);
644 // In the non-virtual path, the function pointer is actually a
646 CGF.EmitBlock(FnNonVirtual);
647 llvm::Value *NonVirtualFn =
648 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
651 CGF.EmitBlock(FnEnd);
652 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
653 CalleePtr->addIncoming(VirtualFn, FnVirtual);
654 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
656 CGCallee Callee(FPT, CalleePtr);
660 /// Compute an l-value by applying the given pointer-to-member to a
662 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
663 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
664 const MemberPointerType *MPT) {
665 assert(MemPtr->getType() == CGM.PtrDiffTy);
667 CGBuilderTy &Builder = CGF.Builder;
670 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
672 // Apply the offset, which we assume is non-null.
674 Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
676 // Cast the address to the appropriate pointer type, adopting the
677 // address space of the base pointer.
678 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
679 ->getPointerTo(Base.getAddressSpace());
680 return Builder.CreateBitCast(Addr, PType);
/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///         <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased; and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
707 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
710 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
711 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
712 E->getCastKind() == CK_ReinterpretMemberPointer);
714 // Under Itanium, reinterprets don't require any additional processing.
715 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
717 // Use constant emission if we can.
718 if (isa<llvm::Constant>(src))
719 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
721 llvm::Constant *adj = getMemberPointerAdjustment(E);
722 if (!adj) return src;
724 CGBuilderTy &Builder = CGF.Builder;
725 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
727 const MemberPointerType *destTy =
728 E->getType()->castAs<MemberPointerType>();
730 // For member data pointers, this is just a matter of adding the
731 // offset if the source is non-null.
732 if (destTy->isMemberDataPointer()) {
735 dst = Builder.CreateNSWSub(src, adj, "adj");
737 dst = Builder.CreateNSWAdd(src, adj, "adj");
740 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
741 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
742 return Builder.CreateSelect(isNull, src, dst);
745 // The this-adjustment is left-shifted by 1 on ARM.
746 if (UseARMMethodPtrABI) {
747 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
749 adj = llvm::ConstantInt::get(adj->getType(), offset);
752 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
755 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
757 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
759 return Builder.CreateInsertValue(src, dstAdj, 1);
763 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
764 llvm::Constant *src) {
765 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
766 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
767 E->getCastKind() == CK_ReinterpretMemberPointer);
769 // Under Itanium, reinterprets don't require any additional processing.
770 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
772 // If the adjustment is trivial, we don't need to do anything.
773 llvm::Constant *adj = getMemberPointerAdjustment(E);
774 if (!adj) return src;
776 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
778 const MemberPointerType *destTy =
779 E->getType()->castAs<MemberPointerType>();
781 // For member data pointers, this is just a matter of adding the
782 // offset if the source is non-null.
783 if (destTy->isMemberDataPointer()) {
784 // null maps to null.
785 if (src->isAllOnesValue()) return src;
788 return llvm::ConstantExpr::getNSWSub(src, adj);
790 return llvm::ConstantExpr::getNSWAdd(src, adj);
793 // The this-adjustment is left-shifted by 1 on ARM.
794 if (UseARMMethodPtrABI) {
795 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
797 adj = llvm::ConstantInt::get(adj->getType(), offset);
800 llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
801 llvm::Constant *dstAdj;
803 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
805 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
807 return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
811 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
812 // Itanium C++ ABI 2.3:
813 // A NULL pointer is represented as -1.
814 if (MPT->isMemberDataPointer())
815 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
817 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
818 llvm::Constant *Values[2] = { Zero, Zero };
819 return llvm::ConstantStruct::getAnon(Values);
823 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
825 // Itanium C++ ABI 2.3:
826 // A pointer to data member is an offset from the base address of
827 // the class object containing it, represented as a ptrdiff_t
828 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
832 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
833 return BuildMemberPointer(MD, CharUnits::Zero());
836 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
837 CharUnits ThisAdjustment) {
838 assert(MD->isInstance() && "Member function must not be static!");
839 MD = MD->getCanonicalDecl();
841 CodeGenTypes &Types = CGM.getTypes();
843 // Get the function pointer (or index if this is a virtual function).
844 llvm::Constant *MemPtr[2];
845 if (MD->isVirtual()) {
846 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
848 const ASTContext &Context = getContext();
849 CharUnits PointerWidth =
850 Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
851 uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
853 if (UseARMMethodPtrABI) {
854 // ARM C++ ABI 3.2.1:
855 // This ABI specifies that adj contains twice the this
856 // adjustment, plus 1 if the member function is virtual. The
857 // least significant bit of adj then makes exactly the same
858 // discrimination as the least significant bit of ptr does for
860 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
861 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
862 2 * ThisAdjustment.getQuantity() + 1);
864 // Itanium C++ ABI 2.3:
865 // For a virtual function, [the pointer field] is 1 plus the
866 // virtual table offset (in bytes) of the function,
867 // represented as a ptrdiff_t.
868 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
869 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
870 ThisAdjustment.getQuantity());
873 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
875 // Check whether the function has a computable LLVM signature.
876 if (Types.isFuncTypeConvertible(FPT)) {
877 // The function has a computable LLVM signature; use the correct type.
878 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
880 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
881 // function type is incomplete.
884 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
886 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
887 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
888 (UseARMMethodPtrABI ? 2 : 1) *
889 ThisAdjustment.getQuantity());
892 return llvm::ConstantStruct::getAnon(MemPtr);
895 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
897 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
898 const ValueDecl *MPD = MP.getMemberPointerDecl();
900 return EmitNullMemberPointer(MPT);
902 CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
904 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
905 return BuildMemberPointer(MD, ThisAdjustment);
907 CharUnits FieldOffset =
908 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
909 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
/// Emit an equality/inequality comparison of two member pointers.
///
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           const MemberPointerType *MPT,
  CGBuilderTy &Builder = CGF.Builder;

  // Pick the predicate and the combining operators. For the inequality
  // form the roles of And/Or are swapped, per De Morgan. (NOTE(review):
  // the if/else selecting between the two assignments below appears
  // truncated in this excerpt.)
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  // Values for the inequality form:
  Eq = llvm::ICmpInst::ICMP_NE;
  And = llvm::Instruction::Or;
  Or = llvm::Instruction::And;
  // Values for the equality form:
  Eq = llvm::ICmpInst::ICMP_EQ;
  And = llvm::Instruction::And;
  Or = llvm::Instruction::Or;

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr. This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null. ARM imposes an extra
  // condition on top of this (handled below).
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj. If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
    // Fold the low-bit test into the null condition.
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
/// Emit the boolean conversion of a member pointer: true iff it is not
/// null. Member data pointers use -1 as the null value (0 is a valid
/// field offset); member function pointers are null when 'ptr' is zero.
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  /// For member data pointers, this is just a check against -1.
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
        llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    // Non-null if either the function pointer or the virtual bit is set.
    Result = Builder.CreateOr(Result, IsVirtual);
/// Classify a function's return type for the ABI: class types that C++
/// forbids copying trivially must be returned indirectly through a hidden
/// sret pointer rather than in registers.
bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  // If C++ prohibits us from making a copy, return by address.
  if (passClassIndirect(RD)) {
    // Indirect return, not byval: caller provides suitably aligned storage.
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset; their null value
/// is -1. Member function pointers use an all-zero null representation.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
/// Emit a virtual `delete` of an object. The Itanium ABI always places an
/// offset to the complete object at entry -2 in the vtable, which lets us
/// recover the complete-object pointer required by ::operator delete.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
      cast<CXXRecordDecl>(ElementType->getAs<RecordType>()->getDecl());
    llvm::Value *VTable =
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset =
        CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr =
        CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
    CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws (cleanup is popped below).
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  // With a global delete the complete destructor runs here and the
  // deallocation happens via the cleanup; otherwise the vtable's deleting
  // destructor performs both destruction and deallocation.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
/// Emit a call to __cxa_rethrow for a `throw;` expression.
void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
  // void __cxa_rethrow();

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);

  llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");

  // The two emission forms below are the isNoReturn / normal arms.
  // (NOTE(review): the guarding if/else appears truncated in this excerpt.)
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
    CGF.EmitRuntimeCallOrInvoke(Fn);
1102 static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
1103 // void *__cxa_allocate_exception(size_t thrown_size);
1105 llvm::FunctionType *FTy =
1106 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);
1108 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1111 static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
1112 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1113 // void (*dest) (void *));
1115 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1116 llvm::FunctionType *FTy =
1117 llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);
1119 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
/// Emit a `throw <expr>` as: allocate the exception object with
/// __cxa_allocate_exception, copy-initialize it from the operand, then
/// call __cxa_throw with the object, its RTTI, and (if non-trivial) the
/// address of the complete-object destructor.
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  // Construct the thrown value into the freshly allocated storage.
  CharUnits ExnAlign = getAlignmentOfExnObject();
  CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,

  // The address of the destructor. If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  // __cxa_throw never returns.
  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1156 static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1157 // void *__dynamic_cast(const void *sub,
1158 // const abi::__class_type_info *src,
1159 // const abi::__class_type_info *dst,
1160 // std::ptrdiff_t src2dst_offset);
1162 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1163 llvm::Type *PtrDiffTy =
1164 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1166 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1168 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1170 // Mark the function as nounwind readonly.
1171 llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1172 llvm::Attribute::ReadOnly };
1173 llvm::AttributeList Attrs = llvm::AttributeList::get(
1174 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1176 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1179 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1180 // void __cxa_bad_cast();
1181 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1182 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
/// \brief Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7].
///
/// Returns the static Src-to-Dst offset when Src is a unique public
/// non-virtual base of Dst, or one of the special negative values the
/// runtime understands: -1 (virtual base on some path), -2 (not a public
/// base), -3 (multiple public non-virtual bases).
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst. Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public) // Ignore non-public inheritance.

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: Offset is unknown.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
1237 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1238 // void __cxa_bad_typeid();
1239 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1241 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
/// Whether the operand of a polymorphic typeid must be null-checked
/// before use. (NOTE(review): the function body is elided in this
/// excerpt.)
bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
                                              QualType SrcRecordTy) {
1249 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1250 llvm::Value *Fn = getBadTypeidFn(CGF);
1251 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1252 CGF.Builder.CreateUnreachable();
/// Emit a polymorphic typeid: the std::type_info pointer lives at entry
/// -1 of the vtable, so load the vptr and read the slot just before it.
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       llvm::Type *StdTypeInfoPtrTy) {
    cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
  // Grab the vtable pointer as a std::type_info**.
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);

  // Load the type info.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
/// Whether the operand of a dynamic_cast runtime call must be checked
/// for null before calling __dynamic_cast. (NOTE(review): the function
/// body is elided in this excerpt.)
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
/// Emit a checked dynamic_cast as a call to __dynamic_cast, passing the
/// source/destination RTTI and the src2dst offset hint; for casts to a
/// reference type, a null result branches to a __cxa_bad_cast call.
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    // A null result means the cast failed; throw instead of returning it.
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
/// Emit dynamic_cast<void*>: no runtime call is needed; the offset to
/// the complete object is stored at vtable entry -2 (offset-to-top), so
/// we load it and add it to the source pointer.
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  QualType SrcRecordTy,
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

    cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());

  // Get the vtable pointer.
  llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),

  // Get the offset-to-top from the vtable.
  llvm::Value *OffsetToTop =
      CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),

  // Finally, add the offset to the pointer.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

  return CGF.Builder.CreateBitCast(Value, DestLTy);
/// Emit the failure path of a reference dynamic_cast: call
/// __cxa_bad_cast (which throws std::bad_cast and never returns) and
/// terminate the current block.
bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
/// Load the offset of a virtual base from the object's vtable. The
/// vbase-offset slot sits at a statically known (negative) offset from
/// the address point; its value is a ptrdiff_t to add to 'this'.
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  // Byte offset of the vbase-offset slot relative to the address point.
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,

  // Index into the vtable and reinterpret the slot as a ptrdiff_t*.
  llvm::Value *VBaseOffsetPtr =
      CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
                                     "vbase.offset.ptr");
  VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                             CGM.PtrDiffTy->getPointerTo());

  llvm::Value *VBaseOffset =
      CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
1376 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1377 // Just make sure we're in sync with TargetCXXABI.
1378 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1380 // The constructor used for constructing this as a base class;
1381 // ignores virtual bases.
1382 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1384 // The constructor used for constructing this as a complete class;
1385 // constructs the virtual bases, then calls the base constructor.
1386 if (!D->getParent()->isAbstract()) {
1387 // We don't need to emit the complete ctor if the class is abstract.
1388 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1392 CGCXXABI::AddedStructorArgs
1393 ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
1394 SmallVectorImpl<CanQualType> &ArgTys) {
1395 ASTContext &Context = getContext();
1397 // All parameters are already in place except VTT, which goes after 'this'.
1398 // These are Clang types, so we don't need to worry about sret yet.
1400 // Check if we need to add a VTT parameter (which has type void **).
1401 if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0) {
1402 ArgTys.insert(ArgTys.begin() + 1,
1403 Context.getPointerType(Context.VoidPtrTy));
1404 return AddedStructorArgs::prefix(1);
1406 return AddedStructorArgs{};
/// Emit the destructor variants the Itanium ABI requires for \p D.
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // call the base destructor and then destructs any virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete. (NOTE(review): the guard before this
  // emission appears truncated in this excerpt.)
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
/// Add the implicit VTT parameter to the IR-level parameter list of a
/// constructor or destructor that needs one, right after 'this'.
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    // The VTT has type void** and is modeled with an implicit param decl.
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamDecl::CXXVTT);
    // Insert right after 'this' and remember it for the prolog.
    Params.insert(Params.begin() + 1, VTTDecl);
    getStructorImplicitParamDecl(CGF) = VTTDecl;
/// Emit the ABI-specific prolog for an instance method: load 'this' (and
/// 'vtt' when present) into their slots, and pre-store 'this' into the
/// return slot for this-returning functions.
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
/// Add the implicit VTT argument to a constructor call's argument list,
/// when the callee variant needs one. Returns how many prefix arguments
/// were added (one, or none).
CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
    bool ForVirtualBase, bool Delegating, CallArgList &Args) {
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
    return AddedStructorArgs{};

  // Insert the implicit 'vtt' argument as the second argument.
    CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
  Args.insert(Args.begin() + 1,
              CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
  return AddedStructorArgs::prefix(1); // Added one arg.
/// Emit a direct (non-virtual) call to a particular destructor variant,
/// passing the VTT argument when the callee requires one. Apple kext
/// mode routes virtual non-base destructors through its own vtable
/// lookup scheme instead of a direct call.
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This) {
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  // kext builds cannot devirtualize; use the kext virtual-call path.
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
      CGCallee::forDirect(CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),

  CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
                                  This.getPointer(), VTT, VTTTy,
/// Emit the vtable (and, for the magic __fundamental_type_info class,
/// the fundamental-type RTTI) definition for \p RD, setting linkage,
/// comdat, visibility, alignment, and type metadata.
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  // Already emitted; nothing to do.
  if (VTable->hasInitializer())

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder Builder(CGM);
  auto Components = Builder.beginStruct();
  CGVT.createVTableInitializer(Components, VTLayout, RTTI);
  Components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGlobalVisibility(VTable, RD, ForDefinition);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read from it.
  unsigned PAlign = CGM.getTarget().getPointerAlign(0);
  VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD->hasAttr<DLLExportAttr>());

  // Attach !type metadata for CFI / whole-program devirtualization.
  if (!VTable->isDeclarationForLinker())
    CGM.EmitVTableTypeMetadata(VTable, VTLayout);
/// Whether initializing this vptr requires indirecting through a virtual
/// base offset: only when the vptr belongs to a virtual base and the
/// current structor receives a VTT.
bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
    CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
  if (Vptr.NearestVBase == nullptr)
  return NeedsVTTParameter(CGF.CurGD);
/// Get the vtable address point to store into a vptr while inside a
/// constructor or destructor: classes with virtual bases being built as
/// subobjects must read it from the VTT; otherwise the constant address
/// point of the class's own vtable is used.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
      NeedsVTTParameter(CGF.CurGD)) {
    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
  return getVTableAddressPoint(Base, VTableClass);
/// Compute the constant address point of \p VTableClass's vtable for the
/// given base subobject, as an in-bounds GEP into the vtable group.
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  // GEP indices: struct (0), vtable within the group, slot within it.
  llvm::Value *Indices[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, 0),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),

  // inrange on index 1 tells LLVM accesses stay within this one vtable.
  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
1598 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1599 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1600 const CXXRecordDecl *NearestVBase) {
1601 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1602 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1604 // Get the secondary vpointer index.
1605 uint64_t VirtualPointerIndex =
1606 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1609 llvm::Value *VTT = CGF.LoadCXXVTT();
1610 if (VirtualPointerIndex)
1611 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1613 // And load the address point from the VTT.
1614 return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
/// Constant-expression variant: outside a structor the address point is
/// always the class's own constant one, so just forward.
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  return getVTableAddressPoint(Base, VTableClass);
/// Get (creating and caching if necessary) the global variable holding
/// \p RD's vtable group. Creation mangles the vtable name, queues it for
/// possible deferred emission, and applies visibility / DLL storage.
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  // Cached per-record; a hit returns the existing global. (NOTE(review):
  // the early return on a cache hit appears truncated in this excerpt.)
  llvm::GlobalVariable *&VTable = VTables[RD];

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage);
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  CGM.setGlobalVisibility(VTable, RD, NotForDefinition);

  if (RD->hasAttr<DLLImportAttr>())
    VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
  else if (RD->hasAttr<DLLExportAttr>())
    VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
/// Load a virtual function pointer for \p GD out of the object's vtable,
/// either through the CFI type-checked load intrinsic or a plain indexed
/// load (optionally tagged !invariant.load for devirtualization).
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  SourceLocation Loc) {
  GD = GD.getCanonicalDecl();
  // View the vtable as an array of pointers to functions of type Ty.
  Ty = Ty->getPointerTo()->getPointerTo();
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI path: the intrinsic checks the vtable's type and loads the slot
    // (byte offset = slot index * pointer size).
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable,
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncPtr =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
        CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers)
      VFuncLoad->setMetadata(
          llvm::LLVMContext::MD_invariant_load,
          llvm::MDNode::get(CGM.getLLVMContext(),
                            llvm::ArrayRef<llvm::Metadata *>()));

  CGCallee Callee(MethodDecl, VFunc);
/// Emit a virtual destructor call (complete or deleting variant) by
/// loading the callee from the vtable and calling it with 'this'.
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, const CXXMemberCallExpr *CE) {
  // Destructor calls carry no explicit arguments, and only the complete
  // and deleting variants appear in vtables.
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
      Dtor, getFromDtorType(DtorType));
  llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
      getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
                                CE ? CE->getLocStart() : SourceLocation());

  CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
                                  This.getPointer(), /*ImplicitParam=*/nullptr,
                                  QualType(), CE, nullptr);
1716 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1717 CodeGenVTables &VTables = CGM.getVTables();
1718 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1719 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
/// Whether an available_externally copy of \p RD's vtable may be emitted
/// speculatively to aid devirtualization.
bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
  // We don't emit available_externally vtables if we are in -fapple-kext mode
  // because kext mode does not permit devirtualization.
  if (CGM.getLangOpts().AppleKext)

  // If we don't have any not emitted inline virtual function, and if vtable is
  // not hidden, then we are safe to emit available_externally copy of vtable.
  // FIXME we can still emit a copy of the vtable if we
  // can emit definition of the inline functions.
  return !hasAnyUnusedVirtualInlineFunction(RD) && !isVTableHidden(RD);
/// Apply a this/return pointer adjustment consisting of a static
/// (non-virtual) byte offset and/or a virtual offset read from the
/// vtable. For this-adjustments the non-virtual part is applied before
/// the virtual one; for return adjustments, after.
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  // Nothing to adjust.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  // Do all arithmetic on an i8* view of the pointer.
  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
                                  CharUnits::fromQuantity(NonVirtualAdjustment));

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    llvm::Type *PtrDiffTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Load the vtable pointer from the (possibly already adjusted) object.
    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    // The adjustment offset lives at a fixed byte offset in the vtable.
    llvm::Value *OffsetPtr =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);

    OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

    // Load the adjustment offset from the vtable.
    llvm::Value *Offset =
        CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());

    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
    // No virtual adjustment: keep the statically adjusted pointer.
    ResultPtr = V.getPointer();

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied after the virtual one.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
                                                       NonVirtualAdjustment);

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
/// Adjust 'this' on entry to a thunk: static offset plus (optionally) a
/// vcall offset read from the vtable.
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  const ThisAdjustment &TA) {
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               TA.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
/// Adjust a covariant return value in a thunk: (optional) vbase offset
/// from the vtable, then the static offset.
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
1801 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
1802 RValue RV, QualType ResultType) {
1803 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
1804 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
1806 // Destructor thunks in the ARM ABI have indeterminate results.
1807 llvm::Type *T = CGF.ReturnValue.getElementType();
1808 RValue Undef = RValue::get(llvm::UndefValue::get(T));
1809 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
1812 /************************** Array allocation cookies **************************/
1814 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1815 // The array cookie is a size_t; pad that up to the element alignment.
1816 // The cookie is actually right-justified in that space.
1817 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
1818 CGM.getContext().getTypeAlignInChars(elementType));
1821 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1823 llvm::Value *NumElements,
1824 const CXXNewExpr *expr,
1825 QualType ElementType) {
1826 assert(requiresArrayCookie(expr));
1828 unsigned AS = NewPtr.getAddressSpace();
1830 ASTContext &Ctx = getContext();
1831 CharUnits SizeSize = CGF.getSizeSize();
1833 // The size of the cookie.
1834 CharUnits CookieSize =
1835 std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
1836 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
1838 // Compute an offset to the cookie.
1839 Address CookiePtr = NewPtr;
1840 CharUnits CookieOffset = CookieSize - SizeSize;
1841 if (!CookieOffset.isZero())
1842 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
1844 // Write the number of elements into the appropriate slot.
1845 Address NumElementsPtr =
1846 CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
1847 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
1849 // Handle the array cookie specially in ASan.
1850 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
1851 expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
1852 // The store to the CookiePtr does not need to be instrumented.
1853 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
1854 llvm::FunctionType *FTy =
1855 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
1857 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
1858 CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
1861 // Finally, compute a pointer to the actual data buffer by skipping
1862 // over the cookie completely.
1863 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
1866 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1868 CharUnits cookieSize) {
1869 // The element size is right-justified in the cookie.
1870 Address numElementsPtr = allocPtr;
1871 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
1872 if (!numElementsOffset.isZero())
1874 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
1876 unsigned AS = allocPtr.getAddressSpace();
1877 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1878 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
1879 return CGF.Builder.CreateLoad(numElementsPtr);
1880 // In asan mode emit a function call instead of a regular load and let the
1881 // run-time deal with it: if the shadow is properly poisoned return the
1882 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
1883 // We can't simply ignore this load using nosanitize metadata because
1884 // the metadata may be lost.
1885 llvm::FunctionType *FTy =
1886 llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
1888 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
1889 return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
1892 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1893 // ARM says that the cookie is always:
1894 // struct array_cookie {
1895 // std::size_t element_size; // element_size != 0
1896 // std::size_t element_count;
1898 // But the base ABI doesn't give anything an alignment greater than
1899 // 8, so we can dismiss this as typical ABI-author blindness to
1900 // actual language complexity and round up to the element alignment.
1901 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
1902 CGM.getContext().getTypeAlignInChars(elementType));
1905 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1907 llvm::Value *numElements,
1908 const CXXNewExpr *expr,
1909 QualType elementType) {
1910 assert(requiresArrayCookie(expr));
1912 // The cookie is always at the start of the buffer.
1913 Address cookie = newPtr;
1915 // The first element is the element size.
1916 cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
1917 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
1918 getContext().getTypeSizeInChars(elementType).getQuantity());
1919 CGF.Builder.CreateStore(elementSize, cookie);
1921 // The second element is the element count.
1922 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
1923 CGF.Builder.CreateStore(numElements, cookie);
1925 // Finally, compute a pointer to the actual data buffer by skipping
1926 // over the cookie completely.
1927 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
1928 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
1931 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1933 CharUnits cookieSize) {
1934 // The number of elements is at offset sizeof(size_t) relative to
1935 // the allocated pointer.
1936 Address numElementsPtr
1937 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
1939 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1940 return CGF.Builder.CreateLoad(numElementsPtr);
1943 /*********************** Static local initialization **************************/
1945 static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
1946 llvm::PointerType *GuardPtrTy) {
1947 // int __cxa_guard_acquire(__guard *guard_object);
1948 llvm::FunctionType *FTy =
1949 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
1950 GuardPtrTy, /*isVarArg=*/false);
1951 return CGM.CreateRuntimeFunction(
1952 FTy, "__cxa_guard_acquire",
1953 llvm::AttributeList::get(CGM.getLLVMContext(),
1954 llvm::AttributeList::FunctionIndex,
1955 llvm::Attribute::NoUnwind));
1958 static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
1959 llvm::PointerType *GuardPtrTy) {
1960 // void __cxa_guard_release(__guard *guard_object);
1961 llvm::FunctionType *FTy =
1962 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1963 return CGM.CreateRuntimeFunction(
1964 FTy, "__cxa_guard_release",
1965 llvm::AttributeList::get(CGM.getLLVMContext(),
1966 llvm::AttributeList::FunctionIndex,
1967 llvm::Attribute::NoUnwind));
1970 static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
1971 llvm::PointerType *GuardPtrTy) {
1972 // void __cxa_guard_abort(__guard *guard_object);
1973 llvm::FunctionType *FTy =
1974 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1975 return CGM.CreateRuntimeFunction(
1976 FTy, "__cxa_guard_abort",
1977 llvm::AttributeList::get(CGM.getLLVMContext(),
1978 llvm::AttributeList::FunctionIndex,
1979 llvm::Attribute::NoUnwind));
1983 struct CallGuardAbort final : EHScopeStack::Cleanup {
1984 llvm::GlobalVariable *Guard;
1985 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
1987 void Emit(CodeGenFunction &CGF, Flags flags) override {
1988 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
1994 /// The ARM code here follows the Itanium code closely enough that we
1995 /// just special-case it at particular places.
/// Emit a one-time initialization of a guarded variable (a static local or
/// an inline/thread-unsafe global), following the Itanium __cxa_guard_*
/// protocol, with the ARM variants special-cased where noted.
/// NOTE(review): `D` is the guarded VarDecl; its parameter line, and several
/// structural lines (closing braces, `if (threadsafe)` framing, `else`
/// branches), are not visible in this excerpt — confirm against the full file.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
      // Generic Itanium: a 64-bit guard, aligned per the data layout.
      guardTy = CGF.Int64Ty;
      guardAlignment = CharUnits::fromQuantity(
          CGM.getDataLayout().getABITypeAlignment(guardTy));
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo();

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  // Mangle the name for the guard.
  SmallString<256> guardName;
  llvm::raw_svector_ostream out(guardName);
  getMangleContext().mangleStaticGuardVariable(&D, out);
  // Create the guard variable with a zero-initializer.
  // Just absorb linkage and visibility from the guarded variable.
  guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                   false, var->getLinkage(),
                                   llvm::ConstantInt::get(guardTy, 0),
  guard->setVisibility(var->getVisibility());
  // If the variable is thread-local, so is its guard variable.
  guard->setThreadLocalMode(var->getThreadLocalMode());
  guard->setAlignment(guardAlignment.getQuantity());

  // The ABI says: "It is suggested that it be emitted in the same COMDAT
  // group as the associated data object." In practice, this doesn't work for
  // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
  llvm::Comdat *C = var->getComdat();
  if (!D.isLocalVarDecl() && C &&
      (CGM.getTarget().getTriple().isOSBinFormatELF() ||
       CGM.getTarget().getTriple().isOSBinFormatWasm())) {
    guard->setComdat(C);
    // An inline variable's guard function is run from the per-TU
    // initialization function, not via a dedicated global ctor function, so
    // we can't put it in a comdat.
    if (!NonTemplateInline)
      CGF.CurFn->setComdat(C);
  } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
    guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
  // Cache the guard so a re-emission of this function body reuses it.
  CGM.setStaticLocalDeclGuardAddress(&D, guard);

  Address guardAddr = Address(guard, guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //           ... initialize the object ...;
  //           __cxa_guard_abort (&obj_guard);
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

  //   An implementation supporting thread-safety on multiprocessor
  //   systems must also guarantee that references to the initialized
  //   object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
    LI->setAtomic(llvm::AtomicOrdering::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
      (UseARMGuardVarABI && !useInt8GuardVariable)
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                               CodeGenFunction::GuardKind::VariableGuard, &D);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
    // Call __cxa_guard_acquire.
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    // Only run the initializer if __cxa_guard_acquire returned non-zero.
    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release. This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
    // Non-threadsafe path: just store 1 into the guard directly.
    Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);

  CGF.EmitBlock(EndBlock);
2181 /// Register a global destructor using __cxa_atexit.
2182 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2183 llvm::Constant *dtor,
2184 llvm::Constant *addr,
2186 const char *Name = "__cxa_atexit";
2188 const llvm::Triple &T = CGF.getTarget().getTriple();
2189 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2192 // We're assuming that the destructor function is something we can
2193 // reasonably call with the default CC. Go ahead and cast it to the
2195 llvm::Type *dtorTy =
2196 llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2198 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2199 llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
2200 llvm::FunctionType *atexitTy =
2201 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2203 // Fetch the actual function.
2204 llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2205 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
2206 fn->setDoesNotThrow();
2208 // Create a variable that binds the atexit to this shared object.
2209 llvm::Constant *handle =
2210 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2211 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2212 GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2214 llvm::Value *args[] = {
2215 llvm::ConstantExpr::getBitCast(dtor, dtorTy),
2216 llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
2219 CGF.EmitNounwindRuntimeCall(atexit, args);
2222 /// Register a global destructor as best as we know how.
2223 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
2225 llvm::Constant *dtor,
2226 llvm::Constant *addr) {
2227 // Use __cxa_atexit if available.
2228 if (CGM.getCodeGenOpts().CXAAtExit)
2229 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2232 CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
2234 // In Apple kexts, we want to add a global destructor entry.
2235 // FIXME: shouldn't this be guarded by some variable?
2236 if (CGM.getLangOpts().AppleKext) {
2237 // Generate a global destructor entry.
2238 return CGM.AddCXXDtorEntry(dtor, addr);
2241 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2244 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2245 CodeGen::CodeGenModule &CGM) {
2246 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2247 // Darwin prefers to have references to thread local variables to go through
2248 // the thread wrapper instead of directly referencing the backing variable.
2249 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2250 CGM.getTarget().getTriple().isOSDarwin();
2253 /// Get the appropriate linkage for the wrapper function. This is essentially
2254 /// the weak form of the variable's linkage; every translation unit which needs
2255 /// the wrapper emits a copy, and we want the linker to merge them.
2256 static llvm::GlobalValue::LinkageTypes
2257 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2258 llvm::GlobalValue::LinkageTypes VarLinkage =
2259 CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);
2261 // For internal linkage variables, we don't need an external or weak wrapper.
2262 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2265 // If the thread wrapper is replaceable, give it appropriate linkage.
2266 if (isThreadWrapperReplaceable(VD, CGM))
2267 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2268 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2270 return llvm::GlobalValue::WeakODRLinkage;
2274 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2276 // Mangle the name for the thread_local wrapper function.
2277 SmallString<256> WrapperName;
2279 llvm::raw_svector_ostream Out(WrapperName);
2280 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2283 // FIXME: If VD is a definition, we should regenerate the function attributes
2284 // before returning.
2285 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2286 return cast<llvm::Function>(V);
2288 QualType RetQT = VD->getType();
2289 if (RetQT->isReferenceType())
2290 RetQT = RetQT.getNonReferenceType();
2292 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2293 getContext().getPointerType(RetQT), FunctionArgList());
2295 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2296 llvm::Function *Wrapper =
2297 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2298 WrapperName.str(), &CGM.getModule());
2300 CGM.SetLLVMFunctionAttributes(nullptr, FI, Wrapper);
2302 if (VD->hasDefinition())
2303 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2305 // Always resolve references to the wrapper at link time.
2306 if (!Wrapper->hasLocalLinkage() && !(isThreadWrapperReplaceable(VD, CGM) &&
2307 !llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) &&
2308 !llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage())))
2309 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2311 if (isThreadWrapperReplaceable(VD, CGM)) {
2312 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2313 Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
/// Emit the per-TU __tls_init function for ordered thread_local initializers
/// and a wrapper function for each thread_local variable.
/// NOTE(review): several structural lines (the `else` of the ordered/unordered
/// split, `continue;` after replaceable external wrappers, closing braces,
/// and some call-argument lines) are not visible in this excerpt — confirm
/// against the full file before relying on the exact control flow.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations have unordered initialization; everything else
    // keeps its declaration order.
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
      OrderedInits.push_back(CXXThreadLocalInits[I]);

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
    // The guard is an i8 TLS flag checked by the generated init function.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getQuantity());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, OrderedInits,
                                                   Address(Guard, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);

  // Emit thread wrappers.
  for (const VarDecl *VD : CXXThreadLocals) {
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);

    // Some targets require that all access to thread local variables go through
    // the thread wrapper. This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition()) {
      Wrapper->setLinkage(llvm::Function::ExternalLinkage);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      // Unordered (template-instantiated) variables use their own init fn.
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
      Init = llvm::Function::Create(FnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(nullptr, FI, cast<llvm::Function>(Init));

      Init->setVisibility(Var->getVisibility());

    // Build the wrapper body: call the init function, then return the
    // address of the variable (or its referent, for references).
    llvm::LLVMContext &Context = CGM.getModule().getContext();
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (InitIsInitFunc) {
        llvm::CallInst *CallVal = Builder.CreateCall(Init);
        if (isThreadWrapperReplaceable(VD, CGM))
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Var;
    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Val, Align);
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");
    Builder.CreateRet(Val);
2450 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2452 QualType LValType) {
2453 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2454 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2456 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
2457 CallVal->setCallingConv(Wrapper->getCallingConv());
2460 if (VD->getType()->isReferenceType())
2461 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2463 LV = CGF.MakeAddrLValue(CallVal, LValType,
2464 CGF.getContext().getDeclAlign(VD));
2465 // FIXME: need setObjCGCLValueClass?
2469 /// Return whether the given global decl needs a VTT parameter, which it does
2470 /// if it's a base constructor or destructor with virtual bases.
2471 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2472 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2474 // We don't have any virtual bases, just return early.
2475 if (!MD->getParent()->getNumVBases())
2478 // Check if we have a base constructor.
2479 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2482 // Check if we have a base destructor.
2483 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2490 class ItaniumRTTIBuilder {
2491 CodeGenModule &CGM; // Per-module state.
2492 llvm::LLVMContext &VMContext;
2493 const ItaniumCXXABI &CXXABI; // Per-module state.
2495 /// Fields - The fields of the RTTI descriptor currently being built.
2496 SmallVector<llvm::Constant *, 16> Fields;
2498 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
2499 llvm::GlobalVariable *
2500 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
2502 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
2503 /// descriptor of the given type.
2504 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
2506 /// BuildVTablePointer - Build the vtable pointer for the given type.
2507 void BuildVTablePointer(const Type *Ty);
2509 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2510 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
2511 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
2513 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2514 /// classes with bases that do not satisfy the abi::__si_class_type_info
2515 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
2516 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
2518 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
2519 /// for pointer types.
2520 void BuildPointerTypeInfo(QualType PointeeTy);
2522 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
2523 /// type_info for an object type.
2524 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
2526 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
2527 /// struct, used for member pointer types.
2528 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
2531 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
2532 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
2534 // Pointer type info flags.
2536 /// PTI_Const - Type has const qualifier.
2539 /// PTI_Volatile - Type has volatile qualifier.
2542 /// PTI_Restrict - Type has restrict qualifier.
2545 /// PTI_Incomplete - Type is incomplete.
2546 PTI_Incomplete = 0x8,
2548 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
2549 /// (in pointer to member).
2550 PTI_ContainingClassIncomplete = 0x10,
2552 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
2553 //PTI_TransactionSafe = 0x20,
2555 /// PTI_Noexcept - Pointee is noexcept function (C++1z).
2556 PTI_Noexcept = 0x40,
2559 // VMI type info flags.
2561 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
2562 VMI_NonDiamondRepeat = 0x1,
2564 /// VMI_DiamondShaped - Class is diamond shaped.
2565 VMI_DiamondShaped = 0x2
2568 // Base class type info flags.
2570 /// BCTI_Virtual - Base class is virtual.
2573 /// BCTI_Public - Base class is public.
2577 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
2579 /// \param Force - true to force the creation of this RTTI value
2580 /// \param DLLExport - true to mark the RTTI value as DLLExport
2581 llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false,
2582 bool DLLExport = false);
2586 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
2587 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
2588 SmallString<256> Name;
2589 llvm::raw_svector_ostream Out(Name);
2590 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
2592 // We know that the mangled name of the type starts at index 4 of the
2593 // mangled name of the typename, so we can just index into it in order to
2594 // get the mangled name of the type.
2595 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
2598 llvm::GlobalVariable *GV =
2599 CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
2601 GV->setInitializer(Init);
2607 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
2608 // Mangle the RTTI name.
2609 SmallString<256> Name;
2610 llvm::raw_svector_ostream Out(Name);
2611 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2613 // Look for an existing global.
2614 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
2617 // Create a new global variable.
2618 // Note for the future: If we would ever like to do deferred emission of
2619 // RTTI, check if emitting vtables opportunistically need any adjustment.
2621 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
2623 llvm::GlobalValue::ExternalLinkage, nullptr,
2625 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2626 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2627 if (RD->hasAttr<DLLImportAttr>())
2628 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
2632 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
2635 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
2636 /// info for that type is defined in the standard library.
2637 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
2638   // Itanium C++ ABI 2.9.2:
2639   //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
2640   //   the run-time support library. Specifically, the run-time support
2641   //   library should contain type_info objects for the types X, X* and
2642   //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
2643   //   unsigned char, signed char, short, unsigned short, int, unsigned int,
2644   //   long, unsigned long, long long, unsigned long long, float, double,
2645   //   long double, char16_t, char32_t, and the IEEE 754r decimal and
2646   //   half-precision floating point types.
2648   // GCC also emits RTTI for __int128.
2649   // FIXME: We do not emit RTTI information for decimal types here.
2651   // Types added here must also be added to EmitFundamentalRTTIDescriptors.
2652   switch (Ty->getKind()) {
  // Fundamental types whose type_info lives in the C++ runtime library.
  // NOTE(review): the `return true;` for this case group appears elided in
  // this extract.
2653     case BuiltinType::Void:
2654     case BuiltinType::NullPtr:
2655     case BuiltinType::Bool:
2656     case BuiltinType::WChar_S:
2657     case BuiltinType::WChar_U:
2658     case BuiltinType::Char_U:
2659     case BuiltinType::Char_S:
2660     case BuiltinType::UChar:
2661     case BuiltinType::SChar:
2662     case BuiltinType::Short:
2663     case BuiltinType::UShort:
2664     case BuiltinType::Int:
2665     case BuiltinType::UInt:
2666     case BuiltinType::Long:
2667     case BuiltinType::ULong:
2668     case BuiltinType::LongLong:
2669     case BuiltinType::ULongLong:
2670     case BuiltinType::Half:
2671     case BuiltinType::Float:
2672     case BuiltinType::Double:
2673     case BuiltinType::LongDouble:
2674     case BuiltinType::Float16:
2675     case BuiltinType::Float128:
2676     case BuiltinType::Char16:
2677     case BuiltinType::Char32:
2678     case BuiltinType::Int128:
2679     case BuiltinType::UInt128:
  // OpenCL builtin types are not covered by the C++ runtime library.
2682 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2683     case BuiltinType::Id:
2684 #include "clang/Basic/OpenCLImageTypes.def"
2685     case BuiltinType::OCLSampler:
2686     case BuiltinType::OCLEvent:
2687     case BuiltinType::OCLClkEvent:
2688     case BuiltinType::OCLQueue:
2689     case BuiltinType::OCLReserveID:
  // Placeholder and dependent types should have been resolved long before
  // RTTI emission; reaching here for one is a front-end bug.
2692     case BuiltinType::Dependent:
2693 #define BUILTIN_TYPE(Id, SingletonId)
2694 #define PLACEHOLDER_TYPE(Id, SingletonId) \
2695     case BuiltinType::Id:
2696 #include "clang/AST/BuiltinTypes.def"
2697       llvm_unreachable("asking for RRTI for a placeholder type!");
2699     case BuiltinType::ObjCId:
2700     case BuiltinType::ObjCClass:
2701     case BuiltinType::ObjCSel:
2702       llvm_unreachable("FIXME: Objective-C types are unsupported!");
  // Fully covered switch: every BuiltinType kind is handled above.
2705   llvm_unreachable("Invalid BuiltinType Kind!");
// Pointer-type overload: a pointer's type_info is in the standard library
// only when the pointee is one of the builtin types covered above (the ABI
// provides X* and X const* for each fundamental X).
// NOTE(review): the guard lines (null-BuiltinTy check and the qualifier
// rejection) appear elided in this extract.
2708 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
2709   QualType PointeeTy = PointerTy->getPointeeType();
2710   const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
2714   // Check the qualifiers.
2715   Qualifiers Quals = PointeeTy.getQualifiers();
  // const is allowed (the runtime provides X const* descriptors); strip it
  // before checking for any other, disqualifying, qualifiers.
2716   Quals.removeConst();
2721   return TypeInfoIsInStandardLibrary(BuiltinTy);
2724 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
2725 /// information for the given type exists in the standard library.
2726 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
2727   // Type info for builtin types is defined in the standard library.
2728   if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
2729     return TypeInfoIsInStandardLibrary(BuiltinTy);
2731   // Type info for some pointer types to builtin types is defined in the
2732   // standard library.
2733   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2734     return TypeInfoIsInStandardLibrary(PointerTy);
// All other types must be emitted by the compiler (the trailing
// `return false;` appears elided in this extract).
2739 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
2740 /// the given type exists somewhere else, and that we should not emit the type
2741 /// information in this translation unit.  Assumes that it is not a
2742 /// standard-library type.
2743 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
2745   ASTContext &Context = CGM.getContext();
2747   // If RTTI is disabled, assume it might be disabled in the
2748   // translation unit that defines any potential key function, too.
2749   if (!Context.getLangOpts().RTTI) return false;
2751   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2752     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
    // Incomplete classes have no key function, so their RTTI cannot be
    // assumed to live elsewhere.
2753     if (!RD->hasDefinition())
    // Only dynamic classes follow the key-function emission model.
2756     if (!RD->isDynamicClass())
2759     // FIXME: this may need to be reconsidered if the key function
2761     // N.B. We must always emit the RTTI data ourselves if there exists a key
    // If the vtable is emitted in another TU, the RTTI normally is too —
    // except when the class is dllimport'd on a non-Windows-Itanium target,
    // where the descriptor must still be emitted locally.
2763     bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
2764     if (CGM.getVTables().isVTableExternal(RD))
2765       return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
2776 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
2777 static bool IsIncompleteClassType(const RecordType *RecordTy) {
  // A class is "incomplete" iff its declaration is not a complete definition
  // in this AST (i.e. it has only been forward-declared).
2778   return !RecordTy->getDecl()->isCompleteDefinition();
2781 /// ContainsIncompleteClassType - Returns whether the given type contains an
2782 /// incomplete class type. This is true if
2784 ///   * The given type is an incomplete class type.
2785 ///   * The given type is a pointer type whose pointee type contains an
2786 ///     incomplete class type.
2787 ///   * The given type is a member pointer type whose class is an incomplete
2789 ///   * The given type is a member pointer type whose pointee type contains an
2790 ///     incomplete class type.
2791 ///   is an indirect or direct pointer to an incomplete class type.
2792 static bool ContainsIncompleteClassType(QualType Ty) {
2793   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2794     if (IsIncompleteClassType(RecordTy))
  // Recurse through pointers: T* contains an incomplete class iff T does.
2798   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2799     return ContainsIncompleteClassType(PointerTy->getPointeeType());
2801   if (const MemberPointerType *MemberPointerTy =
2802       dyn_cast<MemberPointerType>(Ty)) {
2803     // Check if the class type is incomplete.
2804     const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
2805     if (IsIncompleteClassType(ClassType))
    // Otherwise the member pointer is incomplete iff its pointee type is.
2808     return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
2814 // CanUseSingleInheritance - Return whether the given record decl has a "single,
2815 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
2816 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
// When this holds the class can use the cheaper __si_class_type_info form
// instead of __vmi_class_type_info.
2817 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
2818   // Check the number of bases.
2819   if (RD->getNumBases() != 1)
2823   CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
2825   // Check that the base is not virtual.
2826   if (Base->isVirtual())
2829   // Check that the base is public.
2830   if (Base->getAccessSpecifier() != AS_public)
2833   // Check that the class is dynamic iff the base is.
2834   const CXXRecordDecl *BaseDecl =
2835     cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
  // An empty base is laid out at offset zero regardless, so the dynamic-ness
  // comparison only matters for non-empty bases.
2836   if (!BaseDecl->isEmpty() &&
2837       BaseDecl->isDynamicClass() != RD->isDynamicClass())
// Select the abi::*_type_info vtable symbol that matches Ty's category and
// push its (address-point-adjusted) address as the first field of the
// type_info object being built.
// NOTE(review): several `break;` lines and a few case labels appear elided
// in this extract.
2843 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
2844   // abi::__class_type_info.
2845   static const char * const ClassTypeInfo =
2846     "_ZTVN10__cxxabiv117__class_type_infoE";
2847   // abi::__si_class_type_info.
2848   static const char * const SIClassTypeInfo =
2849     "_ZTVN10__cxxabiv120__si_class_type_infoE";
2850   // abi::__vmi_class_type_info.
2851   static const char * const VMIClassTypeInfo =
2852     "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
2854   const char *VTableName = nullptr;
2856   switch (Ty->getTypeClass()) {
2857 #define TYPE(Class, Base)
2858 #define ABSTRACT_TYPE(Class, Base)
2859 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
2860 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
2861 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2862 #include "clang/AST/TypeNodes.def"
2863     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
2865   case Type::LValueReference:
2866   case Type::RValueReference:
2867     llvm_unreachable("References shouldn't get here");
2870   case Type::DeducedTemplateSpecialization:
2871     llvm_unreachable("Undeduced type shouldn't get here");
2874     llvm_unreachable("Pipe types shouldn't get here");
2877   // GCC treats vector and complex types as fundamental types.
2879   case Type::ExtVector:
2882   // FIXME: GCC treats block pointers as fundamental types?!
2883   case Type::BlockPointer:
2884     // abi::__fundamental_type_info.
2885     VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
2888   case Type::ConstantArray:
2889   case Type::IncompleteArray:
2890   case Type::VariableArray:
2891     // abi::__array_type_info.
2892     VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
2895   case Type::FunctionNoProto:
2896   case Type::FunctionProto:
2897     // abi::__function_type_info.
2898     VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
2902     // abi::__enum_type_info.
2903     VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
2906   case Type::Record: {
2907     const CXXRecordDecl *RD =
2908       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
    // Pick the cheapest class_type_info form: no bases (or no definition)
    // -> plain, one simple base -> si, otherwise -> vmi.
2910     if (!RD->hasDefinition() || !RD->getNumBases()) {
2911       VTableName = ClassTypeInfo;
2912     } else if (CanUseSingleInheritance(RD)) {
2913       VTableName = SIClassTypeInfo;
2915       VTableName = VMIClassTypeInfo;
2921   case Type::ObjCObject:
2922     // Ignore protocol qualifiers.
2923     Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
2925     // Handle id and Class.
2926     if (isa<BuiltinType>(Ty)) {
2927       VTableName = ClassTypeInfo;
2931     assert(isa<ObjCInterfaceType>(Ty));
2934   case Type::ObjCInterface:
    // An interface with a superclass is single inheritance; a root class
    // uses the plain class form.
2935     if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
2936       VTableName = SIClassTypeInfo;
2938       VTableName = ClassTypeInfo;
2942   case Type::ObjCObjectPointer:
2944     // abi::__pointer_type_info.
2945     VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
2948   case Type::MemberPointer:
2949     // abi::__pointer_to_member_type_info.
2950     VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
2954   llvm::Constant *VTable =
2955     CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
2957   llvm::Type *PtrDiffTy =
2958     CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
2960   // The vtable address point is 2.
2961   llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
  // Skip past the offset-to-top and RTTI slots to reach the address point.
2963     llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
2964   VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
2966   Fields.push_back(VTable);
2969 /// \brief Return the linkage that the type info and type info name constants
2970 /// should have for the given type.
2971 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
2973   // Itanium C++ ABI 2.9.5p7:
2974   //   In addition, it and all of the intermediate abi::__pointer_type_info
2975   //   structs in the chain down to the abi::__class_type_info for the
2976   //   incomplete class type must be prevented from resolving to the
2977   //   corresponding type_info structs for the complete class type, possibly
2978   //   by making them local static objects. Finally, a dummy class RTTI is
2979   //   generated for the incomplete type that will not resolve to the final
2980   //   complete class RTTI (because the latter need not exist), possibly by
2981   //   making it a local static object.
2982   if (ContainsIncompleteClassType(Ty))
2983     return llvm::GlobalValue::InternalLinkage;
2985   switch (Ty->getLinkage()) {
2987   case InternalLinkage:
2988   case UniqueExternalLinkage:
2989     return llvm::GlobalValue::InternalLinkage;
2991   case VisibleNoLinkage:
2992   case ModuleInternalLinkage:
2994   case ExternalLinkage:
2995     // RTTI is not enabled, which means that this type info struct is going
2996     // to be used for exception handling. Give it linkonce_odr linkage.
2997     if (!CGM.getLangOpts().RTTI)
2998       return llvm::GlobalValue::LinkOnceODRLinkage;
3000     if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3001       const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3002       if (RD->hasAttr<WeakAttr>())
3003         return llvm::GlobalValue::WeakODRLinkage;
      // A dllimport'd class on Windows-Itanium gets its RTTI from the
      // exporting DLL, so reference it with external linkage.
3004       if (CGM.getTriple().isWindowsItaniumEnvironment())
3005         if (RD->hasAttr<DLLImportAttr>() &&
3006             ShouldUseExternalRTTIDescriptor(CGM, Ty))
3007           return llvm::GlobalValue::ExternalLinkage;
3008       // MinGW always uses LinkOnceODRLinkage for type info.
3009       if (RD->isDynamicClass() &&
3013                .isWindowsGNUEnvironment())
        // Dynamic classes follow their vtable's linkage.
3014         return CGM.getVTableLinkage(RD);
3017     return llvm::GlobalValue::LinkOnceODRLinkage;
3020   llvm_unreachable("Invalid linkage!");
// Build (or return the already-emitted) type_info object for \p Ty, along
// with its type-name string, and return the descriptor's address as an i8*.
// Handles: reuse of an existing definition, deferral to an external
// descriptor, linkage/visibility/DLL-storage selection, and the per-type
// payload fields appended after the common vtable+name header.
// \param Force  when true, emit here even if an external descriptor would
//               normally be used (used for fundamental types).
// \param DLLExport  presumably forces dllexport storage on Windows-Itanium —
//                   TODO confirm against the declaration (not visible here).
// NOTE(review): interior lines are elided in this extract.
3023 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force,
3025   // We want to operate on the canonical type.
3026   Ty = Ty.getCanonicalType();
3028   // Check if we've already emitted an RTTI descriptor for this type.
3029   SmallString<256> Name;
3030   llvm::raw_svector_ostream Out(Name);
3031   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3033   llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3034   if (OldGV && !OldGV->isDeclaration()) {
3035     assert(!OldGV->hasAvailableExternallyLinkage() &&
3036            "available_externally typeinfos not yet implemented");
3038     return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3041   // Check if there is already an external RTTI descriptor for this type.
3042   bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
3043   if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
3044     return GetAddrOfExternalRTTIDescriptor(Ty);
3046   // Emit the standard library with external linkage.
3047   llvm::GlobalVariable::LinkageTypes Linkage;
3049     Linkage = llvm::GlobalValue::ExternalLinkage;
3051     Linkage = getTypeInfoLinkage(CGM, Ty);
3053   // Add the vtable pointer.
3054   BuildVTablePointer(cast<Type>(Ty));
  // Second common field: pointer to the mangled type-name string (_ZTS).
3057   llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3058   llvm::Constant *TypeNameField;
3060   // If we're supposed to demote the visibility, be sure to set a flag
3061   // to use a string comparison for type_info comparisons.
3062   ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3063       CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3064   if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3065     // The flag is the sign bit, which on ARM64 is defined to be clear
3066     // for global pointers.  This is very ARM64-specific.
3067     TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3068     llvm::Constant *flag =
3069         llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3070     TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3072         llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3074     TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3076   Fields.push_back(TypeNameField);
  // Append the per-type payload fields, dispatching on the type category.
3078   switch (Ty->getTypeClass()) {
3079 #define TYPE(Class, Base)
3080 #define ABSTRACT_TYPE(Class, Base)
3081 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3082 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3083 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3084 #include "clang/AST/TypeNodes.def"
3085     llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3087   // GCC treats vector types as fundamental types.
3090   case Type::ExtVector:
3092   case Type::BlockPointer:
3093     // Itanium C++ ABI 2.9.5p4:
3094     // abi::__fundamental_type_info adds no data members to std::type_info.
3097   case Type::LValueReference:
3098   case Type::RValueReference:
3099     llvm_unreachable("References shouldn't get here");
3102   case Type::DeducedTemplateSpecialization:
3103     llvm_unreachable("Undeduced type shouldn't get here");
3106     llvm_unreachable("Pipe type shouldn't get here");
3108   case Type::ConstantArray:
3109   case Type::IncompleteArray:
3110   case Type::VariableArray:
3111     // Itanium C++ ABI 2.9.5p5:
3112     // abi::__array_type_info adds no data members to std::type_info.
3115   case Type::FunctionNoProto:
3116   case Type::FunctionProto:
3117     // Itanium C++ ABI 2.9.5p5:
3118     // abi::__function_type_info adds no data members to std::type_info.
3122     // Itanium C++ ABI 2.9.5p5:
3123     // abi::__enum_type_info adds no data members to std::type_info.
3126   case Type::Record: {
3127     const CXXRecordDecl *RD =
3128       cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3129     if (!RD->hasDefinition() || !RD->getNumBases()) {
3130       // We don't need to emit any fields.
3134     if (CanUseSingleInheritance(RD))
3135       BuildSIClassTypeInfo(RD);
3137       BuildVMIClassTypeInfo(RD);
3142   case Type::ObjCObject:
3143   case Type::ObjCInterface:
3144     BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3147   case Type::ObjCObjectPointer:
3148     BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3152     BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3155   case Type::MemberPointer:
3156     BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3160     // No fields, at least for the moment.
  // Materialize the accumulated fields as a constant struct global.
3164   llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3166   llvm::Module &M = CGM.getModule();
3167   llvm::GlobalVariable *GV =
3168       new llvm::GlobalVariable(M, Init->getType(),
3169                                /*Constant=*/true, Linkage, Init, Name);
3171   // If there's already an old global variable, replace it with the new one.
3173     GV->takeName(OldGV);
3174     llvm::Constant *NewPtr =
3175       llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3176     OldGV->replaceAllUsesWith(NewPtr);
3177     OldGV->eraseFromParent();
  // Weak-for-linker definitions must be comdat'd so duplicates fold.
3180   if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3181     GV->setComdat(M.getOrInsertComdat(GV->getName()));
3183   // The Itanium ABI specifies that type_info objects must be globally
3184   // unique, with one exception: if the type is an incomplete class
3185   // type or a (possibly indirect) pointer to one.  That exception
3186   // affects the general case of comparing type_info objects produced
3187   // by the typeid operator, which is why the comparison operators on
3188   // std::type_info generally use the type_info name pointers instead
3189   // of the object addresses.  However, the language's built-in uses
3190   // of RTTI generally require class types to be complete, even when
3191   // manipulating pointers to those class types.  This allows the
3192   // implementation of dynamic_cast to rely on address equality tests,
3193   // which is much faster.
3195   // All of this is to say that it's important that both the type_info
3196   // object and the type_info name be uniqued when weakly emitted.
3198   // Give the type_info object and name the formal visibility of the
3200   llvm::GlobalValue::VisibilityTypes llvmVisibility;
3201   if (llvm::GlobalValue::isLocalLinkage(Linkage))
3202     // If the linkage is local, only default visibility makes sense.
3203     llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3204   else if (RTTIUniqueness == ItaniumCXXABI::RUK_NonUniqueHidden)
3205     llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3207     llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3209   TypeName->setVisibility(llvmVisibility);
3210   GV->setVisibility(llvmVisibility);
3212   if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3213     auto RD = Ty->getAsCXXRecordDecl();
3214     if (DLLExport || (RD && RD->hasAttr<DLLExportAttr>())) {
3215       TypeName->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
3216       GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
3217     } else if (RD && RD->hasAttr<DLLImportAttr>() &&
3218                ShouldUseExternalRTTIDescriptor(CGM, Ty)) {
3219       TypeName->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
3220       GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
3222       // Because the typename and the typeinfo are DLL import, convert them to
3223       // declarations rather than definitions.  The initializers still need to
3224       // be constructed to calculate the type for the declarations.
3225       TypeName->setInitializer(nullptr);
3226       GV->setInitializer(nullptr);
3230   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3233 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3234 /// for the given Objective-C object type.
3235 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
  // Strip protocol qualifiers; only the underlying base type matters here.
3237   const Type *T = OT->getBaseType().getTypePtr();
3238   assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3240   // The builtin types are abi::__class_type_infos and don't require
  // (id / Class need no extra fields beyond the common header.)
3242   if (isa<BuiltinType>(T)) return;
3244   ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3245   ObjCInterfaceDecl *Super = Class->getSuperClass();
3247   // Root classes are also __class_type_info.
3250   QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3252   // Everything else is single inheritance.
  // __si_class_type_info: a single extra field pointing at the superclass's
  // type_info.
3253   llvm::Constant *BaseTypeInfo =
3254       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3255   Fields.push_back(BaseTypeInfo);
3258 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3259 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
3260 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3261   // Itanium C++ ABI 2.9.5p6b:
3262   //   It adds to abi::__class_type_info a single member pointing to the
3263   //   type_info structure for the base type,
  // Caller (BuildTypeInfo) has already verified via CanUseSingleInheritance
  // that RD has exactly one public, non-virtual base at offset zero.
3264   llvm::Constant *BaseTypeInfo =
3265     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3266   Fields.push_back(BaseTypeInfo);
3270 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3271 /// a class hierarchy.
// Used by ComputeVMIClassTypeInfoFlags to detect repeated and diamond-shaped
// inheritance: a base appearing twice in the same set, or in both sets,
// drives the VMI flag computation below.
3273   llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3274   llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3278 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3279 /// abi::__vmi_class_type_info.
// Recursive helper: accumulates diamond/repeat flags for one base specifier
// and all of its (transitive) bases, recording visited bases in \p Bases.
3281 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3286   const CXXRecordDecl *BaseDecl =
3287     cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
3289   if (Base->isVirtual()) {
3290     // Mark the virtual base as seen.
3291     if (!Bases.VirtualBases.insert(BaseDecl).second) {
3292       // If this virtual base has been seen before, then the class is diamond
3294       Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
      // Seen before as a non-virtual base: non-diamond repeated inheritance.
3296       if (Bases.NonVirtualBases.count(BaseDecl))
3297         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3300     // Mark the non-virtual base as seen.
3301     if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3302       // If this non-virtual base has been seen before, then the class has non-
3303       // diamond shaped repeated inheritance.
3304       Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
      // Also repeated if it was previously seen as a virtual base.
3306       if (Bases.VirtualBases.count(BaseDecl))
3307         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
  // Walk indirect bases too — the VMI flags cover the whole hierarchy.
3312   for (const auto &I : BaseDecl->bases())
3313     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
// Entry point: compute the __vmi_class_type_info flags word for \p RD by
// folding the flags of every direct base (each of which recurses through
// its own base hierarchy, sharing one SeenBases accumulator).
3318 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3323   for (const auto &I : RD->bases())
3324     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3329 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3330 /// classes with bases that do not satisfy the abi::__si_class_type_info
3331 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3332 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3333   llvm::Type *UnsignedIntLTy =
3334     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3336   // Itanium C++ ABI 2.9.5p6c:
3337   //   __flags is a word with flags describing details about the class
3338   //   structure, which may be referenced by using the __flags_masks
3339   //   enumeration. These flags refer to both direct and indirect bases.
3340   unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3341   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3343   // Itanium C++ ABI 2.9.5p6c:
3344   //   __base_count is a word with the number of direct proper base class
3345   //   descriptions that follow.
3346   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3348   if (!RD->getNumBases())
3351   // Now add the base class descriptions.
3353   // Itanium C++ ABI 2.9.5p6c:
3354   //   __base_info[] is an array of base class descriptions -- one for every
3355   //   direct proper base. Each description is of the type:
3357   //     struct abi::__base_class_type_info {
3359   //       const __class_type_info *__base_type;
3360   //       long __offset_flags;
3362   //       enum __offset_flags_masks {
3363   //         __virtual_mask = 0x1,
3364   //         __public_mask = 0x2,
3365   //         __offset_shift = 8
3369   // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
3370   // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
3372   // FIXME: Consider updating libc++abi to match, and extend this logic to all
3374   QualType OffsetFlagsTy = CGM.getContext().LongTy;
3375   const TargetInfo &TI = CGM.getContext().getTargetInfo();
3376   if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
3377     OffsetFlagsTy = CGM.getContext().LongLongTy;
3378   llvm::Type *OffsetFlagsLTy =
3379       CGM.getTypes().ConvertType(OffsetFlagsTy);
3381   for (const auto &Base : RD->bases()) {
3382     // The __base_type member points to the RTTI for the base type.
3383     Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3385     const CXXRecordDecl *BaseDecl =
3386       cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
3388     int64_t OffsetFlags = 0;
3390     // All but the lower 8 bits of __offset_flags are a signed offset.
3391     // For a non-virtual base, this is the offset in the object of the base
3392     // subobject. For a virtual base, this is the offset in the virtual table of
3393     // the virtual base offset for the virtual base referenced (negative).
3395     if (Base.isVirtual())
3397         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3399       const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3400       Offset = Layout.getBaseClassOffset(BaseDecl);
    // Pack the offset into the high bits (above the 8-bit flag byte).
3403     OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3405     // The low-order byte of __offset_flags contains flags, as given by the
3406     // masks from the enumeration __offset_flags_masks.
3407     if (Base.isVirtual())
3408       OffsetFlags |= BCTI_Virtual;
3409     if (Base.getAccessSpecifier() == AS_public)
3410       OffsetFlags |= BCTI_Public;
3412     Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
3416 /// Compute the flags for a __pbase_type_info, and remove the corresponding
3417 /// pieces from \p Type.
// Returns the PTI_* flag word for the pointee type and strips from \p Type
// (in/out) the qualifiers / exception-spec pieces the flags now encode, so
// the __pointee field refers to the unqualified type.
3418 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
3421   if (Type.isConstQualified())
3422     Flags |= ItaniumRTTIBuilder::PTI_Const;
3423   if (Type.isVolatileQualified())
3424     Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3425   if (Type.isRestrictQualified())
3426     Flags |= ItaniumRTTIBuilder::PTI_Restrict;
3427   Type = Type.getUnqualifiedType();
3429   // Itanium C++ ABI 2.9.5p7:
3430   //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
3431   //   incomplete class type, the incomplete target type flag is set.
3432   if (ContainsIncompleteClassType(Type))
3433     Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
3435   if (auto *Proto = Type->getAs<FunctionProtoType>()) {
3436     if (Proto->isNothrow(Ctx)) {
3437       Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
      // Fold noexcept into the flag word and drop it from the function type
      // so the __pointee descriptor is for the exception-spec-free type.
3438       Type = Ctx.getFunctionType(
3439           Proto->getReturnType(), Proto->getParamTypes(),
3440           Proto->getExtProtoInfo().withExceptionSpec(EST_None));
3447 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
3448 /// used for pointer types.
3449 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
3450   // Itanium C++ ABI 2.9.5p7:
3451   //   __flags is a flag word describing the cv-qualification and other
3452   //   attributes of the type pointed to
  // Note: extractPBaseFlags also strips the encoded qualifiers from
  // PointeeTy (it is taken by reference).
3453   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3455   llvm::Type *UnsignedIntLTy =
3456     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3457   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3459   // Itanium C++ ABI 2.9.5p7:
3460   //  __pointee is a pointer to the std::type_info derivation for the
3461   //  unqualified type being pointed to.
3462   llvm::Constant *PointeeTypeInfo =
3463     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3464   Fields.push_back(PointeeTypeInfo);
3467 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3468 /// struct, used for member pointer types.
3470 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
3471   QualType PointeeTy = Ty->getPointeeType();
3473   // Itanium C++ ABI 2.9.5p7:
3474   //   __flags is a flag word describing the cv-qualification and other
3475   //   attributes of the type pointed to.
3476   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3478   const RecordType *ClassType = cast<RecordType>(Ty->getClass());
  // An incomplete containing class gets its own flag bit, distinct from
  // PTI_Incomplete which describes the pointee.
3479   if (IsIncompleteClassType(ClassType))
3480     Flags |= PTI_ContainingClassIncomplete;
3482   llvm::Type *UnsignedIntLTy =
3483     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3484   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3486   // Itanium C++ ABI 2.9.5p7:
3487   //   __pointee is a pointer to the std::type_info derivation for the
3488   //   unqualified type being pointed to.
3489   llvm::Constant *PointeeTypeInfo =
3490     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3491   Fields.push_back(PointeeTypeInfo);
3493   // Itanium C++ ABI 2.9.5p9:
3494   //   __context is a pointer to an abi::__class_type_info corresponding to the
3495   //   class type containing the member pointed to
3496   //   (e.g., the "A" in "int A::*").
3498       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
// Public CGCXXABI entry point: build (or reuse) the type_info object for
// \p Ty and return its address as an i8*. Thin wrapper over BuildTypeInfo.
3501 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
3502   return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
// Force-emit the RTTI descriptors the ABI requires the runtime to provide
// for one fundamental type: Type itself, Type*, and const Type*
// (Itanium C++ ABI 2.9.2). Force=true bypasses the usual deferral to an
// external descriptor.
3505 void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type,
3507   QualType PointerType = getContext().getPointerType(Type);
3508   QualType PointerTypeConst = getContext().getPointerType(Type.withConst());
3509   ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, /*Force=*/true, DLLExport);
3510   ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, /*Force=*/true,
3512   ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, /*Force=*/true,
// Emit the full set of fundamental-type RTTI descriptors (each with its
// pointer and const-pointer variants via EmitFundamentalRTTIDescriptor).
3516 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(bool DLLExport) {
3517   // Types added here must also be added to TypeInfoIsInStandardLibrary.
3518   QualType FundamentalTypes[] = {
3519       getContext().VoidTy,             getContext().NullPtrTy,
3520       getContext().BoolTy,             getContext().WCharTy,
3521       getContext().CharTy,             getContext().UnsignedCharTy,
3522       getContext().SignedCharTy,       getContext().ShortTy,
3523       getContext().UnsignedShortTy,    getContext().IntTy,
3524       getContext().UnsignedIntTy,      getContext().LongTy,
3525       getContext().UnsignedLongTy,     getContext().LongLongTy,
3526       getContext().UnsignedLongLongTy, getContext().Int128Ty,
3527       getContext().UnsignedInt128Ty,   getContext().HalfTy,
3528       getContext().FloatTy,            getContext().DoubleTy,
3529       getContext().LongDoubleTy,       getContext().Float128Ty,
3530       getContext().Char16Ty,           getContext().Char32Ty
3532   for (const QualType &FundamentalType : FundamentalTypes)
3533     EmitFundamentalRTTIDescriptor(FundamentalType, DLLExport);
3536 /// What sort of uniqueness rules should we use for the RTTI for the
// Decides whether a type_info emitted with the given linkage can be assumed
// address-unique (RUK_Unique) or must support string-based comparison,
// either hidden (RUK_NonUniqueHidden) or visible (RUK_NonUniqueVisible).
3538 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
3539     QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
  // On platforms that guarantee RTTI uniqueness, no demotion is ever needed.
3540   if (shouldRTTIBeUnique())
3543   // It's only necessary for linkonce_odr or weak_odr linkage.
3544   if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
3545       Linkage != llvm::GlobalValue::WeakODRLinkage)
3548   // It's only necessary with default visibility.
3549   if (CanTy->getVisibility() != DefaultVisibility)
3552   // If we're not required to publish this symbol, hide it.
3553   if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3554     return RUK_NonUniqueHidden;
3556   // If we're required to publish this symbol, as we might be under an
3557   // explicit instantiation, leave it with default visibility but
3558   // enable string-comparisons.
3559   assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
3560   return RUK_NonUniqueVisible;
3563 // Find out how to codegen the complete destructor and constructor
// Emission strategies: Emit = separate definition; RAUW = emit one body and
// replace uses of the other variant; Alias = emit a symbol alias; COMDAT =
// share one comdat group (see getCodegenToUse below).
3565 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
// Choose how to emit the complete constructor/destructor variant for MD
// relative to its base variant, based on -mconstructor-aliases, virtual
// bases, and what the target's object format supports.
3567 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
3568                                        const CXXMethodDecl *MD) {
3569   if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
3570     return StructorCodegen::Emit;
3572   // The complete and base structors are not equivalent if there are any virtual
3573   // bases, so emit separate functions.
3574   if (MD->getParent()->getNumVBases())
3575     return StructorCodegen::Emit;
3577   GlobalDecl AliasDecl;
3578   if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
3579     AliasDecl = GlobalDecl(DD, Dtor_Complete);
3581     const auto *CD = cast<CXXConstructorDecl>(MD);
3582     AliasDecl = GlobalDecl(CD, Ctor_Complete);
3584   llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
  // Discardable or alias-incompatible linkage: fall back to emitting one body
  // and RAUW'ing the complete variant's uses onto it.
3586   if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
3587     return StructorCodegen::RAUW;
3589   // FIXME: Should we allow available_externally aliases?
3590   if (!llvm::GlobalAlias::isValidLinkage(Linkage))
3591     return StructorCodegen::RAUW;
3593   if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
3594     // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
3595     if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
3596         CGM.getTarget().getTriple().isOSBinFormatWasm())
3597       return StructorCodegen::COMDAT;
3598     return StructorCodegen::Emit;
  // Strong linkage on an alias-capable format: emit a plain alias.
3601   return StructorCodegen::Alias;
3604 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
3605 GlobalDecl AliasDecl,
3606 GlobalDecl TargetDecl) {
// Emits AliasDecl as an IR alias of TargetDecl's definition; used to make
// the complete structor an alias of the base structor. If a declaration
// already exists under the alias's mangled name, its uses are redirected to
// the new alias and the stale declaration is erased.
3607 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3609 StringRef MangledName = CGM.getMangledName(AliasDecl);
3610 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
// Nothing to do if a real definition already exists under this name.
// NOTE(review): the early 'return' for this case is elided in this excerpt.
3611 if (Entry && !Entry->isDeclaration())
3614 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
3616 // Create the alias with no name.
3617 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
3619 // Switch any previous uses to the alias.
// NOTE(review): an 'if (Entry)' guard around the next four statements (with
// an 'else' before the setName call) appears to be elided; Entry may be null
// at this point — confirm against the complete file.
3621 assert(Entry->getType() == Aliasee->getType() &&
3622 "declaration exists with different type");
3623 Alias->takeName(Entry);
3624 Entry->replaceAllUsesWith(Alias);
3625 Entry->eraseFromParent();
3627 Alias->setName(MangledName);
3630 // Finally, set up the alias with its proper name and attributes.
3631 CGM.setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
3634 void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
3635 StructorType Type) {
// Emits one variant (complete/base) of a constructor or destructor, using
// the strategy chosen by getCodegenToUse: alias, use-replacement (RAUW),
// comdat grouping, or a plain separate definition.
3636 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
// Exactly one of CD/DD is non-null: MD is either a constructor or destructor.
3637 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
3639 StructorCodegen CGType = getCodegenToUse(CGM, MD);
3641 if (Type == StructorType::Complete) {
3642 GlobalDecl CompleteDecl;
3643 GlobalDecl BaseDecl;
// NOTE(review): the 'if (CD) { ... } else { ... }' scaffolding selecting
// between the constructor and destructor cases appears to be elided from
// this excerpt.
3645 CompleteDecl = GlobalDecl(CD, Ctor_Complete);
3646 BaseDecl = GlobalDecl(CD, Ctor_Base);
3648 CompleteDecl = GlobalDecl(DD, Dtor_Complete);
3649 BaseDecl = GlobalDecl(DD, Dtor_Base);
3652 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
3653 emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
// NOTE(review): an early 'return' after emitting the alias appears to be
// elided here.
3657 if (CGType == StructorCodegen::RAUW) {
3658 StringRef MangledName = CGM.getMangledName(CompleteDecl);
3659 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
// Redirect all uses of the complete symbol to the base definition.
3660 CGM.addReplacement(MangledName, Aliasee);
3665 // The base destructor is equivalent to the base destructor of its
3666 // base class if there is exactly one non-virtual base class with a
3667 // non-trivial destructor, there are no fields with a non-trivial
3668 // destructor, and the body of the destructor is trivial.
3669 if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
3670 !CGM.TryEmitBaseDestructorAsAlias(DD))
3673 // FIXME: The deleting destructor is equivalent to the selected operator
3675 // * either the delete is a destroying operator delete or the destructor
3676 // would be trivial if it weren't virtual,
3677 // * the conversion from the 'this' parameter to the first parameter of the
3678 // destructor is equivalent to a bitcast,
3679 // * the destructor does not have an implicit "this" return, and
3680 // * the operator delete has the same calling convention and IR function type
3681 // as the destructor.
3682 // In such cases we should try to emit the deleting dtor as an alias to the
3683 // selected 'operator delete'.
3685 llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);
3687 if (CGType == StructorCodegen::COMDAT) {
3688 SmallString<256> Buffer;
3689 llvm::raw_svector_ostream Out(Buffer);
// NOTE(review): an 'if (DD) ... else ...' choosing between the dtor and
// ctor comdat manglings, plus the 'Fn->setComdat(C);' / 'else' lines after
// creating C, appear to be elided from this excerpt.
3691 getMangleContext().mangleCXXDtorComdat(DD, Out);
3693 getMangleContext().mangleCXXCtorComdat(CD, Out);
3694 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
3697 CGM.maybeSetTrivialComdat(*MD, *Fn);
// Lazily declares the Itanium EH runtime entry point used to enter a handler.
3701 static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) {
3702 // void *__cxa_begin_catch(void*);
3703 llvm::FunctionType *FTy = llvm::FunctionType::get(
3704 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3706 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
// Lazily declares the Itanium EH runtime entry point used to leave a handler.
3709 static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
3710 // void __cxa_end_catch();
3711 llvm::FunctionType *FTy =
3712 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
3714 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
// Lazily declares the runtime hook that returns the adjusted exception
// object pointer without marking the exception as handled.
3717 static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
3718 // void *__cxa_get_exception_ptr(void*);
3719 llvm::FunctionType *FTy = llvm::FunctionType::get(
3720 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3722 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
3726 /// A cleanup to call __cxa_end_catch. In many cases, the caught
3727 /// exception type lets us state definitively that the thrown exception
3728 /// type does not have a destructor. In particular:
3729 /// - Catch-alls tell us nothing, so we have to conservatively
3730 /// assume that the thrown exception might have a destructor.
3731 /// - Catches by reference behave according to their base types.
3732 /// - Catches of non-record types will only trigger for exceptions
3733 /// of non-record types, which never have destructors.
3734 /// - Catches of record types can trigger for arbitrary subclasses
3735 /// of the caught type, so we have to assume the actual thrown
3736 /// exception type might have a throwing destructor, even if the
3737 /// caught type's destructor is trivial or nothrow.
3738 struct CallEndCatch final : EHScopeStack::Cleanup {
3739 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
// NOTE(review): the 'bool MightThrow;' member declaration appears to be
// elided from this excerpt.
3742 void Emit(CodeGenFunction &CGF, Flags flags) override {
// NOTE(review): the 'if (!MightThrow) { ... return; }' branch selecting the
// nounwind call below appears to be elided; as written the two calls look
// unconditional — confirm against the complete file.
3744 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
// __cxa_end_catch may run the exception's destructor, which can throw, so
// this path must use a call-or-invoke that is wired into the EH scope.
3748 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
3753 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
3754 /// __cxa_end_catch.
3756 /// \param EndMightThrow - true if __cxa_end_catch might throw
// Returns the adjusted exception object pointer produced by
// __cxa_begin_catch.
3757 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
// NOTE(review): the 'llvm::Value *Exn,' parameter line and the trailing
// 'return call;' appear to be elided from this excerpt.
3759 bool EndMightThrow) {
3760 llvm::CallInst *call =
3761 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
// Schedule __cxa_end_catch on both normal and EH exits from the handler.
3763 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
3768 /// A "special initializer" callback for initializing a catch
3769 /// parameter during catch initialization.
///
/// Initializes the variable declared in a handler's exception-declaration
/// from the in-flight exception, choosing among: binding a reference,
/// storing a scalar/complex value, a trivial aggregate copy, or running the
/// copy constructor inside a terminate scope. Calls __cxa_begin_catch at the
/// appropriate point for each case.
3770 static void InitCatchParam(CodeGenFunction &CGF,
3771 const VarDecl &CatchParam,
// NOTE(review): the 'Address ParamAddr,' parameter line appears to be
// elided from this excerpt; ParamAddr (the catch variable's storage) is
// used throughout below.
3773 SourceLocation Loc) {
3774 // Load the exception from where the landing pad saved it.
3775 llvm::Value *Exn = CGF.getExceptionFromSlot();
3777 CanQualType CatchType =
3778 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
3779 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
3781 // If we're catching by reference, we can just cast the object
3782 // pointer to the appropriate pointer.
3783 if (isa<ReferenceType>(CatchType)) {
3784 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
// Only record types can have a destructor that __cxa_end_catch would run.
3785 bool EndCatchMightThrow = CaughtType->isRecordType();
3787 // __cxa_begin_catch returns the adjusted object pointer.
3788 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
3790 // We have no way to tell the personality function that we're
3791 // catching by reference, so if we're catching a pointer,
3792 // __cxa_begin_catch will actually return that pointer by value.
3793 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
3794 QualType PointeeType = PT->getPointeeType();
3796 // When catching by reference, generally we should just ignore
3797 // this by-value pointer and use the exception object instead.
3798 if (!PointeeType->isRecordType()) {
3800 // Exn points to the struct _Unwind_Exception header, which
3801 // we have to skip past in order to reach the exception data.
3802 unsigned HeaderSize =
3803 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
3804 AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
3806 // However, if we're catching a pointer-to-record type that won't
3807 // work, because the personality function might have adjusted
3808 // the pointer. There's actually no way for us to fully satisfy
3809 // the language/ABI contract here: we can't use Exn because it
3810 // might have the wrong adjustment, but we can't use the by-value
3811 // pointer because it's off by a level of abstraction.
3813 // The current solution is to dump the adjusted pointer into an
3814 // alloca, which breaks language semantics (because changing the
3815 // pointer doesn't change the exception) but at least works.
3816 // The better solution would be to filter out non-exact matches
3817 // and rethrow them, but this is tricky because the rethrow
3818 // really needs to be catchable by other sites at this landing
3819 // pad. The best solution is to fix the personality function.
3821 // Pull the pointer for the reference type off.
// NOTE(review): the 'llvm::Type *PtrTy =' line introducing this expression
// appears to be elided from this excerpt.
3823 cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
3825 // Create the temporary and write the adjusted pointer into it.
// NOTE(review): the 'Address ExnPtrTmp =' line introducing this call
// appears to be elided from this excerpt.
3827 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
3828 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3829 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
3831 // Bind the reference to the temporary.
3832 AdjustedExn = ExnPtrTmp.getPointer();
3836 llvm::Value *ExnCast =
3837 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
// Store the bound pointer into the catch variable; the by-reference case
// is now done (the 'return;' for this case appears to be elided).
3838 CGF.Builder.CreateStore(ExnCast, ParamAddr);
3842 // Scalars and complexes.
3843 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
3844 if (TEK != TEK_Aggregate) {
// Non-record types never have destructors, so __cxa_end_catch can't throw.
3845 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
3847 // If the catch type is a pointer type, __cxa_begin_catch returns
3848 // the pointer by value.
3849 if (CatchType->hasPointerRepresentation()) {
3850 llvm::Value *CastExn =
3851 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
// Under ARC, the ObjC ownership qualifier of the caught pointer decides
// how it is stored into the catch variable.
3853 switch (CatchType.getQualifiers().getObjCLifetime()) {
3854 case Qualifiers::OCL_Strong:
// NOTE(review): a fall-through annotation after the retain (into the plain
// store below) appears to be elided from this excerpt.
3855 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
3858 case Qualifiers::OCL_None:
3859 case Qualifiers::OCL_ExplicitNone:
3860 case Qualifiers::OCL_Autoreleasing:
// NOTE(review): the 'return;' statements terminating this case and the
// OCL_Weak case below appear to be elided from this excerpt.
3861 CGF.Builder.CreateStore(CastExn, ParamAddr);
3864 case Qualifiers::OCL_Weak:
3865 CGF.EmitARCInitWeak(ParamAddr, CastExn);
3868 llvm_unreachable("bad ownership qualifier!");
3871 // Otherwise, it returns a pointer into the exception object.
3873 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3874 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3876 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
3877 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
// NOTE(review): the 'switch (TEK) { case TEK_Complex:' scaffolding around
// the complex/scalar copies below appears to be elided from this excerpt.
3880 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
3884 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
3885 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
3889 llvm_unreachable("evaluation kind filtered out!");
3891 llvm_unreachable("bad evaluation kind");
// From here on, the catch type is an aggregate record.
3894 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
3895 auto catchRD = CatchType->getAsCXXRecordDecl();
3896 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
3898 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3900 // Check for a copy expression. If we don't have a copy expression,
3901 // that means a trivial copy is okay.
3902 const Expr *copyExpr = CatchParam.getInit();
// NOTE(review): the 'if (!copyExpr) {' guard introducing the trivial-copy
// path below (and its closing 'return;') appears to be elided.
3904 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
3905 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
3906 caughtExnAlignment);
3907 CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
3911 // We have to call __cxa_get_exception_ptr to get the adjusted
3912 // pointer before copying.
3913 llvm::CallInst *rawAdjustedExn =
3914 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
3916 // Cast that to the appropriate type.
3917 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
3918 caughtExnAlignment);
3920 // The copy expression is defined in terms of an OpaqueValueExpr.
3921 // Find it and map it to the adjusted expression.
3922 CodeGenFunction::OpaqueValueMapping
3923 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
3924 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
3926 // Call the copy ctor in a terminate scope.
// A throw during exception-object copy must call std::terminate.
3927 CGF.EHStack.pushTerminate();
3929 // Perform the copy construction.
3930 CGF.EmitAggExpr(copyExpr,
3931 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
3932 AggValueSlot::IsNotDestructed,
3933 AggValueSlot::DoesNotNeedGCBarriers,
3934 AggValueSlot::IsNotAliased));
3936 // Leave the terminate scope.
3937 CGF.EHStack.popTerminate();
3939 // Undo the opaque value mapping.
3942 // Finally we can call __cxa_begin_catch.
// Only now is the exception marked as handled; the copy above used
// __cxa_get_exception_ptr so a throwing copy ctor terminates cleanly.
3943 CallBeginCatch(CGF, Exn, true);
3946 /// Begins a catch statement by initializing the catch variable and
3947 /// calling __cxa_begin_catch.
3948 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
3949 const CXXCatchStmt *S) {
3950 // We have to be very careful with the ordering of cleanups here:
3951 // C++ [except.throw]p4:
3952 // The destruction [of the exception temporary] occurs
3953 // immediately after the destruction of the object declared in
3954 // the exception-declaration in the handler.
3956 // So the precise ordering is:
3957 // 1. Construct catch variable.
3958 // 2. __cxa_begin_catch
3959 // 3. Enter __cxa_end_catch cleanup
3960 // 4. Enter dtor cleanup
3962 // We do this by using a slightly abnormal initialization process.
3963 // Delegation sequence:
3964 // - ExitCXXTryStmt opens a RunCleanupsScope
3965 // - EmitAutoVarAlloca creates the variable and debug info
3966 // - InitCatchParam initializes the variable from the exception
3967 // - CallBeginCatch calls __cxa_begin_catch
3968 // - CallBeginCatch enters the __cxa_end_catch cleanup
3969 // - EmitAutoVarCleanups enters the variable destructor cleanup
3970 // - EmitCXXTryStmt emits the code for the catch body
3971 // - EmitCXXTryStmt close the RunCleanupsScope
3973 VarDecl *CatchParam = S->getExceptionDecl();
// NOTE(review): the 'if (!CatchParam) {' guard for the catch-all /
// unnamed-catch case (which calls CallBeginCatch directly and returns)
// appears to be elided around the next two lines — confirm against the
// complete file.
3975 llvm::Value *Exn = CGF.getExceptionFromSlot();
3976 CallBeginCatch(CGF, Exn, true);
// Named catch parameter: allocate it, initialize it from the exception via
// InitCatchParam, then enter its destructor cleanup.
3981 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
3982 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
3983 CGF.EmitAutoVarCleanups(var);
3986 /// Get or define the following function:
3987 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
3988 /// This code is used only in C++.
3989 static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
3990 llvm::FunctionType *fnTy =
3991 llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3992 llvm::Constant *fnRef = CGM.CreateRuntimeFunction(
3993 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
// Only emit the body the first time: fn is null if the symbol resolved to
// something other than a function, and non-empty if already defined.
3995 llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
3996 if (fn && fn->empty()) {
3997 fn->setDoesNotThrow();
3998 fn->setDoesNotReturn();
4000 // What we really want is to massively penalize inlining without
4001 // forbidding it completely. The difference between that and
4002 // 'noinline' is negligible.
4003 fn->addFnAttr(llvm::Attribute::NoInline);
4005 // Allow this function to be shared across translation units, but
4006 // we don't want it to turn into an exported symbol.
4007 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4008 fn->setVisibility(llvm::Function::HiddenVisibility);
4009 if (CGM.supportsCOMDAT())
4010 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4012 // Set up the function.
4013 llvm::BasicBlock *entry =
4014 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4015 CGBuilderTy builder(CGM, entry);
4017 // Pull the exception pointer out of the parameter list.
4018 llvm::Value *exn = &*fn->arg_begin();
4020 // Call __cxa_begin_catch(exn).
// Entering the catch first lets std::terminate handlers inspect the
// current exception.
4021 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4022 catchCall->setDoesNotThrow();
4023 catchCall->setCallingConv(CGM.getRuntimeCC());
4025 // Call std::terminate().
4026 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4027 termCall->setDoesNotThrow();
4028 termCall->setDoesNotReturn();
4029 termCall->setCallingConv(CGM.getRuntimeCC());
4031 // std::terminate cannot return.
// NOTE(review): the closing braces and the final 'return fnRef;' appear to
// be elided from this excerpt.
4032 builder.CreateUnreachable();
// Emits the terminate call used when an unexpected exception escapes.
// NOTE(review): the return-type line ('llvm::CallInst *'), the
// 'llvm::Value *Exn) {' parameter line, and the 'if (Exn) {' guard around
// the C++ path appear to be elided from this excerpt — confirm against the
// complete file.
4039 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4041 // In C++, we want to call __cxa_begin_catch() before terminating.
4043 assert(CGF.CGM.getLangOpts().CPlusPlus);
4044 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
// Without an exception pointer, fall back to calling std::terminate directly.
4046 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
// Loads the vtable pointer stored at the start of the object at This and
// returns it paired with the class whose layout was used for the load.
4049 std::pair<llvm::Value *, const CXXRecordDecl *>
4050 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4051 const CXXRecordDecl *RD) {
4052 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};