1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This provides C++ code generation targeting the Itanium C++ ABI. The class
11 // in this file generates structures that follow the Itanium C++ ABI, which is
13 // http://www.codesourcery.com/public/cxx-abi/abi.html
14 // http://www.codesourcery.com/public/cxx-abi/abi-eh.html
16 // It also supports the closely-related ARM ABI, documented at:
17 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
19 //===----------------------------------------------------------------------===//
22 #include "CGCleanup.h"
23 #include "CGRecordLayout.h"
24 #include "CGVTables.h"
25 #include "CodeGenFunction.h"
26 #include "CodeGenModule.h"
27 #include "TargetInfo.h"
28 #include "clang/CodeGen/ConstantInitBuilder.h"
29 #include "clang/AST/Mangle.h"
30 #include "clang/AST/Type.h"
31 #include "clang/AST/StmtCXX.h"
32 #include "llvm/IR/CallSite.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
38 using namespace clang;
39 using namespace CodeGen;
42 class ItaniumCXXABI : public CodeGen::CGCXXABI {
43 /// VTables - All the vtables which have been defined.
44 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
47 bool UseARMMethodPtrABI;
48 bool UseARMGuardVarABI;
49 bool Use32BitVTableOffsetABI;
51 ItaniumMangleContext &getMangleContext() {
52 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
56 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
57 bool UseARMMethodPtrABI = false,
58 bool UseARMGuardVarABI = false) :
59 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
60 UseARMGuardVarABI(UseARMGuardVarABI),
61 Use32BitVTableOffsetABI(false) { }
63 bool classifyReturnType(CGFunctionInfo &FI) const override;
65 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
66 // Structures with either a non-trivial destructor or a non-trivial
67 // copy constructor are always indirect.
68 // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
70 if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor())
75 bool isThisCompleteObject(GlobalDecl GD) const override {
76 // The Itanium ABI has separate complete-object vs. base-object
77 // variants of both constructors and destructors.
78 if (isa<CXXDestructorDecl>(GD.getDecl())) {
79 switch (GD.getDtorType()) {
88 llvm_unreachable("emitting dtor comdat as function?");
90 llvm_unreachable("bad dtor kind");
92 if (isa<CXXConstructorDecl>(GD.getDecl())) {
93 switch (GD.getCtorType()) {
100 case Ctor_CopyingClosure:
101 case Ctor_DefaultClosure:
102 llvm_unreachable("closure ctors in Itanium ABI?");
105 llvm_unreachable("emitting ctor comdat as function?");
107 llvm_unreachable("bad dtor kind");
114 bool isZeroInitializable(const MemberPointerType *MPT) override;
116 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122 llvm::Value *&ThisPtrForCall,
123 llvm::Value *MemFnPtr,
124 const MemberPointerType *MPT) override;
127 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130 const MemberPointerType *MPT) override;
132 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
134 llvm::Value *Src) override;
135 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
136 llvm::Constant *Src) override;
138 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
140 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
141 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
142 CharUnits offset) override;
143 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
144 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
145 CharUnits ThisAdjustment);
147 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
148 llvm::Value *L, llvm::Value *R,
149 const MemberPointerType *MPT,
150 bool Inequality) override;
152 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
154 const MemberPointerType *MPT) override;
156 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
157 Address Ptr, QualType ElementType,
158 const CXXDestructorDecl *Dtor) override;
160 CharUnits getAlignmentOfExnObject() {
161 unsigned Align = CGM.getContext().getTargetInfo().getExnObjectAlignment();
162 return CGM.getContext().toCharUnitsFromBits(Align);
165 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
166 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
168 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
171 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
172 llvm::Value *Exn) override;
174 void EmitFundamentalRTTIDescriptor(QualType Type, bool DLLExport);
175 void EmitFundamentalRTTIDescriptors(bool DLLExport);
176 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
178 getAddrOfCXXCatchHandlerType(QualType Ty,
179 QualType CatchHandlerType) override {
180 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
183 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
184 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
185 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
187 llvm::Type *StdTypeInfoPtrTy) override;
189 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
190 QualType SrcRecordTy) override;
192 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
193 QualType SrcRecordTy, QualType DestTy,
194 QualType DestRecordTy,
195 llvm::BasicBlock *CastEnd) override;
197 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
198 QualType SrcRecordTy,
199 QualType DestTy) override;
201 bool EmitBadCastCall(CodeGenFunction &CGF) override;
204 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
205 const CXXRecordDecl *ClassDecl,
206 const CXXRecordDecl *BaseClassDecl) override;
208 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
211 buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
212 SmallVectorImpl<CanQualType> &ArgTys) override;
214 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
215 CXXDtorType DT) const override {
216 // Itanium does not emit any destructor variant as an inline thunk.
217 // Delegating may occur as an optimization, but all variants are either
218 // emitted with external linkage or as linkonce if they are inline and used.
222 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
224 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
225 FunctionArgList &Params) override;
227 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
230 addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
231 CXXCtorType Type, bool ForVirtualBase,
232 bool Delegating, CallArgList &Args) override;
234 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
235 CXXDtorType Type, bool ForVirtualBase,
236 bool Delegating, Address This) override;
238 void emitVTableDefinitions(CodeGenVTables &CGVT,
239 const CXXRecordDecl *RD) override;
241 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
242 CodeGenFunction::VPtr Vptr) override;
244 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249 getVTableAddressPoint(BaseSubobject Base,
250 const CXXRecordDecl *VTableClass) override;
252 llvm::Value *getVTableAddressPointInStructor(
253 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
254 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
256 llvm::Value *getVTableAddressPointInStructorWithVTT(
257 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
261 getVTableAddressPointForConstExpr(BaseSubobject Base,
262 const CXXRecordDecl *VTableClass) override;
264 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
265 CharUnits VPtrOffset) override;
267 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
268 Address This, llvm::Type *Ty,
269 SourceLocation Loc) override;
271 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
272 const CXXDestructorDecl *Dtor,
273 CXXDtorType DtorType,
275 const CXXMemberCallExpr *CE) override;
277 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
279 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
281 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
282 bool ReturnAdjustment) override {
283 // Allow inlining of thunks by emitting them with available_externally
284 // linkage together with vtables when needed.
285 if (ForVTable && !Thunk->hasLocalLinkage())
286 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
289 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
290 const ThisAdjustment &TA) override;
292 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
293 const ReturnAdjustment &RA) override;
295 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
296 FunctionArgList &Args) const override {
297 assert(!Args.empty() && "expected the arglist to not be empty!");
298 return Args.size() - 1;
301 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
302 StringRef GetDeletedVirtualCallName() override
303 { return "__cxa_deleted_virtual"; }
305 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
306 Address InitializeArrayCookie(CodeGenFunction &CGF,
308 llvm::Value *NumElements,
309 const CXXNewExpr *expr,
310 QualType ElementType) override;
311 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
313 CharUnits cookieSize) override;
315 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
316 llvm::GlobalVariable *DeclPtr,
317 bool PerformInit) override;
318 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
319 llvm::Constant *dtor, llvm::Constant *addr) override;
321 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
323 void EmitThreadLocalInitFuncs(
325 ArrayRef<const VarDecl *> CXXThreadLocals,
326 ArrayRef<llvm::Function *> CXXThreadLocalInits,
327 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
329 bool usesThreadWrapperFunction() const override { return true; }
330 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
331 QualType LValType) override;
333 bool NeedsVTTParameter(GlobalDecl GD) override;
335 /**************************** RTTI Uniqueness ******************************/
338 /// Returns true if the ABI requires RTTI type_info objects to be unique
339 /// across a program.
340 virtual bool shouldRTTIBeUnique() const { return true; }
343 /// What sort of unique-RTTI behavior should we use?
344 enum RTTIUniquenessKind {
345 /// We are guaranteeing, or need to guarantee, that the RTTI string
349 /// We are not guaranteeing uniqueness for the RTTI string, so we
350 /// can demote to hidden visibility but must use string comparisons.
353 /// We are not guaranteeing uniqueness for the RTTI string, so we
354 /// have to use string comparisons, but we also have to emit it with
355 /// non-hidden visibility.
359 /// Return the required visibility status for the given type and linkage in
362 classifyRTTIUniqueness(QualType CanTy,
363 llvm::GlobalValue::LinkageTypes Linkage) const;
364 friend class ItaniumRTTIBuilder;
366 void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
369 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
370 const auto &VtableLayout =
371 CGM.getItaniumVTableContext().getVTableLayout(RD);
373 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
375 if (!VtableComponent.isUsedFunctionPointerKind())
378 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
379 if (!Method->getCanonicalDecl()->isInlined())
382 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
383 auto *Entry = CGM.GetGlobalValue(Name);
384 // This checks if virtual inline function has already been emitted.
385 // Note that it is possible that this inline function would be emitted
386 // after trying to emit vtable speculatively. Because of this we do
387 // an extra pass after emitting all deferred vtables to find and emit
388 // these vtables opportunistically.
389 if (!Entry || Entry->isDeclaration())
395 bool isVTableHidden(const CXXRecordDecl *RD) const {
396 const auto &VtableLayout =
397 CGM.getItaniumVTableContext().getVTableLayout(RD);
399 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
400 if (VtableComponent.isRTTIKind()) {
401 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
402 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
404 } else if (VtableComponent.isUsedFunctionPointerKind()) {
405 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
406 if (Method->getVisibility() == Visibility::HiddenVisibility &&
407 !Method->isDefined())
415 class ARMCXXABI : public ItaniumCXXABI {
417 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
418 ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
419 /* UseARMGuardVarABI = */ true) {}
421 bool HasThisReturn(GlobalDecl GD) const override {
422 return (isa<CXXConstructorDecl>(GD.getDecl()) || (
423 isa<CXXDestructorDecl>(GD.getDecl()) &&
424 GD.getDtorType() != Dtor_Deleting));
427 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
428 QualType ResTy) override;
430 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
431 Address InitializeArrayCookie(CodeGenFunction &CGF,
433 llvm::Value *NumElements,
434 const CXXNewExpr *expr,
435 QualType ElementType) override;
436 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
437 CharUnits cookieSize) override;
440 class iOS64CXXABI : public ARMCXXABI {
442 iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
443 Use32BitVTableOffsetABI = true;
446 // ARM64 libraries are prepared for non-unique RTTI.
447 bool shouldRTTIBeUnique() const override { return false; }
450 class WebAssemblyCXXABI final : public ItaniumCXXABI {
452 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
453 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
454 /*UseARMGuardVarABI=*/true) {}
457 bool HasThisReturn(GlobalDecl GD) const override {
458 return isa<CXXConstructorDecl>(GD.getDecl()) ||
459 (isa<CXXDestructorDecl>(GD.getDecl()) &&
460 GD.getDtorType() != Dtor_Deleting);
462 bool canCallMismatchedFunctionType() const override { return false; }
466 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
467 switch (CGM.getTarget().getCXXABI().getKind()) {
468 // For IR-generation purposes, there's no significant difference
469 // between the ARM and iOS ABIs.
470 case TargetCXXABI::GenericARM:
471 case TargetCXXABI::iOS:
472 case TargetCXXABI::WatchOS:
473 return new ARMCXXABI(CGM);
475 case TargetCXXABI::iOS64:
476 return new iOS64CXXABI(CGM);
478 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
479 // include the other 32-bit ARM oddities: constructor/destructor return values
480 // and array cookies.
481 case TargetCXXABI::GenericAArch64:
482 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
483 /* UseARMGuardVarABI = */ true);
485 case TargetCXXABI::GenericMIPS:
486 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true);
488 case TargetCXXABI::WebAssembly:
489 return new WebAssemblyCXXABI(CGM);
491 case TargetCXXABI::GenericItanium:
492 if (CGM.getContext().getTargetInfo().getTriple().getArch()
493 == llvm::Triple::le32) {
494 // For PNaCl, use ARM-style method pointers so that PNaCl code
495 // does not assume anything about the alignment of function
497 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
498 /* UseARMGuardVarABI = */ false);
500 return new ItaniumCXXABI(CGM);
502 case TargetCXXABI::Microsoft:
503 llvm_unreachable("Microsoft ABI is not Itanium-based");
505 llvm_unreachable("bad ABI kind");
509 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
510 if (MPT->isMemberDataPointer())
511 return CGM.PtrDiffTy;
512 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
515 /// In the Itanium and ARM ABIs, method pointers have the form:
516 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
518 /// In the Itanium ABI:
519 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero
520 /// - the this-adjustment is (memptr.adj)
521 /// - the virtual offset is (memptr.ptr - 1)
524 /// - method pointers are virtual if (memptr.adj & 1) is nonzero
525 /// - the this-adjustment is (memptr.adj >> 1)
526 /// - the virtual offset is (memptr.ptr)
527 /// ARM uses 'adj' for the virtual flag because Thumb functions
528 /// may be only single-byte aligned.
530 /// If the member is virtual, the adjusted 'this' pointer points
531 /// to a vtable pointer from which the virtual offset is applied.
533 /// If the member is non-virtual, memptr.ptr is the address of
534 /// the function to call.
535 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
536 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
537 llvm::Value *&ThisPtrForCall,
538 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
539 CGBuilderTy &Builder = CGF.Builder;
541 const FunctionProtoType *FPT =
542 MPT->getPointeeType()->getAs<FunctionProtoType>();
543 const CXXRecordDecl *RD =
544 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
546 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
547 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
549 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
551 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
552 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
553 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
555 // Extract memptr.adj, which is in the second field.
556 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
558 // Compute the true adjustment.
559 llvm::Value *Adj = RawAdj;
560 if (UseARMMethodPtrABI)
561 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
563 // Apply the adjustment and cast back to the original struct type
565 llvm::Value *This = ThisAddr.getPointer();
566 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
567 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
568 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
569 ThisPtrForCall = This;
571 // Load the function pointer.
572 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
574 // If the LSB in the function pointer is 1, the function pointer points to
575 // a virtual function.
576 llvm::Value *IsVirtual;
577 if (UseARMMethodPtrABI)
578 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
580 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
581 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
582 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
584 // In the virtual path, the adjustment left 'This' pointing to the
585 // vtable of the correct base subobject. The "function pointer" is an
586 // offset within the vtable (+1 for the virtual flag on non-ARM).
587 CGF.EmitBlock(FnVirtual);
589 // Cast the adjusted this to a pointer to vtable pointer and load.
590 llvm::Type *VTableTy = Builder.getInt8PtrTy();
591 CharUnits VTablePtrAlign =
592 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
593 CGF.getPointerAlign());
594 llvm::Value *VTable =
595 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
598 // On ARM64, to reserve extra space in virtual member function pointers,
599 // we only pay attention to the low 32 bits of the offset.
600 llvm::Value *VTableOffset = FnAsInt;
601 if (!UseARMMethodPtrABI)
602 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
603 if (Use32BitVTableOffsetABI) {
604 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
605 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
607 VTable = Builder.CreateGEP(VTable, VTableOffset);
609 // Load the virtual function to call.
610 VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
611 llvm::Value *VirtualFn =
612 Builder.CreateAlignedLoad(VTable, CGF.getPointerAlign(),
614 CGF.EmitBranch(FnEnd);
616 // In the non-virtual path, the function pointer is actually a
618 CGF.EmitBlock(FnNonVirtual);
619 llvm::Value *NonVirtualFn =
620 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
623 CGF.EmitBlock(FnEnd);
624 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
625 CalleePtr->addIncoming(VirtualFn, FnVirtual);
626 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
628 CGCallee Callee(FPT, CalleePtr);
632 /// Compute an l-value by applying the given pointer-to-member to a
634 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
635 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
636 const MemberPointerType *MPT) {
637 assert(MemPtr->getType() == CGM.PtrDiffTy);
639 CGBuilderTy &Builder = CGF.Builder;
642 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
644 // Apply the offset, which we assume is non-null.
646 Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
648 // Cast the address to the appropriate pointer type, adopting the
649 // address space of the base pointer.
650 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
651 ->getPointerTo(Base.getAddressSpace());
652 return Builder.CreateBitCast(Addr, PType);
655 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
658 /// Bitcast conversions are always a no-op under Itanium.
660 /// Obligatory offset/adjustment diagram:
661 /// <-- offset --> <-- adjustment -->
662 /// |--------------------------|----------------------|--------------------|
663 /// ^Derived address point ^Base address point ^Member address point
665 /// So when converting a base member pointer to a derived member pointer,
666 /// we add the offset to the adjustment because the address point has
667 /// decreased; and conversely, when converting a derived MP to a base MP
668 /// we subtract the offset from the adjustment because the address point
671 /// The standard forbids (at compile time) conversion to and from
672 /// virtual bases, which is why we don't have to consider them here.
674 /// The standard forbids (at run time) casting a derived MP to a base
675 /// MP when the derived MP does not point to a member of the base.
676 /// This is why -1 is a reasonable choice for null data member
679 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
682 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
683 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
684 E->getCastKind() == CK_ReinterpretMemberPointer);
686 // Under Itanium, reinterprets don't require any additional processing.
687 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
689 // Use constant emission if we can.
690 if (isa<llvm::Constant>(src))
691 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
693 llvm::Constant *adj = getMemberPointerAdjustment(E);
694 if (!adj) return src;
696 CGBuilderTy &Builder = CGF.Builder;
697 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
699 const MemberPointerType *destTy =
700 E->getType()->castAs<MemberPointerType>();
702 // For member data pointers, this is just a matter of adding the
703 // offset if the source is non-null.
704 if (destTy->isMemberDataPointer()) {
707 dst = Builder.CreateNSWSub(src, adj, "adj");
709 dst = Builder.CreateNSWAdd(src, adj, "adj");
712 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
713 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
714 return Builder.CreateSelect(isNull, src, dst);
717 // The this-adjustment is left-shifted by 1 on ARM.
718 if (UseARMMethodPtrABI) {
719 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
721 adj = llvm::ConstantInt::get(adj->getType(), offset);
724 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
727 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
729 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
731 return Builder.CreateInsertValue(src, dstAdj, 1);
735 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
736 llvm::Constant *src) {
737 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
738 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
739 E->getCastKind() == CK_ReinterpretMemberPointer);
741 // Under Itanium, reinterprets don't require any additional processing.
742 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
744 // If the adjustment is trivial, we don't need to do anything.
745 llvm::Constant *adj = getMemberPointerAdjustment(E);
746 if (!adj) return src;
748 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
750 const MemberPointerType *destTy =
751 E->getType()->castAs<MemberPointerType>();
753 // For member data pointers, this is just a matter of adding the
754 // offset if the source is non-null.
755 if (destTy->isMemberDataPointer()) {
756 // null maps to null.
757 if (src->isAllOnesValue()) return src;
760 return llvm::ConstantExpr::getNSWSub(src, adj);
762 return llvm::ConstantExpr::getNSWAdd(src, adj);
765 // The this-adjustment is left-shifted by 1 on ARM.
766 if (UseARMMethodPtrABI) {
767 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
769 adj = llvm::ConstantInt::get(adj->getType(), offset);
772 llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
773 llvm::Constant *dstAdj;
775 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
777 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
779 return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
783 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
784 // Itanium C++ ABI 2.3:
785 // A NULL pointer is represented as -1.
786 if (MPT->isMemberDataPointer())
787 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
789 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
790 llvm::Constant *Values[2] = { Zero, Zero };
791 return llvm::ConstantStruct::getAnon(Values);
795 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
797 // Itanium C++ ABI 2.3:
798 // A pointer to data member is an offset from the base address of
799 // the class object containing it, represented as a ptrdiff_t
800 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
804 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
805 return BuildMemberPointer(MD, CharUnits::Zero());
808 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
809 CharUnits ThisAdjustment) {
810 assert(MD->isInstance() && "Member function must not be static!");
811 MD = MD->getCanonicalDecl();
813 CodeGenTypes &Types = CGM.getTypes();
815 // Get the function pointer (or index if this is a virtual function).
816 llvm::Constant *MemPtr[2];
817 if (MD->isVirtual()) {
818 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
820 const ASTContext &Context = getContext();
821 CharUnits PointerWidth =
822 Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
823 uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
825 if (UseARMMethodPtrABI) {
826 // ARM C++ ABI 3.2.1:
827 // This ABI specifies that adj contains twice the this
828 // adjustment, plus 1 if the member function is virtual. The
829 // least significant bit of adj then makes exactly the same
830 // discrimination as the least significant bit of ptr does for
832 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
833 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
834 2 * ThisAdjustment.getQuantity() + 1);
836 // Itanium C++ ABI 2.3:
837 // For a virtual function, [the pointer field] is 1 plus the
838 // virtual table offset (in bytes) of the function,
839 // represented as a ptrdiff_t.
840 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
841 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
842 ThisAdjustment.getQuantity());
845 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
847 // Check whether the function has a computable LLVM signature.
848 if (Types.isFuncTypeConvertible(FPT)) {
849 // The function has a computable LLVM signature; use the correct type.
850 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
852 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
853 // function type is incomplete.
856 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
858 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
859 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
860 (UseARMMethodPtrABI ? 2 : 1) *
861 ThisAdjustment.getQuantity());
864 return llvm::ConstantStruct::getAnon(MemPtr);
867 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
869 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
870 const ValueDecl *MPD = MP.getMemberPointerDecl();
872 return EmitNullMemberPointer(MPT);
874 CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
876 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
877 return BuildMemberPointer(MD, ThisAdjustment);
879 CharUnits FieldOffset =
880 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
881 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
884 /// The comparison algorithm is pretty easy: the member pointers are
885 /// the same if they're either bitwise identical *or* both null.
887 /// ARM is different here only because null-ness is more complicated.
889 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
892 const MemberPointerType *MPT,
894 CGBuilderTy &Builder = CGF.Builder;
896 llvm::ICmpInst::Predicate Eq;
897 llvm::Instruction::BinaryOps And, Or;
899 Eq = llvm::ICmpInst::ICMP_NE;
900 And = llvm::Instruction::Or;
901 Or = llvm::Instruction::And;
903 Eq = llvm::ICmpInst::ICMP_EQ;
904 And = llvm::Instruction::And;
905 Or = llvm::Instruction::Or;
908 // Member data pointers are easy because there's a unique null
909 // value, so it just comes down to bitwise equality.
910 if (MPT->isMemberDataPointer())
911 return Builder.CreateICmp(Eq, L, R);
913 // For member function pointers, the tautologies are more complex.
914 // The Itanium tautology is:
915 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
916 // The ARM tautology is:
917 // (L == R) <==> (L.ptr == R.ptr &&
918 // (L.adj == R.adj ||
919 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
920 // The inequality tautologies have exactly the same structure, except
921 // applying De Morgan's laws.
923 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
924 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
926 // This condition tests whether L.ptr == R.ptr. This must always be
927 // true for equality to hold.
928 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
930 // This condition, together with the assumption that L.ptr == R.ptr,
931 // tests whether the pointers are both null. ARM imposes an extra
933 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
934 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
936 // This condition tests whether L.adj == R.adj. If this isn't
937 // true, the pointers are unequal unless they're both null.
938 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
939 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
940 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
942 // Null member function pointers on ARM clear the low bit of Adj,
943 // so the zero condition has to check that neither low bit is set.
944 if (UseARMMethodPtrABI) {
945 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
947 // Compute (l.adj | r.adj) & 1 and test it against zero.
948 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
949 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
950 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
952 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
955 // Tie together all our conditions.
956 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
957 Result = Builder.CreateBinOp(And, PtrEq, Result,
958 Inequality ? "memptr.ne" : "memptr.eq");
963 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
965 const MemberPointerType *MPT) {
966 CGBuilderTy &Builder = CGF.Builder;
968 /// For member data pointers, this is just a check against -1.
969 if (MPT->isMemberDataPointer()) {
970 assert(MemPtr->getType() == CGM.PtrDiffTy);
971 llvm::Value *NegativeOne =
972 llvm::Constant::getAllOnesValue(MemPtr->getType());
973 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
976 // In Itanium, a member function pointer is not null if 'ptr' is not null.
977 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
979 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
980 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
982 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
983 // (the virtual bit) is set.
984 if (UseARMMethodPtrABI) {
985 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
986 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
987 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
988 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
990 Result = Builder.CreateOr(Result, IsVirtual);
996 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
997 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1001 // Return indirectly if we have a non-trivial copy ctor or non-trivial dtor.
1002 // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
1004 if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) {
1005 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1006 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1012 /// The Itanium ABI requires non-zero initialization only for data
1013 /// member pointers, for which '0' is a valid offset.
1014 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1015 return MPT->isMemberFunctionPointer();
1018 /// The Itanium ABI always places an offset to the complete object
1019 /// at entry -2 in the vtable.
1020 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1021 const CXXDeleteExpr *DE,
1023 QualType ElementType,
1024 const CXXDestructorDecl *Dtor) {
1025 bool UseGlobalDelete = DE->isGlobalDelete();
1026 if (UseGlobalDelete) {
1027 // Derive the complete-object pointer, which is what we need
1028 // to pass to the deallocation function.
1030 // Grab the vtable pointer as an intptr_t*.
1032 cast<CXXRecordDecl>(ElementType->getAs<RecordType>()->getDecl());
1033 llvm::Value *VTable =
1034 CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1036 // Track back to entry -2 and pull out the offset there.
1037 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1038 VTable, -2, "complete-offset.ptr");
1039 llvm::Value *Offset =
1040 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1042 // Apply the offset.
1043 llvm::Value *CompletePtr =
1044 CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1045 CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
1047 // If we're supposed to call the global delete, make sure we do so
1048 // even if the destructor throws.
1049 CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1053 // FIXME: Provide a source location here even though there's no
1054 // CXXMemberCallExpr for dtor call.
1055 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1056 EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
1058 if (UseGlobalDelete)
1059 CGF.PopCleanupBlock();
1062 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1063 // void __cxa_rethrow();
1065 llvm::FunctionType *FTy =
1066 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
1068 llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1071 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1073 CGF.EmitRuntimeCallOrInvoke(Fn);
1076 static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
1077 // void *__cxa_allocate_exception(size_t thrown_size);
1079 llvm::FunctionType *FTy =
1080 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);
1082 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1085 static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
1086 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1087 // void (*dest) (void *));
1089 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1090 llvm::FunctionType *FTy =
1091 llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);
1093 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1096 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1097 QualType ThrowType = E->getSubExpr()->getType();
1098 // Now allocate the exception object.
1099 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1100 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1102 llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
1103 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1104 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1106 CharUnits ExnAlign = getAlignmentOfExnObject();
1107 CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
1109 // Now throw the exception.
1110 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1113 // The address of the destructor. If the exception type has a
1114 // trivial destructor (or isn't a record), we just pass null.
1115 llvm::Constant *Dtor = nullptr;
1116 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1117 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1118 if (!Record->hasTrivialDestructor()) {
1119 CXXDestructorDecl *DtorD = Record->getDestructor();
1120 Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
1121 Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1124 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1126 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1127 CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1130 static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1131 // void *__dynamic_cast(const void *sub,
1132 // const abi::__class_type_info *src,
1133 // const abi::__class_type_info *dst,
1134 // std::ptrdiff_t src2dst_offset);
1136 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1137 llvm::Type *PtrDiffTy =
1138 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1140 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1142 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1144 // Mark the function as nounwind readonly.
1145 llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1146 llvm::Attribute::ReadOnly };
1147 llvm::AttributeList Attrs = llvm::AttributeList::get(
1148 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1150 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1153 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1154 // void __cxa_bad_cast();
1155 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1156 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1159 /// \brief Compute the src2dst_offset hint as described in the
1160 /// Itanium C++ ABI [2.9.7]
1161 static CharUnits computeOffsetHint(ASTContext &Context,
1162 const CXXRecordDecl *Src,
1163 const CXXRecordDecl *Dst) {
1164 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1165 /*DetectVirtual=*/false);
1167 // If Dst is not derived from Src we can skip the whole computation below and
1168 // return that Src is not a public base of Dst. Record all inheritance paths.
1169 if (!Dst->isDerivedFrom(Src, Paths))
1170 return CharUnits::fromQuantity(-2ULL);
1172 unsigned NumPublicPaths = 0;
1175 // Now walk all possible inheritance paths.
1176 for (const CXXBasePath &Path : Paths) {
1177 if (Path.Access != AS_public) // Ignore non-public inheritance.
1182 for (const CXXBasePathElement &PathElement : Path) {
1183 // If the path contains a virtual base class we can't give any hint.
1185 if (PathElement.Base->isVirtual())
1186 return CharUnits::fromQuantity(-1ULL);
1188 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1191 // Accumulate the base class offsets.
1192 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1193 Offset += L.getBaseClassOffset(
1194 PathElement.Base->getType()->getAsCXXRecordDecl());
1198 // -2: Src is not a public base of Dst.
1199 if (NumPublicPaths == 0)
1200 return CharUnits::fromQuantity(-2ULL);
1202 // -3: Src is a multiple public base type but never a virtual base type.
1203 if (NumPublicPaths > 1)
1204 return CharUnits::fromQuantity(-3ULL);
1206 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1207 // Return the offset of Src from the origin of Dst.
1211 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1212 // void __cxa_bad_typeid();
1213 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1215 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1218 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1219 QualType SrcRecordTy) {
1223 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1224 llvm::Value *Fn = getBadTypeidFn(CGF);
1225 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1226 CGF.Builder.CreateUnreachable();
1229 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1230 QualType SrcRecordTy,
1232 llvm::Type *StdTypeInfoPtrTy) {
1234 cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1235 llvm::Value *Value =
1236 CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1238 // Load the type info.
1239 Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1240 return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
1243 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1244 QualType SrcRecordTy) {
1248 llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1249 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1250 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1251 llvm::Type *PtrDiffLTy =
1252 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1253 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1255 llvm::Value *SrcRTTI =
1256 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1257 llvm::Value *DestRTTI =
1258 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1260 // Compute the offset hint.
1261 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1262 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1263 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1265 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1267 // Emit the call to __dynamic_cast.
1268 llvm::Value *Value = ThisAddr.getPointer();
1269 Value = CGF.EmitCastToVoidPtr(Value);
1271 llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1272 Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1273 Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1275 /// C++ [expr.dynamic.cast]p9:
1276 /// A failed cast to reference type throws std::bad_cast
1277 if (DestTy->isReferenceType()) {
1278 llvm::BasicBlock *BadCastBlock =
1279 CGF.createBasicBlock("dynamic_cast.bad_cast");
1281 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1282 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1284 CGF.EmitBlock(BadCastBlock);
1285 EmitBadCastCall(CGF);
1291 llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1293 QualType SrcRecordTy,
1295 llvm::Type *PtrDiffLTy =
1296 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1297 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1300 cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1301 // Get the vtable pointer.
1302 llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
1305 // Get the offset-to-top from the vtable.
1306 llvm::Value *OffsetToTop =
1307 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1309 CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
1312 // Finally, add the offset to the pointer.
1313 llvm::Value *Value = ThisAddr.getPointer();
1314 Value = CGF.EmitCastToVoidPtr(Value);
1315 Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1317 return CGF.Builder.CreateBitCast(Value, DestLTy);
1320 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1321 llvm::Value *Fn = getBadCastFn(CGF);
1322 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1323 CGF.Builder.CreateUnreachable();
1328 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1330 const CXXRecordDecl *ClassDecl,
1331 const CXXRecordDecl *BaseClassDecl) {
1332 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1333 CharUnits VBaseOffsetOffset =
1334 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1337 llvm::Value *VBaseOffsetPtr =
1338 CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1339 "vbase.offset.ptr");
1340 VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1341 CGM.PtrDiffTy->getPointerTo());
1343 llvm::Value *VBaseOffset =
1344 CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
1350 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1351 // Just make sure we're in sync with TargetCXXABI.
1352 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1354 // The constructor used for constructing this as a base class;
1355 // ignores virtual bases.
1356 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1358 // The constructor used for constructing this as a complete class;
1359 // constructs the virtual bases, then calls the base constructor.
1360 if (!D->getParent()->isAbstract()) {
1361 // We don't need to emit the complete ctor if the class is abstract.
1362 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1366 CGCXXABI::AddedStructorArgs
1367 ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
1368 SmallVectorImpl<CanQualType> &ArgTys) {
1369 ASTContext &Context = getContext();
1371 // All parameters are already in place except VTT, which goes after 'this'.
1372 // These are Clang types, so we don't need to worry about sret yet.
1374 // Check if we need to add a VTT parameter (which has type void **).
1375 if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0) {
1376 ArgTys.insert(ArgTys.begin() + 1,
1377 Context.getPointerType(Context.VoidPtrTy));
1378 return AddedStructorArgs::prefix(1);
1380 return AddedStructorArgs{};
1383 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1384 // The destructor used for destructing this as a base class; ignores
1386 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1388 // The destructor used for destructing this as a most-derived class;
1389 // call the base destructor and then destructs any virtual bases.
1390 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1392 // The destructor in a virtual table is always a 'deleting'
1393 // destructor, which calls the complete destructor and then uses the
1394 // appropriate operator delete.
1396 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1399 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1401 FunctionArgList &Params) {
1402 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1403 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1405 // Check if we need a VTT parameter as well.
1406 if (NeedsVTTParameter(CGF.CurGD)) {
1407 ASTContext &Context = getContext();
1409 // FIXME: avoid the fake decl
1410 QualType T = Context.getPointerType(Context.VoidPtrTy);
1411 auto *VTTDecl = ImplicitParamDecl::Create(
1412 Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1413 T, ImplicitParamDecl::CXXVTT);
1414 Params.insert(Params.begin() + 1, VTTDecl);
1415 getStructorImplicitParamDecl(CGF) = VTTDecl;
1419 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1420 // Naked functions have no prolog.
1421 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1424 /// Initialize the 'this' slot.
1427 /// Initialize the 'vtt' slot if needed.
1428 if (getStructorImplicitParamDecl(CGF)) {
1429 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1430 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1433 /// If this is a function that the ABI specifies returns 'this', initialize
1434 /// the return slot to 'this' at the start of the function.
1436 /// Unlike the setting of return types, this is done within the ABI
1437 /// implementation instead of by clients of CGCXXABI because:
1438 /// 1) getThisValue is currently protected
1439 /// 2) in theory, an ABI could implement 'this' returns some other way;
1440 /// HasThisReturn only specifies a contract, not the implementation
1441 if (HasThisReturn(CGF.CurGD))
1442 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1445 CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
1446 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1447 bool ForVirtualBase, bool Delegating, CallArgList &Args) {
1448 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1449 return AddedStructorArgs{};
1451 // Insert the implicit 'vtt' argument as the second argument.
1453 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1454 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1455 Args.insert(Args.begin() + 1,
1456 CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
1457 return AddedStructorArgs::prefix(1); // Added one arg.
1460 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1461 const CXXDestructorDecl *DD,
1462 CXXDtorType Type, bool ForVirtualBase,
1463 bool Delegating, Address This) {
1464 GlobalDecl GD(DD, Type);
1465 llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1466 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1469 if (getContext().getLangOpts().AppleKext &&
1470 Type != Dtor_Base && DD->isVirtual())
1471 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1474 CGCallee::forDirect(CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
1477 CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
1478 This.getPointer(), VTT, VTTTy,
1482 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1483 const CXXRecordDecl *RD) {
1484 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1485 if (VTable->hasInitializer())
1488 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1489 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1490 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1491 llvm::Constant *RTTI =
1492 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1494 // Create and set the initializer.
1495 ConstantInitBuilder Builder(CGM);
1496 auto Components = Builder.beginStruct();
1497 CGVT.createVTableInitializer(Components, VTLayout, RTTI);
1498 Components.finishAndSetAsInitializer(VTable);
1500 // Set the correct linkage.
1501 VTable->setLinkage(Linkage);
1503 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1504 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1506 // Set the right visibility.
1507 CGM.setGlobalVisibility(VTable, RD);
1509 // Use pointer alignment for the vtable. Otherwise we would align them based
1510 // on the size of the initializer which doesn't make sense as only single
1512 unsigned PAlign = CGM.getTarget().getPointerAlign(0);
1513 VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());
1515 // If this is the magic class __cxxabiv1::__fundamental_type_info,
1516 // we will emit the typeinfo for the fundamental types. This is the
1517 // same behaviour as GCC.
1518 const DeclContext *DC = RD->getDeclContext();
1519 if (RD->getIdentifier() &&
1520 RD->getIdentifier()->isStr("__fundamental_type_info") &&
1521 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1522 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1523 DC->getParent()->isTranslationUnit())
1524 EmitFundamentalRTTIDescriptors(RD->hasAttr<DLLExportAttr>());
1526 if (!VTable->isDeclarationForLinker())
1527 CGM.EmitVTableTypeMetadata(VTable, VTLayout);
1530 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1531 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1532 if (Vptr.NearestVBase == nullptr)
1534 return NeedsVTTParameter(CGF.CurGD);
1537 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1538 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1539 const CXXRecordDecl *NearestVBase) {
1541 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1542 NeedsVTTParameter(CGF.CurGD)) {
1543 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1546 return getVTableAddressPoint(Base, VTableClass);
1550 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1551 const CXXRecordDecl *VTableClass) {
1552 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1554 // Find the appropriate vtable within the vtable group, and the address point
1555 // within that vtable.
1556 VTableLayout::AddressPointLocation AddressPoint =
1557 CGM.getItaniumVTableContext()
1558 .getVTableLayout(VTableClass)
1559 .getAddressPoint(Base);
1560 llvm::Value *Indices[] = {
1561 llvm::ConstantInt::get(CGM.Int32Ty, 0),
1562 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1563 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1566 return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1567 Indices, /*InBounds=*/true,
1568 /*InRangeIndex=*/1);
1571 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1572 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1573 const CXXRecordDecl *NearestVBase) {
1574 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1575 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1577 // Get the secondary vpointer index.
1578 uint64_t VirtualPointerIndex =
1579 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1582 llvm::Value *VTT = CGF.LoadCXXVTT();
1583 if (VirtualPointerIndex)
1584 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1586 // And load the address point from the VTT.
1587 return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
1590 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1591 BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1592 return getVTableAddressPoint(Base, VTableClass);
1595 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1596 CharUnits VPtrOffset) {
1597 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1599 llvm::GlobalVariable *&VTable = VTables[RD];
1603 // Queue up this vtable for possible deferred emission.
1604 CGM.addDeferredVTable(RD);
1606 SmallString<256> Name;
1607 llvm::raw_svector_ostream Out(Name);
1608 getMangleContext().mangleCXXVTable(RD, Out);
1610 const VTableLayout &VTLayout =
1611 CGM.getItaniumVTableContext().getVTableLayout(RD);
1612 llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1614 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1615 Name, VTableType, llvm::GlobalValue::ExternalLinkage);
1616 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1618 if (RD->hasAttr<DLLImportAttr>())
1619 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1620 else if (RD->hasAttr<DLLExportAttr>())
1621 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
1626 CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1630 SourceLocation Loc) {
1631 GD = GD.getCanonicalDecl();
1632 Ty = Ty->getPointerTo()->getPointerTo();
1633 auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1634 llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
1636 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1638 if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1639 VFunc = CGF.EmitVTableTypeCheckedLoad(
1640 MethodDecl->getParent(), VTable,
1641 VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1643 CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1645 llvm::Value *VFuncPtr =
1646 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
1648 CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
1650 // Add !invariant.load md to virtual function load to indicate that
1651 // function didn't change inside vtable.
1652 // It's safe to add it without -fstrict-vtable-pointers, but it would not
1653 // help in devirtualization because it will only matter if we will have 2
1654 // the same virtual function loads from the same vtable load, which won't
1655 // happen without enabled devirtualization with -fstrict-vtable-pointers.
1656 if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1657 CGM.getCodeGenOpts().StrictVTablePointers)
1658 VFuncLoad->setMetadata(
1659 llvm::LLVMContext::MD_invariant_load,
1660 llvm::MDNode::get(CGM.getLLVMContext(),
1661 llvm::ArrayRef<llvm::Metadata *>()));
1665 CGCallee Callee(MethodDecl, VFunc);
1669 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1670 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1671 Address This, const CXXMemberCallExpr *CE) {
1672 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1673 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1675 const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
1676 Dtor, getFromDtorType(DtorType));
1677 llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1679 getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
1680 CE ? CE->getLocStart() : SourceLocation());
1682 CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
1683 This.getPointer(), /*ImplicitParam=*/nullptr,
1684 QualType(), CE, nullptr);
1688 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1689 CodeGenVTables &VTables = CGM.getVTables();
1690 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1691 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
1694 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
1695 // We don't emit available_externally vtables if we are in -fapple-kext mode
1696 // because kext mode does not permit devirtualization.
1697 if (CGM.getLangOpts().AppleKext)
1700 // If we don't have any not emitted inline virtual function, and if vtable is
1701 // not hidden, then we are safe to emit available_externally copy of vtable.
1702 // FIXME we can still emit a copy of the vtable if we
1703 // can emit definition of the inline functions.
1704 return !hasAnyUnusedVirtualInlineFunction(RD) && !isVTableHidden(RD);
1706 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
1708 int64_t NonVirtualAdjustment,
1709 int64_t VirtualAdjustment,
1710 bool IsReturnAdjustment) {
1711 if (!NonVirtualAdjustment && !VirtualAdjustment)
1712 return InitialPtr.getPointer();
1714 Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
1716 // In a base-to-derived cast, the non-virtual adjustment is applied first.
1717 if (NonVirtualAdjustment && !IsReturnAdjustment) {
1718 V = CGF.Builder.CreateConstInBoundsByteGEP(V,
1719 CharUnits::fromQuantity(NonVirtualAdjustment));
1722 // Perform the virtual adjustment if we have one.
1723 llvm::Value *ResultPtr;
1724 if (VirtualAdjustment) {
1725 llvm::Type *PtrDiffTy =
1726 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1728 Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
1729 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
1731 llvm::Value *OffsetPtr =
1732 CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
1734 OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
1736 // Load the adjustment offset from the vtable.
1737 llvm::Value *Offset =
1738 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1740 // Adjust our pointer.
1741 ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
1743 ResultPtr = V.getPointer();
1746 // In a derived-to-base conversion, the non-virtual adjustment is
1748 if (NonVirtualAdjustment && IsReturnAdjustment) {
1749 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
1750 NonVirtualAdjustment);
1753 // Cast back to the original type.
1754 return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
1757 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
1759 const ThisAdjustment &TA) {
1760 return performTypeAdjustment(CGF, This, TA.NonVirtual,
1761 TA.Virtual.Itanium.VCallOffsetOffset,
1762 /*IsReturnAdjustment=*/false);
1766 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
1767 const ReturnAdjustment &RA) {
1768 return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
1769 RA.Virtual.Itanium.VBaseOffsetOffset,
1770 /*IsReturnAdjustment=*/true);
1773 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
1774 RValue RV, QualType ResultType) {
1775 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
1776 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
1778 // Destructor thunks in the ARM ABI have indeterminate results.
1779 llvm::Type *T = CGF.ReturnValue.getElementType();
1780 RValue Undef = RValue::get(llvm::UndefValue::get(T));
1781 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
1784 /************************** Array allocation cookies **************************/
1786 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1787 // The array cookie is a size_t; pad that up to the element alignment.
1788 // The cookie is actually right-justified in that space.
1789 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
1790 CGM.getContext().getTypeAlignInChars(elementType));
1793 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1795 llvm::Value *NumElements,
1796 const CXXNewExpr *expr,
1797 QualType ElementType) {
1798 assert(requiresArrayCookie(expr));
1800 unsigned AS = NewPtr.getAddressSpace();
1802 ASTContext &Ctx = getContext();
1803 CharUnits SizeSize = CGF.getSizeSize();
1805 // The size of the cookie.
1806 CharUnits CookieSize =
1807 std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
1808 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
1810 // Compute an offset to the cookie.
1811 Address CookiePtr = NewPtr;
1812 CharUnits CookieOffset = CookieSize - SizeSize;
1813 if (!CookieOffset.isZero())
1814 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
1816 // Write the number of elements into the appropriate slot.
1817 Address NumElementsPtr =
1818 CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
1819 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
1821 // Handle the array cookie specially in ASan.
1822 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
1823 expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
1824 // The store to the CookiePtr does not need to be instrumented.
1825 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
1826 llvm::FunctionType *FTy =
1827 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
1829 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
1830 CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
1833 // Finally, compute a pointer to the actual data buffer by skipping
1834 // over the cookie completely.
1835 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
1838 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1840 CharUnits cookieSize) {
1841 // The element size is right-justified in the cookie.
1842 Address numElementsPtr = allocPtr;
1843 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
1844 if (!numElementsOffset.isZero())
1846 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
1848 unsigned AS = allocPtr.getAddressSpace();
1849 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1850 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
1851 return CGF.Builder.CreateLoad(numElementsPtr);
1852 // In asan mode emit a function call instead of a regular load and let the
1853 // run-time deal with it: if the shadow is properly poisoned return the
1854 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
1855 // We can't simply ignore this load using nosanitize metadata because
1856 // the metadata may be lost.
1857 llvm::FunctionType *FTy =
1858 llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
1860 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
1861 return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
1864 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1865 // ARM says that the cookie is always:
1866 // struct array_cookie {
1867 // std::size_t element_size; // element_size != 0
1868 // std::size_t element_count;
1870 // But the base ABI doesn't give anything an alignment greater than
1871 // 8, so we can dismiss this as typical ABI-author blindness to
1872 // actual language complexity and round up to the element alignment.
1873 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
1874 CGM.getContext().getTypeAlignInChars(elementType));
1877 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1879 llvm::Value *numElements,
1880 const CXXNewExpr *expr,
1881 QualType elementType) {
1882 assert(requiresArrayCookie(expr));
1884 // The cookie is always at the start of the buffer.
1885 Address cookie = newPtr;
1887 // The first element is the element size.
1888 cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
1889 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
1890 getContext().getTypeSizeInChars(elementType).getQuantity());
1891 CGF.Builder.CreateStore(elementSize, cookie);
1893 // The second element is the element count.
1894 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
1895 CGF.Builder.CreateStore(numElements, cookie);
1897 // Finally, compute a pointer to the actual data buffer by skipping
1898 // over the cookie completely.
1899 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
1900 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
1903 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1905 CharUnits cookieSize) {
1906 // The number of elements is at offset sizeof(size_t) relative to
1907 // the allocated pointer.
1908 Address numElementsPtr
1909 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
1911 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1912 return CGF.Builder.CreateLoad(numElementsPtr);
1915 /*********************** Static local initialization **************************/
1917 static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
1918 llvm::PointerType *GuardPtrTy) {
1919 // int __cxa_guard_acquire(__guard *guard_object);
1920 llvm::FunctionType *FTy =
1921 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
1922 GuardPtrTy, /*isVarArg=*/false);
1923 return CGM.CreateRuntimeFunction(
1924 FTy, "__cxa_guard_acquire",
1925 llvm::AttributeList::get(CGM.getLLVMContext(),
1926 llvm::AttributeList::FunctionIndex,
1927 llvm::Attribute::NoUnwind));
1930 static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
1931 llvm::PointerType *GuardPtrTy) {
1932 // void __cxa_guard_release(__guard *guard_object);
1933 llvm::FunctionType *FTy =
1934 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1935 return CGM.CreateRuntimeFunction(
1936 FTy, "__cxa_guard_release",
1937 llvm::AttributeList::get(CGM.getLLVMContext(),
1938 llvm::AttributeList::FunctionIndex,
1939 llvm::Attribute::NoUnwind));
1942 static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
1943 llvm::PointerType *GuardPtrTy) {
1944 // void __cxa_guard_abort(__guard *guard_object);
1945 llvm::FunctionType *FTy =
1946 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1947 return CGM.CreateRuntimeFunction(
1948 FTy, "__cxa_guard_abort",
1949 llvm::AttributeList::get(CGM.getLLVMContext(),
1950 llvm::AttributeList::FunctionIndex,
1951 llvm::Attribute::NoUnwind));
1955 struct CallGuardAbort final : EHScopeStack::Cleanup {
1956 llvm::GlobalVariable *Guard;
1957 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
1959 void Emit(CodeGenFunction &CGF, Flags flags) override {
1960 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
1966 /// The ARM code here follows the Itanium code closely enough that we
1967 /// just special-case it at particular places.
1968 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
1970 llvm::GlobalVariable *var,
1971 bool shouldPerformInit) {
1972 CGBuilderTy &Builder = CGF.Builder;
1974 // Inline variables that weren't instantiated from variable templates have
1975 // partially-ordered initialization within their translation unit.
1976 bool NonTemplateInline =
1978 !isTemplateInstantiation(D.getTemplateSpecializationKind());
1980 // We only need to use thread-safe statics for local non-TLS variables and
1981 // inline variables; other global initialization is always single-threaded
1982 // or (through lazy dynamic loading in multiple threads) unsequenced.
1983 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
1984 (D.isLocalVarDecl() || NonTemplateInline) &&
1987 // If we have a global variable with internal linkage and thread-safe statics
1988 // are disabled, we can just let the guard variable be of type i8.
1989 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
1991 llvm::IntegerType *guardTy;
1992 CharUnits guardAlignment;
1993 if (useInt8GuardVariable) {
1994 guardTy = CGF.Int8Ty;
1995 guardAlignment = CharUnits::One();
1997 // Guard variables are 64 bits in the generic ABI and size width on ARM
1998 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
1999 if (UseARMGuardVarABI) {
2000 guardTy = CGF.SizeTy;
2001 guardAlignment = CGF.getSizeAlign();
2003 guardTy = CGF.Int64Ty;
2004 guardAlignment = CharUnits::fromQuantity(
2005 CGM.getDataLayout().getABITypeAlignment(guardTy));
2008 llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
2010 // Create the guard variable if we don't already have it (as we
2011 // might if we're double-emitting this function body).
2012 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2014 // Mangle the name for the guard.
2015 SmallString<256> guardName;
2017 llvm::raw_svector_ostream out(guardName);
2018 getMangleContext().mangleStaticGuardVariable(&D, out);
2021 // Create the guard variable with a zero-initializer.
2022 // Just absorb linkage and visibility from the guarded variable.
2023 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2024 false, var->getLinkage(),
2025 llvm::ConstantInt::get(guardTy, 0),
2027 guard->setVisibility(var->getVisibility());
2028 // If the variable is thread-local, so is its guard variable.
2029 guard->setThreadLocalMode(var->getThreadLocalMode());
2030 guard->setAlignment(guardAlignment.getQuantity());
2032 // The ABI says: "It is suggested that it be emitted in the same COMDAT
2033 // group as the associated data object." In practice, this doesn't work for
2034 // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2035 llvm::Comdat *C = var->getComdat();
2036 if (!D.isLocalVarDecl() && C &&
2037 (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2038 CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2039 guard->setComdat(C);
2040 // An inline variable's guard function is run from the per-TU
2041 // initialization function, not via a dedicated global ctor function, so
2042 // we can't put it in a comdat.
2043 if (!NonTemplateInline)
2044 CGF.CurFn->setComdat(C);
2045 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2046 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2049 CGM.setStaticLocalDeclGuardAddress(&D, guard);
2052 Address guardAddr = Address(guard, guardAlignment);
2054 // Test whether the variable has completed initialization.
2056 // Itanium C++ ABI 3.3.2:
2057 // The following is pseudo-code showing how these functions can be used:
2058 // if (obj_guard.first_byte == 0) {
2059 // if ( __cxa_guard_acquire (&obj_guard) ) {
2061 // ... initialize the object ...;
2063 // __cxa_guard_abort (&obj_guard);
2066 // ... queue object destructor with __cxa_atexit() ...;
2067 // __cxa_guard_release (&obj_guard);
2071 // Load the first byte of the guard variable.
2072 llvm::LoadInst *LI =
2073 Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2076 // An implementation supporting thread-safety on multiprocessor
2077 // systems must also guarantee that references to the initialized
2078 // object do not occur before the load of the initialization flag.
2080 // In LLVM, we do this by marking the load Acquire.
2082 LI->setAtomic(llvm::AtomicOrdering::Acquire);
2084 // For ARM, we should only check the first bit, rather than the entire byte:
2086 // ARM C++ ABI 3.2.3.1:
2087 // To support the potential use of initialization guard variables
2088 // as semaphores that are the target of ARM SWP and LDREX/STREX
2089 // synchronizing instructions we define a static initialization
2090 // guard variable to be a 4-byte aligned, 4-byte word with the
2091 // following inline access protocol.
2092 // #define INITIALIZED 1
2093 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2094 // if (__cxa_guard_acquire(&obj_guard))
2098 // and similarly for ARM64:
2100 // ARM64 C++ ABI 3.2.2:
2101 // This ABI instead only specifies the value bit 0 of the static guard
2102 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2103 // variable is not initialized and 1 when it is.
2105 (UseARMGuardVarABI && !useInt8GuardVariable)
2106 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2108 llvm::Value *isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
2110 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2111 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2113 // Check if the first byte of the guard variable is zero.
2114 Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);
2116 CGF.EmitBlock(InitCheckBlock);
2118 // Variables used when coping with thread-safe statics and exceptions.
2120 // Call __cxa_guard_acquire.
2122 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2124 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2126 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2127 InitBlock, EndBlock);
2129 // Call __cxa_guard_abort along the exceptional edge.
2130 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2132 CGF.EmitBlock(InitBlock);
2135 // Emit the initializer and add a global destructor if appropriate.
2136 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2139 // Pop the guard-abort cleanup if we pushed one.
2140 CGF.PopCleanupBlock();
2142 // Call __cxa_guard_release. This cannot throw.
2143 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2144 guardAddr.getPointer());
2146 Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
2149 CGF.EmitBlock(EndBlock);
2152 /// Register a global destructor using __cxa_atexit.
2153 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2154 llvm::Constant *dtor,
2155 llvm::Constant *addr,
2157 const char *Name = "__cxa_atexit";
2159 const llvm::Triple &T = CGF.getTarget().getTriple();
2160 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2163 // We're assuming that the destructor function is something we can
2164 // reasonably call with the default CC. Go ahead and cast it to the
2166 llvm::Type *dtorTy =
2167 llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2169 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2170 llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
2171 llvm::FunctionType *atexitTy =
2172 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2174 // Fetch the actual function.
2175 llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2176 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
2177 fn->setDoesNotThrow();
2179 // Create a variable that binds the atexit to this shared object.
2180 llvm::Constant *handle =
2181 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2182 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2183 GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2185 llvm::Value *args[] = {
2186 llvm::ConstantExpr::getBitCast(dtor, dtorTy),
2187 llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
2190 CGF.EmitNounwindRuntimeCall(atexit, args);
2193 /// Register a global destructor as best as we know how.
2194 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
2196 llvm::Constant *dtor,
2197 llvm::Constant *addr) {
2198 // Use __cxa_atexit if available.
2199 if (CGM.getCodeGenOpts().CXAAtExit)
2200 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2203 CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
2205 // In Apple kexts, we want to add a global destructor entry.
2206 // FIXME: shouldn't this be guarded by some variable?
2207 if (CGM.getLangOpts().AppleKext) {
2208 // Generate a global destructor entry.
2209 return CGM.AddCXXDtorEntry(dtor, addr);
2212 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2215 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2216 CodeGen::CodeGenModule &CGM) {
2217 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2218 // Darwin prefers to have references to thread local variables to go through
2219 // the thread wrapper instead of directly referencing the backing variable.
2220 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2221 CGM.getTarget().getTriple().isOSDarwin();
2224 /// Get the appropriate linkage for the wrapper function. This is essentially
2225 /// the weak form of the variable's linkage; every translation unit which needs
2226 /// the wrapper emits a copy, and we want the linker to merge them.
2227 static llvm::GlobalValue::LinkageTypes
2228 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2229 llvm::GlobalValue::LinkageTypes VarLinkage =
2230 CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);
2232 // For internal linkage variables, we don't need an external or weak wrapper.
2233 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2236 // If the thread wrapper is replaceable, give it appropriate linkage.
2237 if (isThreadWrapperReplaceable(VD, CGM))
2238 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2239 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2241 return llvm::GlobalValue::WeakODRLinkage;
2245 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2247 // Mangle the name for the thread_local wrapper function.
2248 SmallString<256> WrapperName;
2250 llvm::raw_svector_ostream Out(WrapperName);
2251 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2254 // FIXME: If VD is a definition, we should regenerate the function attributes
2255 // before returning.
2256 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2257 return cast<llvm::Function>(V);
2259 QualType RetQT = VD->getType();
2260 if (RetQT->isReferenceType())
2261 RetQT = RetQT.getNonReferenceType();
2263 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2264 getContext().getPointerType(RetQT), FunctionArgList());
2266 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2267 llvm::Function *Wrapper =
2268 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2269 WrapperName.str(), &CGM.getModule());
2271 CGM.SetLLVMFunctionAttributes(nullptr, FI, Wrapper);
2273 if (VD->hasDefinition())
2274 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2276 // Always resolve references to the wrapper at link time.
2277 if (!Wrapper->hasLocalLinkage() && !(isThreadWrapperReplaceable(VD, CGM) &&
2278 !llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) &&
2279 !llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage())))
2280 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2282 if (isThreadWrapperReplaceable(VD, CGM)) {
2283 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2284 Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2289 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2290 CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2291 ArrayRef<llvm::Function *> CXXThreadLocalInits,
2292 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2293 llvm::Function *InitFunc = nullptr;
2295 // Separate initializers into those with ordered (or partially-ordered)
2296 // initialization and those with unordered initialization.
2297 llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2298 llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2299 for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2300 if (isTemplateInstantiation(
2301 CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2302 UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2303 CXXThreadLocalInits[I];
2305 OrderedInits.push_back(CXXThreadLocalInits[I]);
2308 if (!OrderedInits.empty()) {
2309 // Generate a guarded initialization function.
2310 llvm::FunctionType *FTy =
2311 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2312 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2313 InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
2316 llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2317 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2318 llvm::GlobalVariable::InternalLinkage,
2319 llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2320 Guard->setThreadLocal(true);
2322 CharUnits GuardAlign = CharUnits::One();
2323 Guard->setAlignment(GuardAlign.getQuantity());
2325 CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, OrderedInits,
2326 Address(Guard, GuardAlign));
2327 // On Darwin platforms, use CXX_FAST_TLS calling convention.
2328 if (CGM.getTarget().getTriple().isOSDarwin()) {
2329 InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2330 InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2334 // Emit thread wrappers.
2335 for (const VarDecl *VD : CXXThreadLocals) {
2336 llvm::GlobalVariable *Var =
2337 cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2338 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
2340 // Some targets require that all access to thread local variables go through
2341 // the thread wrapper. This means that we cannot attempt to create a thread
2342 // wrapper or a thread helper.
2343 if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition()) {
2344 Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2348 // Mangle the name for the thread_local initialization function.
2349 SmallString<256> InitFnName;
2351 llvm::raw_svector_ostream Out(InitFnName);
2352 getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2355 // If we have a definition for the variable, emit the initialization
2356 // function as an alias to the global Init function (if any). Otherwise,
2357 // produce a declaration of the initialization function.
2358 llvm::GlobalValue *Init = nullptr;
2359 bool InitIsInitFunc = false;
2360 if (VD->hasDefinition()) {
2361 InitIsInitFunc = true;
2362 llvm::Function *InitFuncToUse = InitFunc;
2363 if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2364 InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2366 Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2369 // Emit a weak global function referring to the initialization function.
2370 // This function will not exist if the TU defining the thread_local
2371 // variable in question does not need any dynamic initialization for
2372 // its thread_local variables.
2373 llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2374 Init = llvm::Function::Create(FnTy,
2375 llvm::GlobalVariable::ExternalWeakLinkage,
2376 InitFnName.str(), &CGM.getModule());
2377 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2378 CGM.SetLLVMFunctionAttributes(nullptr, FI, cast<llvm::Function>(Init));
2382 Init->setVisibility(Var->getVisibility());
2384 llvm::LLVMContext &Context = CGM.getModule().getContext();
2385 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2386 CGBuilderTy Builder(CGM, Entry);
2387 if (InitIsInitFunc) {
2389 llvm::CallInst *CallVal = Builder.CreateCall(Init);
2390 if (isThreadWrapperReplaceable(VD, CGM))
2391 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2394 // Don't know whether we have an init function. Call it if it exists.
2395 llvm::Value *Have = Builder.CreateIsNotNull(Init);
2396 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2397 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2398 Builder.CreateCondBr(Have, InitBB, ExitBB);
2400 Builder.SetInsertPoint(InitBB);
2401 Builder.CreateCall(Init);
2402 Builder.CreateBr(ExitBB);
2404 Builder.SetInsertPoint(ExitBB);
2407 // For a reference, the result of the wrapper function is a pointer to
2408 // the referenced object.
2409 llvm::Value *Val = Var;
2410 if (VD->getType()->isReferenceType()) {
2411 CharUnits Align = CGM.getContext().getDeclAlign(VD);
2412 Val = Builder.CreateAlignedLoad(Val, Align);
2414 if (Val->getType() != Wrapper->getReturnType())
2415 Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2416 Val, Wrapper->getReturnType(), "");
2417 Builder.CreateRet(Val);
2421 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2423 QualType LValType) {
2424 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2425 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2427 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
2428 CallVal->setCallingConv(Wrapper->getCallingConv());
2431 if (VD->getType()->isReferenceType())
2432 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2434 LV = CGF.MakeAddrLValue(CallVal, LValType,
2435 CGF.getContext().getDeclAlign(VD));
2436 // FIXME: need setObjCGCLValueClass?
2440 /// Return whether the given global decl needs a VTT parameter, which it does
2441 /// if it's a base constructor or destructor with virtual bases.
2442 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2443 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2445 // We don't have any virtual bases, just return early.
2446 if (!MD->getParent()->getNumVBases())
2449 // Check if we have a base constructor.
2450 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2453 // Check if we have a base destructor.
2454 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2461 class ItaniumRTTIBuilder {
2462 CodeGenModule &CGM; // Per-module state.
2463 llvm::LLVMContext &VMContext;
2464 const ItaniumCXXABI &CXXABI; // Per-module state.
2466 /// Fields - The fields of the RTTI descriptor currently being built.
2467 SmallVector<llvm::Constant *, 16> Fields;
2469 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
2470 llvm::GlobalVariable *
2471 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
2473 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
2474 /// descriptor of the given type.
2475 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
2477 /// BuildVTablePointer - Build the vtable pointer for the given type.
2478 void BuildVTablePointer(const Type *Ty);
2480 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2481 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
2482 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
2484 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2485 /// classes with bases that do not satisfy the abi::__si_class_type_info
2486 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
2487 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
2489 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
2490 /// for pointer types.
2491 void BuildPointerTypeInfo(QualType PointeeTy);
2493 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
2494 /// type_info for an object type.
2495 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
2497 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
2498 /// struct, used for member pointer types.
2499 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
2502 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
2503 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
2505 // Pointer type info flags.
2507 /// PTI_Const - Type has const qualifier.
2510 /// PTI_Volatile - Type has volatile qualifier.
2513 /// PTI_Restrict - Type has restrict qualifier.
2516 /// PTI_Incomplete - Type is incomplete.
2517 PTI_Incomplete = 0x8,
2519 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
2520 /// (in pointer to member).
2521 PTI_ContainingClassIncomplete = 0x10,
2523 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
2524 //PTI_TransactionSafe = 0x20,
2526 /// PTI_Noexcept - Pointee is noexcept function (C++1z).
2527 PTI_Noexcept = 0x40,
2530 // VMI type info flags.
2532 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
2533 VMI_NonDiamondRepeat = 0x1,
2535 /// VMI_DiamondShaped - Class is diamond shaped.
2536 VMI_DiamondShaped = 0x2
2539 // Base class type info flags.
2541 /// BCTI_Virtual - Base class is virtual.
2544 /// BCTI_Public - Base class is public.
2548 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
2550 /// \param Force - true to force the creation of this RTTI value
2551 /// \param DLLExport - true to mark the RTTI value as DLLExport
2552 llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false,
2553 bool DLLExport = false);
2557 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
2558 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
2559 SmallString<256> Name;
2560 llvm::raw_svector_ostream Out(Name);
2561 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
2563 // We know that the mangled name of the type starts at index 4 of the
2564 // mangled name of the typename, so we can just index into it in order to
2565 // get the mangled name of the type.
2566 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
2569 llvm::GlobalVariable *GV =
2570 CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
2572 GV->setInitializer(Init);
2578 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
2579 // Mangle the RTTI name.
2580 SmallString<256> Name;
2581 llvm::raw_svector_ostream Out(Name);
2582 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2584 // Look for an existing global.
2585 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
2588 // Create a new global variable.
2589 // Note for the future: If we would ever like to do deferred emission of
2590 // RTTI, check if emitting vtables opportunistically need any adjustment.
2592 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
2594 llvm::GlobalValue::ExternalLinkage, nullptr,
2596 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2597 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2598 if (RD->hasAttr<DLLImportAttr>())
2599 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
2603 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
2606 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
2607 /// info for that type is defined in the standard library.
2608 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
2609 // Itanium C++ ABI 2.9.2:
2610 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
2611 // the run-time support library. Specifically, the run-time support
2612 // library should contain type_info objects for the types X, X* and
2613 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
2614 // unsigned char, signed char, short, unsigned short, int, unsigned int,
2615 // long, unsigned long, long long, unsigned long long, float, double,
2616 // long double, char16_t, char32_t, and the IEEE 754r decimal and
2617 // half-precision floating point types.
2619 // GCC also emits RTTI for __int128.
2620 // FIXME: We do not emit RTTI information for decimal types here.
2622 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
2623 switch (Ty->getKind()) {
2624 case BuiltinType::Void:
2625 case BuiltinType::NullPtr:
2626 case BuiltinType::Bool:
2627 case BuiltinType::WChar_S:
2628 case BuiltinType::WChar_U:
2629 case BuiltinType::Char_U:
2630 case BuiltinType::Char_S:
2631 case BuiltinType::UChar:
2632 case BuiltinType::SChar:
2633 case BuiltinType::Short:
2634 case BuiltinType::UShort:
2635 case BuiltinType::Int:
2636 case BuiltinType::UInt:
2637 case BuiltinType::Long:
2638 case BuiltinType::ULong:
2639 case BuiltinType::LongLong:
2640 case BuiltinType::ULongLong:
2641 case BuiltinType::Half:
2642 case BuiltinType::Float:
2643 case BuiltinType::Double:
2644 case BuiltinType::LongDouble:
2645 case BuiltinType::Float128:
2646 case BuiltinType::Char16:
2647 case BuiltinType::Char32:
2648 case BuiltinType::Int128:
2649 case BuiltinType::UInt128:
2652 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2653 case BuiltinType::Id:
2654 #include "clang/Basic/OpenCLImageTypes.def"
2655 case BuiltinType::OCLSampler:
2656 case BuiltinType::OCLEvent:
2657 case BuiltinType::OCLClkEvent:
2658 case BuiltinType::OCLQueue:
2659 case BuiltinType::OCLReserveID:
2662 case BuiltinType::Dependent:
2663 #define BUILTIN_TYPE(Id, SingletonId)
2664 #define PLACEHOLDER_TYPE(Id, SingletonId) \
2665 case BuiltinType::Id:
2666 #include "clang/AST/BuiltinTypes.def"
2667 llvm_unreachable("asking for RRTI for a placeholder type!");
2669 case BuiltinType::ObjCId:
2670 case BuiltinType::ObjCClass:
2671 case BuiltinType::ObjCSel:
2672 llvm_unreachable("FIXME: Objective-C types are unsupported!");
2675 llvm_unreachable("Invalid BuiltinType Kind!");
2678 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
2679 QualType PointeeTy = PointerTy->getPointeeType();
2680 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
2684 // Check the qualifiers.
2685 Qualifiers Quals = PointeeTy.getQualifiers();
2686 Quals.removeConst();
2691 return TypeInfoIsInStandardLibrary(BuiltinTy);
2694 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
2695 /// information for the given type exists in the standard library.
2696 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
2697 // Type info for builtin types is defined in the standard library.
2698 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
2699 return TypeInfoIsInStandardLibrary(BuiltinTy);
2701 // Type info for some pointer types to builtin types is defined in the
2702 // standard library.
2703 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2704 return TypeInfoIsInStandardLibrary(PointerTy);
2709 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
2710 /// the given type exists somewhere else, and that we should not emit the type
2711 /// information in this translation unit. Assumes that it is not a
2712 /// standard-library type.
2713 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
2715 ASTContext &Context = CGM.getContext();
2717 // If RTTI is disabled, assume it might be disabled in the
2718 // translation unit that defines any potential key function, too.
2719 if (!Context.getLangOpts().RTTI) return false;
2721 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2722 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2723 if (!RD->hasDefinition())
2726 if (!RD->isDynamicClass())
2729 // FIXME: this may need to be reconsidered if the key function
2731 // N.B. We must always emit the RTTI data ourselves if there exists a key
2733 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
2734 if (CGM.getVTables().isVTableExternal(RD))
2735 return IsDLLImport ? false : true;
2744 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
2745 static bool IsIncompleteClassType(const RecordType *RecordTy) {
2746 return !RecordTy->getDecl()->isCompleteDefinition();
2749 /// ContainsIncompleteClassType - Returns whether the given type contains an
2750 /// incomplete class type. This is true if
2752 /// * The given type is an incomplete class type.
2753 /// * The given type is a pointer type whose pointee type contains an
2754 /// incomplete class type.
2755 /// * The given type is a member pointer type whose class is an incomplete
2757 /// * The given type is a member pointer type whoise pointee type contains an
2758 /// incomplete class type.
2759 /// is an indirect or direct pointer to an incomplete class type.
2760 static bool ContainsIncompleteClassType(QualType Ty) {
2761 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2762 if (IsIncompleteClassType(RecordTy))
2766 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2767 return ContainsIncompleteClassType(PointerTy->getPointeeType());
2769 if (const MemberPointerType *MemberPointerTy =
2770 dyn_cast<MemberPointerType>(Ty)) {
2771 // Check if the class type is incomplete.
2772 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
2773 if (IsIncompleteClassType(ClassType))
2776 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
2782 // CanUseSingleInheritance - Return whether the given record decl has a "single,
2783 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
2784 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
2785 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
2786 // Check the number of bases.
2787 if (RD->getNumBases() != 1)
2791 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
2793 // Check that the base is not virtual.
2794 if (Base->isVirtual())
2797 // Check that the base is public.
2798 if (Base->getAccessSpecifier() != AS_public)
2801 // Check that the class is dynamic iff the base is.
2802 const CXXRecordDecl *BaseDecl =
2803 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
2804 if (!BaseDecl->isEmpty() &&
2805 BaseDecl->isDynamicClass() != RD->isDynamicClass())
2811 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
2812 // abi::__class_type_info.
2813 static const char * const ClassTypeInfo =
2814 "_ZTVN10__cxxabiv117__class_type_infoE";
2815 // abi::__si_class_type_info.
2816 static const char * const SIClassTypeInfo =
2817 "_ZTVN10__cxxabiv120__si_class_type_infoE";
2818 // abi::__vmi_class_type_info.
2819 static const char * const VMIClassTypeInfo =
2820 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
2822 const char *VTableName = nullptr;
2824 switch (Ty->getTypeClass()) {
2825 #define TYPE(Class, Base)
2826 #define ABSTRACT_TYPE(Class, Base)
2827 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
2828 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
2829 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2830 #include "clang/AST/TypeNodes.def"
2831 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
2833 case Type::LValueReference:
2834 case Type::RValueReference:
2835 llvm_unreachable("References shouldn't get here");
2838 case Type::DeducedTemplateSpecialization:
2839 llvm_unreachable("Undeduced type shouldn't get here");
2842 llvm_unreachable("Pipe types shouldn't get here");
2845 // GCC treats vector and complex types as fundamental types.
2847 case Type::ExtVector:
2850 // FIXME: GCC treats block pointers as fundamental types?!
2851 case Type::BlockPointer:
2852 // abi::__fundamental_type_info.
2853 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
2856 case Type::ConstantArray:
2857 case Type::IncompleteArray:
2858 case Type::VariableArray:
2859 // abi::__array_type_info.
2860 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
2863 case Type::FunctionNoProto:
2864 case Type::FunctionProto:
2865 // abi::__function_type_info.
2866 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
2870 // abi::__enum_type_info.
2871 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
2874 case Type::Record: {
2875 const CXXRecordDecl *RD =
2876 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
2878 if (!RD->hasDefinition() || !RD->getNumBases()) {
2879 VTableName = ClassTypeInfo;
2880 } else if (CanUseSingleInheritance(RD)) {
2881 VTableName = SIClassTypeInfo;
2883 VTableName = VMIClassTypeInfo;
2889 case Type::ObjCObject:
2890 // Ignore protocol qualifiers.
2891 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
2893 // Handle id and Class.
2894 if (isa<BuiltinType>(Ty)) {
2895 VTableName = ClassTypeInfo;
2899 assert(isa<ObjCInterfaceType>(Ty));
2902 case Type::ObjCInterface:
2903 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
2904 VTableName = SIClassTypeInfo;
2906 VTableName = ClassTypeInfo;
2910 case Type::ObjCObjectPointer:
2912 // abi::__pointer_type_info.
2913 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
2916 case Type::MemberPointer:
2917 // abi::__pointer_to_member_type_info.
2918 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
2922 llvm::Constant *VTable =
2923 CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
2925 llvm::Type *PtrDiffTy =
2926 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
2928 // The vtable address point is 2.
2929 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
2931 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
2932 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
2934 Fields.push_back(VTable);
2937 /// \brief Return the linkage that the type info and type info name constants
2938 /// should have for the given type.
2939 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
2941 // Itanium C++ ABI 2.9.5p7:
2942 // In addition, it and all of the intermediate abi::__pointer_type_info
2943 // structs in the chain down to the abi::__class_type_info for the
2944 // incomplete class type must be prevented from resolving to the
2945 // corresponding type_info structs for the complete class type, possibly
2946 // by making them local static objects. Finally, a dummy class RTTI is
2947 // generated for the incomplete type that will not resolve to the final
2948 // complete class RTTI (because the latter need not exist), possibly by
2949 // making it a local static object.
2950 if (ContainsIncompleteClassType(Ty))
2951 return llvm::GlobalValue::InternalLinkage;
2953 switch (Ty->getLinkage()) {
2955 case InternalLinkage:
2956 case UniqueExternalLinkage:
2957 return llvm::GlobalValue::InternalLinkage;
2959 case VisibleNoLinkage:
2960 case ExternalLinkage:
2961 // RTTI is not enabled, which means that this type info struct is going
2962 // to be used for exception handling. Give it linkonce_odr linkage.
2963 if (!CGM.getLangOpts().RTTI)
2964 return llvm::GlobalValue::LinkOnceODRLinkage;
2966 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
2967 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
2968 if (RD->hasAttr<WeakAttr>())
2969 return llvm::GlobalValue::WeakODRLinkage;
2970 if (CGM.getTriple().isWindowsItaniumEnvironment())
2971 if (RD->hasAttr<DLLImportAttr>())
2972 return llvm::GlobalValue::ExternalLinkage;
2973 if (RD->isDynamicClass()) {
2974 llvm::GlobalValue::LinkageTypes LT = CGM.getVTableLinkage(RD);
2975 // MinGW won't export the RTTI information when there is a key function.
2976 // Make sure we emit our own copy instead of attempting to dllimport it.
2977 if (RD->hasAttr<DLLImportAttr>() &&
2978 llvm::GlobalValue::isAvailableExternallyLinkage(LT))
2979 LT = llvm::GlobalValue::LinkOnceODRLinkage;
2984 return llvm::GlobalValue::LinkOnceODRLinkage;
2987 llvm_unreachable("Invalid linkage!");
2990 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force,
2992 // We want to operate on the canonical type.
2993 Ty = Ty.getCanonicalType();
2995 // Check if we've already emitted an RTTI descriptor for this type.
2996 SmallString<256> Name;
2997 llvm::raw_svector_ostream Out(Name);
2998 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3000 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3001 if (OldGV && !OldGV->isDeclaration()) {
3002 assert(!OldGV->hasAvailableExternallyLinkage() &&
3003 "available_externally typeinfos not yet implemented");
3005 return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3008 // Check if there is already an external RTTI descriptor for this type.
3009 bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
3010 if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
3011 return GetAddrOfExternalRTTIDescriptor(Ty);
3013 // Emit the standard library with external linkage.
3014 llvm::GlobalVariable::LinkageTypes Linkage;
3016 Linkage = llvm::GlobalValue::ExternalLinkage;
3018 Linkage = getTypeInfoLinkage(CGM, Ty);
3020 // Add the vtable pointer.
3021 BuildVTablePointer(cast<Type>(Ty));
3024 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3025 llvm::Constant *TypeNameField;
3027 // If we're supposed to demote the visibility, be sure to set a flag
3028 // to use a string comparison for type_info comparisons.
3029 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3030 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3031 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3032 // The flag is the sign bit, which on ARM64 is defined to be clear
3033 // for global pointers. This is very ARM64-specific.
3034 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3035 llvm::Constant *flag =
3036 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3037 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3039 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3041 TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3043 Fields.push_back(TypeNameField);
3045 switch (Ty->getTypeClass()) {
3046 #define TYPE(Class, Base)
3047 #define ABSTRACT_TYPE(Class, Base)
3048 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3049 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3050 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3051 #include "clang/AST/TypeNodes.def"
3052 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3054 // GCC treats vector types as fundamental types.
3057 case Type::ExtVector:
3059 case Type::BlockPointer:
3060 // Itanium C++ ABI 2.9.5p4:
3061 // abi::__fundamental_type_info adds no data members to std::type_info.
3064 case Type::LValueReference:
3065 case Type::RValueReference:
3066 llvm_unreachable("References shouldn't get here");
3069 case Type::DeducedTemplateSpecialization:
3070 llvm_unreachable("Undeduced type shouldn't get here");
3073 llvm_unreachable("Pipe type shouldn't get here");
3075 case Type::ConstantArray:
3076 case Type::IncompleteArray:
3077 case Type::VariableArray:
3078 // Itanium C++ ABI 2.9.5p5:
3079 // abi::__array_type_info adds no data members to std::type_info.
3082 case Type::FunctionNoProto:
3083 case Type::FunctionProto:
3084 // Itanium C++ ABI 2.9.5p5:
3085 // abi::__function_type_info adds no data members to std::type_info.
3089 // Itanium C++ ABI 2.9.5p5:
3090 // abi::__enum_type_info adds no data members to std::type_info.
3093 case Type::Record: {
3094 const CXXRecordDecl *RD =
3095 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3096 if (!RD->hasDefinition() || !RD->getNumBases()) {
3097 // We don't need to emit any fields.
3101 if (CanUseSingleInheritance(RD))
3102 BuildSIClassTypeInfo(RD);
3104 BuildVMIClassTypeInfo(RD);
3109 case Type::ObjCObject:
3110 case Type::ObjCInterface:
3111 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3114 case Type::ObjCObjectPointer:
3115 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3119 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3122 case Type::MemberPointer:
3123 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3127 // No fields, at least for the moment.
3131 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3133 llvm::Module &M = CGM.getModule();
3134 llvm::GlobalVariable *GV =
3135 new llvm::GlobalVariable(M, Init->getType(),
3136 /*Constant=*/true, Linkage, Init, Name);
3138 // If there's already an old global variable, replace it with the new one.
3140 GV->takeName(OldGV);
3141 llvm::Constant *NewPtr =
3142 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3143 OldGV->replaceAllUsesWith(NewPtr);
3144 OldGV->eraseFromParent();
3147 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3148 GV->setComdat(M.getOrInsertComdat(GV->getName()));
3150 // The Itanium ABI specifies that type_info objects must be globally
3151 // unique, with one exception: if the type is an incomplete class
3152 // type or a (possibly indirect) pointer to one. That exception
3153 // affects the general case of comparing type_info objects produced
3154 // by the typeid operator, which is why the comparison operators on
3155 // std::type_info generally use the type_info name pointers instead
3156 // of the object addresses. However, the language's built-in uses
3157 // of RTTI generally require class types to be complete, even when
3158 // manipulating pointers to those class types. This allows the
3159 // implementation of dynamic_cast to rely on address equality tests,
3160 // which is much faster.
3162 // All of this is to say that it's important that both the type_info
3163 // object and the type_info name be uniqued when weakly emitted.
3165 // Give the type_info object and name the formal visibility of the
3167 llvm::GlobalValue::VisibilityTypes llvmVisibility;
3168 if (llvm::GlobalValue::isLocalLinkage(Linkage))
3169 // If the linkage is local, only default visibility makes sense.
3170 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3171 else if (RTTIUniqueness == ItaniumCXXABI::RUK_NonUniqueHidden)
3172 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3174 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3176 TypeName->setVisibility(llvmVisibility);
3177 GV->setVisibility(llvmVisibility);
3179 if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3180 auto RD = Ty->getAsCXXRecordDecl();
3181 if (DLLExport || (RD && RD->hasAttr<DLLExportAttr>())) {
3182 TypeName->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
3183 GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
3184 } else if (CGM.getLangOpts().RTTI && RD && RD->hasAttr<DLLImportAttr>()) {
3185 TypeName->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
3186 GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
3188 // Because the typename and the typeinfo are DLL import, convert them to
3189 // declarations rather than definitions. The initializers still need to
3190 // be constructed to calculate the type for the declarations.
3191 TypeName->setInitializer(nullptr);
3192 GV->setInitializer(nullptr);
3196 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3199 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3200 /// for the given Objective-C object type.
3201 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3203 const Type *T = OT->getBaseType().getTypePtr();
3204 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3206 // The builtin types are abi::__class_type_infos and don't require
3208 if (isa<BuiltinType>(T)) return;
3210 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3211 ObjCInterfaceDecl *Super = Class->getSuperClass();
3213 // Root classes are also __class_type_info.
3216 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3218 // Everything else is single inheritance.
3219 llvm::Constant *BaseTypeInfo =
3220 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3221 Fields.push_back(BaseTypeInfo);
3224 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3225 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
3226 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3227 // Itanium C++ ABI 2.9.5p6b:
3228 // It adds to abi::__class_type_info a single member pointing to the
3229 // type_info structure for the base type,
3230 llvm::Constant *BaseTypeInfo =
3231 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3232 Fields.push_back(BaseTypeInfo);
3236 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3237 /// a class hierarchy.
3239 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3240 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3244 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3245 /// abi::__vmi_class_type_info.
3247 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3252 const CXXRecordDecl *BaseDecl =
3253 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
3255 if (Base->isVirtual()) {
3256 // Mark the virtual base as seen.
3257 if (!Bases.VirtualBases.insert(BaseDecl).second) {
3258 // If this virtual base has been seen before, then the class is diamond
3260 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3262 if (Bases.NonVirtualBases.count(BaseDecl))
3263 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3266 // Mark the non-virtual base as seen.
3267 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3268 // If this non-virtual base has been seen before, then the class has non-
3269 // diamond shaped repeated inheritance.
3270 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3272 if (Bases.VirtualBases.count(BaseDecl))
3273 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3278 for (const auto &I : BaseDecl->bases())
3279 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3284 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3289 for (const auto &I : RD->bases())
3290 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3295 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3296 /// classes with bases that do not satisfy the abi::__si_class_type_info
3297 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
3298 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3299 llvm::Type *UnsignedIntLTy =
3300 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3302 // Itanium C++ ABI 2.9.5p6c:
3303 // __flags is a word with flags describing details about the class
3304 // structure, which may be referenced by using the __flags_masks
3305 // enumeration. These flags refer to both direct and indirect bases.
3306 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3307 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3309 // Itanium C++ ABI 2.9.5p6c:
3310 // __base_count is a word with the number of direct proper base class
3311 // descriptions that follow.
3312 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3314 if (!RD->getNumBases())
3317 // Now add the base class descriptions.
3319 // Itanium C++ ABI 2.9.5p6c:
3320 // __base_info[] is an array of base class descriptions -- one for every
3321 // direct proper base. Each description is of the type:
3323 // struct abi::__base_class_type_info {
3325 // const __class_type_info *__base_type;
3326 // long __offset_flags;
3328 // enum __offset_flags_masks {
3329 // __virtual_mask = 0x1,
3330 // __public_mask = 0x2,
3331 // __offset_shift = 8
3335 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
3336 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
3338 // FIXME: Consider updating libc++abi to match, and extend this logic to all
3340 QualType OffsetFlagsTy = CGM.getContext().LongTy;
3341 const TargetInfo &TI = CGM.getContext().getTargetInfo();
3342 if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
3343 OffsetFlagsTy = CGM.getContext().LongLongTy;
3344 llvm::Type *OffsetFlagsLTy =
3345 CGM.getTypes().ConvertType(OffsetFlagsTy);
3347 for (const auto &Base : RD->bases()) {
3348 // The __base_type member points to the RTTI for the base type.
3349 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3351 const CXXRecordDecl *BaseDecl =
3352 cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
3354 int64_t OffsetFlags = 0;
3356 // All but the lower 8 bits of __offset_flags are a signed offset.
3357 // For a non-virtual base, this is the offset in the object of the base
3358 // subobject. For a virtual base, this is the offset in the virtual table of
3359 // the virtual base offset for the virtual base referenced (negative).
3361 if (Base.isVirtual())
3363 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3365 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3366 Offset = Layout.getBaseClassOffset(BaseDecl);
3369 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3371 // The low-order byte of __offset_flags contains flags, as given by the
3372 // masks from the enumeration __offset_flags_masks.
3373 if (Base.isVirtual())
3374 OffsetFlags |= BCTI_Virtual;
3375 if (Base.getAccessSpecifier() == AS_public)
3376 OffsetFlags |= BCTI_Public;
3378 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
3382 /// Compute the flags for a __pbase_type_info, and remove the corresponding
3383 /// pieces from \p Type.
3384 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
3387 if (Type.isConstQualified())
3388 Flags |= ItaniumRTTIBuilder::PTI_Const;
3389 if (Type.isVolatileQualified())
3390 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3391 if (Type.isRestrictQualified())
3392 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
3393 Type = Type.getUnqualifiedType();
3395 // Itanium C++ ABI 2.9.5p7:
3396 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
3397 // incomplete class type, the incomplete target type flag is set.
3398 if (ContainsIncompleteClassType(Type))
3399 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
3401 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
3402 if (Proto->isNothrow(Ctx)) {
3403 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
3404 Type = Ctx.getFunctionType(
3405 Proto->getReturnType(), Proto->getParamTypes(),
3406 Proto->getExtProtoInfo().withExceptionSpec(EST_None));
3413 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
3414 /// used for pointer types.
3415 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
3416 // Itanium C++ ABI 2.9.5p7:
3417 // __flags is a flag word describing the cv-qualification and other
3418 // attributes of the type pointed to
3419 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3421 llvm::Type *UnsignedIntLTy =
3422 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3423 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3425 // Itanium C++ ABI 2.9.5p7:
3426 // __pointee is a pointer to the std::type_info derivation for the
3427 // unqualified type being pointed to.
3428 llvm::Constant *PointeeTypeInfo =
3429 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3430 Fields.push_back(PointeeTypeInfo);
3433 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3434 /// struct, used for member pointer types.
3436 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
3437 QualType PointeeTy = Ty->getPointeeType();
3439 // Itanium C++ ABI 2.9.5p7:
3440 // __flags is a flag word describing the cv-qualification and other
3441 // attributes of the type pointed to.
3442 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3444 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
3445 if (IsIncompleteClassType(ClassType))
3446 Flags |= PTI_ContainingClassIncomplete;
3448 llvm::Type *UnsignedIntLTy =
3449 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3450 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3452 // Itanium C++ ABI 2.9.5p7:
3453 // __pointee is a pointer to the std::type_info derivation for the
3454 // unqualified type being pointed to.
3455 llvm::Constant *PointeeTypeInfo =
3456 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3457 Fields.push_back(PointeeTypeInfo);
3459 // Itanium C++ ABI 2.9.5p9:
3460 // __context is a pointer to an abi::__class_type_info corresponding to the
3461 // class type containing the member pointed to
3462 // (e.g., the "A" in "int A::*").
3464 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
3467 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
3468 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
3471 void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type,
3473 QualType PointerType = getContext().getPointerType(Type);
3474 QualType PointerTypeConst = getContext().getPointerType(Type.withConst());
3475 ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, /*Force=*/true, DLLExport);
3476 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, /*Force=*/true,
3478 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, /*Force=*/true,
3482 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(bool DLLExport) {
3483 // Types added here must also be added to TypeInfoIsInStandardLibrary.
3484 QualType FundamentalTypes[] = {
3485 getContext().VoidTy, getContext().NullPtrTy,
3486 getContext().BoolTy, getContext().WCharTy,
3487 getContext().CharTy, getContext().UnsignedCharTy,
3488 getContext().SignedCharTy, getContext().ShortTy,
3489 getContext().UnsignedShortTy, getContext().IntTy,
3490 getContext().UnsignedIntTy, getContext().LongTy,
3491 getContext().UnsignedLongTy, getContext().LongLongTy,
3492 getContext().UnsignedLongLongTy, getContext().Int128Ty,
3493 getContext().UnsignedInt128Ty, getContext().HalfTy,
3494 getContext().FloatTy, getContext().DoubleTy,
3495 getContext().LongDoubleTy, getContext().Float128Ty,
3496 getContext().Char16Ty, getContext().Char32Ty
3498 for (const QualType &FundamentalType : FundamentalTypes)
3499 EmitFundamentalRTTIDescriptor(FundamentalType, DLLExport);
3502 /// What sort of uniqueness rules should we use for the RTTI for the
3504 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
3505 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
3506 if (shouldRTTIBeUnique())
3509 // It's only necessary for linkonce_odr or weak_odr linkage.
3510 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
3511 Linkage != llvm::GlobalValue::WeakODRLinkage)
3514 // It's only necessary with default visibility.
3515 if (CanTy->getVisibility() != DefaultVisibility)
3518 // If we're not required to publish this symbol, hide it.
3519 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3520 return RUK_NonUniqueHidden;
3522 // If we're required to publish this symbol, as we might be under an
3523 // explicit instantiation, leave it with default visibility but
3524 // enable string-comparisons.
3525 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
3526 return RUK_NonUniqueVisible;
3529 // Find out how to codegen the complete destructor and constructor
3531 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
3533 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
3534 const CXXMethodDecl *MD) {
3535 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
3536 return StructorCodegen::Emit;
3538 // The complete and base structors are not equivalent if there are any virtual
3539 // bases, so emit separate functions.
3540 if (MD->getParent()->getNumVBases())
3541 return StructorCodegen::Emit;
3543 GlobalDecl AliasDecl;
3544 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
3545 AliasDecl = GlobalDecl(DD, Dtor_Complete);
3547 const auto *CD = cast<CXXConstructorDecl>(MD);
3548 AliasDecl = GlobalDecl(CD, Ctor_Complete);
3550 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3552 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
3553 return StructorCodegen::RAUW;
3555 // FIXME: Should we allow available_externally aliases?
3556 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
3557 return StructorCodegen::RAUW;
3559 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
3560 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
3561 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
3562 CGM.getTarget().getTriple().isOSBinFormatWasm())
3563 return StructorCodegen::COMDAT;
3564 return StructorCodegen::Emit;
3567 return StructorCodegen::Alias;
3570 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
3571 GlobalDecl AliasDecl,
3572 GlobalDecl TargetDecl) {
3573 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3575 StringRef MangledName = CGM.getMangledName(AliasDecl);
3576 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
3577 if (Entry && !Entry->isDeclaration())
3580 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
3582 // Create the alias with no name.
3583 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
3585 // Switch any previous uses to the alias.
3587 assert(Entry->getType() == Aliasee->getType() &&
3588 "declaration exists with different type");
3589 Alias->takeName(Entry);
3590 Entry->replaceAllUsesWith(Alias);
3591 Entry->eraseFromParent();
3593 Alias->setName(MangledName);
3596 // Finally, set up the alias with its proper name and attributes.
3597 CGM.setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
3600 void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
3601 StructorType Type) {
3602 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
3603 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
3605 StructorCodegen CGType = getCodegenToUse(CGM, MD);
3607 if (Type == StructorType::Complete) {
3608 GlobalDecl CompleteDecl;
3609 GlobalDecl BaseDecl;
3611 CompleteDecl = GlobalDecl(CD, Ctor_Complete);
3612 BaseDecl = GlobalDecl(CD, Ctor_Base);
3614 CompleteDecl = GlobalDecl(DD, Dtor_Complete);
3615 BaseDecl = GlobalDecl(DD, Dtor_Base);
3618 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
3619 emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
3623 if (CGType == StructorCodegen::RAUW) {
3624 StringRef MangledName = CGM.getMangledName(CompleteDecl);
3625 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
3626 CGM.addReplacement(MangledName, Aliasee);
3631 // The base destructor is equivalent to the base destructor of its
3632 // base class if there is exactly one non-virtual base class with a
3633 // non-trivial destructor, there are no fields with a non-trivial
3634 // destructor, and the body of the destructor is trivial.
3635 if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
3636 !CGM.TryEmitBaseDestructorAsAlias(DD))
3639 llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);
3641 if (CGType == StructorCodegen::COMDAT) {
3642 SmallString<256> Buffer;
3643 llvm::raw_svector_ostream Out(Buffer);
3645 getMangleContext().mangleCXXDtorComdat(DD, Out);
3647 getMangleContext().mangleCXXCtorComdat(CD, Out);
3648 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
3651 CGM.maybeSetTrivialComdat(*MD, *Fn);
3655 static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) {
3656 // void *__cxa_begin_catch(void*);
3657 llvm::FunctionType *FTy = llvm::FunctionType::get(
3658 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3660 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
3663 static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
3664 // void __cxa_end_catch();
3665 llvm::FunctionType *FTy =
3666 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
3668 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
3671 static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
3672 // void *__cxa_get_exception_ptr(void*);
3673 llvm::FunctionType *FTy = llvm::FunctionType::get(
3674 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3676 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
3680 /// A cleanup to call __cxa_end_catch. In many cases, the caught
3681 /// exception type lets us state definitively that the thrown exception
3682 /// type does not have a destructor. In particular:
3683 /// - Catch-alls tell us nothing, so we have to conservatively
3684 /// assume that the thrown exception might have a destructor.
3685 /// - Catches by reference behave according to their base types.
3686 /// - Catches of non-record types will only trigger for exceptions
3687 /// of non-record types, which never have destructors.
3688 /// - Catches of record types can trigger for arbitrary subclasses
3689 /// of the caught type, so we have to assume the actual thrown
3690 /// exception type might have a throwing destructor, even if the
3691 /// caught type's destructor is trivial or nothrow.
3692 struct CallEndCatch final : EHScopeStack::Cleanup {
3693 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
3696 void Emit(CodeGenFunction &CGF, Flags flags) override {
3698 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
3702 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
3707 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
3708 /// __cxa_end_catch.
3710 /// \param EndMightThrow - true if __cxa_end_catch might throw
3711 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
3713 bool EndMightThrow) {
3714 llvm::CallInst *call =
3715 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
3717 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
3722 /// A "special initializer" callback for initializing a catch
3723 /// parameter during catch initialization.
3724 static void InitCatchParam(CodeGenFunction &CGF,
3725 const VarDecl &CatchParam,
3727 SourceLocation Loc) {
3728 // Load the exception from where the landing pad saved it.
3729 llvm::Value *Exn = CGF.getExceptionFromSlot();
3731 CanQualType CatchType =
3732 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
3733 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
3735 // If we're catching by reference, we can just cast the object
3736 // pointer to the appropriate pointer.
3737 if (isa<ReferenceType>(CatchType)) {
3738 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
3739 bool EndCatchMightThrow = CaughtType->isRecordType();
3741 // __cxa_begin_catch returns the adjusted object pointer.
3742 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
3744 // We have no way to tell the personality function that we're
3745 // catching by reference, so if we're catching a pointer,
3746 // __cxa_begin_catch will actually return that pointer by value.
3747 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
3748 QualType PointeeType = PT->getPointeeType();
3750 // When catching by reference, generally we should just ignore
3751 // this by-value pointer and use the exception object instead.
3752 if (!PointeeType->isRecordType()) {
3754 // Exn points to the struct _Unwind_Exception header, which
3755 // we have to skip past in order to reach the exception data.
3756 unsigned HeaderSize =
3757 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
3758 AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
3760 // However, if we're catching a pointer-to-record type that won't
3761 // work, because the personality function might have adjusted
3762 // the pointer. There's actually no way for us to fully satisfy
3763 // the language/ABI contract here: we can't use Exn because it
3764 // might have the wrong adjustment, but we can't use the by-value
3765 // pointer because it's off by a level of abstraction.
3767 // The current solution is to dump the adjusted pointer into an
3768 // alloca, which breaks language semantics (because changing the
3769 // pointer doesn't change the exception) but at least works.
3770 // The better solution would be to filter out non-exact matches
3771 // and rethrow them, but this is tricky because the rethrow
3772 // really needs to be catchable by other sites at this landing
3773 // pad. The best solution is to fix the personality function.
3775 // Pull the pointer for the reference type off.
3777 cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
3779 // Create the temporary and write the adjusted pointer into it.
3781 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
3782 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3783 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
3785 // Bind the reference to the temporary.
3786 AdjustedExn = ExnPtrTmp.getPointer();
3790 llvm::Value *ExnCast =
3791 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
3792 CGF.Builder.CreateStore(ExnCast, ParamAddr);
3796 // Scalars and complexes.
3797 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
3798 if (TEK != TEK_Aggregate) {
3799 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
3801 // If the catch type is a pointer type, __cxa_begin_catch returns
3802 // the pointer by value.
3803 if (CatchType->hasPointerRepresentation()) {
3804 llvm::Value *CastExn =
3805 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
3807 switch (CatchType.getQualifiers().getObjCLifetime()) {
3808 case Qualifiers::OCL_Strong:
3809 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
3812 case Qualifiers::OCL_None:
3813 case Qualifiers::OCL_ExplicitNone:
3814 case Qualifiers::OCL_Autoreleasing:
3815 CGF.Builder.CreateStore(CastExn, ParamAddr);
3818 case Qualifiers::OCL_Weak:
3819 CGF.EmitARCInitWeak(ParamAddr, CastExn);
3822 llvm_unreachable("bad ownership qualifier!");
3825 // Otherwise, it returns a pointer into the exception object.
3827 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3828 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3830 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
3831 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
3834 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
3838 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
3839 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
3843 llvm_unreachable("evaluation kind filtered out!");
3845 llvm_unreachable("bad evaluation kind");
3848 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
3849 auto catchRD = CatchType->getAsCXXRecordDecl();
3850 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
3852 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3854 // Check for a copy expression. If we don't have a copy expression,
3855 // that means a trivial copy is okay.
3856 const Expr *copyExpr = CatchParam.getInit();
3858 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
3859 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
3860 caughtExnAlignment);
3861 CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
3865 // We have to call __cxa_get_exception_ptr to get the adjusted
3866 // pointer before copying.
3867 llvm::CallInst *rawAdjustedExn =
3868 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
3870 // Cast that to the appropriate type.
3871 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
3872 caughtExnAlignment);
3874 // The copy expression is defined in terms of an OpaqueValueExpr.
3875 // Find it and map it to the adjusted expression.
3876 CodeGenFunction::OpaqueValueMapping
3877 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
3878 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
3880 // Call the copy ctor in a terminate scope.
3881 CGF.EHStack.pushTerminate();
3883 // Perform the copy construction.
3884 CGF.EmitAggExpr(copyExpr,
3885 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
3886 AggValueSlot::IsNotDestructed,
3887 AggValueSlot::DoesNotNeedGCBarriers,
3888 AggValueSlot::IsNotAliased));
3890 // Leave the terminate scope.
3891 CGF.EHStack.popTerminate();
3893 // Undo the opaque value mapping.
3896 // Finally we can call __cxa_begin_catch.
3897 CallBeginCatch(CGF, Exn, true);
3900 /// Begins a catch statement by initializing the catch variable and
3901 /// calling __cxa_begin_catch.
3902 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
3903 const CXXCatchStmt *S) {
3904 // We have to be very careful with the ordering of cleanups here:
3905 // C++ [except.throw]p4:
3906 // The destruction [of the exception temporary] occurs
3907 // immediately after the destruction of the object declared in
3908 // the exception-declaration in the handler.
3910 // So the precise ordering is:
3911 // 1. Construct catch variable.
3912 // 2. __cxa_begin_catch
3913 // 3. Enter __cxa_end_catch cleanup
3914 // 4. Enter dtor cleanup
3916 // We do this by using a slightly abnormal initialization process.
3917 // Delegation sequence:
3918 // - ExitCXXTryStmt opens a RunCleanupsScope
3919 // - EmitAutoVarAlloca creates the variable and debug info
3920 // - InitCatchParam initializes the variable from the exception
3921 // - CallBeginCatch calls __cxa_begin_catch
3922 // - CallBeginCatch enters the __cxa_end_catch cleanup
3923 // - EmitAutoVarCleanups enters the variable destructor cleanup
3924 // - EmitCXXTryStmt emits the code for the catch body
3925 // - EmitCXXTryStmt close the RunCleanupsScope
3927 VarDecl *CatchParam = S->getExceptionDecl();
3929 llvm::Value *Exn = CGF.getExceptionFromSlot();
3930 CallBeginCatch(CGF, Exn, true);
3935 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
3936 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
3937 CGF.EmitAutoVarCleanups(var);
3940 /// Get or define the following function:
3941 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
3942 /// This code is used only in C++.
3943 static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
3944 llvm::FunctionType *fnTy =
3945 llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3946 llvm::Constant *fnRef = CGM.CreateRuntimeFunction(
3947 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
3949 llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
3950 if (fn && fn->empty()) {
3951 fn->setDoesNotThrow();
3952 fn->setDoesNotReturn();
3954 // What we really want is to massively penalize inlining without
3955 // forbidding it completely. The difference between that and
3956 // 'noinline' is negligible.
3957 fn->addFnAttr(llvm::Attribute::NoInline);
3959 // Allow this function to be shared across translation units, but
3960 // we don't want it to turn into an exported symbol.
3961 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
3962 fn->setVisibility(llvm::Function::HiddenVisibility);
3963 if (CGM.supportsCOMDAT())
3964 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
3966 // Set up the function.
3967 llvm::BasicBlock *entry =
3968 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
3969 CGBuilderTy builder(CGM, entry);
3971 // Pull the exception pointer out of the parameter list.
3972 llvm::Value *exn = &*fn->arg_begin();
3974 // Call __cxa_begin_catch(exn).
3975 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
3976 catchCall->setDoesNotThrow();
3977 catchCall->setCallingConv(CGM.getRuntimeCC());
3979 // Call std::terminate().
3980 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
3981 termCall->setDoesNotThrow();
3982 termCall->setDoesNotReturn();
3983 termCall->setCallingConv(CGM.getRuntimeCC());
3985 // std::terminate cannot return.
3986 builder.CreateUnreachable();
3993 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
3995 // In C++, we want to call __cxa_begin_catch() before terminating.
3997 assert(CGF.CGM.getLangOpts().CPlusPlus);
3998 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4000 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());