1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This provides C++ code generation targeting the Itanium C++ ABI. The class
11 // in this file generates structures that follow the Itanium C++ ABI, which is
13 // http://www.codesourcery.com/public/cxx-abi/abi.html
14 // http://www.codesourcery.com/public/cxx-abi/abi-eh.html
16 // It also supports the closely-related ARM ABI, documented at:
17 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
19 //===----------------------------------------------------------------------===//
22 #include "CGCleanup.h"
23 #include "CGRecordLayout.h"
24 #include "CGVTables.h"
25 #include "CodeGenFunction.h"
26 #include "CodeGenModule.h"
27 #include "TargetInfo.h"
28 #include "clang/CodeGen/ConstantInitBuilder.h"
29 #include "clang/AST/Mangle.h"
30 #include "clang/AST/Type.h"
31 #include "clang/AST/StmtCXX.h"
32 #include "llvm/IR/CallSite.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
38 using namespace clang;
39 using namespace CodeGen;
// NOTE(review): this chunk is an elided listing — blank lines, closing braces,
// access specifiers, case labels, and some statements are omitted; the leading
// number on each line is the line number in the full source file.
//
// Primary CGCXXABI implementation for the Itanium C++ ABI; subclasses below
// tweak it for ARM, iOS64, and WebAssembly.
42 class ItaniumCXXABI : public CodeGen::CGCXXABI {
43 /// VTables - All the vtables which have been defined.
44 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
// ABI-variant knobs, set by the constructor arguments and by subclasses
// (ARMCXXABI turns on the first two; iOS64CXXABI turns on the third).
47 bool UseARMMethodPtrABI;
48 bool UseARMGuardVarABI;
49 bool Use32BitVTableOffsetABI;
51 ItaniumMangleContext &getMangleContext() {
52 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
56 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
57 bool UseARMMethodPtrABI = false,
58 bool UseARMGuardVarABI = false) :
59 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
60 UseARMGuardVarABI(UseARMGuardVarABI),
61 Use32BitVTableOffsetABI(false) { }
63 bool classifyReturnType(CGFunctionInfo &FI) const override;
// Decide whether an argument of class type RD must be passed indirectly
// (by address) rather than copied into registers/stack.
65 bool passClassIndirect(const CXXRecordDecl *RD) const {
66 // Clang <= 4 used the pre-C++11 rule, which ignores move operations.
67 // The PS4 platform ABI follows the behavior of Clang 3.2.
68 if (CGM.getCodeGenOpts().getClangABICompat() <=
69 CodeGenOptions::ClangABI::Ver4 ||
70 CGM.getTriple().getOS() == llvm::Triple::PS4)
71 return RD->hasNonTrivialDestructor() ||
72 RD->hasNonTrivialCopyConstructor();
73 return !canCopyArgument(RD);
76 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
77 // If C++ prohibits us from making a copy, pass by address.
78 if (passClassIndirect(RD))
83 bool isThisCompleteObject(GlobalDecl GD) const override {
84 // The Itanium ABI has separate complete-object vs. base-object
85 // variants of both constructors and destructors.
86 if (isa<CXXDestructorDecl>(GD.getDecl())) {
87 switch (GD.getDtorType()) {
96 llvm_unreachable("emitting dtor comdat as function?");
98 llvm_unreachable("bad dtor kind");
100 if (isa<CXXConstructorDecl>(GD.getDecl())) {
101 switch (GD.getCtorType()) {
108 case Ctor_CopyingClosure:
109 case Ctor_DefaultClosure:
110 llvm_unreachable("closure ctors in Itanium ABI?");
113 llvm_unreachable("emitting ctor comdat as function?");
115 llvm_unreachable("bad dtor kind");
122 bool isZeroInitializable(const MemberPointerType *MPT) override;
124 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
127 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
130 llvm::Value *&ThisPtrForCall,
131 llvm::Value *MemFnPtr,
132 const MemberPointerType *MPT) override;
135 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
138 const MemberPointerType *MPT) override;
140 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
142 llvm::Value *Src) override;
143 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
144 llvm::Constant *Src) override;
146 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
148 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
149 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
150 CharUnits offset) override;
151 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
152 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
153 CharUnits ThisAdjustment);
155 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
156 llvm::Value *L, llvm::Value *R,
157 const MemberPointerType *MPT,
158 bool Inequality) override;
160 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
162 const MemberPointerType *MPT) override;
164 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
165 Address Ptr, QualType ElementType,
166 const CXXDestructorDecl *Dtor) override;
// Alignment for exception objects, taken from the target's declared
// exception-object alignment (in bits) and converted to CharUnits.
168 CharUnits getAlignmentOfExnObject() {
169 unsigned Align = CGM.getContext().getTargetInfo().getExnObjectAlignment();
170 return CGM.getContext().toCharUnitsFromBits(Align);
173 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
174 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
176 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
179 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
180 llvm::Value *Exn) override;
182 void EmitFundamentalRTTIDescriptor(QualType Type, bool DLLExport);
183 void EmitFundamentalRTTIDescriptors(bool DLLExport);
184 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
186 getAddrOfCXXCatchHandlerType(QualType Ty,
187 QualType CatchHandlerType) override {
188 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
191 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
192 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
193 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
195 llvm::Type *StdTypeInfoPtrTy) override;
197 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
198 QualType SrcRecordTy) override;
200 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
201 QualType SrcRecordTy, QualType DestTy,
202 QualType DestRecordTy,
203 llvm::BasicBlock *CastEnd) override;
205 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
206 QualType SrcRecordTy,
207 QualType DestTy) override;
209 bool EmitBadCastCall(CodeGenFunction &CGF) override;
212 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
213 const CXXRecordDecl *ClassDecl,
214 const CXXRecordDecl *BaseClassDecl) override;
216 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
219 buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
220 SmallVectorImpl<CanQualType> &ArgTys) override;
222 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
223 CXXDtorType DT) const override {
224 // Itanium does not emit any destructor variant as an inline thunk.
225 // Delegating may occur as an optimization, but all variants are either
226 // emitted with external linkage or as linkonce if they are inline and used.
230 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
232 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
233 FunctionArgList &Params) override;
235 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
238 addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
239 CXXCtorType Type, bool ForVirtualBase,
240 bool Delegating, CallArgList &Args) override;
242 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
243 CXXDtorType Type, bool ForVirtualBase,
244 bool Delegating, Address This) override;
246 void emitVTableDefinitions(CodeGenVTables &CGVT,
247 const CXXRecordDecl *RD) override;
249 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
250 CodeGenFunction::VPtr Vptr) override;
252 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
257 getVTableAddressPoint(BaseSubobject Base,
258 const CXXRecordDecl *VTableClass) override;
260 llvm::Value *getVTableAddressPointInStructor(
261 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
264 llvm::Value *getVTableAddressPointInStructorWithVTT(
265 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
266 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
269 getVTableAddressPointForConstExpr(BaseSubobject Base,
270 const CXXRecordDecl *VTableClass) override;
272 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
273 CharUnits VPtrOffset) override;
275 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
276 Address This, llvm::Type *Ty,
277 SourceLocation Loc) override;
279 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
280 const CXXDestructorDecl *Dtor,
281 CXXDtorType DtorType,
283 const CXXMemberCallExpr *CE) override;
285 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
287 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
289 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
290 bool ReturnAdjustment) override {
291 // Allow inlining of thunks by emitting them with available_externally
292 // linkage together with vtables when needed.
293 if (ForVTable && !Thunk->hasLocalLinkage())
294 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
297 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
298 const ThisAdjustment &TA) override;
300 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
301 const ReturnAdjustment &RA) override;
// The source object of an implicit copy ctor is the last argument.
303 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
304 FunctionArgList &Args) const override {
305 assert(!Args.empty() && "expected the arglist to not be empty!");
306 return Args.size() - 1;
309 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
310 StringRef GetDeletedVirtualCallName() override
311 { return "__cxa_deleted_virtual"; }
313 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
314 Address InitializeArrayCookie(CodeGenFunction &CGF,
316 llvm::Value *NumElements,
317 const CXXNewExpr *expr,
318 QualType ElementType) override;
319 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
321 CharUnits cookieSize) override;
323 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
324 llvm::GlobalVariable *DeclPtr,
325 bool PerformInit) override;
326 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
327 llvm::Constant *dtor, llvm::Constant *addr) override;
329 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
331 void EmitThreadLocalInitFuncs(
333 ArrayRef<const VarDecl *> CXXThreadLocals,
334 ArrayRef<llvm::Function *> CXXThreadLocalInits,
335 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
337 bool usesThreadWrapperFunction() const override { return true; }
338 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
339 QualType LValType) override;
341 bool NeedsVTTParameter(GlobalDecl GD) override;
343 /**************************** RTTI Uniqueness ******************************/
346 /// Returns true if the ABI requires RTTI type_info objects to be unique
347 /// across a program.
348 virtual bool shouldRTTIBeUnique() const { return true; }
351 /// What sort of unique-RTTI behavior should we use?
352 enum RTTIUniquenessKind {
353 /// We are guaranteeing, or need to guarantee, that the RTTI string
357 /// We are not guaranteeing uniqueness for the RTTI string, so we
358 /// can demote to hidden visibility but must use string comparisons.
361 /// We are not guaranteeing uniqueness for the RTTI string, so we
362 /// have to use string comparisons, but we also have to emit it with
363 /// non-hidden visibility.
367 /// Return the required visibility status for the given type and linkage in
370 classifyRTTIUniqueness(QualType CanTy,
371 llvm::GlobalValue::LinkageTypes Linkage) const;
372 friend class ItaniumRTTIBuilder;
374 void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
// Scan the vtable layout for a used, inlined virtual function whose
// definition has not been emitted yet (no global, or only a declaration).
377 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
378 const auto &VtableLayout =
379 CGM.getItaniumVTableContext().getVTableLayout(RD);
381 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
383 if (!VtableComponent.isUsedFunctionPointerKind())
386 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
387 if (!Method->getCanonicalDecl()->isInlined())
390 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
391 auto *Entry = CGM.GetGlobalValue(Name);
392 // This checks if virtual inline function has already been emitted.
393 // Note that it is possible that this inline function would be emitted
394 // after trying to emit vtable speculatively. Because of this we do
395 // an extra pass after emitting all deferred vtables to find and emit
396 // these vtables opportunistically.
397 if (!Entry || Entry->isDeclaration())
// Inspect vtable components for hidden visibility: the RTTI object, or a
// used method that is hidden and not yet defined.
403 bool isVTableHidden(const CXXRecordDecl *RD) const {
404 const auto &VtableLayout =
405 CGM.getItaniumVTableContext().getVTableLayout(RD);
407 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
408 if (VtableComponent.isRTTIKind()) {
409 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
410 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
412 } else if (VtableComponent.isUsedFunctionPointerKind()) {
413 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
414 if (Method->getVisibility() == Visibility::HiddenVisibility &&
415 !Method->isDefined())
// ARM flavor of the Itanium ABI: ARM-style member pointers and guard
// variables (both ctor flags set to true), constructors and non-deleting
// destructors return 'this', and array cookies differ (overridden below).
423 class ARMCXXABI : public ItaniumCXXABI {
425 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
426 ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
427 /* UseARMGuardVarABI = */ true) {}
429 bool HasThisReturn(GlobalDecl GD) const override {
430 return (isa<CXXConstructorDecl>(GD.getDecl()) || (
431 isa<CXXDestructorDecl>(GD.getDecl()) &&
432 GD.getDtorType() != Dtor_Deleting));
435 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
436 QualType ResTy) override;
438 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
439 Address InitializeArrayCookie(CodeGenFunction &CGF,
441 llvm::Value *NumElements,
442 const CXXNewExpr *expr,
443 QualType ElementType) override;
444 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
445 CharUnits cookieSize) override;
// iOS on 64-bit ARM: the ARM ABI plus 32-bit vtable offsets in member
// function pointers and non-unique RTTI.
448 class iOS64CXXABI : public ARMCXXABI {
450 iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
451 Use32BitVTableOffsetABI = true;
454 // ARM64 libraries are prepared for non-unique RTTI.
455 bool shouldRTTIBeUnique() const override { return false; }
// WebAssembly reuses the ARM-style member pointer and guard-variable
// conventions, returns 'this' from ctors/non-deleting dtors, and forbids
// calling through a mismatched function type.
458 class WebAssemblyCXXABI final : public ItaniumCXXABI {
460 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
461 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
462 /*UseARMGuardVarABI=*/true) {}
465 bool HasThisReturn(GlobalDecl GD) const override {
466 return isa<CXXConstructorDecl>(GD.getDecl()) ||
467 (isa<CXXDestructorDecl>(GD.getDecl()) &&
468 GD.getDtorType() != Dtor_Deleting);
470 bool canCallMismatchedFunctionType() const override { return false; }
// Factory used by CodeGenModule: pick the Itanium-family ABI subclass (and
// its knob settings) appropriate for the target's C++ ABI kind.
474 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
475 switch (CGM.getTarget().getCXXABI().getKind()) {
476 // For IR-generation purposes, there's no significant difference
477 // between the ARM and iOS ABIs.
478 case TargetCXXABI::GenericARM:
479 case TargetCXXABI::iOS:
480 case TargetCXXABI::WatchOS:
481 return new ARMCXXABI(CGM);
483 case TargetCXXABI::iOS64:
484 return new iOS64CXXABI(CGM);
486 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
487 // include the other 32-bit ARM oddities: constructor/destructor return values
488 // and array cookies.
489 case TargetCXXABI::GenericAArch64:
490 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
491 /* UseARMGuardVarABI = */ true);
493 case TargetCXXABI::GenericMIPS:
494 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true);
496 case TargetCXXABI::WebAssembly:
497 return new WebAssemblyCXXABI(CGM);
499 case TargetCXXABI::GenericItanium:
500 if (CGM.getContext().getTargetInfo().getTriple().getArch()
501 == llvm::Triple::le32) {
502 // For PNaCl, use ARM-style method pointers so that PNaCl code
503 // does not assume anything about the alignment of function
505 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
506 /* UseARMGuardVarABI = */ false);
508 return new ItaniumCXXABI(CGM);
510 case TargetCXXABI::Microsoft:
511 llvm_unreachable("Microsoft ABI is not Itanium-based");
513 llvm_unreachable("bad ABI kind");
// Lower a member pointer type: data member pointers are a single ptrdiff_t
// offset; member function pointers are a { ptrdiff_t, ptrdiff_t } struct.
517 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
518 if (MPT->isMemberDataPointer())
519 return CGM.PtrDiffTy;
520 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
523 /// In the Itanium and ARM ABIs, method pointers have the form:
524 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
526 /// In the Itanium ABI:
527 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero
528 /// - the this-adjustment is (memptr.adj)
529 /// - the virtual offset is (memptr.ptr - 1)
532 /// - method pointers are virtual if (memptr.adj & 1) is nonzero
533 /// - the this-adjustment is (memptr.adj >> 1)
534 /// - the virtual offset is (memptr.ptr)
535 /// ARM uses 'adj' for the virtual flag because Thumb functions
536 /// may be only single-byte aligned.
538 /// If the member is virtual, the adjusted 'this' pointer points
539 /// to a vtable pointer from which the virtual offset is applied.
541 /// If the member is non-virtual, memptr.ptr is the address of
542 /// the function to call.
543 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
544 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
545 llvm::Value *&ThisPtrForCall,
546 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
547 CGBuilderTy &Builder = CGF.Builder;
549 const FunctionProtoType *FPT =
550 MPT->getPointeeType()->getAs<FunctionProtoType>();
551 const CXXRecordDecl *RD =
552 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
554 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
555 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
557 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
// Control flow splits into a virtual and a non-virtual path, joined at
// FnEnd by a PHI over the callee function pointer.
559 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
560 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
561 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
563 // Extract memptr.adj, which is in the second field.
564 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
566 // Compute the true adjustment.
567 llvm::Value *Adj = RawAdj;
568 if (UseARMMethodPtrABI)
569 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
571 // Apply the adjustment and cast back to the original struct type
573 llvm::Value *This = ThisAddr.getPointer();
574 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
575 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
576 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
// The caller receives the adjusted 'this' through this out-parameter.
577 ThisPtrForCall = This;
579 // Load the function pointer.
580 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
582 // If the LSB in the function pointer is 1, the function pointer points to
583 // a virtual function.
584 llvm::Value *IsVirtual;
585 if (UseARMMethodPtrABI)
586 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
588 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
589 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
590 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
592 // In the virtual path, the adjustment left 'This' pointing to the
593 // vtable of the correct base subobject. The "function pointer" is an
594 // offset within the vtable (+1 for the virtual flag on non-ARM).
595 CGF.EmitBlock(FnVirtual);
597 // Cast the adjusted this to a pointer to vtable pointer and load.
598 llvm::Type *VTableTy = Builder.getInt8PtrTy();
599 CharUnits VTablePtrAlign =
600 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
601 CGF.getPointerAlign());
602 llvm::Value *VTable =
603 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
606 // On ARM64, to reserve extra space in virtual member function pointers,
607 // we only pay attention to the low 32 bits of the offset.
608 llvm::Value *VTableOffset = FnAsInt;
609 if (!UseARMMethodPtrABI)
610 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
611 if (Use32BitVTableOffsetABI) {
612 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
613 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
615 VTable = Builder.CreateGEP(VTable, VTableOffset);
617 // Load the virtual function to call.
618 VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
619 llvm::Value *VirtualFn =
620 Builder.CreateAlignedLoad(VTable, CGF.getPointerAlign(),
622 CGF.EmitBranch(FnEnd);
624 // In the non-virtual path, the function pointer is actually a
626 CGF.EmitBlock(FnNonVirtual);
627 llvm::Value *NonVirtualFn =
628 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
631 CGF.EmitBlock(FnEnd);
632 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
633 CalleePtr->addIncoming(VirtualFn, FnVirtual);
634 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
636 CGCallee Callee(FPT, CalleePtr);
640 /// Compute an l-value by applying the given pointer-to-member to a
642 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
643 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
644 const MemberPointerType *MPT) {
645 assert(MemPtr->getType() == CGM.PtrDiffTy);
647 CGBuilderTy &Builder = CGF.Builder;
// View the base object as i8 so the ptrdiff_t member offset is applied
// in bytes.
650 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
652 // Apply the offset, which we assume is non-null.
654 Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
656 // Cast the address to the appropriate pointer type, adopting the
657 // address space of the base pointer.
658 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
659 ->getPointerTo(Base.getAddressSpace());
660 return Builder.CreateBitCast(Addr, PType);
663 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
666 /// Bitcast conversions are always a no-op under Itanium.
668 /// Obligatory offset/adjustment diagram:
669 /// <-- offset --> <-- adjustment -->
670 /// |--------------------------|----------------------|--------------------|
671 /// ^Derived address point ^Base address point ^Member address point
673 /// So when converting a base member pointer to a derived member pointer,
674 /// we add the offset to the adjustment because the address point has
675 /// decreased; and conversely, when converting a derived MP to a base MP
676 /// we subtract the offset from the adjustment because the address point
679 /// The standard forbids (at compile time) conversion to and from
680 /// virtual bases, which is why we don't have to consider them here.
682 /// The standard forbids (at run time) casting a derived MP to a base
683 /// MP when the derived MP does not point to a member of the base.
684 /// This is why -1 is a reasonable choice for null data member
687 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
690 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
691 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
692 E->getCastKind() == CK_ReinterpretMemberPointer);
694 // Under Itanium, reinterprets don't require any additional processing.
695 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
697 // Use constant emission if we can.
698 if (isa<llvm::Constant>(src))
699 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
701 llvm::Constant *adj = getMemberPointerAdjustment(E);
702 if (!adj) return src;
704 CGBuilderTy &Builder = CGF.Builder;
705 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
707 const MemberPointerType *destTy =
708 E->getType()->castAs<MemberPointerType>();
710 // For member data pointers, this is just a matter of adding the
711 // offset if the source is non-null.
712 if (destTy->isMemberDataPointer()) {
715 dst = Builder.CreateNSWSub(src, adj, "adj");
717 dst = Builder.CreateNSWAdd(src, adj, "adj");
// A null data member pointer is the all-ones value (-1); the select
// below leaves it unchanged by the conversion.
720 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
721 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
722 return Builder.CreateSelect(isNull, src, dst);
725 // The this-adjustment is left-shifted by 1 on ARM.
726 if (UseARMMethodPtrABI) {
727 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
729 adj = llvm::ConstantInt::get(adj->getType(), offset);
// Member function pointers: only the 'adj' field (index 1) changes.
732 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
735 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
737 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
739 return Builder.CreateInsertValue(src, dstAdj, 1);
// Constant-folding twin of the IRGen member pointer conversion: the same
// adjustment logic, evaluated entirely with llvm::ConstantExprs.
743 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
744 llvm::Constant *src) {
745 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
746 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
747 E->getCastKind() == CK_ReinterpretMemberPointer);
749 // Under Itanium, reinterprets don't require any additional processing.
750 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
752 // If the adjustment is trivial, we don't need to do anything.
753 llvm::Constant *adj = getMemberPointerAdjustment(E);
754 if (!adj) return src;
756 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
758 const MemberPointerType *destTy =
759 E->getType()->castAs<MemberPointerType>();
761 // For member data pointers, this is just a matter of adding the
762 // offset if the source is non-null.
763 if (destTy->isMemberDataPointer()) {
764 // null maps to null.
765 if (src->isAllOnesValue()) return src;
768 return llvm::ConstantExpr::getNSWSub(src, adj);
770 return llvm::ConstantExpr::getNSWAdd(src, adj);
773 // The this-adjustment is left-shifted by 1 on ARM.
774 if (UseARMMethodPtrABI) {
775 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
777 adj = llvm::ConstantInt::get(adj->getType(), offset);
// Member function pointers: only the 'adj' field (index 1) changes.
780 llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
781 llvm::Constant *dstAdj;
783 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
785 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
787 return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
791 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
792 // Itanium C++ ABI 2.3:
793 // A NULL pointer is represented as -1.
794 if (MPT->isMemberDataPointer())
795 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
// A null member *function* pointer is { ptr == 0, adj == 0 }.
797 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
798 llvm::Constant *Values[2] = { Zero, Zero };
799 return llvm::ConstantStruct::getAnon(Values);
803 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
805 // Itanium C++ ABI 2.3:
806 // A pointer to data member is an offset from the base address of
807 // the class object containing it, represented as a ptrdiff_t
// The CharUnits byte offset is emitted directly as a ptrdiff_t constant.
808 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
// A bare pointer-to-member-function has no this-adjustment.
812 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
813 return BuildMemberPointer(MD, CharUnits::Zero());
// Build the { ptr, adj } constant for a pointer to member function MD with
// the given this-adjustment, following the Itanium or ARM encoding.
816 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
817 CharUnits ThisAdjustment) {
818 assert(MD->isInstance() && "Member function must not be static!");
819 MD = MD->getCanonicalDecl();
821 CodeGenTypes &Types = CGM.getTypes();
823 // Get the function pointer (or index if this is a virtual function).
824 llvm::Constant *MemPtr[2];
825 if (MD->isVirtual()) {
826 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
828 const ASTContext &Context = getContext();
829 CharUnits PointerWidth =
830 Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
// Byte offset into the vtable: slot index times pointer size.
831 uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
833 if (UseARMMethodPtrABI) {
834 // ARM C++ ABI 3.2.1:
835 // This ABI specifies that adj contains twice the this
836 // adjustment, plus 1 if the member function is virtual. The
837 // least significant bit of adj then makes exactly the same
838 // discrimination as the least significant bit of ptr does for
840 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
841 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
842 2 * ThisAdjustment.getQuantity() + 1);
844 // Itanium C++ ABI 2.3:
845 // For a virtual function, [the pointer field] is 1 plus the
846 // virtual table offset (in bytes) of the function,
847 // represented as a ptrdiff_t.
848 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
849 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
850 ThisAdjustment.getQuantity());
// Non-virtual case: the pointer field holds the function address itself.
853 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
855 // Check whether the function has a computable LLVM signature.
856 if (Types.isFuncTypeConvertible(FPT)) {
857 // The function has a computable LLVM signature; use the correct type.
858 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
860 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
861 // function type is incomplete.
864 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
866 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
867 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
868 (UseARMMethodPtrABI ? 2 : 1) *
869 ThisAdjustment.getQuantity());
872 return llvm::ConstantStruct::getAnon(MemPtr);
// Emit a member pointer constant from an evaluated APValue, folding in the
// adjustment implied by the value's derived-to-base path.
875 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
877 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
878 const ValueDecl *MPD = MP.getMemberPointerDecl();
// NOTE(review): the guard before this return is elided in this listing —
// presumably a null-check on MPD; confirm against the full file.
880 return EmitNullMemberPointer(MPT);
882 CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
884 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
885 return BuildMemberPointer(MD, ThisAdjustment);
// Otherwise it is a data member: emit its byte offset plus the adjustment.
887 CharUnits FieldOffset =
888 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
889 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
892 /// The comparison algorithm is pretty easy: the member pointers are
893 /// the same if they're either bitwise identical *or* both null.
895 /// ARM is different here only because null-ness is more complicated.
897 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
900 const MemberPointerType *MPT,
902 CGBuilderTy &Builder = CGF.Builder;
// Choose the predicate and combining operators so the same emission code
// produces either the equality or the (De Morgan'd) inequality form.
904 llvm::ICmpInst::Predicate Eq;
905 llvm::Instruction::BinaryOps And, Or;
907 Eq = llvm::ICmpInst::ICMP_NE;
908 And = llvm::Instruction::Or;
909 Or = llvm::Instruction::And;
911 Eq = llvm::ICmpInst::ICMP_EQ;
912 And = llvm::Instruction::And;
913 Or = llvm::Instruction::Or;
916 // Member data pointers are easy because there's a unique null
917 // value, so it just comes down to bitwise equality.
918 if (MPT->isMemberDataPointer())
919 return Builder.CreateICmp(Eq, L, R);
921 // For member function pointers, the tautologies are more complex.
922 // The Itanium tautology is:
923 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
924 // The ARM tautology is:
925 // (L == R) <==> (L.ptr == R.ptr &&
926 // (L.adj == R.adj ||
927 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
928 // The inequality tautologies have exactly the same structure, except
929 // applying De Morgan's laws.
931 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
932 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
934 // This condition tests whether L.ptr == R.ptr. This must always be
935 // true for equality to hold.
936 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
938 // This condition, together with the assumption that L.ptr == R.ptr,
939 // tests whether the pointers are both null. ARM imposes an extra
941 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
942 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
944 // This condition tests whether L.adj == R.adj. If this isn't
945 // true, the pointers are unequal unless they're both null.
946 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
947 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
948 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
950 // Null member function pointers on ARM clear the low bit of Adj,
951 // so the zero condition has to check that neither low bit is set.
952 if (UseARMMethodPtrABI) {
953 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
955 // Compute (l.adj | r.adj) & 1 and test it against zero.
956 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
957 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
958 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
960 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
963 // Tie together all our conditions.
964 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
965 Result = Builder.CreateBinOp(And, PtrEq, Result,
966 Inequality ? "memptr.ne" : "memptr.eq");
// Lower a member-pointer-to-bool conversion: null member data pointers are
// represented as -1 (0 is a valid field offset); null member function
// pointers have a null 'ptr' field (and on ARM a clear virtual bit in 'adj').
// NOTE(review): the return type line, the 'MemPtr' parameter line, and the
// trailing 'return Result;' were dropped by extraction; code left as-is.
971 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
973 const MemberPointerType *MPT) {
974 CGBuilderTy &Builder = CGF.Builder;
976 /// For member data pointers, this is just a check against -1.
977 if (MPT->isMemberDataPointer()) {
978 assert(MemPtr->getType() == CGM.PtrDiffTy);
979 llvm::Value *NegativeOne =
980 llvm::Constant::getAllOnesValue(MemPtr->getType());
981 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
984 // In Itanium, a member function pointer is not null if 'ptr' is not null.
985 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
987 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
988 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
990 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
991 // (the virtual bit) is set.
992 if (UseARMMethodPtrABI) {
993 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
994 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
995 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
996 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
998 Result = Builder.CreateOr(Result, IsVirtual);
// Classify a C++ class return type: if the class must be passed indirectly
// (non-trivial copy/destroy semantics per passClassIndirect), force an
// indirect (sret-style, ByVal=false) return.
// NOTE(review): the null-check on RD, the 'return true/false' lines, and the
// closing brace were dropped by extraction; code left byte-identical.
1004 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1005 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1009 // If C++ prohibits us from making a copy, return by address.
1010 if (passClassIndirect(RD)) {
1011 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1012 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1018 /// The Itanium ABI requires non-zero initialization only for data
1019 /// member pointers, for which '0' is a valid offset.
// (Member function pointers use a null 'ptr' field, so all-zeros is a valid
// null value for them; data member pointers use -1 as null instead.)
1020 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1021 return MPT->isMemberFunctionPointer();
1024 /// The Itanium ABI always places an offset to the complete object
1025 /// at entry -2 in the vtable.
// Emits a virtual 'delete expr': for a global delete, first recover the
// complete-object pointer via the offset-to-top vtable slot and push a
// cleanup so operator delete runs even if the destructor throws; then emit
// the (complete or deleting) virtual destructor call.
// NOTE(review): the 'Address Ptr' parameter line and the ClassDecl
// declaration line were dropped by extraction; code left byte-identical.
1026 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1027 const CXXDeleteExpr *DE,
1029 QualType ElementType,
1030 const CXXDestructorDecl *Dtor) {
1031 bool UseGlobalDelete = DE->isGlobalDelete();
1032 if (UseGlobalDelete) {
1033 // Derive the complete-object pointer, which is what we need
1034 // to pass to the deallocation function.
1036 // Grab the vtable pointer as an intptr_t*.
1038 cast<CXXRecordDecl>(ElementType->getAs<RecordType>()->getDecl());
1039 llvm::Value *VTable =
1040 CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1042 // Track back to entry -2 and pull out the offset there.
1043 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1044 VTable, -2, "complete-offset.ptr");
1045 llvm::Value *Offset =
1046 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1048 // Apply the offset.
1049 llvm::Value *CompletePtr =
1050 CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1051 CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
1053 // If we're supposed to call the global delete, make sure we do so
1054 // even if the destructor throws.
1055 CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1059 // FIXME: Provide a source location here even though there's no
1060 // CXXMemberCallExpr for dtor call.
1061 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1062 EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
1064 if (UseGlobalDelete)
1065 CGF.PopCleanupBlock();
// Emit a call to __cxa_rethrow. When the rethrow cannot return (isNoReturn),
// a noreturn call/invoke is emitted instead of a plain one.
// NOTE(review): the 'if (isNoReturn) ... else' lines selecting between the
// two emission paths were dropped by extraction; code left byte-identical.
1068 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1069 // void __cxa_rethrow();
1071 llvm::FunctionType *FTy =
1072 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
1074 llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1077 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1079 CGF.EmitRuntimeCallOrInvoke(Fn);
1082 static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
1083 // void *__cxa_allocate_exception(size_t thrown_size);
1085 llvm::FunctionType *FTy =
1086 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);
1088 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1091 static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
1092 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1093 // void (*dest) (void *));
1095 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1096 llvm::FunctionType *FTy =
1097 llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);
1099 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
// Emit a 'throw' expression per the Itanium EH ABI: allocate the exception
// object with __cxa_allocate_exception, construct the thrown value into it,
// then call __cxa_throw with the RTTI descriptor and (for class types with a
// non-trivial destructor) the complete-object destructor; null otherwise.
1102 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1103 QualType ThrowType = E->getSubExpr()->getType();
1104 // Now allocate the exception object.
1105 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1106 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1108 llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
1109 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1110 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1112 CharUnits ExnAlign = getAlignmentOfExnObject();
1113 CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
1115 // Now throw the exception.
// NOTE(review): the second argument line of this call (presumably
// /*ForEH=*/true) was dropped by extraction.
1116 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1119 // The address of the destructor. If the exception type has a
1120 // trivial destructor (or isn't a record), we just pass null.
1121 llvm::Constant *Dtor = nullptr;
1122 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1123 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1124 if (!Record->hasTrivialDestructor()) {
1125 CXXDestructorDecl *DtorD = Record->getDestructor();
1126 Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
1127 Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1130 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1132 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1133 CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
// Return (creating if needed) the declaration of the Itanium runtime's
// __dynamic_cast helper, marked nounwind+readonly so redundant casts can be
// optimized away.
1136 static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1137 // void *__dynamic_cast(const void *sub,
1138 // const abi::__class_type_info *src,
1139 // const abi::__class_type_info *dst,
1140 // std::ptrdiff_t src2dst_offset);
1142 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1143 llvm::Type *PtrDiffTy =
1144 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1146 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1148 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1150 // Mark the function as nounwind readonly.
1151 llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1152 llvm::Attribute::ReadOnly };
1153 llvm::AttributeList Attrs = llvm::AttributeList::get(
1154 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1156 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1159 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1160 // void __cxa_bad_cast();
1161 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1162 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1165 /// \brief Compute the src2dst_offset hint as described in the
1166 /// Itanium C++ ABI [2.9.7]
// Returns: the static Src-to-Dst offset when Src is a unique public
// non-virtual base of Dst; otherwise one of the sentinel values -1 (virtual
// base on some path), -2 (not a public base), or -3 (multiple public bases).
// NOTE(review): the 'CharUnits Offset' accumulator declaration, the
// 'continue'/increment lines in the loop, and the final 'return Offset;'
// were dropped by extraction; code left byte-identical.
1167 static CharUnits computeOffsetHint(ASTContext &Context,
1168 const CXXRecordDecl *Src,
1169 const CXXRecordDecl *Dst) {
1170 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1171 /*DetectVirtual=*/false);
1173 // If Dst is not derived from Src we can skip the whole computation below and
1174 // return that Src is not a public base of Dst. Record all inheritance paths.
1175 if (!Dst->isDerivedFrom(Src, Paths))
1176 return CharUnits::fromQuantity(-2ULL);
1178 unsigned NumPublicPaths = 0;
1181 // Now walk all possible inheritance paths.
1182 for (const CXXBasePath &Path : Paths) {
1183 if (Path.Access != AS_public) // Ignore non-public inheritance.
1188 for (const CXXBasePathElement &PathElement : Path) {
1189 // If the path contains a virtual base class we can't give any hint.
1191 if (PathElement.Base->isVirtual())
1192 return CharUnits::fromQuantity(-1ULL);
1194 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1197 // Accumulate the base class offsets.
1198 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1199 Offset += L.getBaseClassOffset(
1200 PathElement.Base->getType()->getAsCXXRecordDecl());
1204 // -2: Src is not a public base of Dst.
1205 if (NumPublicPaths == 0)
1206 return CharUnits::fromQuantity(-2ULL);
1208 // -3: Src is a multiple public base type but never a virtual base type.
1209 if (NumPublicPaths > 1)
1210 return CharUnits::fromQuantity(-3ULL);
1212 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1213 // Return the offset of Src from the origin of Dst.
1217 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1218 // void __cxa_bad_typeid();
1219 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1221 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
// Whether typeid on this operand needs an explicit null check before the
// vtable load.
// NOTE(review): the function body (return statement) was dropped by
// extraction; code left byte-identical.
1224 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1225 QualType SrcRecordTy) {
// Emit the failure path for typeid on a null (dereferenced) pointer: call
// __cxa_bad_typeid, mark it noreturn, and terminate the block.
1229 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1230 llvm::Value *Fn = getBadTypeidFn(CGF);
1231 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1232 CGF.Builder.CreateUnreachable();
// Emit a polymorphic typeid: the Itanium ABI stores the std::type_info
// pointer at vtable slot -1, so load the vptr and index back one entry.
// NOTE(review): the 'Address ThisPtr' parameter line and the ClassDecl
// declaration line were dropped by extraction; code left byte-identical.
1235 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1236 QualType SrcRecordTy,
1238 llvm::Type *StdTypeInfoPtrTy) {
1240 cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1241 llvm::Value *Value =
1242 CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1244 // Load the type info.
1245 Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1246 return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
// Whether the __dynamic_cast call needs a preceding null check on the
// operand.
// NOTE(review): the function body (return statement) was dropped by
// extraction; code left byte-identical.
1249 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1250 QualType SrcRecordTy) {
// Emit a checked dynamic_cast via the runtime __dynamic_cast helper: pass
// source/destination RTTI plus the src2dst offset hint, bitcast the result,
// and for reference targets branch to a __cxa_bad_cast block on null.
// NOTE(review): the trailing 'return Value;' (and on the bad-cast path the
// unreachable/branch lines) were dropped by extraction; code left as-is.
1254 llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1255 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1256 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1257 llvm::Type *PtrDiffLTy =
1258 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1259 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1261 llvm::Value *SrcRTTI =
1262 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1263 llvm::Value *DestRTTI =
1264 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1266 // Compute the offset hint.
1267 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1268 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1269 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1271 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1273 // Emit the call to __dynamic_cast.
1274 llvm::Value *Value = ThisAddr.getPointer();
1275 Value = CGF.EmitCastToVoidPtr(Value);
1277 llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1278 Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1279 Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1281 /// C++ [expr.dynamic.cast]p9:
1282 /// A failed cast to reference type throws std::bad_cast
1283 if (DestTy->isReferenceType()) {
1284 llvm::BasicBlock *BadCastBlock =
1285 CGF.createBasicBlock("dynamic_cast.bad_cast");
1287 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1288 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1290 CGF.EmitBlock(BadCastBlock);
1291 EmitBadCastCall(CGF);
// dynamic_cast<void*>: no runtime call needed — read the offset-to-top
// entry at vtable slot -2 and add it to the operand to reach the
// complete-object address.
// NOTE(review): the 'Address ThisAddr' parameter line and parts of the
// GetVTablePtr/load argument lists were dropped by extraction.
1297 llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1299 QualType SrcRecordTy,
1301 llvm::Type *PtrDiffLTy =
1302 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1303 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1306 cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1307 // Get the vtable pointer.
1308 llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
1311 // Get the offset-to-top from the vtable.
1312 llvm::Value *OffsetToTop =
1313 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1315 CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
1318 // Finally, add the offset to the pointer.
1319 llvm::Value *Value = ThisAddr.getPointer();
1320 Value = CGF.EmitCastToVoidPtr(Value);
1321 Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1323 return CGF.Builder.CreateBitCast(Value, DestLTy);
// Emit the failed-reference-cast path: call __cxa_bad_cast (noreturn) and
// terminate the current block.
// NOTE(review): the trailing return statement was dropped by extraction.
1326 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1327 llvm::Value *Fn = getBadCastFn(CGF);
1328 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1329 CGF.Builder.CreateUnreachable();
// Load the offset of a virtual base from the vbase-offset slot in the
// object's vtable: index the vptr by the statically known vbase offset
// offset, then load the ptrdiff_t stored there.
// NOTE(review): the return-type line, the 'Address This' parameter line, and
// the trailing 'return VBaseOffset;' were dropped by extraction.
1334 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1336 const CXXRecordDecl *ClassDecl,
1337 const CXXRecordDecl *BaseClassDecl) {
1338 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1339 CharUnits VBaseOffsetOffset =
1340 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1343 llvm::Value *VBaseOffsetPtr =
1344 CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1345 "vbase.offset.ptr");
1346 VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1347 CGM.PtrDiffTy->getPointerTo());
1349 llvm::Value *VBaseOffset =
1350 CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
// Emit the Itanium constructor variants for D: always the base-object
// constructor (C2), and the complete-object constructor (C1) unless the
// class is abstract (a complete object of it can never be created).
1356 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1357 // Just make sure we're in sync with TargetCXXABI.
1358 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1360 // The constructor used for constructing this as a base class;
1361 // ignores virtual bases.
1362 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1364 // The constructor used for constructing this as a complete class;
1365 // constructs the virtual bases, then calls the base constructor.
1366 if (!D->getParent()->isAbstract()) {
1367 // We don't need to emit the complete ctor if the class is abstract.
1368 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
// Adjust a constructor/destructor signature for the Itanium ABI: base-object
// variants of classes with virtual bases take a VTT (void**) immediately
// after 'this'; report one prefix argument added in that case.
1372 CGCXXABI::AddedStructorArgs
1373 ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
1374 SmallVectorImpl<CanQualType> &ArgTys) {
1375 ASTContext &Context = getContext();
1377 // All parameters are already in place except VTT, which goes after 'this'.
1378 // These are Clang types, so we don't need to worry about sret yet.
1380 // Check if we need to add a VTT parameter (which has type void **).
1381 if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0) {
1382 ArgTys.insert(ArgTys.begin() + 1,
1383 Context.getPointerType(Context.VoidPtrTy));
1384 return AddedStructorArgs::prefix(1);
1386 return AddedStructorArgs{};
// Emit the Itanium destructor variants: base (D2) and complete (D1)
// unconditionally; the deleting destructor (D0) is guarded by a condition
// whose line was dropped by extraction (presumably D->isVirtual()).
1389 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1390 // The destructor used for destructing this as a base class; ignores
1392 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1394 // The destructor used for destructing this as a most-derived class;
1395 // call the base destructor and then destructs any virtual bases.
1396 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1398 // The destructor in a virtual table is always a 'deleting'
1399 // destructor, which calls the complete destructor and then uses the
1400 // appropriate operator delete.
1402 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
// Add the implicit VTT parameter (void**) to a structor's FunctionArgList,
// inserted right after 'this', when the current GlobalDecl requires one.
// The created ImplicitParamDecl is stashed so the prolog can load it.
// NOTE(review): the 'QualType &ResTy' parameter line of this function was
// dropped by extraction; code left byte-identical.
1405 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1407 FunctionArgList &Params) {
1408 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1409 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1411 // Check if we need a VTT parameter as well.
1412 if (NeedsVTTParameter(CGF.CurGD)) {
1413 ASTContext &Context = getContext();
1415 // FIXME: avoid the fake decl
1416 QualType T = Context.getPointerType(Context.VoidPtrTy);
1417 auto *VTTDecl = ImplicitParamDecl::Create(
1418 Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1419 T, ImplicitParamDecl::CXXVTT);
1420 Params.insert(Params.begin() + 1, VTTDecl);
1421 getStructorImplicitParamDecl(CGF) = VTTDecl;
// Instance-method prolog: load 'this' (line dropped by extraction), load the
// VTT parameter when present, and pre-store 'this' into the return slot for
// ABIs/functions that return 'this'.
1425 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1426 // Naked functions have no prolog.
1427 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1430 /// Initialize the 'this' slot.
1433 /// Initialize the 'vtt' slot if needed.
1434 if (getStructorImplicitParamDecl(CGF)) {
1435 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1436 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1439 /// If this is a function that the ABI specifies returns 'this', initialize
1440 /// the return slot to 'this' at the start of the function.
1442 /// Unlike the setting of return types, this is done within the ABI
1443 /// implementation instead of by clients of CGCXXABI because:
1444 /// 1) getThisValue is currently protected
1445 /// 2) in theory, an ABI could implement 'this' returns some other way;
1446 /// HasThisReturn only specifies a contract, not the implementation
1447 if (HasThisReturn(CGF.CurGD))
1448 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
// At a constructor call site, insert the implicit VTT argument (as the
// second argument, after 'this') when the callee variant needs one; report
// one prefix argument so the call arranger stays in sync.
// NOTE(review): the 'llvm::Value *VTT =' declaration line was dropped by
// extraction; code left byte-identical.
1451 CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
1452 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1453 bool ForVirtualBase, bool Delegating, CallArgList &Args) {
1454 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1455 return AddedStructorArgs{};
1457 // Insert the implicit 'vtt' argument as the second argument.
1459 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1460 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1461 Args.insert(Args.begin() + 1,
1462 CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
1463 return AddedStructorArgs::prefix(1); // Added one arg.
// Emit a direct (non-virtual-dispatch) destructor call, passing the VTT
// argument when required. Apple kext virtual destructors are routed through
// their special vtable-based calling convention instead.
// NOTE(review): the 'CGCallee Callee;' declaration and the 'else' line
// pairing the kext branch with the forDirect path were dropped by
// extraction; code left byte-identical.
1466 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1467 const CXXDestructorDecl *DD,
1468 CXXDtorType Type, bool ForVirtualBase,
1469 bool Delegating, Address This) {
1470 GlobalDecl GD(DD, Type);
1471 llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1472 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1475 if (getContext().getLangOpts().AppleKext &&
1476 Type != Dtor_Base && DD->isVirtual())
1477 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1480 CGCallee::forDirect(CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
1483 CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
1484 This.getPointer(), VTT, VTTTy,
// Emit the vtable (and associated metadata) for RD: build the initializer
// from the vtable layout + RTTI, set linkage/comdat/visibility/alignment,
// special-case __cxxabiv1::__fundamental_type_info (emit fundamental-type
// RTTI like GCC does), and attach CFI/type metadata for defined vtables.
1488 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1489 const CXXRecordDecl *RD) {
1490 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
// Already emitted (the early 'return' line was dropped by extraction).
1491 if (VTable->hasInitializer())
1494 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1495 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1496 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1497 llvm::Constant *RTTI =
1498 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1500 // Create and set the initializer.
1501 ConstantInitBuilder Builder(CGM);
1502 auto Components = Builder.beginStruct();
1503 CGVT.createVTableInitializer(Components, VTLayout, RTTI);
1504 Components.finishAndSetAsInitializer(VTable);
1506 // Set the correct linkage.
1507 VTable->setLinkage(Linkage);
1509 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1510 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1512 // Set the right visibility.
1513 CGM.setGlobalVisibility(VTable, RD);
1515 // Use pointer alignment for the vtable. Otherwise we would align them based
1516 // on the size of the initializer which doesn't make sense as only single
1518 unsigned PAlign = CGM.getTarget().getPointerAlign(0);
1519 VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());
1521 // If this is the magic class __cxxabiv1::__fundamental_type_info,
1522 // we will emit the typeinfo for the fundamental types. This is the
1523 // same behaviour as GCC.
1524 const DeclContext *DC = RD->getDeclContext();
1525 if (RD->getIdentifier() &&
1526 RD->getIdentifier()->isStr("__fundamental_type_info") &&
1527 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1528 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1529 DC->getParent()->isTranslationUnit())
1530 EmitFundamentalRTTIDescriptors(RD->hasAttr<DLLExportAttr>());
1532 if (!VTable->isDeclarationForLinker())
1533 CGM.EmitVTableTypeMetadata(VTable, VTLayout)
// A vptr field needs a runtime virtual-base offset only when it lives in a
// virtual base and the current structor takes a VTT.
// NOTE(review): the 'return false;' line for the NearestVBase == nullptr
// case was dropped by extraction; code left byte-identical.
1536 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1537 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1538 if (Vptr.NearestVBase == nullptr)
1540 return NeedsVTTParameter(CGF.CurGD);
// Select the vtable address point while inside a structor: if the subobject
// involves virtual bases and the structor takes a VTT, the address point
// must come from the VTT; otherwise the static address point suffices.
1543 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1544 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1545 const CXXRecordDecl *NearestVBase) {
1547 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1548 NeedsVTTParameter(CGF.CurGD)) {
1549 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1552 return getVTableAddressPoint(Base, VTableClass);
// Compute the constant address point for Base within VTableClass's vtable
// group as an inbounds GEP {0, vtable-index, address-point-index} into the
// vtable global; InRangeIndex=1 tells LLVM the pointer stays within that
// one vtable of the group.
// NOTE(review): the return-type line of this definition was dropped by
// extraction; code left byte-identical.
1556 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1557 const CXXRecordDecl *VTableClass) {
1558 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1560 // Find the appropriate vtable within the vtable group, and the address point
1561 // within that vtable.
1562 VTableLayout::AddressPointLocation AddressPoint =
1563 CGM.getItaniumVTableContext()
1564 .getVTableLayout(VTableClass)
1565 .getAddressPoint(Base);
1566 llvm::Value *Indices[] = {
1567 llvm::ConstantInt::get(CGM.Int32Ty, 0),
1568 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1569 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1572 return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1573 Indices, /*InBounds=*/true,
1574 /*InRangeIndex=*/1);
// Load a vtable address point out of the VTT: index the VTT pointer by the
// subobject's secondary virtual pointer index and load the stored address
// point.
1577 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1578 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1579 const CXXRecordDecl *NearestVBase) {
1580 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1581 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1583 // Get the secondary vpointer index.
1584 uint64_t VirtualPointerIndex =
1585 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1588 llvm::Value *VTT = CGF.LoadCXXVTT();
1589 if (VirtualPointerIndex)
1590 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1592 // And load the address point from the VTT.
1593 return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
// Constant-expression variant: outside a structor the static address point
// is always a constant, so just reuse getVTableAddressPoint.
// NOTE(review): the cast<llvm::Constant>(...) wrapper line appears to have
// been dropped by extraction; code left byte-identical.
1596 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1597 BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1598 return getVTableAddressPoint(Base, VTableClass);
// Get or create the vtable global for RD (declaration only; the initializer
// is filled in by emitVTableDefinitions). Results are memoized in VTables.
// NOTE(review): the cache-hit early return, the trailing 'return VTable;',
// and the closing brace were dropped by extraction; code left byte-identical.
1601 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1602 CharUnits VPtrOffset) {
1603 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1605 llvm::GlobalVariable *&VTable = VTables[RD];
1609 // Queue up this vtable for possible deferred emission.
1610 CGM.addDeferredVTable(RD);
1612 SmallString<256> Name;
1613 llvm::raw_svector_ostream Out(Name);
1614 getMangleContext().mangleCXXVTable(RD, Out);
1616 const VTableLayout &VTLayout =
1617 CGM.getItaniumVTableContext().getVTableLayout(RD);
1618 llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1620 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1621 Name, VTableType, llvm::GlobalValue::ExternalLinkage);
1622 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1624 if (RD->hasAttr<DLLImportAttr>())
1625 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1626 else if (RD->hasAttr<DLLExportAttr>())
1627 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
// Load a virtual function pointer from the object's vtable. With CFI
// checked loads enabled the load goes through llvm.type.checked.load;
// otherwise it is a plain indexed load, optionally tagged !invariant.load
// under -fstrict-vtable-pointers to enable devirtualization.
// NOTE(review): parameter lines ('Address This', 'llvm::Type *Ty'), the
// 'llvm::Value *VFunc;' declaration, the else branch structure, the
// VFuncLoad declaration, and 'return Callee;' were dropped by extraction.
1636 CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1637 GD = GD.getCanonicalDecl();
1638 Ty = Ty->getPointerTo()->getPointerTo();
1639 auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1640 llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
1642 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1644 if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1645 VFunc = CGF.EmitVTableTypeCheckedLoad(
1646 MethodDecl->getParent(), VTable,
1647 VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1649 CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1651 llvm::Value *VFuncPtr =
1652 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
1654 CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
1656 // Add !invariant.load md to virtual function load to indicate that
1657 // function didn't change inside vtable.
1658 // It's safe to add it without -fstrict-vtable-pointers, but it would not
1659 // help in devirtualization because it will only matter if we will have 2
1660 // the same virtual function loads from the same vtable load, which won't
1661 // happen without enabled devirtualization with -fstrict-vtable-pointers.
1662 if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1663 CGM.getCodeGenOpts().StrictVTablePointers)
1664 VFuncLoad->setMetadata(
1665 llvm::LLVMContext::MD_invariant_load,
1666 llvm::MDNode::get(CGM.getLLVMContext(),
1667 llvm::ArrayRef<llvm::Metadata *>()));
1671 CGCallee Callee(MethodDecl, VFunc);
// Emit a virtual destructor call (complete or deleting variant) by fetching
// the function pointer from the vtable and issuing a member call with no
// extra arguments.
// NOTE(review): the 'CGCallee Callee =' line in front of
// getVirtualFunctionPointer and the trailing 'return nullptr;' were dropped
// by extraction; code left byte-identical.
1675 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1676 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1677 Address This, const CXXMemberCallExpr *CE) {
1678 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1679 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1681 const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
1682 Dtor, getFromDtorType(DtorType));
1683 llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1685 getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
1686 CE ? CE->getLocStart() : SourceLocation());
1688 CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
1689 This.getPointer(), /*ImplicitParam=*/nullptr,
1690 QualType(), CE, nullptr);
// Emit the VTT (virtual table table) for a class with virtual bases, using
// the same linkage as the class's vtable.
1694 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1695 CodeGenVTables &VTables = CGM.getVTables();
1696 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1697 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
// Whether an available_externally copy of RD's vtable may be emitted to aid
// devirtualization: never under -fapple-kext, and only if no inline virtual
// function is left unemitted and the vtable is not hidden.
// NOTE(review): the 'return false;' line of the AppleKext early-out was
// dropped by extraction; code left byte-identical.
1700 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
1701 // We don't emit available_externally vtables if we are in -fapple-kext mode
1702 // because kext mode does not permit devirtualization.
1703 if (CGM.getLangOpts().AppleKext)
1706 // If we don't have any not emitted inline virtual function, and if vtable is
1707 // not hidden, then we are safe to emit available_externally copy of vtable.
1708 // FIXME we can still emit a copy of the vtable if we
1709 // can emit definition of the inline functions.
1710 return !hasAnyUnusedVirtualInlineFunction(RD) && !isVTableHidden(RD);
// Shared implementation of this-pointer and return-value adjustments for
// thunks: apply a static (non-virtual) byte offset and/or a virtual offset
// loaded from the vtable. For this adjustments (base-to-derived) the
// non-virtual part is applied before the virtual part; for return
// adjustments (derived-to-base) it is applied after.
// NOTE(review): the 'Address InitialPtr' parameter line was dropped by
// extraction; code left byte-identical.
1712 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
1714 int64_t NonVirtualAdjustment,
1715 int64_t VirtualAdjustment,
1716 bool IsReturnAdjustment) {
1717 if (!NonVirtualAdjustment && !VirtualAdjustment)
1718 return InitialPtr.getPointer();
1720 Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
1722 // In a base-to-derived cast, the non-virtual adjustment is applied first.
1723 if (NonVirtualAdjustment && !IsReturnAdjustment) {
1724 V = CGF.Builder.CreateConstInBoundsByteGEP(V,
1725 CharUnits::fromQuantity(NonVirtualAdjustment));
1728 // Perform the virtual adjustment if we have one.
1729 llvm::Value *ResultPtr;
1730 if (VirtualAdjustment) {
1731 llvm::Type *PtrDiffTy =
1732 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1734 Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
1735 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
1737 llvm::Value *OffsetPtr =
1738 CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
1740 OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
1742 // Load the adjustment offset from the vtable.
1743 llvm::Value *Offset =
1744 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1746 // Adjust our pointer.
1747 ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
1749 ResultPtr = V.getPointer();
1752 // In a derived-to-base conversion, the non-virtual adjustment is
1754 if (NonVirtualAdjustment && IsReturnAdjustment) {
1755 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
1756 NonVirtualAdjustment);
1759 // Cast back to the original type.
1760 return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
// Thin wrapper: apply a thunk's 'this' adjustment (non-virtual offset plus
// optional vcall-offset slot) via performTypeAdjustment.
// NOTE(review): the 'Address This' parameter line was dropped by extraction.
1763 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
1765 const ThisAdjustment &TA) {
1766 return performTypeAdjustment(CGF, This, TA.NonVirtual,
1767 TA.Virtual.Itanium.VCallOffsetOffset,
1768 /*IsReturnAdjustment=*/false);
// Thin wrapper: apply a thunk's covariant return adjustment (non-virtual
// offset plus optional vbase-offset slot) via performTypeAdjustment.
// NOTE(review): the return-type line of this definition was dropped by
// extraction; code left byte-identical.
1772 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
1773 const ReturnAdjustment &RA) {
1774 return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
1775 RA.Virtual.Itanium.VBaseOffsetOffset,
1776 /*IsReturnAdjustment=*/true);
// ARM thunk return: destructor thunks have indeterminate results in the ARM
// ABI, so return undef instead of a real value; everything else defers to
// the Itanium implementation.
1779 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
1780 RValue RV, QualType ResultType) {
1781 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
1782 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
1784 // Destructor thunks in the ARM ABI have indeterminate results.
1785 llvm::Type *T = CGF.ReturnValue.getElementType();
1786 RValue Undef = RValue::get(llvm::UndefValue::get(T));
1787 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
1790 /************************** Array allocation cookies **************************/
// Itanium array cookie size: a size_t, padded up to the element alignment
// so the array data that follows it stays correctly aligned.
1792 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1793 // The array cookie is a size_t; pad that up to the element alignment.
1794 // The cookie is actually right-justified in that space.
1795 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
1796 CGM.getContext().getTypeAlignInChars(elementType));
// Write the array-new cookie (the element count, right-justified in the
// cookie space) at the start of the allocation and return the adjusted
// pointer to the actual array data. Under ASan (address space 0, replaceable
// global operator new) the cookie is additionally poisoned via
// __asan_poison_cxx_array_cookie and the store itself is left
// uninstrumented.
// NOTE(review): the 'Address NewPtr' parameter line and the 'llvm::Constant
// *F =' declaration line were dropped by extraction; code left as-is.
1799 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1801 llvm::Value *NumElements,
1802 const CXXNewExpr *expr,
1803 QualType ElementType) {
1804 assert(requiresArrayCookie(expr));
1806 unsigned AS = NewPtr.getAddressSpace();
1808 ASTContext &Ctx = getContext();
1809 CharUnits SizeSize = CGF.getSizeSize();
1811 // The size of the cookie.
1812 CharUnits CookieSize =
1813 std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
1814 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
1816 // Compute an offset to the cookie.
1817 Address CookiePtr = NewPtr;
1818 CharUnits CookieOffset = CookieSize - SizeSize;
1819 if (!CookieOffset.isZero())
1820 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
1822 // Write the number of elements into the appropriate slot.
1823 Address NumElementsPtr =
1824 CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
1825 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
1827 // Handle the array cookie specially in ASan.
1828 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
1829 expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
1830 // The store to the CookiePtr does not need to be instrumented.
1831 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
1832 llvm::FunctionType *FTy =
1833 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
1835 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
1836 CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
1839 // Finally, compute a pointer to the actual data buffer by skipping
1840 // over the cookie completely.
1841 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
// Read back the element count stored by InitializeArrayCookie.  The count
// is the size_t immediately preceding the element storage, i.e.
// right-justified within the cookie.
// NOTE(review): the 'Address allocPtr' parameter line and the
// 'numElementsPtr =' assignment appear to have been lost in this excerpt.
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                CharUnits cookieSize) {
  // The element size is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  // Without ASan (or in a non-default address space) a plain load suffices.
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
  CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
1870 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1871 // ARM says that the cookie is always:
1872 // struct array_cookie {
1873 // std::size_t element_size; // element_size != 0
1874 // std::size_t element_count;
1876 // But the base ABI doesn't give anything an alignment greater than
1877 // 8, so we can dismiss this as typical ABI-author blindness to
1878 // actual language complexity and round up to the element alignment.
1879 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
1880 CGM.getContext().getTypeAlignInChars(elementType));
1883 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1885 llvm::Value *numElements,
1886 const CXXNewExpr *expr,
1887 QualType elementType) {
1888 assert(requiresArrayCookie(expr));
1890 // The cookie is always at the start of the buffer.
1891 Address cookie = newPtr;
1893 // The first element is the element size.
1894 cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
1895 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
1896 getContext().getTypeSizeInChars(elementType).getQuantity());
1897 CGF.Builder.CreateStore(elementSize, cookie);
1899 // The second element is the element count.
1900 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
1901 CGF.Builder.CreateStore(numElements, cookie);
1903 // Finally, compute a pointer to the actual data buffer by skipping
1904 // over the cookie completely.
1905 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
1906 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
1909 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1911 CharUnits cookieSize) {
1912 // The number of elements is at offset sizeof(size_t) relative to
1913 // the allocated pointer.
1914 Address numElementsPtr
1915 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
1917 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1918 return CGF.Builder.CreateLoad(numElementsPtr);
1921 /*********************** Static local initialization **************************/
1923 static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
1924 llvm::PointerType *GuardPtrTy) {
1925 // int __cxa_guard_acquire(__guard *guard_object);
1926 llvm::FunctionType *FTy =
1927 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
1928 GuardPtrTy, /*isVarArg=*/false);
1929 return CGM.CreateRuntimeFunction(
1930 FTy, "__cxa_guard_acquire",
1931 llvm::AttributeList::get(CGM.getLLVMContext(),
1932 llvm::AttributeList::FunctionIndex,
1933 llvm::Attribute::NoUnwind));
1936 static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
1937 llvm::PointerType *GuardPtrTy) {
1938 // void __cxa_guard_release(__guard *guard_object);
1939 llvm::FunctionType *FTy =
1940 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1941 return CGM.CreateRuntimeFunction(
1942 FTy, "__cxa_guard_release",
1943 llvm::AttributeList::get(CGM.getLLVMContext(),
1944 llvm::AttributeList::FunctionIndex,
1945 llvm::Attribute::NoUnwind));
1948 static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
1949 llvm::PointerType *GuardPtrTy) {
1950 // void __cxa_guard_abort(__guard *guard_object);
1951 llvm::FunctionType *FTy =
1952 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1953 return CGM.CreateRuntimeFunction(
1954 FTy, "__cxa_guard_abort",
1955 llvm::AttributeList::get(CGM.getLLVMContext(),
1956 llvm::AttributeList::FunctionIndex,
1957 llvm::Attribute::NoUnwind));
/// An EH cleanup that calls __cxa_guard_abort on the given guard variable
/// if guarded initialization unwinds, releasing the guard so another thread
/// may retry the initialization.
/// NOTE(review): the tail of Emit() (the 'guard' argument and closing
/// braces) appears to have been lost in this excerpt.
struct CallGuardAbort final : EHScopeStack::Cleanup {
  llvm::GlobalVariable *Guard;
  CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
///
/// Emit the once-only initialization sequence for a guarded variable
/// (Itanium C++ ABI 3.3.2): test the guard, optionally bracket the
/// initializer with __cxa_guard_acquire/__cxa_guard_release, and run the
/// initializer.
/// NOTE(review): several structural lines (the 'const VarDecl &D'
/// parameter, 'else' branches, 'if (threadsafe)' guards, closing braces)
/// appear to have been lost in this excerpt — reconcile before building.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  // Guard variables are 64 bits in the generic ABI and size width on ARM
  // (i.e. 32-bit on AArch32, 64-bit on AArch64).
  if (UseARMGuardVarABI) {
    guardTy = CGF.SizeTy;
    guardAlignment = CGF.getSizeAlign();
    guardTy = CGF.Int64Ty;
    guardAlignment = CharUnits::fromQuantity(
        CGM.getDataLayout().getABITypeAlignment(guardTy));
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo();

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  // Mangle the name for the guard.
  SmallString<256> guardName;
  llvm::raw_svector_ostream out(guardName);
  getMangleContext().mangleStaticGuardVariable(&D, out);

  // Create the guard variable with a zero-initializer.
  // Just absorb linkage and visibility from the guarded variable.
  guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                   false, var->getLinkage(),
                                   llvm::ConstantInt::get(guardTy, 0),
  guard->setVisibility(var->getVisibility());
  // If the variable is thread-local, so is its guard variable.
  guard->setThreadLocalMode(var->getThreadLocalMode());
  guard->setAlignment(guardAlignment.getQuantity());

  // The ABI says: "It is suggested that it be emitted in the same COMDAT
  // group as the associated data object." In practice, this doesn't work for
  // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
  llvm::Comdat *C = var->getComdat();
  if (!D.isLocalVarDecl() && C &&
      (CGM.getTarget().getTriple().isOSBinFormatELF() ||
       CGM.getTarget().getTriple().isOSBinFormatWasm())) {
    guard->setComdat(C);
    // An inline variable's guard function is run from the per-TU
    // initialization function, not via a dedicated global ctor function, so
    // we can't put it in a comdat.
    if (!NonTemplateInline)
      CGF.CurFn->setComdat(C);
  } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
    guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
  CGM.setStaticLocalDeclGuardAddress(&D, guard);

  Address guardAddr = Address(guard, guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         ... initialize the object ...;
  //         __cxa_guard_abort (&obj_guard);
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

  // An implementation supporting thread-safety on multiprocessor
  // systems must also guarantee that references to the initialized
  // object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
  LI->setAtomic(llvm::AtomicOrdering::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
  (UseARMGuardVarABI && !useInt8GuardVariable)
      ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
  llvm::Value *isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  // Call __cxa_guard_acquire.
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

  llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

  // Only run the initializer if __cxa_guard_acquire returned non-zero.
  Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                       InitBlock, EndBlock);

  // Call __cxa_guard_abort along the exceptional edge.
  CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

  CGF.EmitBlock(InitBlock);

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  // Pop the guard-abort cleanup if we pushed one.
  CGF.PopCleanupBlock();

  // Call __cxa_guard_release. This cannot throw.
  CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                              guardAddr.getPointer());
  // Mark the guard variable as initialized by storing 1.
  Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);

  CGF.EmitBlock(EndBlock);
/// Register a global destructor using __cxa_atexit.  The destructor is
/// registered against __dso_handle so the runtime runs it when this shared
/// object is unloaded.
/// NOTE(review): the trailing 'bool TLS' parameter, its 'if (TLS)' guard,
/// and the final 'handle' entry of the args array appear to have been lost
/// in this excerpt.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::Constant *dtor,
                                        llvm::Constant *addr,
  const char *Name = "__cxa_atexit";
  // For thread_local destruction Darwin uses _tlv_atexit; other targets use
  // the C++ ABI's __cxa_thread_atexit.
  const llvm::Triple &T = CGF.getTarget().getTriple();
  Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC. Go ahead and cast it to the
  llvm::Type *dtorTy =
      llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
    fn->setDoesNotThrow();

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(dtor, dtorTy),
    llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
  CGF.EmitNounwindRuntimeCall(atexit, args);
/// Register a global destructor as best as we know how: via __cxa_atexit
/// when enabled, via the kext destructor list on Apple kexts, and via
/// atexit(3) otherwise.
/// NOTE(review): the 'const VarDecl &D' parameter, the 'if (D.getTLSKind())'
/// guard before ErrorUnsupported, and a closing brace appear to have been
/// lost in this excerpt.
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
                                       llvm::Constant *dtor,
                                       llvm::Constant *addr) {
  // Use __cxa_atexit if available.
  if (CGM.getCodeGenOpts().CXAAtExit)
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
  CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
  // In Apple kexts, we want to add a global destructor entry.
  // FIXME: shouldn't this be guarded by some variable?
  if (CGM.getLangOpts().AppleKext) {
    // Generate a global destructor entry.
    return CGM.AddCXXDtorEntry(dtor, addr);
  CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2221 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2222 CodeGen::CodeGenModule &CGM) {
2223 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2224 // Darwin prefers to have references to thread local variables to go through
2225 // the thread wrapper instead of directly referencing the backing variable.
2226 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2227 CGM.getTarget().getTriple().isOSDarwin();
/// Get the appropriate linkage for the wrapper function. This is essentially
/// the weak form of the variable's linkage; every translation unit which needs
/// the wrapper emits a copy, and we want the linker to merge them.
/// NOTE(review): both guarded branches appear to be missing their
/// 'return VarLinkage;' statements in this excerpt — reconcile before
/// building.
static llvm::GlobalValue::LinkageTypes
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
  llvm::GlobalValue::LinkageTypes VarLinkage =
      CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);

  // For internal linkage variables, we don't need an external or weak wrapper.
  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))

  // If the thread wrapper is replaceable, give it appropriate linkage.
  if (isThreadWrapperReplaceable(VD, CGM))
    if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
        !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))

  return llvm::GlobalValue::WeakODRLinkage;
/// Get or create the thread_local wrapper function for VD.  The wrapper
/// runs any required dynamic initialization and returns the variable's
/// address; callers go through it instead of referencing the variable
/// directly.
/// NOTE(review): the 'llvm::Function *' return-type line, the second
/// parameter, and the trailing 'return Wrapper;' appear to have been lost
/// in this excerpt.
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  llvm::raw_svector_ostream Out(WrapperName);
  getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper returns a pointer to the (non-reference) variable type.
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  CGM.SetLLVMFunctionAttributes(nullptr, FI, Wrapper);

  if (VD->hasDefinition())
    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage() && !(isThreadWrapperReplaceable(VD, CGM) &&
      !llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) &&
      !llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage())))
    Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // Replaceable wrappers use the fast-TLS calling convention and never
  // unwind.
  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
/// Emit the per-TU thread_local machinery: a guarded __tls_init function
/// for ordered initializers, per-variable init-function aliases or weak
/// declarations, and the thread wrapper bodies that call them.
/// NOTE(review): several 'else' keywords and closing braces appear to have
/// been lost in this excerpt — reconcile before building.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations have unordered initialization.
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
      OrderedInits.push_back(CXXThreadLocalInits[I]);

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
    // __tls_guard is a thread-local i8 flag that makes __tls_init run its
    // body only once per thread.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getQuantity());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, OrderedInits,
        Address(Guard, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);

  // Emit thread wrappers.
  for (const VarDecl *VD : CXXThreadLocals) {
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);

    // Some targets require that all access to thread local variables go through
    // the thread wrapper. This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition()) {
      Wrapper->setLinkage(llvm::Function::ExternalLinkage);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    llvm::raw_svector_ostream Out(InitFnName);
    getMangleContext().mangleItaniumThreadLocalInit(VD, Out);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
      Init = llvm::Function::Create(FnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(nullptr, FI, cast<llvm::Function>(Init));

    Init->setVisibility(Var->getVisibility());

    // Build the wrapper body: call the init function (conditionally if we
    // only have a weak declaration), then return the variable's address.
    llvm::LLVMContext &Context = CGM.getModule().getContext();
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (InitIsInitFunc) {
      llvm::CallInst *CallVal = Builder.CreateCall(Init);
      if (isThreadWrapperReplaceable(VD, CGM))
        CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Var;
    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Val, Align);
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");
    Builder.CreateRet(Val);
// Emit an lvalue for an access to a thread_local variable by calling its
// thread wrapper, which yields the variable's address.
// NOTE(review): the 'const VarDecl *VD' parameter, the 'LValue LV;'
// declaration, and the 'else'/'return LV;' lines appear to have been lost
// in this excerpt.
LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
                                                   QualType LValType) {
  llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
  llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);

  // Match the wrapper's own calling convention (e.g. CXX_FAST_TLS).
  llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
  CallVal->setCallingConv(Wrapper->getCallingConv());

  // For a reference, the wrapper returns the referenced object's address.
  if (VD->getType()->isReferenceType())
    LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
    LV = CGF.MakeAddrLValue(CallVal, LValType,
                            CGF.getContext().getDeclAlign(VD));
  // FIXME: need setObjCGCLValueClass?
2446 /// Return whether the given global decl needs a VTT parameter, which it does
2447 /// if it's a base constructor or destructor with virtual bases.
2448 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2449 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2451 // We don't have any virtual bases, just return early.
2452 if (!MD->getParent()->getNumVBases())
2455 // Check if we have a base constructor.
2456 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2459 // Check if we have a base destructor.
2460 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
/// ItaniumRTTIBuilder - Builds Itanium-ABI RTTI (std::type_info) structures
/// for the types this module emits.
/// NOTE(review): the 'public:' access specifier, the 'enum {' openers and
/// '};' closers, and several flag initializers (PTI_Const/Volatile/Restrict,
/// BCTI_Virtual/Public) appear to have been lost in this excerpt.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  /// PTI_Const - Type has const qualifier.
  /// PTI_Volatile - Type has volatile qualifier.
  /// PTI_Restrict - Type has restrict qualifier.
  /// PTI_Incomplete - Type is incomplete.
  PTI_Incomplete = 0x8,

  /// PTI_ContainingClassIncomplete - Containing class is incomplete.
  /// (in pointer to member).
  PTI_ContainingClassIncomplete = 0x10,

  /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
  //PTI_TransactionSafe = 0x20,

  /// PTI_Noexcept - Pointee is noexcept function (C++1z).
  PTI_Noexcept = 0x40,

  // VMI type info flags.
  /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
  VMI_NonDiamondRepeat = 0x1,

  /// VMI_DiamondShaped - Class is diamond shaped.
  VMI_DiamondShaped = 0x2

  // Base class type info flags.
  /// BCTI_Virtual - Base class is virtual.
  /// BCTI_Public - Base class is public.

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  ///
  /// \param Force - true to force the creation of this RTTI value
  /// \param DLLExport - true to mark the RTTI value as DLLExport
  llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false,
                                bool DLLExport = false);
// Emit (or replace) the _ZTS* global holding the mangled type name string
// for Ty, with the requested linkage, and return it.
// NOTE(review): the 'Name.substr(4));' argument line of getString and the
// trailing 'return GV;' appear to have been lost in this excerpt.
llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
    QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);

  // We know that the mangled name of the type starts at index 4 of the
  // mangled name of the typename, so we can just index into it in order to
  // get the mangled name of the type.
  llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
  llvm::GlobalVariable *GV =
      CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);

  GV->setInitializer(Init);
// Return a reference to the externally-defined RTTI descriptor (_ZTI*) for
// Ty, creating an external declaration if this module has none yet.
// NOTE(review): the 'llvm::Constant *' return-type line, the 'if (!GV)'
// guard, and some GlobalVariable constructor argument lines appear to have
// been lost in this excerpt.
ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
  // Mangle the RTTI name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  // Look for an existing global.
  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);

  // Create a new global variable.
  // Note for the future: If we would ever like to do deferred emission of
  // RTTI, check if emitting vtables opportunistically need any adjustment.
  GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
                                llvm::GlobalValue::ExternalLinkage, nullptr,
  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
    // Import the descriptor if the class itself is dllimport.
    if (RD->hasAttr<DLLImportAttr>())
      GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
/// info for that type is defined in the standard library.
/// NOTE(review): the 'return true;'/'return false;' statements that close
/// each case group appear to have been lost in this excerpt.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
  // Itanium C++ ABI 2.9.2:
  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
  //   the run-time support library. Specifically, the run-time support
  //   library should contain type_info objects for the types X, X* and
  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
  //   long, unsigned long, long long, unsigned long long, float, double,
  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
  //   half-precision floating point types.
  //
  // GCC also emits RTTI for __int128.
  // FIXME: We do not emit RTTI information for decimal types here.
  //
  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
  switch (Ty->getKind()) {
  // Fundamental types covered by the runtime library.
  case BuiltinType::Void:
  case BuiltinType::NullPtr:
  case BuiltinType::Bool:
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
  case BuiltinType::Char_U:
  case BuiltinType::Char_S:
  case BuiltinType::UChar:
  case BuiltinType::SChar:
  case BuiltinType::Short:
  case BuiltinType::UShort:
  case BuiltinType::Int:
  case BuiltinType::UInt:
  case BuiltinType::Long:
  case BuiltinType::ULong:
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
  case BuiltinType::Half:
  case BuiltinType::Float:
  case BuiltinType::Double:
  case BuiltinType::LongDouble:
  case BuiltinType::Float128:
  case BuiltinType::Char16:
  case BuiltinType::Char32:
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
  // OpenCL builtin types, expanded from the .def file.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
  case BuiltinType::OCLSampler:
  case BuiltinType::OCLEvent:
  case BuiltinType::OCLClkEvent:
  case BuiltinType::OCLQueue:
  case BuiltinType::OCLReserveID:
  // Placeholder types must never reach RTTI emission.
  case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
    llvm_unreachable("asking for RRTI for a placeholder type!");

  case BuiltinType::ObjCId:
  case BuiltinType::ObjCClass:
  case BuiltinType::ObjCSel:
    llvm_unreachable("FIXME: Objective-C types are unsupported!");

  llvm_unreachable("Invalid BuiltinType Kind!");
/// TypeInfoIsInStandardLibrary - Pointer overload: the runtime library also
/// provides type_info for X* and X const* when X is a fundamental type with
/// no other qualifiers (Itanium C++ ABI 2.9.2).
/// NOTE(review): the listing elides the early `return false;` checks here
/// (non-builtin pointee at 2687-2688, leftover qualifiers at 2693-2696) —
/// confirm against the full source.
2684 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
2685 QualType PointeeTy = PointerTy->getPointeeType();
2686 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
2690 // Check the qualifiers.
// `const` is dropped because "X const*" is explicitly covered by the
// runtime library; any remaining qualifier disqualifies the type.
2691 Qualifiers Quals = PointeeTy.getQualifiers();
2692 Quals.removeConst();
// Defer to the builtin-type overload for the unqualified pointee.
2697 return TypeInfoIsInStandardLibrary(BuiltinTy);
2700 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
2701 /// information for the given type exists in the standard library.
/// NOTE(review): the listing elides the trailing `return false;` and closing
/// brace (original lines 2711-2713) — confirm against the full source.
2702 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
2703 // Type info for builtin types is defined in the standard library.
2704 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
2705 return TypeInfoIsInStandardLibrary(BuiltinTy);
2707 // Type info for some pointer types to builtin types is defined in the
2708 // standard library.
2709 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2710 return TypeInfoIsInStandardLibrary(PointerTy);
2715 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
2716 /// the given type exists somewhere else, and that we should not emit the type
2717 /// information in this translation unit. Assumes that it is not a
2718 /// standard-library type.
/// NOTE(review): the second parameter line (original 2720, presumably
/// `QualType Ty`) and several `return` lines inside the RecordType branch are
/// elided from this listing — confirm against the full source.
2719 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
2721 ASTContext &Context = CGM.getContext();
2723 // If RTTI is disabled, assume it might be disabled in the
2724 // translation unit that defines any potential key function, too.
2725 if (!Context.getLangOpts().RTTI) return false;
2727 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2728 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
// Without a definition we cannot tell where the vtable (and hence the
// RTTI) will be emitted.
2729 if (!RD->hasDefinition())
2732 if (!RD->isDynamicClass())
2735 // FIXME: this may need to be reconsidered if the key function
2737 // N.B. We must always emit the RTTI data ourselves if there exists a key
// dllimport'd classes on Windows-Itanium do not export their RTTI, so it
// must be emitted locally even when the vtable is external.
2739 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
2740 if (CGM.getVTables().isVTableExternal(RD))
2741 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
2752 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
2753 static bool IsIncompleteClassType(const RecordType *RecordTy) {
// A record is "incomplete" here iff no complete definition is visible.
2754 return !RecordTy->getDecl()->isCompleteDefinition();
2757 /// ContainsIncompleteClassType - Returns whether the given type contains an
2758 /// incomplete class type. This is true if
2760 /// * The given type is an incomplete class type.
2761 /// * The given type is a pointer type whose pointee type contains an
2762 /// incomplete class type.
2763 /// * The given type is a member pointer type whose class is an incomplete
2765 /// * The given type is a member pointer type whose pointee type contains an
2766 /// incomplete class type.
2767 /// is an indirect or direct pointer to an incomplete class type.
2768 static bool ContainsIncompleteClassType(QualType Ty) {
2769 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2770 if (IsIncompleteClassType(RecordTy))
// Recurse through pointers: T** to an incomplete T still counts.
2774 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2775 return ContainsIncompleteClassType(PointerTy->getPointeeType());
2777 if (const MemberPointerType *MemberPointerTy =
2778 dyn_cast<MemberPointerType>(Ty)) {
2779 // Check if the class type is incomplete.
2780 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
2781 if (IsIncompleteClassType(ClassType))
// Also check the member's pointee type itself.
2784 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
2790 // CanUseSingleInheritance - Return whether the given record decl has a "single,
2791 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
2792 // iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
// When this holds the class can use abi::__si_class_type_info instead of the
// heavier abi::__vmi_class_type_info.
2793 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
2794 // Check the number of bases.
2795 if (RD->getNumBases() != 1)
2799 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
2801 // Check that the base is not virtual.
2802 if (Base->isVirtual())
2805 // Check that the base is public.
2806 if (Base->getAccessSpecifier() != AS_public)
2809 // Check that the class is dynamic iff the base is.
// An empty base may be at offset zero even when the dynamic-ness differs,
// so the dynamic-class check is only applied to non-empty bases.
2810 const CXXRecordDecl *BaseDecl =
2811 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
2812 if (!BaseDecl->isEmpty() &&
2813 BaseDecl->isDynamicClass() != RD->isDynamicClass())
/// BuildVTablePointer - Push the type_info vtable pointer (the first field of
/// every Itanium RTTI object) onto Fields. The vtable chosen names which
/// abi::*_type_info class the runtime should treat this descriptor as.
/// NOTE(review): several `break;` lines, the `case Type::Pointer:` /
/// `case Type::Enum:` / `case Type::Vector:` / `case Type::Complex:` /
/// `case Type::Pipe:` / `case Type::Auto:` labels and some `else` lines are
/// elided from this listing — confirm against the full source.
2819 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
2820 // abi::__class_type_info.
2821 static const char * const ClassTypeInfo =
2822 "_ZTVN10__cxxabiv117__class_type_infoE";
2823 // abi::__si_class_type_info.
2824 static const char * const SIClassTypeInfo =
2825 "_ZTVN10__cxxabiv120__si_class_type_infoE";
2826 // abi::__vmi_class_type_info.
2827 static const char * const VMIClassTypeInfo =
2828 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
2830 const char *VTableName = nullptr;
2832 switch (Ty->getTypeClass()) {
2833 #define TYPE(Class, Base)
2834 #define ABSTRACT_TYPE(Class, Base)
2835 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
2836 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
2837 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2838 #include "clang/AST/TypeNodes.def"
2839 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
2841 case Type::LValueReference:
2842 case Type::RValueReference:
2843 llvm_unreachable("References shouldn't get here");
2846 case Type::DeducedTemplateSpecialization:
2847 llvm_unreachable("Undeduced type shouldn't get here");
2850 llvm_unreachable("Pipe types shouldn't get here");
2853 // GCC treats vector and complex types as fundamental types.
2855 case Type::ExtVector:
2858 // FIXME: GCC treats block pointers as fundamental types?!
2859 case Type::BlockPointer:
2860 // abi::__fundamental_type_info.
2861 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
2864 case Type::ConstantArray:
2865 case Type::IncompleteArray:
2866 case Type::VariableArray:
2867 // abi::__array_type_info.
2868 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
2871 case Type::FunctionNoProto:
2872 case Type::FunctionProto:
2873 // abi::__function_type_info.
2874 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
2878 // abi::__enum_type_info.
2879 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
2882 case Type::Record: {
2883 const CXXRecordDecl *RD =
2884 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
// No bases (or no definition) => plain __class_type_info; one qualifying
// base => __si_class_type_info; otherwise __vmi_class_type_info.
2886 if (!RD->hasDefinition() || !RD->getNumBases()) {
2887 VTableName = ClassTypeInfo;
2888 } else if (CanUseSingleInheritance(RD)) {
2889 VTableName = SIClassTypeInfo;
2891 VTableName = VMIClassTypeInfo;
2897 case Type::ObjCObject:
2898 // Ignore protocol qualifiers.
2899 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
2901 // Handle id and Class.
2902 if (isa<BuiltinType>(Ty)) {
2903 VTableName = ClassTypeInfo;
2907 assert(isa<ObjCInterfaceType>(Ty));
// Fall through to the ObjCInterface handling below (per full source).
2910 case Type::ObjCInterface:
2911 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
2912 VTableName = SIClassTypeInfo;
2914 VTableName = ClassTypeInfo;
2918 case Type::ObjCObjectPointer:
2920 // abi::__pointer_type_info.
2921 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
2924 case Type::MemberPointer:
2925 // abi::__pointer_to_member_type_info.
2926 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
2930 llvm::Constant *VTable =
2931 CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
2933 llvm::Type *PtrDiffTy =
2934 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
2936 // The vtable address point is 2.
// Skip past the offset-to-top and RTTI slots so the field points at the
// first virtual function slot, as the ABI requires.
2937 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
2939 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
2940 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
2942 Fields.push_back(VTable);
2945 /// \brief Return the linkage that the type info and type info name constants
2946 /// should have for the given type.
/// NOTE(review): the `QualType Ty` parameter line (original 2948), the
/// `case NoLinkage:`/`case ModuleLinkage:` labels and some `return` lines
/// are elided from this listing — confirm against the full source.
2947 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
2949 // Itanium C++ ABI 2.9.5p7:
2950 // In addition, it and all of the intermediate abi::__pointer_type_info
2951 // structs in the chain down to the abi::__class_type_info for the
2952 // incomplete class type must be prevented from resolving to the
2953 // corresponding type_info structs for the complete class type, possibly
2954 // by making them local static objects. Finally, a dummy class RTTI is
2955 // generated for the incomplete type that will not resolve to the final
2956 // complete class RTTI (because the latter need not exist), possibly by
2957 // making it a local static object.
2958 if (ContainsIncompleteClassType(Ty))
2959 return llvm::GlobalValue::InternalLinkage;
2961 switch (Ty->getLinkage()) {
2963 case InternalLinkage:
2964 case UniqueExternalLinkage:
2965 return llvm::GlobalValue::InternalLinkage;
2967 case VisibleNoLinkage:
2968 case ModuleInternalLinkage:
2970 case ExternalLinkage:
2971 // RTTI is not enabled, which means that this type info struct is going
2972 // to be used for exception handling. Give it linkonce_odr linkage.
2973 if (!CGM.getLangOpts().RTTI)
2974 return llvm::GlobalValue::LinkOnceODRLinkage;
2976 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
2977 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
2978 if (RD->hasAttr<WeakAttr>())
2979 return llvm::GlobalValue::WeakODRLinkage;
2980 if (CGM.getTriple().isWindowsItaniumEnvironment())
2981 if (RD->hasAttr<DLLImportAttr>() &&
2982 ShouldUseExternalRTTIDescriptor(CGM, Ty))
2983 return llvm::GlobalValue::ExternalLinkage;
2984 if (RD->isDynamicClass()) {
// Dynamic classes inherit the linkage of their vtable, except that
// MinGW dllimport'd RTTI must be re-emitted locally (see below).
2985 llvm::GlobalValue::LinkageTypes LT = CGM.getVTableLinkage(RD);
2986 // MinGW won't export the RTTI information when there is a key function.
2987 // Make sure we emit our own copy instead of attempting to dllimport it.
2988 if (RD->hasAttr<DLLImportAttr>() &&
2989 llvm::GlobalValue::isAvailableExternallyLinkage(LT))
2990 LT = llvm::GlobalValue::LinkOnceODRLinkage;
2995 return llvm::GlobalValue::LinkOnceODRLinkage;
2998 llvm_unreachable("Invalid linkage!");
/// BuildTypeInfo - Build (or reuse) the std::type_info object for \p Ty and
/// return it as an i8*. \p Force forces local emission even when an external
/// descriptor would normally be used; \p DLLExport (parameter line elided in
/// this listing, original 3002 — TODO confirm) controls DLL storage on
/// Windows-Itanium.
/// NOTE(review): several `break;`, `else`, and `case` label lines are elided
/// from this numbered listing — confirm against the full source before
/// modifying control flow.
3001 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force,
3003 // We want to operate on the canonical type.
3004 Ty = Ty.getCanonicalType();
3006 // Check if we've already emitted an RTTI descriptor for this type.
3007 SmallString<256> Name;
3008 llvm::raw_svector_ostream Out(Name);
3009 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3011 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3012 if (OldGV && !OldGV->isDeclaration()) {
3013 assert(!OldGV->hasAvailableExternallyLinkage() &&
3014 "available_externally typeinfos not yet implemented");
3016 return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3019 // Check if there is already an external RTTI descriptor for this type.
3020 bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
3021 if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
3022 return GetAddrOfExternalRTTIDescriptor(Ty);
3024 // Emit the standard library with external linkage.
3025 llvm::GlobalVariable::LinkageTypes Linkage;
3027 Linkage = llvm::GlobalValue::ExternalLinkage;
3029 Linkage = getTypeInfoLinkage(CGM, Ty);
3031 // Add the vtable pointer.
3032 BuildVTablePointer(cast<Type>(Ty));
// Second field of every type_info: the mangled type name string.
3035 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3036 llvm::Constant *TypeNameField;
3038 // If we're supposed to demote the visibility, be sure to set a flag
3039 // to use a string comparison for type_info comparisons.
3040 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3041 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3042 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3043 // The flag is the sign bit, which on ARM64 is defined to be clear
3044 // for global pointers. This is very ARM64-specific.
3045 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3046 llvm::Constant *flag =
3047 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3048 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3050 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3052 TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3054 Fields.push_back(TypeNameField);
// Append the per-kind extra fields (base lists, pointee type_info, etc.).
3056 switch (Ty->getTypeClass()) {
3057 #define TYPE(Class, Base)
3058 #define ABSTRACT_TYPE(Class, Base)
3059 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3060 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3061 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3062 #include "clang/AST/TypeNodes.def"
3063 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3065 // GCC treats vector types as fundamental types.
3068 case Type::ExtVector:
3070 case Type::BlockPointer:
3071 // Itanium C++ ABI 2.9.5p4:
3072 // abi::__fundamental_type_info adds no data members to std::type_info.
3075 case Type::LValueReference:
3076 case Type::RValueReference:
3077 llvm_unreachable("References shouldn't get here");
3080 case Type::DeducedTemplateSpecialization:
3081 llvm_unreachable("Undeduced type shouldn't get here");
3084 llvm_unreachable("Pipe type shouldn't get here");
3086 case Type::ConstantArray:
3087 case Type::IncompleteArray:
3088 case Type::VariableArray:
3089 // Itanium C++ ABI 2.9.5p5:
3090 // abi::__array_type_info adds no data members to std::type_info.
3093 case Type::FunctionNoProto:
3094 case Type::FunctionProto:
3095 // Itanium C++ ABI 2.9.5p5:
3096 // abi::__function_type_info adds no data members to std::type_info.
3100 // Itanium C++ ABI 2.9.5p5:
3101 // abi::__enum_type_info adds no data members to std::type_info.
3104 case Type::Record: {
3105 const CXXRecordDecl *RD =
3106 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3107 if (!RD->hasDefinition() || !RD->getNumBases()) {
3108 // We don't need to emit any fields.
3112 if (CanUseSingleInheritance(RD))
3113 BuildSIClassTypeInfo(RD);
3115 BuildVMIClassTypeInfo(RD);
3120 case Type::ObjCObject:
3121 case Type::ObjCInterface:
3122 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3125 case Type::ObjCObjectPointer:
3126 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3130 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3133 case Type::MemberPointer:
3134 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3138 // No fields, at least for the moment.
3142 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3144 llvm::Module &M = CGM.getModule();
3145 llvm::GlobalVariable *GV =
3146 new llvm::GlobalVariable(M, Init->getType(),
3147 /*Constant=*/true, Linkage, Init, Name);
3149 // If there's already an old global variable, replace it with the new one.
3151 GV->takeName(OldGV);
3152 llvm::Constant *NewPtr =
3153 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3154 OldGV->replaceAllUsesWith(NewPtr);
3155 OldGV->eraseFromParent();
// Weak definitions get a COMDAT so duplicate copies merge at link time.
3158 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3159 GV->setComdat(M.getOrInsertComdat(GV->getName()));
3161 // The Itanium ABI specifies that type_info objects must be globally
3162 // unique, with one exception: if the type is an incomplete class
3163 // type or a (possibly indirect) pointer to one. That exception
3164 // affects the general case of comparing type_info objects produced
3165 // by the typeid operator, which is why the comparison operators on
3166 // std::type_info generally use the type_info name pointers instead
3167 // of the object addresses. However, the language's built-in uses
3168 // of RTTI generally require class types to be complete, even when
3169 // manipulating pointers to those class types. This allows the
3170 // implementation of dynamic_cast to rely on address equality tests,
3171 // which is much faster.
3173 // All of this is to say that it's important that both the type_info
3174 // object and the type_info name be uniqued when weakly emitted.
3176 // Give the type_info object and name the formal visibility of the
3178 llvm::GlobalValue::VisibilityTypes llvmVisibility;
3179 if (llvm::GlobalValue::isLocalLinkage(Linkage))
3180 // If the linkage is local, only default visibility makes sense.
3181 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3182 else if (RTTIUniqueness == ItaniumCXXABI::RUK_NonUniqueHidden)
3183 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3185 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3187 TypeName->setVisibility(llvmVisibility);
3188 GV->setVisibility(llvmVisibility);
3190 if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3191 auto RD = Ty->getAsCXXRecordDecl();
3192 if (DLLExport || (RD && RD->hasAttr<DLLExportAttr>())) {
3193 TypeName->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
3194 GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
3195 } else if (RD && RD->hasAttr<DLLImportAttr>() &&
3196 ShouldUseExternalRTTIDescriptor(CGM, Ty)) {
3197 TypeName->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
3198 GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
3200 // Because the typename and the typeinfo are DLL import, convert them to
3201 // declarations rather than definitions. The initializers still need to
3202 // be constructed to calculate the type for the declarations.
3203 TypeName->setInitializer(nullptr);
3204 GV->setInitializer(nullptr);
3208 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3211 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3212 /// for the given Objective-C object type.
/// NOTE(review): the early `return` for root classes (after line 3225) is
/// elided from this listing — confirm against the full source.
3213 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3215 const Type *T = OT->getBaseType().getTypePtr();
3216 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3218 // The builtin types are abi::__class_type_infos and don't require
3220 if (isa<BuiltinType>(T)) return;
3222 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3223 ObjCInterfaceDecl *Super = Class->getSuperClass();
3225 // Root classes are also __class_type_info.
3228 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3230 // Everything else is single inheritance.
// __si_class_type_info layout: one extra field pointing at the base's
// type_info.
3231 llvm::Constant *BaseTypeInfo =
3232 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3233 Fields.push_back(BaseTypeInfo);
3236 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3237 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3238 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3239 // Itanium C++ ABI 2.9.5p6b:
3240 // It adds to abi::__class_type_info a single member pointing to the
3241 // type_info structure for the base type,
// RD is known to have exactly one base here (see CanUseSingleInheritance).
3242 llvm::Constant *BaseTypeInfo =
3243 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3244 Fields.push_back(BaseTypeInfo);
3248 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3249 /// a class hierarchy.
// NOTE(review): the `struct SeenBases {` opener and closing `};` (original
// lines 3250/3253) are elided from this listing — these two members are the
// struct's entire state. Used by ComputeVMIClassTypeInfoFlags to detect
// repeated and diamond-shaped inheritance.
3251 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3252 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3256 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3257 /// abi::__vmi_class_type_info.
/// Recursive helper: walks one base specifier (and its own bases), recording
/// each base in \p Bases and OR-ing in VMI_DiamondShaped / VMI_NonDiamondRepeat
/// as repeats are discovered.
/// NOTE(review): the `SeenBases &Bases` parameter line, `unsigned Flags = 0;`
/// initialization and final `return Flags;` are elided from this listing —
/// confirm against the full source.
3259 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3264 const CXXRecordDecl *BaseDecl =
3265 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
3267 if (Base->isVirtual()) {
3268 // Mark the virtual base as seen.
3269 if (!Bases.VirtualBases.insert(BaseDecl).second) {
3270 // If this virtual base has been seen before, then the class is diamond
3272 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
// Same decl seen both virtually and non-virtually => non-diamond repeat.
3274 if (Bases.NonVirtualBases.count(BaseDecl))
3275 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3278 // Mark the non-virtual base as seen.
3279 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3280 // If this non-virtual base has been seen before, then the class has non-
3281 // diamond shaped repeated inheritance.
3282 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3284 if (Bases.VirtualBases.count(BaseDecl))
3285 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
// Recurse: the flags describe both direct and indirect bases.
3290 for (const auto &I : BaseDecl->bases())
3291 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
/// Entry point: compute the __vmi_class_type_info __flags word for \p RD by
/// folding the per-base helper over all direct bases with a fresh SeenBases.
/// NOTE(review): the `SeenBases Bases;` / `unsigned Flags = 0;` declarations
/// and final `return Flags;` are elided from this listing.
3296 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3301 for (const auto &I : RD->bases())
3302 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3307 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3308 /// classes with bases that do not satisfy the abi::__si_class_type_info
3309 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
/// Appends: __flags, __base_count, then one (__base_type, __offset_flags)
/// pair per direct base.
3310 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3311 llvm::Type *UnsignedIntLTy =
3312 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3314 // Itanium C++ ABI 2.9.5p6c:
3315 // __flags is a word with flags describing details about the class
3316 // structure, which may be referenced by using the __flags_masks
3317 // enumeration. These flags refer to both direct and indirect bases.
3318 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3319 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3321 // Itanium C++ ABI 2.9.5p6c:
3322 // __base_count is a word with the number of direct proper base class
3323 // descriptions that follow.
3324 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
// Nothing more to emit for a base-less class.
3326 if (!RD->getNumBases())
3329 // Now add the base class descriptions.
3331 // Itanium C++ ABI 2.9.5p6c:
3332 // __base_info[] is an array of base class descriptions -- one for every
3333 // direct proper base. Each description is of the type:
3335 // struct abi::__base_class_type_info {
3337 // const __class_type_info *__base_type;
3338 // long __offset_flags;
3340 // enum __offset_flags_masks {
3341 // __virtual_mask = 0x1,
3342 // __public_mask = 0x2,
3343 // __offset_shift = 8
3347 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
3348 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
3350 // FIXME: Consider updating libc++abi to match, and extend this logic to all
3352 QualType OffsetFlagsTy = CGM.getContext().LongTy;
3353 const TargetInfo &TI = CGM.getContext().getTargetInfo();
3354 if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
3355 OffsetFlagsTy = CGM.getContext().LongLongTy;
3356 llvm::Type *OffsetFlagsLTy =
3357 CGM.getTypes().ConvertType(OffsetFlagsTy);
3359 for (const auto &Base : RD->bases()) {
3360 // The __base_type member points to the RTTI for the base type.
3361 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3363 const CXXRecordDecl *BaseDecl =
3364 cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
3366 int64_t OffsetFlags = 0;
3368 // All but the lower 8 bits of __offset_flags are a signed offset.
3369 // For a non-virtual base, this is the offset in the object of the base
3370 // subobject. For a virtual base, this is the offset in the virtual table of
3371 // the virtual base offset for the virtual base referenced (negative).
// NOTE(review): the `CharUnits Offset;` declaration and the `else {`
// branch line are elided from this listing (margin numbers jump).
3373 if (Base.isVirtual())
3375 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3377 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3378 Offset = Layout.getBaseClassOffset(BaseDecl);
3381 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3383 // The low-order byte of __offset_flags contains flags, as given by the
3384 // masks from the enumeration __offset_flags_masks.
3385 if (Base.isVirtual())
3386 OffsetFlags |= BCTI_Virtual;
3387 if (Base.getAccessSpecifier() == AS_public)
3388 OffsetFlags |= BCTI_Public;
3390 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
3394 /// Compute the flags for a __pbase_type_info, and remove the corresponding
3395 /// pieces from \p Type.
/// \returns the ItaniumRTTIBuilder::PTI_* flag word; \p Type is rewritten to
/// the unqualified (and, for noexcept function types, exception-spec-free)
/// form that the pointee type_info should actually describe.
3396 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
3399 if (Type.isConstQualified())
3400 Flags |= ItaniumRTTIBuilder::PTI_Const;
3401 if (Type.isVolatileQualified())
3402 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3403 if (Type.isRestrictQualified())
3404 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
3405 Type = Type.getUnqualifiedType();
3407 // Itanium C++ ABI 2.9.5p7:
3408 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
3409 // incomplete class type, the incomplete target type flag is set.
3410 if (ContainsIncompleteClassType(Type))
3411 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
3413 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
3414 if (Proto->isNothrow(Ctx)) {
// Record noexcept in the flags, then strip it from the type so the
// pointee type_info matches the non-noexcept function type.
3415 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
3416 Type = Ctx.getFunctionType(
3417 Proto->getReturnType(), Proto->getParamTypes(),
3418 Proto->getExtProtoInfo().withExceptionSpec(EST_None));
3425 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
3426 /// used for pointer types.
/// Appends the __flags word and the __pointee type_info pointer.
3427 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
3428 // Itanium C++ ABI 2.9.5p7:
3429 // __flags is a flag word describing the cv-qualification and other
3430 // attributes of the type pointed to
// Note: extractPBaseFlags also strips qualifiers/noexcept from PointeeTy.
3431 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3433 llvm::Type *UnsignedIntLTy =
3434 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3435 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3437 // Itanium C++ ABI 2.9.5p7:
3438 // __pointee is a pointer to the std::type_info derivation for the
3439 // unqualified type being pointed to.
3440 llvm::Constant *PointeeTypeInfo =
3441 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3442 Fields.push_back(PointeeTypeInfo);
3445 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3446 /// struct, used for member pointer types.
/// Appends __flags, __pointee, and __context (the containing class's RTTI).
/// NOTE(review): the `void` return-type line of the definition (original
/// 3447) is elided from this listing.
3448 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
3449 QualType PointeeTy = Ty->getPointeeType();
3451 // Itanium C++ ABI 2.9.5p7:
3452 // __flags is a flag word describing the cv-qualification and other
3453 // attributes of the type pointed to.
3454 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
// An incomplete containing class gets its own dedicated flag, distinct
// from PTI_Incomplete on the pointee.
3456 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
3457 if (IsIncompleteClassType(ClassType))
3458 Flags |= PTI_ContainingClassIncomplete;
3460 llvm::Type *UnsignedIntLTy =
3461 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3462 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3464 // Itanium C++ ABI 2.9.5p7:
3465 // __pointee is a pointer to the std::type_info derivation for the
3466 // unqualified type being pointed to.
3467 llvm::Constant *PointeeTypeInfo =
3468 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3469 Fields.push_back(PointeeTypeInfo);
3471 // Itanium C++ ABI 2.9.5p9:
3472 // __context is a pointer to an abi::__class_type_info corresponding to the
3473 // class type containing the member pointed to
3474 // (e.g., the "A" in "int A::*").
3476 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
/// getAddrOfRTTIDescriptor - Public CGCXXABI entry point: build (or reuse)
/// the RTTI descriptor for \p Ty via a fresh ItaniumRTTIBuilder.
3479 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
3480 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
/// EmitFundamentalRTTIDescriptor - Force-emit the type_info for a fundamental
/// type \p Type plus its `Type*` and `const Type*` variants, as the Itanium
/// ABI requires of the runtime support library (see 2.9.2).
/// NOTE(review): the `bool DLLExport` parameter line (original 3484) and the
/// trailing `DLLExport);` argument lines are elided from this listing.
3483 void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type,
3485 QualType PointerType = getContext().getPointerType(Type);
3486 QualType PointerTypeConst = getContext().getPointerType(Type.withConst());
3487 ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, /*Force=*/true, DLLExport);
3488 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, /*Force=*/true,
3490 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, /*Force=*/true,
/// EmitFundamentalRTTIDescriptors - Emit RTTI for every fundamental type the
/// runtime library is expected to provide. This list must stay in sync with
/// TypeInfoIsInStandardLibrary above.
3494 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(bool DLLExport) {
3495 // Types added here must also be added to TypeInfoIsInStandardLibrary.
3496 QualType FundamentalTypes[] = {
3497 getContext().VoidTy, getContext().NullPtrTy,
3498 getContext().BoolTy, getContext().WCharTy,
3499 getContext().CharTy, getContext().UnsignedCharTy,
3500 getContext().SignedCharTy, getContext().ShortTy,
3501 getContext().UnsignedShortTy, getContext().IntTy,
3502 getContext().UnsignedIntTy, getContext().LongTy,
3503 getContext().UnsignedLongTy, getContext().LongLongTy,
3504 getContext().UnsignedLongLongTy, getContext().Int128Ty,
3505 getContext().UnsignedInt128Ty, getContext().HalfTy,
3506 getContext().FloatTy, getContext().DoubleTy,
3507 getContext().LongDoubleTy, getContext().Float128Ty,
3508 getContext().Char16Ty, getContext().Char32Ty
3510 for (const QualType &FundamentalType : FundamentalTypes)
3511 EmitFundamentalRTTIDescriptor(FundamentalType, DLLExport);
3514 /// What sort of uniqueness rules should we use for the RTTI for the
/// given type? Returns RUK_Unique when address equality of type_info objects
/// is guaranteed; otherwise the runtime must fall back to name comparison,
/// and the returned kind selects the visibility used for the weak copies.
/// NOTE(review): the `return RUK_Unique;` lines after the early checks are
/// elided from this listing.
3516 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
3517 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
3518 if (shouldRTTIBeUnique())
3521 // It's only necessary for linkonce_odr or weak_odr linkage.
3522 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
3523 Linkage != llvm::GlobalValue::WeakODRLinkage)
3526 // It's only necessary with default visibility.
3527 if (CanTy->getVisibility() != DefaultVisibility)
3530 // If we're not required to publish this symbol, hide it.
3531 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3532 return RUK_NonUniqueHidden;
3534 // If we're required to publish this symbol, as we might be under an
3535 // explicit instantiation, leave it with default visibility but
3536 // enable string-comparisons.
3537 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
3538 return RUK_NonUniqueVisible;
3541 // Find out how to codegen the complete destructor and constructor
// Emission strategies for the complete structor relative to the base one:
// Emit = separate body; RAUW = replace uses of one symbol with the other;
// Alias = emit a GlobalAlias; COMDAT = emit into a shared comdat group.
3543 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
/// getCodegenToUse - Decide how to emit the complete constructor/destructor
/// for \p MD relative to its base variant (see StructorCodegen).
3545 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
3546 const CXXMethodDecl *MD) {
3547 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
3548 return StructorCodegen::Emit;
3550 // The complete and base structors are not equivalent if there are any virtual
3551 // bases, so emit separate functions.
3552 if (MD->getParent()->getNumVBases())
3553 return StructorCodegen::Emit;
3555 GlobalDecl AliasDecl;
3556 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
3557 AliasDecl = GlobalDecl(DD, Dtor_Complete);
3559 const auto *CD = cast<CXXConstructorDecl>(MD);
3560 AliasDecl = GlobalDecl(CD, Ctor_Complete);
3562 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
// Discardable or alias-incompatible linkage: fall back to in-module RAUW.
3564 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
3565 return StructorCodegen::RAUW;
3567 // FIXME: Should we allow available_externally aliases?
3568 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
3569 return StructorCodegen::RAUW;
3571 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
3572 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
3573 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
3574 CGM.getTarget().getTriple().isOSBinFormatWasm())
3575 return StructorCodegen::COMDAT;
3576 return StructorCodegen::Emit;
3579 return StructorCodegen::Alias;
/// emitConstructorDestructorAlias - Emit \p AliasDecl as a GlobalAlias for
/// \p TargetDecl (complete structor aliasing the base structor), replacing
/// any existing declaration of the alias symbol.
/// NOTE(review): the `if (Entry) {` / `} else {` lines around the
/// takeName/RAUW sequence are elided from this listing — confirm against the
/// full source.
3582 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
3583 GlobalDecl AliasDecl,
3584 GlobalDecl TargetDecl) {
3585 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3587 StringRef MangledName = CGM.getMangledName(AliasDecl);
3588 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
// If a definition already exists there is nothing to do.
3589 if (Entry && !Entry->isDeclaration())
3592 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
3594 // Create the alias with no name.
3595 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
3597 // Switch any previous uses to the alias.
3599 assert(Entry->getType() == Aliasee->getType() &&
3600 "declaration exists with different type");
3601 Alias->takeName(Entry);
3602 Entry->replaceAllUsesWith(Alias);
3603 Entry->eraseFromParent();
3605 Alias->setName(MangledName);
3608 // Finally, set up the alias with its proper name and attributes.
3609 CGM.setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
3612 void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
3613 StructorType Type) {
3614 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
3615 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
3617 StructorCodegen CGType = getCodegenToUse(CGM, MD);
3619 if (Type == StructorType::Complete) {
3620 GlobalDecl CompleteDecl;
3621 GlobalDecl BaseDecl;
3623 CompleteDecl = GlobalDecl(CD, Ctor_Complete);
3624 BaseDecl = GlobalDecl(CD, Ctor_Base);
3626 CompleteDecl = GlobalDecl(DD, Dtor_Complete);
3627 BaseDecl = GlobalDecl(DD, Dtor_Base);
3630 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
3631 emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
3635 if (CGType == StructorCodegen::RAUW) {
3636 StringRef MangledName = CGM.getMangledName(CompleteDecl);
3637 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
3638 CGM.addReplacement(MangledName, Aliasee);
3643 // The base destructor is equivalent to the base destructor of its
3644 // base class if there is exactly one non-virtual base class with a
3645 // non-trivial destructor, there are no fields with a non-trivial
3646 // destructor, and the body of the destructor is trivial.
3647 if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
3648 !CGM.TryEmitBaseDestructorAsAlias(DD))
3651 llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);
3653 if (CGType == StructorCodegen::COMDAT) {
3654 SmallString<256> Buffer;
3655 llvm::raw_svector_ostream Out(Buffer);
3657 getMangleContext().mangleCXXDtorComdat(DD, Out);
3659 getMangleContext().mangleCXXCtorComdat(CD, Out);
3660 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
3663 CGM.maybeSetTrivialComdat(*MD, *Fn);
3667 static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) {
3668 // void *__cxa_begin_catch(void*);
3669 llvm::FunctionType *FTy = llvm::FunctionType::get(
3670 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3672 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
3675 static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
3676 // void __cxa_end_catch();
3677 llvm::FunctionType *FTy =
3678 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
3680 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
3683 static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
3684 // void *__cxa_get_exception_ptr(void*);
3685 llvm::FunctionType *FTy = llvm::FunctionType::get(
3686 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3688 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
3692 /// A cleanup to call __cxa_end_catch. In many cases, the caught
3693 /// exception type lets us state definitively that the thrown exception
3694 /// type does not have a destructor. In particular:
3695 /// - Catch-alls tell us nothing, so we have to conservatively
3696 /// assume that the thrown exception might have a destructor.
3697 /// - Catches by reference behave according to their base types.
3698 /// - Catches of non-record types will only trigger for exceptions
3699 /// of non-record types, which never have destructors.
3700 /// - Catches of record types can trigger for arbitrary subclasses
3701 /// of the caught type, so we have to assume the actual thrown
3702 /// exception type might have a throwing destructor, even if the
3703 /// caught type's destructor is trivial or nothrow.
3704 struct CallEndCatch final : EHScopeStack::Cleanup {
3705 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
3708 void Emit(CodeGenFunction &CGF, Flags flags) override {
3710 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
3714 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
3719 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
3720 /// __cxa_end_catch.
3722 /// \param EndMightThrow - true if __cxa_end_catch might throw
3723 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
3725 bool EndMightThrow) {
3726 llvm::CallInst *call =
3727 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
3729 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
3734 /// A "special initializer" callback for initializing a catch
3735 /// parameter during catch initialization.
3736 static void InitCatchParam(CodeGenFunction &CGF,
3737 const VarDecl &CatchParam,
3739 SourceLocation Loc) {
3740 // Load the exception from where the landing pad saved it.
3741 llvm::Value *Exn = CGF.getExceptionFromSlot();
3743 CanQualType CatchType =
3744 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
3745 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
3747 // If we're catching by reference, we can just cast the object
3748 // pointer to the appropriate pointer.
3749 if (isa<ReferenceType>(CatchType)) {
3750 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
3751 bool EndCatchMightThrow = CaughtType->isRecordType();
3753 // __cxa_begin_catch returns the adjusted object pointer.
3754 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
3756 // We have no way to tell the personality function that we're
3757 // catching by reference, so if we're catching a pointer,
3758 // __cxa_begin_catch will actually return that pointer by value.
3759 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
3760 QualType PointeeType = PT->getPointeeType();
3762 // When catching by reference, generally we should just ignore
3763 // this by-value pointer and use the exception object instead.
3764 if (!PointeeType->isRecordType()) {
3766 // Exn points to the struct _Unwind_Exception header, which
3767 // we have to skip past in order to reach the exception data.
3768 unsigned HeaderSize =
3769 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
3770 AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
3772 // However, if we're catching a pointer-to-record type that won't
3773 // work, because the personality function might have adjusted
3774 // the pointer. There's actually no way for us to fully satisfy
3775 // the language/ABI contract here: we can't use Exn because it
3776 // might have the wrong adjustment, but we can't use the by-value
3777 // pointer because it's off by a level of abstraction.
3779 // The current solution is to dump the adjusted pointer into an
3780 // alloca, which breaks language semantics (because changing the
3781 // pointer doesn't change the exception) but at least works.
3782 // The better solution would be to filter out non-exact matches
3783 // and rethrow them, but this is tricky because the rethrow
3784 // really needs to be catchable by other sites at this landing
3785 // pad. The best solution is to fix the personality function.
3787 // Pull the pointer for the reference type off.
3789 cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
3791 // Create the temporary and write the adjusted pointer into it.
3793 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
3794 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3795 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
3797 // Bind the reference to the temporary.
3798 AdjustedExn = ExnPtrTmp.getPointer();
3802 llvm::Value *ExnCast =
3803 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
3804 CGF.Builder.CreateStore(ExnCast, ParamAddr);
3808 // Scalars and complexes.
3809 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
3810 if (TEK != TEK_Aggregate) {
3811 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
3813 // If the catch type is a pointer type, __cxa_begin_catch returns
3814 // the pointer by value.
3815 if (CatchType->hasPointerRepresentation()) {
3816 llvm::Value *CastExn =
3817 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
3819 switch (CatchType.getQualifiers().getObjCLifetime()) {
3820 case Qualifiers::OCL_Strong:
3821 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
3824 case Qualifiers::OCL_None:
3825 case Qualifiers::OCL_ExplicitNone:
3826 case Qualifiers::OCL_Autoreleasing:
3827 CGF.Builder.CreateStore(CastExn, ParamAddr);
3830 case Qualifiers::OCL_Weak:
3831 CGF.EmitARCInitWeak(ParamAddr, CastExn);
3834 llvm_unreachable("bad ownership qualifier!");
3837 // Otherwise, it returns a pointer into the exception object.
3839 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3840 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3842 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
3843 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
3846 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
3850 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
3851 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
3855 llvm_unreachable("evaluation kind filtered out!");
3857 llvm_unreachable("bad evaluation kind");
3860 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
3861 auto catchRD = CatchType->getAsCXXRecordDecl();
3862 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
3864 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3866 // Check for a copy expression. If we don't have a copy expression,
3867 // that means a trivial copy is okay.
3868 const Expr *copyExpr = CatchParam.getInit();
3870 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
3871 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
3872 caughtExnAlignment);
3873 CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
3877 // We have to call __cxa_get_exception_ptr to get the adjusted
3878 // pointer before copying.
3879 llvm::CallInst *rawAdjustedExn =
3880 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
3882 // Cast that to the appropriate type.
3883 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
3884 caughtExnAlignment);
3886 // The copy expression is defined in terms of an OpaqueValueExpr.
3887 // Find it and map it to the adjusted expression.
3888 CodeGenFunction::OpaqueValueMapping
3889 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
3890 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
3892 // Call the copy ctor in a terminate scope.
3893 CGF.EHStack.pushTerminate();
3895 // Perform the copy construction.
3896 CGF.EmitAggExpr(copyExpr,
3897 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
3898 AggValueSlot::IsNotDestructed,
3899 AggValueSlot::DoesNotNeedGCBarriers,
3900 AggValueSlot::IsNotAliased));
3902 // Leave the terminate scope.
3903 CGF.EHStack.popTerminate();
3905 // Undo the opaque value mapping.
3908 // Finally we can call __cxa_begin_catch.
3909 CallBeginCatch(CGF, Exn, true);
3912 /// Begins a catch statement by initializing the catch variable and
3913 /// calling __cxa_begin_catch.
3914 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
3915 const CXXCatchStmt *S) {
3916 // We have to be very careful with the ordering of cleanups here:
3917 // C++ [except.throw]p4:
3918 // The destruction [of the exception temporary] occurs
3919 // immediately after the destruction of the object declared in
3920 // the exception-declaration in the handler.
3922 // So the precise ordering is:
3923 // 1. Construct catch variable.
3924 // 2. __cxa_begin_catch
3925 // 3. Enter __cxa_end_catch cleanup
3926 // 4. Enter dtor cleanup
3928 // We do this by using a slightly abnormal initialization process.
3929 // Delegation sequence:
3930 // - ExitCXXTryStmt opens a RunCleanupsScope
3931 // - EmitAutoVarAlloca creates the variable and debug info
3932 // - InitCatchParam initializes the variable from the exception
3933 // - CallBeginCatch calls __cxa_begin_catch
3934 // - CallBeginCatch enters the __cxa_end_catch cleanup
3935 // - EmitAutoVarCleanups enters the variable destructor cleanup
3936 // - EmitCXXTryStmt emits the code for the catch body
3937 // - EmitCXXTryStmt close the RunCleanupsScope
3939 VarDecl *CatchParam = S->getExceptionDecl();
3941 llvm::Value *Exn = CGF.getExceptionFromSlot();
3942 CallBeginCatch(CGF, Exn, true);
3947 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
3948 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
3949 CGF.EmitAutoVarCleanups(var);
3952 /// Get or define the following function:
3953 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
3954 /// This code is used only in C++.
3955 static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
3956 llvm::FunctionType *fnTy =
3957 llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3958 llvm::Constant *fnRef = CGM.CreateRuntimeFunction(
3959 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
3961 llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
3962 if (fn && fn->empty()) {
3963 fn->setDoesNotThrow();
3964 fn->setDoesNotReturn();
3966 // What we really want is to massively penalize inlining without
3967 // forbidding it completely. The difference between that and
3968 // 'noinline' is negligible.
3969 fn->addFnAttr(llvm::Attribute::NoInline);
3971 // Allow this function to be shared across translation units, but
3972 // we don't want it to turn into an exported symbol.
3973 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
3974 fn->setVisibility(llvm::Function::HiddenVisibility);
3975 if (CGM.supportsCOMDAT())
3976 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
3978 // Set up the function.
3979 llvm::BasicBlock *entry =
3980 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
3981 CGBuilderTy builder(CGM, entry);
3983 // Pull the exception pointer out of the parameter list.
3984 llvm::Value *exn = &*fn->arg_begin();
3986 // Call __cxa_begin_catch(exn).
3987 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
3988 catchCall->setDoesNotThrow();
3989 catchCall->setCallingConv(CGM.getRuntimeCC());
3991 // Call std::terminate().
3992 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
3993 termCall->setDoesNotThrow();
3994 termCall->setDoesNotReturn();
3995 termCall->setCallingConv(CGM.getRuntimeCC());
3997 // std::terminate cannot return.
3998 builder.CreateUnreachable();
4005 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4007 // In C++, we want to call __cxa_begin_catch() before terminating.
4009 assert(CGF.CGM.getLangOpts().CPlusPlus);
4010 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4012 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());