1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This provides C++ code generation targeting the Itanium C++ ABI. The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
12 // http://www.codesourcery.com/public/cxx-abi/abi.html
13 // http://www.codesourcery.com/public/cxx-abi/abi-eh.html
15 // It also supports the closely-related ARM ABI, documented at:
16 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
18 //===----------------------------------------------------------------------===//
21 #include "CGCleanup.h"
22 #include "CGRecordLayout.h"
23 #include "CGVTables.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenModule.h"
26 #include "TargetInfo.h"
27 #include "clang/CodeGen/ConstantInitBuilder.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/Type.h"
30 #include "clang/AST/StmtCXX.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/GlobalValue.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/IR/Value.h"
36 #include "llvm/Support/ScopedPrinter.h"
38 using namespace clang;
39 using namespace CodeGen;
42 class ItaniumCXXABI : public CodeGen::CGCXXABI {
// NOTE(review): this dump has lines elided (access specifiers, case labels,
// return statements, closing braces). Comments below annotate only what is
// visible; confirm the full structure against the complete file before editing.
43 /// VTables - All the vtables which have been defined.
44 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
// The two ARM flags opt into the closely-related ARM protocols for member
// pointers and static guard variables; Use32BitVTableOffsetABI is enabled
// separately (by the iOS64 subclass below).
47 bool UseARMMethodPtrABI;
48 bool UseARMGuardVarABI;
49 bool Use32BitVTableOffsetABI;
// Narrow the generic mangle context to the Itanium-specific subclass.
51 ItaniumMangleContext &getMangleContext() {
52 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
56 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
57 bool UseARMMethodPtrABI = false,
58 bool UseARMGuardVarABI = false) :
59 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
60 UseARMGuardVarABI(UseARMGuardVarABI),
61 Use32BitVTableOffsetABI(false) { }
63 bool classifyReturnType(CGFunctionInfo &FI) const override;
65 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
66 // If C++ prohibits us from making a copy, pass by address.
67 if (!RD->canPassInRegisters())
72 bool isThisCompleteObject(GlobalDecl GD) const override {
73 // The Itanium ABI has separate complete-object vs. base-object
74 // variants of both constructors and destructors.
75 if (isa<CXXDestructorDecl>(GD.getDecl())) {
76 switch (GD.getDtorType()) {
85 llvm_unreachable("emitting dtor comdat as function?");
87 llvm_unreachable("bad dtor kind");
89 if (isa<CXXConstructorDecl>(GD.getDecl())) {
90 switch (GD.getCtorType()) {
97 case Ctor_CopyingClosure:
98 case Ctor_DefaultClosure:
99 llvm_unreachable("closure ctors in Itanium ABI?");
102 llvm_unreachable("emitting ctor comdat as function?");
104 llvm_unreachable("bad dtor kind");
111 bool isZeroInitializable(const MemberPointerType *MPT) override;
113 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
116 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
119 llvm::Value *&ThisPtrForCall,
120 llvm::Value *MemFnPtr,
121 const MemberPointerType *MPT) override;
124 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
127 const MemberPointerType *MPT) override;
129 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
131 llvm::Value *Src) override;
132 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
133 llvm::Constant *Src) override;
135 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
137 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
138 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
139 CharUnits offset) override;
140 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
141 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
142 CharUnits ThisAdjustment);
144 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
145 llvm::Value *L, llvm::Value *R,
146 const MemberPointerType *MPT,
147 bool Inequality) override;
149 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
151 const MemberPointerType *MPT) override;
153 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
154 Address Ptr, QualType ElementType,
155 const CXXDestructorDecl *Dtor) override;
157 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
158 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
160 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
163 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
164 llvm::Value *Exn) override;
166 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
167 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
169 getAddrOfCXXCatchHandlerType(QualType Ty,
170 QualType CatchHandlerType) override {
171 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
174 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
175 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
176 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
178 llvm::Type *StdTypeInfoPtrTy) override;
180 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
181 QualType SrcRecordTy) override;
183 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
184 QualType SrcRecordTy, QualType DestTy,
185 QualType DestRecordTy,
186 llvm::BasicBlock *CastEnd) override;
188 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
189 QualType SrcRecordTy,
190 QualType DestTy) override;
192 bool EmitBadCastCall(CodeGenFunction &CGF) override;
195 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
196 const CXXRecordDecl *ClassDecl,
197 const CXXRecordDecl *BaseClassDecl) override;
199 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
202 buildStructorSignature(GlobalDecl GD,
203 SmallVectorImpl<CanQualType> &ArgTys) override;
205 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
206 CXXDtorType DT) const override {
207 // Itanium does not emit any destructor variant as an inline thunk.
208 // Delegating may occur as an optimization, but all variants are either
209 // emitted with external linkage or as linkonce if they are inline and used.
213 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
215 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
216 FunctionArgList &Params) override;
218 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
221 addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
222 CXXCtorType Type, bool ForVirtualBase,
223 bool Delegating, CallArgList &Args) override;
225 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
226 CXXDtorType Type, bool ForVirtualBase,
227 bool Delegating, Address This,
228 QualType ThisTy) override;
230 void emitVTableDefinitions(CodeGenVTables &CGVT,
231 const CXXRecordDecl *RD) override;
233 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
234 CodeGenFunction::VPtr Vptr) override;
236 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
241 getVTableAddressPoint(BaseSubobject Base,
242 const CXXRecordDecl *VTableClass) override;
244 llvm::Value *getVTableAddressPointInStructor(
245 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
246 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
248 llvm::Value *getVTableAddressPointInStructorWithVTT(
249 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
250 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
253 getVTableAddressPointForConstExpr(BaseSubobject Base,
254 const CXXRecordDecl *VTableClass) override;
256 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
257 CharUnits VPtrOffset) override;
259 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
260 Address This, llvm::Type *Ty,
261 SourceLocation Loc) override;
263 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
264 const CXXDestructorDecl *Dtor,
265 CXXDtorType DtorType, Address This,
266 DeleteOrMemberCallExpr E) override;
268 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
270 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
271 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
273 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
274 bool ReturnAdjustment) override {
275 // Allow inlining of thunks by emitting them with available_externally
276 // linkage together with vtables when needed.
277 if (ForVTable && !Thunk->hasLocalLinkage())
278 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
279 CGM.setGVProperties(Thunk, GD);
282 bool exportThunk() override { return true; }
284 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
285 const ThisAdjustment &TA) override;
287 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
288 const ReturnAdjustment &RA) override;
290 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
291 FunctionArgList &Args) const override {
292 assert(!Args.empty() && "expected the arglist to not be empty!");
293 return Args.size() - 1;
296 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
297 StringRef GetDeletedVirtualCallName() override
298 { return "__cxa_deleted_virtual"; }
300 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
301 Address InitializeArrayCookie(CodeGenFunction &CGF,
303 llvm::Value *NumElements,
304 const CXXNewExpr *expr,
305 QualType ElementType) override;
306 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
308 CharUnits cookieSize) override;
310 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
311 llvm::GlobalVariable *DeclPtr,
312 bool PerformInit) override;
313 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
314 llvm::FunctionCallee dtor,
315 llvm::Constant *addr) override;
317 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
319 void EmitThreadLocalInitFuncs(
321 ArrayRef<const VarDecl *> CXXThreadLocals,
322 ArrayRef<llvm::Function *> CXXThreadLocalInits,
323 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
325 bool usesThreadWrapperFunction() const override { return true; }
326 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
327 QualType LValType) override;
329 bool NeedsVTTParameter(GlobalDecl GD) override;
331 /**************************** RTTI Uniqueness ******************************/
334 /// Returns true if the ABI requires RTTI type_info objects to be unique
335 /// across a program.
336 virtual bool shouldRTTIBeUnique() const { return true; }
339 /// What sort of unique-RTTI behavior should we use?
340 enum RTTIUniquenessKind {
341 /// We are guaranteeing, or need to guarantee, that the RTTI string
345 /// We are not guaranteeing uniqueness for the RTTI string, so we
346 /// can demote to hidden visibility but must use string comparisons.
349 /// We are not guaranteeing uniqueness for the RTTI string, so we
350 /// have to use string comparisons, but we also have to emit it with
351 /// non-hidden visibility.
355 /// Return the required visibility status for the given type and linkage in
358 classifyRTTIUniqueness(QualType CanTy,
359 llvm::GlobalValue::LinkageTypes Linkage) const;
360 friend class ItaniumRTTIBuilder;
362 void emitCXXStructor(GlobalDecl GD) override;
364 std::pair<llvm::Value *, const CXXRecordDecl *>
365 LoadVTablePtr(CodeGenFunction &CGF, Address This,
366 const CXXRecordDecl *RD) override;
// Scans the vtable layout for a used, inlined virtual function whose
// definition has not yet been emitted in this module (GetGlobalValue finds
// nothing, or only a declaration). NOTE(review): the return statements are
// elided in this dump — confirm the true/false paths against the full file.
369 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
370 const auto &VtableLayout =
371 CGM.getItaniumVTableContext().getVTableLayout(RD);
373 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
375 if (!VtableComponent.isUsedFunctionPointerKind())
378 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
379 if (!Method->getCanonicalDecl()->isInlined())
382 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
383 auto *Entry = CGM.GetGlobalValue(Name);
384 // This checks if virtual inline function has already been emitted.
385 // Note that it is possible that this inline function would be emitted
386 // after trying to emit vtable speculatively. Because of this we do
387 // an extra pass after emitting all deferred vtables to find and emit
388 // these vtables opportunistically.
389 if (!Entry || Entry->isDeclaration())
// Checks whether the vtable references hidden-visibility symbols: hidden
// RTTI data, or a hidden virtual method with no definition. NOTE(review):
// returns are elided here as well.
395 bool isVTableHidden(const CXXRecordDecl *RD) const {
396 const auto &VtableLayout =
397 CGM.getItaniumVTableContext().getVTableLayout(RD);
399 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
400 if (VtableComponent.isRTTIKind()) {
401 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
402 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
404 } else if (VtableComponent.isUsedFunctionPointerKind()) {
405 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
406 if (Method->getVisibility() == Visibility::HiddenVisibility &&
407 !Method->isDefined())
415 class ARMCXXABI : public ItaniumCXXABI {
417 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
418 ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
419 /* UseARMGuardVarABI = */ true) {}
421 bool HasThisReturn(GlobalDecl GD) const override {
422 return (isa<CXXConstructorDecl>(GD.getDecl()) || (
423 isa<CXXDestructorDecl>(GD.getDecl()) &&
424 GD.getDtorType() != Dtor_Deleting));
427 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
428 QualType ResTy) override;
430 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
431 Address InitializeArrayCookie(CodeGenFunction &CGF,
433 llvm::Value *NumElements,
434 const CXXNewExpr *expr,
435 QualType ElementType) override;
436 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
437 CharUnits cookieSize) override;
440 class iOS64CXXABI : public ARMCXXABI {
442 iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
443 Use32BitVTableOffsetABI = true;
446 // ARM64 libraries are prepared for non-unique RTTI.
447 bool shouldRTTIBeUnique() const override { return false; }
450 class WebAssemblyCXXABI final : public ItaniumCXXABI {
452 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
453 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
454 /*UseARMGuardVarABI=*/true) {}
455 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
458 bool HasThisReturn(GlobalDecl GD) const override {
459 return isa<CXXConstructorDecl>(GD.getDecl()) ||
460 (isa<CXXDestructorDecl>(GD.getDecl()) &&
461 GD.getDtorType() != Dtor_Deleting);
463 bool canCallMismatchedFunctionType() const override { return false; }
467 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
468 switch (CGM.getTarget().getCXXABI().getKind()) {
469 // For IR-generation purposes, there's no significant difference
470 // between the ARM and iOS ABIs.
471 case TargetCXXABI::GenericARM:
472 case TargetCXXABI::iOS:
473 case TargetCXXABI::WatchOS:
474 return new ARMCXXABI(CGM);
476 case TargetCXXABI::iOS64:
477 return new iOS64CXXABI(CGM);
479 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
480 // include the other 32-bit ARM oddities: constructor/destructor return values
481 // and array cookies.
482 case TargetCXXABI::GenericAArch64:
483 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
484 /* UseARMGuardVarABI = */ true);
486 case TargetCXXABI::GenericMIPS:
487 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true);
489 case TargetCXXABI::WebAssembly:
490 return new WebAssemblyCXXABI(CGM);
492 case TargetCXXABI::GenericItanium:
493 if (CGM.getContext().getTargetInfo().getTriple().getArch()
494 == llvm::Triple::le32) {
495 // For PNaCl, use ARM-style method pointers so that PNaCl code
496 // does not assume anything about the alignment of function
498 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
499 /* UseARMGuardVarABI = */ false);
501 return new ItaniumCXXABI(CGM);
503 case TargetCXXABI::Microsoft:
504 llvm_unreachable("Microsoft ABI is not Itanium-based");
506 llvm_unreachable("bad ABI kind");
510 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
511 if (MPT->isMemberDataPointer())
512 return CGM.PtrDiffTy;
513 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
516 /// In the Itanium and ARM ABIs, method pointers have the form:
517 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
519 /// In the Itanium ABI:
520 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero
521 /// - the this-adjustment is (memptr.adj)
522 /// - the virtual offset is (memptr.ptr - 1)
///
/// In the ARM ABI:
525 /// - method pointers are virtual if (memptr.adj & 1) is nonzero
526 /// - the this-adjustment is (memptr.adj >> 1)
527 /// - the virtual offset is (memptr.ptr)
528 /// ARM uses 'adj' for the virtual flag because Thumb functions
529 /// may be only single-byte aligned.
531 /// If the member is virtual, the adjusted 'this' pointer points
532 /// to a vtable pointer from which the virtual offset is applied.
534 /// If the member is non-virtual, memptr.ptr is the address of
535 /// the function to call.
// NOTE(review): this dump has lines elided in this function (else branches,
// StaticData initializer entries, closing braces).  Comments below annotate
// only what is visible; confirm structure against the full file.
//
// Loads the callee for a call through a pointer-to-member-function: extracts
// ptr/adj from the {ptr, adj} pair, adjusts 'this', branches on the virtual
// flag, and on the virtual path indexes the vtable (with optional CFI
// type-test checks under -fsanitize=cfi-mfcall).
536 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
537 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
538 llvm::Value *&ThisPtrForCall,
539 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
540 CGBuilderTy &Builder = CGF.Builder;
542 const FunctionProtoType *FPT =
543 MPT->getPointeeType()->getAs<FunctionProtoType>();
544 const CXXRecordDecl *RD =
545 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
547 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
548 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
550 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
552 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
553 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
554 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
556 // Extract memptr.adj, which is in the second field.
557 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
559 // Compute the true adjustment.
// On ARM the adjustment is stored doubled (low bit is the virtual flag), so
// shift it back down.
560 llvm::Value *Adj = RawAdj;
561 if (UseARMMethodPtrABI)
562 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
564 // Apply the adjustment and cast back to the original struct type
// for consistency.
566 llvm::Value *This = ThisAddr.getPointer();
567 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
568 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
569 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
570 ThisPtrForCall = This;
572 // Load the function pointer.
573 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
575 // If the LSB in the function pointer is 1, the function pointer points to
576 // a virtual function.
577 llvm::Value *IsVirtual;
578 if (UseARMMethodPtrABI)
579 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
581 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
582 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
583 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
585 // In the virtual path, the adjustment left 'This' pointing to the
586 // vtable of the correct base subobject. The "function pointer" is an
587 // offset within the vtable (+1 for the virtual flag on non-ARM).
588 CGF.EmitBlock(FnVirtual);
590 // Cast the adjusted this to a pointer to vtable pointer and load.
591 llvm::Type *VTableTy = Builder.getInt8PtrTy();
592 CharUnits VTablePtrAlign =
593 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
594 CGF.getPointerAlign());
595 llvm::Value *VTable =
596 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
599 // On ARM64, to reserve extra space in virtual member function pointers,
600 // we only pay attention to the low 32 bits of the offset.
601 llvm::Value *VTableOffset = FnAsInt;
602 if (!UseARMMethodPtrABI)
603 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
604 if (Use32BitVTableOffsetABI) {
605 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
606 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
608 // Compute the address of the virtual function pointer.
609 llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
611 // Check the address of the function pointer if CFI on member function
612 // pointers is enabled.
613 llvm::Constant *CheckSourceLocation;
614 llvm::Constant *CheckTypeDesc;
615 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
616 CGM.HasHiddenLTOVisibility(RD);
617 if (ShouldEmitCFICheck) {
618 CodeGenFunction::SanitizerScope SanScope(&CGF);
620 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
621 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
622 llvm::Constant *StaticData[] = {
623 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
629 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
630 llvm::Value *TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
632 llvm::Value *TypeTest = Builder.CreateCall(
633 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VFPAddr, TypeId});
// Trap-mode CFI emits a trap check; otherwise emit the diagnostic handler
// path with the "all-vtables" validity test.
635 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
636 CGF.EmitTrapCheck(TypeTest);
638 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
639 CGM.getLLVMContext(),
640 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
641 llvm::Value *ValidVtable = Builder.CreateCall(
642 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
643 CGF.EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIMFCall),
644 SanitizerHandler::CFICheckFail, StaticData,
645 {VTable, ValidVtable});
648 FnVirtual = Builder.GetInsertBlock();
651 // Load the virtual function to call.
652 VFPAddr = Builder.CreateBitCast(VFPAddr, FTy->getPointerTo()->getPointerTo());
653 llvm::Value *VirtualFn = Builder.CreateAlignedLoad(
654 VFPAddr, CGF.getPointerAlign(), "memptr.virtualfn");
655 CGF.EmitBranch(FnEnd);
657 // In the non-virtual path, the function pointer is actually a
// function pointer.
659 CGF.EmitBlock(FnNonVirtual);
660 llvm::Value *NonVirtualFn =
661 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
663 // Check the function pointer if CFI on member function pointers is enabled.
664 if (ShouldEmitCFICheck) {
665 CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
666 if (RD->hasDefinition()) {
667 CodeGenFunction::SanitizerScope SanScope(&CGF);
669 llvm::Constant *StaticData[] = {
670 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
// The pointer is valid if it matches the member-pointer type for any of the
// most-derived base classes; OR the per-base type tests together.
675 llvm::Value *Bit = Builder.getFalse();
676 llvm::Value *CastedNonVirtualFn =
677 Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
678 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
679 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
680 getContext().getMemberPointerType(
681 MPT->getPointeeType(),
682 getContext().getRecordType(Base).getTypePtr()));
683 llvm::Value *TypeId =
684 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
686 llvm::Value *TypeTest =
687 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
688 {CastedNonVirtualFn, TypeId});
689 Bit = Builder.CreateOr(Bit, TypeTest);
692 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
693 SanitizerHandler::CFICheckFail, StaticData,
694 {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
696 FnNonVirtual = Builder.GetInsertBlock();
// We're done: merge the two paths with a PHI over the callee pointer.
701 CGF.EmitBlock(FnEnd);
702 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
703 CalleePtr->addIncoming(VirtualFn, FnVirtual);
704 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
706 CGCallee Callee(FPT, CalleePtr);
710 /// Compute an l-value by applying the given pointer-to-member to a
712 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
713 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
714 const MemberPointerType *MPT) {
715 assert(MemPtr->getType() == CGM.PtrDiffTy);
717 CGBuilderTy &Builder = CGF.Builder;
720 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
722 // Apply the offset, which we assume is non-null.
724 Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
726 // Cast the address to the appropriate pointer type, adopting the
727 // address space of the base pointer.
728 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
729 ->getPointerTo(Base.getAddressSpace());
730 return Builder.CreateBitCast(Addr, PType);
733 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
736 /// Bitcast conversions are always a no-op under Itanium.
738 /// Obligatory offset/adjustment diagram:
739 /// <-- offset --> <-- adjustment -->
740 /// |--------------------------|----------------------|--------------------|
741 /// ^Derived address point ^Base address point ^Member address point
743 /// So when converting a base member pointer to a derived member pointer,
744 /// we add the offset to the adjustment because the address point has
745 /// decreased; and conversely, when converting a derived MP to a base MP
746 /// we subtract the offset from the adjustment because the address point
749 /// The standard forbids (at compile time) conversion to and from
750 /// virtual bases, which is why we don't have to consider them here.
752 /// The standard forbids (at run time) casting a derived MP to a base
753 /// MP when the derived MP does not point to a member of the base.
754 /// This is why -1 is a reasonable choice for null data member
/// pointers.
757 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
760 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
761 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
762 E->getCastKind() == CK_ReinterpretMemberPointer);
764 // Under Itanium, reinterprets don't require any additional processing.
765 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
767 // Use constant emission if we can.
768 if (isa<llvm::Constant>(src))
769 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
771 llvm::Constant *adj = getMemberPointerAdjustment(E);
772 if (!adj) return src;
774 CGBuilderTy &Builder = CGF.Builder;
775 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
777 const MemberPointerType *destTy =
778 E->getType()->castAs<MemberPointerType>();
780 // For member data pointers, this is just a matter of adding the
781 // offset if the source is non-null.
782 if (destTy->isMemberDataPointer()) {
785 dst = Builder.CreateNSWSub(src, adj, "adj");
787 dst = Builder.CreateNSWAdd(src, adj, "adj");
790 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
791 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
792 return Builder.CreateSelect(isNull, src, dst);
795 // The this-adjustment is left-shifted by 1 on ARM.
796 if (UseARMMethodPtrABI) {
797 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
799 adj = llvm::ConstantInt::get(adj->getType(), offset);
802 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
805 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
807 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
809 return Builder.CreateInsertValue(src, dstAdj, 1);
813 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
814 llvm::Constant *src) {
815 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
816 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
817 E->getCastKind() == CK_ReinterpretMemberPointer);
819 // Under Itanium, reinterprets don't require any additional processing.
820 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
822 // If the adjustment is trivial, we don't need to do anything.
823 llvm::Constant *adj = getMemberPointerAdjustment(E);
824 if (!adj) return src;
826 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
828 const MemberPointerType *destTy =
829 E->getType()->castAs<MemberPointerType>();
831 // For member data pointers, this is just a matter of adding the
832 // offset if the source is non-null.
833 if (destTy->isMemberDataPointer()) {
834 // null maps to null.
835 if (src->isAllOnesValue()) return src;
838 return llvm::ConstantExpr::getNSWSub(src, adj);
840 return llvm::ConstantExpr::getNSWAdd(src, adj);
843 // The this-adjustment is left-shifted by 1 on ARM.
844 if (UseARMMethodPtrABI) {
845 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
847 adj = llvm::ConstantInt::get(adj->getType(), offset);
850 llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
851 llvm::Constant *dstAdj;
853 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
855 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
857 return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
861 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
862 // Itanium C++ ABI 2.3:
863 // A NULL pointer is represented as -1.
864 if (MPT->isMemberDataPointer())
865 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
867 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
868 llvm::Constant *Values[2] = { Zero, Zero };
869 return llvm::ConstantStruct::getAnon(Values);
873 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
875 // Itanium C++ ABI 2.3:
876 // A pointer to data member is an offset from the base address of
877 // the class object containing it, represented as a ptrdiff_t
878 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
882 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
883 return BuildMemberPointer(MD, CharUnits::Zero());
// Build the constant {ptr, adj} pair representing a pointer to the
// instance member function MD, with the given accumulated 'this'
// adjustment (in bytes) from any derived-to-base path.
886 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
887 CharUnits ThisAdjustment) {
888 assert(MD->isInstance() && "Member function must not be static!");
890 CodeGenTypes &Types = CGM.getTypes();
892 // Get the function pointer (or index if this is a virtual function).
893 llvm::Constant *MemPtr[2];
894 if (MD->isVirtual()) {
895 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
897 const ASTContext &Context = getContext();
898 CharUnits PointerWidth =
899 Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
// A virtual function is encoded as its byte offset into the vtable.
900 uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
902 if (UseARMMethodPtrABI) {
903 // ARM C++ ABI 3.2.1:
904 // This ABI specifies that adj contains twice the this
905 // adjustment, plus 1 if the member function is virtual. The
906 // least significant bit of adj then makes exactly the same
907 // discrimination as the least significant bit of ptr does for
909 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
910 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
911 2 * ThisAdjustment.getQuantity() + 1);
913 // Itanium C++ ABI 2.3:
914 // For a virtual function, [the pointer field] is 1 plus the
915 // virtual table offset (in bytes) of the function,
916 // represented as a ptrdiff_t.
917 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
918 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
919 ThisAdjustment.getQuantity());
// Non-virtual case: ptr is the function's address cast to ptrdiff_t.
922 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
924 // Check whether the function has a computable LLVM signature.
925 if (Types.isFuncTypeConvertible(FPT)) {
926 // The function has a computable LLVM signature; use the correct type.
927 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
929 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
930 // function type is incomplete.
933 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
935 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
// On ARM the adjustment is doubled because adj's low bit is the
// virtual-function discriminator (0 here, since this is non-virtual).
936 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
937 (UseARMMethodPtrABI ? 2 : 1) *
938 ThisAdjustment.getQuantity());
941 return llvm::ConstantStruct::getAnon(MemPtr);
// Emit a constant member pointer from an evaluated APValue. Dispatches to
// the null, member-function, and data-member forms as appropriate.
944 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
946 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
947 const ValueDecl *MPD = MP.getMemberPointerDecl();
// No declaration means the APValue is a null member pointer.
949 return EmitNullMemberPointer(MPT);
// Adjustment accumulated along the inheritance path stored in the APValue.
951 CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
953 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
954 return BuildMemberPointer(MD, ThisAdjustment);
// Data member: the value is the field's byte offset plus the path adjustment.
956 CharUnits FieldOffset =
957 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
958 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
961 /// The comparison algorithm is pretty easy: the member pointers are
962 /// the same if they're either bitwise identical *or* both null.
964 /// ARM is different here only because null-ness is more complicated.
966 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
969 const MemberPointerType *MPT,
971 CGBuilderTy &Builder = CGF.Builder;
973 llvm::ICmpInst::Predicate Eq;
974 llvm::Instruction::BinaryOps And, Or;
// For inequality the whole expression is negated, so (by De Morgan) the
// predicate and both connectives are swapped relative to the equality case.
976 Eq = llvm::ICmpInst::ICMP_NE;
977 And = llvm::Instruction::Or;
978 Or = llvm::Instruction::And;
980 Eq = llvm::ICmpInst::ICMP_EQ;
981 And = llvm::Instruction::And;
982 Or = llvm::Instruction::Or;
985 // Member data pointers are easy because there's a unique null
986 // value, so it just comes down to bitwise equality.
987 if (MPT->isMemberDataPointer())
988 return Builder.CreateICmp(Eq, L, R);
990 // For member function pointers, the tautologies are more complex.
991 // The Itanium tautology is:
992 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
993 // The ARM tautology is:
994 // (L == R) <==> (L.ptr == R.ptr &&
995 // (L.adj == R.adj ||
996 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
997 // The inequality tautologies have exactly the same structure, except
998 // applying De Morgan's laws.
1000 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1001 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1003 // This condition tests whether L.ptr == R.ptr. This must always be
1004 // true for equality to hold.
1005 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1007 // This condition, together with the assumption that L.ptr == R.ptr,
1008 // tests whether the pointers are both null. ARM imposes an extra
1010 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
1011 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
1013 // This condition tests whether L.adj == R.adj. If this isn't
1014 // true, the pointers are unequal unless they're both null.
1015 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1016 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1017 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1019 // Null member function pointers on ARM clear the low bit of Adj,
1020 // so the zero condition has to check that neither low bit is set.
1021 if (UseARMMethodPtrABI) {
1022 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
1024 // Compute (l.adj | r.adj) & 1 and test it against zero.
1025 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1026 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1027 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1029 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1032 // Tie together all our conditions.
1033 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1034 Result = Builder.CreateBinOp(And, PtrEq, Result,
1035 Inequality ? "memptr.ne" : "memptr.eq");
// Emit an i1 that is true when the given member pointer is non-null.
// Data member pointers use -1 as the null value; function member pointers
// are null when 'ptr' is zero (and, on ARM, when adj's low bit is clear).
1040 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1041 llvm::Value *MemPtr,
1042 const MemberPointerType *MPT) {
1043 CGBuilderTy &Builder = CGF.Builder;
1045 /// For member data pointers, this is just a check against -1.
1046 if (MPT->isMemberDataPointer()) {
1047 assert(MemPtr->getType() == CGM.PtrDiffTy);
1048 llvm::Value *NegativeOne =
1049 llvm::Constant::getAllOnesValue(MemPtr->getType());
1050 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1053 // In Itanium, a member function pointer is not null if 'ptr' is not null.
1054 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1056 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1057 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1059 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1060 // (the virtual bit) is set.
1061 if (UseARMMethodPtrABI) {
1062 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1063 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1064 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1065 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1066 "memptr.isvirtual");
1067 Result = Builder.CreateOr(Result, IsVirtual);
// Force an indirect (sret) return for C++ class types that cannot be
// passed in registers (e.g. those with non-trivial copy/move semantics).
1073 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1074 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1078 // If C++ prohibits us from making a copy, return by address.
1079 if (!RD->canPassInRegisters()) {
// Pass indirectly through a caller-provided slot, not as a byval copy.
1080 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1081 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1087 /// The Itanium ABI requires non-zero initialization only for data
1088 /// member pointers, for which '0' is a valid offset.
/// Member function pointers use ptr == 0 as null, so all-zero memory is a
/// valid null value for them and they can be zero-initialized.
1089 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1090 return MPT->isMemberFunctionPointer();
1093 /// The Itanium ABI always places an offset to the complete object
1094 /// at entry -2 in the vtable.
/// Emits a virtual 'delete' of an object: for a ::delete, recover the
/// complete-object pointer from the vtable and schedule the global
/// operator delete as a cleanup; otherwise the deleting destructor in the
/// vtable performs the deallocation itself.
1095 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1096 const CXXDeleteExpr *DE,
1098 QualType ElementType,
1099 const CXXDestructorDecl *Dtor) {
1100 bool UseGlobalDelete = DE->isGlobalDelete();
1101 if (UseGlobalDelete) {
1102 // Derive the complete-object pointer, which is what we need
1103 // to pass to the deallocation function.
1105 // Grab the vtable pointer as an intptr_t*.
1107 cast<CXXRecordDecl>(ElementType->getAs<RecordType>()->getDecl());
1108 llvm::Value *VTable =
1109 CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1111 // Track back to entry -2 and pull out the offset there.
1112 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1113 VTable, -2, "complete-offset.ptr");
1114 llvm::Value *Offset =
1115 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1117 // Apply the offset.
1118 llvm::Value *CompletePtr =
1119 CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1120 CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
1122 // If we're supposed to call the global delete, make sure we do so
1123 // even if the destructor throws.
1124 CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1128 // FIXME: Provide a source location here even though there's no
1129 // CXXMemberCallExpr for dtor call.
// ::delete runs the complete dtor (delete is a cleanup above); plain
// delete dispatches to the deleting dtor, which also frees the storage.
1130 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1131 EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1133 if (UseGlobalDelete)
1134 CGF.PopCleanupBlock();
// Re-raise the currently handled exception via the Itanium runtime.
// When isNoReturn, the call is emitted as noreturn (terminating the block);
// otherwise it is a normal call/invoke.
1137 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1138 // void __cxa_rethrow();
1140 llvm::FunctionType *FTy =
1141 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1143 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1146 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1148 CGF.EmitRuntimeCallOrInvoke(Fn);
// Lazily declare the __cxa_allocate_exception runtime entry point.
1151 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1152 // void *__cxa_allocate_exception(size_t thrown_size);
1154 llvm::FunctionType *FTy =
1155 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1157 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
// Lazily declare the __cxa_throw runtime entry point.
1160 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1161 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1162 // void (*dest) (void *));
// All three parameters are declared as i8* at the IR level.
1164 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1165 llvm::FunctionType *FTy =
1166 llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1168 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
// Emit a 'throw' expression: allocate the exception object with
// __cxa_allocate_exception, construct the thrown value into it, then call
// __cxa_throw with the value, its RTTI, and its destructor (or null).
1171 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1172 QualType ThrowType = E->getSubExpr()->getType();
1173 // Now allocate the exception object.
1174 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1175 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
// __cxa_allocate_exception cannot throw, so a plain call suffices.
1177 llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1178 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1179 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1181 CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1182 CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
1184 // Now throw the exception.
1185 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1188 // The address of the destructor. If the exception type has a
1189 // trivial destructor (or isn't a record), we just pass null.
1190 llvm::Constant *Dtor = nullptr;
1191 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1192 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1193 if (!Record->hasTrivialDestructor()) {
// The runtime destroys the exception with the complete-object dtor.
1194 CXXDestructorDecl *DtorD = Record->getDestructor();
1195 Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1196 Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1199 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
// __cxa_throw never returns.
1201 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1202 CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
// Lazily declare the __dynamic_cast runtime entry point.
1205 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1206 // void *__dynamic_cast(const void *sub,
1207 // const abi::__class_type_info *src,
1208 // const abi::__class_type_info *dst,
1209 // std::ptrdiff_t src2dst_offset);
1211 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1212 llvm::Type *PtrDiffTy =
1213 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1215 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1217 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1219 // Mark the function as nounwind readonly.
// readonly/nounwind lets the optimizer CSE and reorder repeated casts.
1220 llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1221 llvm::Attribute::ReadOnly };
1222 llvm::AttributeList Attrs = llvm::AttributeList::get(
1223 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1225 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
// Lazily declare the __cxa_bad_cast runtime entry point (throws std::bad_cast).
1228 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1229 // void __cxa_bad_cast();
1230 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1231 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1234 /// Compute the src2dst_offset hint as described in the
1235 /// Itanium C++ ABI [2.9.7]
/// Hint values: >= 0 is the unique static offset of Src within Dst;
/// -1 means a virtual base lies on the path (offset unknown statically);
/// -2 means Src is not a public base of Dst; -3 means Src appears as
/// multiple public non-virtual bases.
1236 static CharUnits computeOffsetHint(ASTContext &Context,
1237 const CXXRecordDecl *Src,
1238 const CXXRecordDecl *Dst) {
1239 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1240 /*DetectVirtual=*/false);
1242 // If Dst is not derived from Src we can skip the whole computation below and
1243 // return that Src is not a public base of Dst. Record all inheritance paths.
1244 if (!Dst->isDerivedFrom(Src, Paths))
1245 return CharUnits::fromQuantity(-2ULL);
1247 unsigned NumPublicPaths = 0;
1250 // Now walk all possible inheritance paths.
1251 for (const CXXBasePath &Path : Paths) {
1252 if (Path.Access != AS_public) // Ignore non-public inheritance.
1257 for (const CXXBasePathElement &PathElement : Path) {
1258 // If the path contains a virtual base class we can't give any hint.
1260 if (PathElement.Base->isVirtual())
1261 return CharUnits::fromQuantity(-1ULL);
1263 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1266 // Accumulate the base class offsets.
1267 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1268 Offset += L.getBaseClassOffset(
1269 PathElement.Base->getType()->getAsCXXRecordDecl());
1273 // -2: Src is not a public base of Dst.
1274 if (NumPublicPaths == 0)
1275 return CharUnits::fromQuantity(-2ULL);
1277 // -3: Src is a multiple public base type but never a virtual base type.
1278 if (NumPublicPaths > 1)
1279 return CharUnits::fromQuantity(-3ULL);
1281 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1282 // Return the offset of Src from the origin of Dst.
// Lazily declare the __cxa_bad_typeid runtime entry point (throws
// std::bad_typeid).
1286 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1287 // void __cxa_bad_typeid();
1288 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1290 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
// Whether typeid's operand needs an explicit null check. C++ requires
// typeid of a dereferenced null glvalue to throw std::bad_typeid, so a
// check is relevant when IsDeref is set.
1293 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1294 QualType SrcRecordTy) {
// Emit a call to __cxa_bad_typeid; it never returns, so the block is
// terminated with unreachable.
1298 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1299 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1300 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1301 Call->setDoesNotReturn();
1302 CGF.Builder.CreateUnreachable();
// Emit typeid on a polymorphic glvalue: the std::type_info pointer is
// stored at vtable slot -1, immediately before the address point.
1305 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1306 QualType SrcRecordTy,
1308 llvm::Type *StdTypeInfoPtrTy) {
1310 cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1311 llvm::Value *Value =
1312 CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1314 // Load the type info.
1315 Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1316 return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
// Whether the __dynamic_cast call needs an explicit null check on the
// source pointer (a null source must yield a null result, not a runtime call).
1319 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1320 QualType SrcRecordTy) {
// Emit the general dynamic_cast as a call to __dynamic_cast, passing the
// source/destination RTTI and the statically-computed src2dst offset hint.
// For reference casts, a null result branches to __cxa_bad_cast.
1324 llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1325 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1326 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1327 llvm::Type *PtrDiffLTy =
1328 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1329 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
// RTTI descriptors are looked up on the unqualified types.
1331 llvm::Value *SrcRTTI =
1332 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1333 llvm::Value *DestRTTI =
1334 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1336 // Compute the offset hint.
1337 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1338 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1339 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1341 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1343 // Emit the call to __dynamic_cast.
1344 llvm::Value *Value = ThisAddr.getPointer();
1345 Value = CGF.EmitCastToVoidPtr(Value);
1347 llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1348 Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1349 Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1351 /// C++ [expr.dynamic.cast]p9:
1352 /// A failed cast to reference type throws std::bad_cast
1353 if (DestTy->isReferenceType()) {
1354 llvm::BasicBlock *BadCastBlock =
1355 CGF.createBasicBlock("dynamic_cast.bad_cast");
1357 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1358 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1360 CGF.EmitBlock(BadCastBlock);
1361 EmitBadCastCall(CGF);
// Emit dynamic_cast<void*>: adjust the pointer by the offset-to-top value
// stored at vtable slot -2 to reach the complete object. No runtime call
// is needed for this form.
1367 llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1369 QualType SrcRecordTy,
1371 llvm::Type *PtrDiffLTy =
1372 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1373 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1376 cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1377 // Get the vtable pointer.
1378 llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
1381 // Get the offset-to-top from the vtable.
1382 llvm::Value *OffsetToTop =
1383 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL)
1385 CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
1388 // Finally, add the offset to the pointer.
1389 llvm::Value *Value = ThisAddr.getPointer();
1390 Value = CGF.EmitCastToVoidPtr(Value);
1391 Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1393 return CGF.Builder.CreateBitCast(Value, DestLTy);
// Emit a call to __cxa_bad_cast; it never returns, so the block is
// terminated with unreachable.
1396 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1397 llvm::FunctionCallee Fn = getBadCastFn(CGF);
1398 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1399 Call->setDoesNotReturn();
1400 CGF.Builder.CreateUnreachable();
// Load the dynamic offset of a virtual base from the object's vtable:
// the vbase-offset slot lives at a statically-known (negative) offset
// from the vtable address point.
1405 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1407 const CXXRecordDecl *ClassDecl,
1408 const CXXRecordDecl *BaseClassDecl) {
1409 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl)
1410 CharUnits VBaseOffsetOffset =
1411 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1414 llvm::Value *VBaseOffsetPtr =
1415 CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1416 "vbase.offset.ptr");
1417 VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1418 CGM.PtrDiffTy->getPointerTo());
1420 llvm::Value *VBaseOffset =
1421 CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
// Emit the Itanium constructor variants (base-object and complete-object).
1427 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1428 // Just make sure we're in sync with TargetCXXABI.
1429 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1431 // The constructor used for constructing this as a base class;
1432 // ignores virtual bases.
1433 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1435 // The constructor used for constructing this as a complete class;
1436 // constructs the virtual bases, then calls the base constructor.
1437 if (!D->getParent()->isAbstract()) {
1438 // We don't need to emit the complete ctor if the class is abstract.
1439 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
// Add the implicit VTT parameter to a structor's signature when needed.
// Returns how many implicit arguments were prefixed (0 or 1).
1443 CGCXXABI::AddedStructorArgs
1444 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1445 SmallVectorImpl<CanQualType> &ArgTys) {
1446 ASTContext &Context = getContext();
1448 // All parameters are already in place except VTT, which goes after 'this'.
1449 // These are Clang types, so we don't need to worry about sret yet.
1451 // Check if we need to add a VTT parameter (which has type void **).
// Only base-object structors of classes with virtual bases take a VTT.
1452 if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1453 : GD.getDtorType() == Dtor_Base) &&
1454 cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1455 ArgTys.insert(ArgTys.begin() + 1,
1456 Context.getPointerType(Context.VoidPtrTy));
1457 return AddedStructorArgs::prefix(1);
1459 return AddedStructorArgs{};
// Emit the Itanium destructor variants (base, complete, and deleting).
1462 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1463 // The destructor used for destructing this as a base class; ignores
1465 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1467 // The destructor used for destructing this as a most-derived class;
1468 // call the base destructor and then destructs any virtual bases.
1469 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1471 // The destructor in a virtual table is always a 'deleting'
1472 // destructor, which calls the complete destructor and then uses the
1473 // appropriate operator delete.
1475 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
// Materialize the implicit VTT parameter (void**) in a structor's
// FunctionArgList, inserted immediately after 'this'.
1478 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1480 FunctionArgList &Params) {
1481 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1482 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1484 // Check if we need a VTT parameter as well.
1485 if (NeedsVTTParameter(CGF.CurGD)) {
1486 ASTContext &Context = getContext();
1488 // FIXME: avoid the fake decl
1489 QualType T = Context.getPointerType(Context.VoidPtrTy);
1490 auto *VTTDecl = ImplicitParamDecl::Create(
1491 Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1492 T, ImplicitParamDecl::CXXVTT);
1493 Params.insert(Params.begin() + 1, VTTDecl);
// Remember the decl so the prologue can load its value (see
// EmitInstanceFunctionProlog).
1494 getStructorImplicitParamDecl(CGF) = VTTDecl;
// Set up 'this' (and 'vtt' when present) at the start of an instance
// method, and initialize the return slot for this-returning functions.
1498 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1499 // Naked functions have no prolog.
1500 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1503 /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1504 /// adjustments are required, because they are all handled by thunks.
1505 setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1507 /// Initialize the 'vtt' slot if needed.
1508 if (getStructorImplicitParamDecl(CGF)) {
1509 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1510 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1513 /// If this is a function that the ABI specifies returns 'this', initialize
1514 /// the return slot to 'this' at the start of the function.
1516 /// Unlike the setting of return types, this is done within the ABI
1517 /// implementation instead of by clients of CGCXXABI because:
1518 /// 1) getThisValue is currently protected
1519 /// 2) in theory, an ABI could implement 'this' returns some other way;
1520 /// HasThisReturn only specifies a contract, not the implementation
1521 if (HasThisReturn(CGF.CurGD))
1522 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
// At a constructor call site, pass the VTT as the implicit second argument
// when the callee variant requires one. Returns the count of added args.
1525 CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
1526 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1527 bool ForVirtualBase, bool Delegating, CallArgList &Args) {
1528 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1529 return AddedStructorArgs{};
1531 // Insert the implicit 'vtt' argument as the second argument.
1533 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1534 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1535 Args.insert(Args.begin() + 1, CallArg(RValue::get(VTT), VTTTy));
1536 return AddedStructorArgs::prefix(1); // Added one arg.
// Emit a direct (non-virtual-dispatch) destructor call, passing the VTT
// when the callee variant needs one.
1539 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1540 const CXXDestructorDecl *DD,
1541 CXXDtorType Type, bool ForVirtualBase,
1542 bool Delegating, Address This,
1544 GlobalDecl GD(DD, Type);
1545 llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1546 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
// -fapple-kext requires non-base virtual destructor calls to be routed
// through the vtable even when the callee is statically known.
1549 if (getContext().getLangOpts().AppleKext &&
1550 Type != Dtor_Base && DD->isVirtual())
1551 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1553 Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1555 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
// Emit the vtable (group) definition for RD: build the initializer, set
// linkage/comdat/visibility, and attach type metadata. Idempotent — bails
// out if the vtable already has an initializer.
1559 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1560 const CXXRecordDecl *RD) {
1561 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1562 if (VTable->hasInitializer())
1565 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1566 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1567 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1568 llvm::Constant *RTTI =
1569 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1571 // Create and set the initializer.
1572 ConstantInitBuilder Builder(CGM);
1573 auto Components = Builder.beginStruct();
1574 CGVT.createVTableInitializer(Components, VTLayout, RTTI);
1575 Components.finishAndSetAsInitializer(VTable);
1577 // Set the correct linkage.
1578 VTable->setLinkage(Linkage);
1580 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1581 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1583 // Set the right visibility.
1584 CGM.setGVProperties(VTable, RD);
1586 // If this is the magic class __cxxabiv1::__fundamental_type_info,
1587 // we will emit the typeinfo for the fundamental types. This is the
1588 // same behaviour as GCC.
1589 const DeclContext *DC = RD->getDeclContext();
1590 if (RD->getIdentifier() &&
1591 RD->getIdentifier()->isStr("__fundamental_type_info") &&
1592 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1593 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1594 DC->getParent()->isTranslationUnit())
1595 EmitFundamentalRTTIDescriptors(RD);
// Type metadata (for CFI / whole-program devirtualization) is only
// attached to definitions, not declarations.
1597 if (!VTable->isDeclarationForLinker())
1598 CGM.EmitVTableTypeMetadata(VTable, VTLayout)
// A vptr field needs a dynamic virtual-base offset only when it belongs to
// a subobject reached through a virtual base inside a structor that takes
// a VTT.
1601 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1602 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1603 if (Vptr.NearestVBase == nullptr)
1605 return NeedsVTTParameter(CGF.CurGD);
// Select the vtable address point to install while inside a structor:
// classes with virtual bases (when a VTT is available) read it from the
// VTT; otherwise the constant address point is used directly.
1608 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1609 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1610 const CXXRecordDecl *NearestVBase) {
1612 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1613 NeedsVTTParameter(CGF.CurGD)) {
1614 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1617 return getVTableAddressPoint(Base, VTableClass);
// Compute the constant address point for Base's vtable within the vtable
// group of VTableClass, as an inbounds GEP into the vtable global.
1621 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1622 const CXXRecordDecl *VTableClass) {
1623 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1625 // Find the appropriate vtable within the vtable group, and the address point
1626 // within that vtable.
1627 VTableLayout::AddressPointLocation AddressPoint =
1628 CGM.getItaniumVTableContext()
1629 .getVTableLayout(VTableClass)
1630 .getAddressPoint(Base);
1631 llvm::Value *Indices[] = {
1632 llvm::ConstantInt::get(CGM.Int32Ty, 0),
1633 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1634 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
// InRangeIndex=1 tells LLVM the access stays within the selected member
// vtable, which permits splitting the vtable group.
1637 return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1638 Indices, /*InBounds=*/true,
1639 /*InRangeIndex=*/1);
// Load the vtable address point for Base from the structor's VTT, indexed
// by the secondary virtual pointer index for (VTableClass, Base).
1642 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1643 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1644 const CXXRecordDecl *NearestVBase) {
1645 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1646 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1648 // Get the secondary vpointer index.
1649 uint64_t VirtualPointerIndex =
1650 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1653 llvm::Value *VTT = CGF.LoadCXXVTT();
1654 if (VirtualPointerIndex)
1655 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1657 // And load the address point from the VTT.
1658 return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
// Constant-expression contexts can use the same constant address point.
1661 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1662 BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1663 return getVTableAddressPoint(Base, VTableClass);
// Get or create the global variable for RD's vtable group, caching the
// result in the VTables map and queueing it for deferred emission.
1666 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1667 CharUnits VPtrOffset) {
1668 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
// Cache slot; filled in below on first request for this class.
1670 llvm::GlobalVariable *&VTable = VTables[RD];
1674 // Queue up this vtable for possible deferred emission.
1675 CGM.addDeferredVTable(RD);
1677 SmallString<256> Name;
1678 llvm::raw_svector_ostream Out(Name);
1679 getMangleContext().mangleCXXVTable(RD, Out);
1681 const VTableLayout &VTLayout =
1682 CGM.getItaniumVTableContext().getVTableLayout(RD);
1683 llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1685 // Use pointer alignment for the vtable. Otherwise we would align them based
1686 // on the size of the initializer which doesn't make sense as only single
1688 unsigned PAlign = CGM.getTarget().getPointerAlign(0);
1690 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1691 Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1692 getContext().toCharUnitsFromBits(PAlign).getQuantity());
1693 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1695 CGM.setGVProperties(VTable, RD);
// Load the function pointer for a virtual call: fetch the vptr, index to
// the method's slot, and load it (optionally through the CFI
// type-checked-load intrinsic).
1700 CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1704 SourceLocation Loc) {
1705 Ty = Ty->getPointerTo()->getPointerTo();
1706 auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1707 llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
1709 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1711 if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
// CFI: combined type check + load; the byte offset is slot * pointer size.
1712 VFunc = CGF.EmitVTableTypeCheckedLoad(
1713 MethodDecl->getParent(), VTable,
1714 VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1716 CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1718 llvm::Value *VFuncPtr =
1719 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
1721 CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
1723 // Add !invariant.load md to virtual function load to indicate that
1724 // function didn't change inside vtable.
1725 // It's safe to add it without -fstrict-vtable-pointers, but it would not
1726 // help in devirtualization because it will only matter if we will have 2
1727 // the same virtual function loads from the same vtable load, which won't
1728 // happen without enabled devirtualization with -fstrict-vtable-pointers.
1729 if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1730 CGM.getCodeGenOpts().StrictVTablePointers)
1731 VFuncLoad->setMetadata(
1732 llvm::LLVMContext::MD_invariant_load,
1733 llvm::MDNode::get(CGM.getLLVMContext(),
1734 llvm::ArrayRef<llvm::Metadata *>()));
1738 CGCallee Callee(GD, VFunc);
// Emit a destructor call dispatched through the vtable. E carries either
// the CXXMemberCallExpr or the CXXDeleteExpr that triggered the call
// (exactly one is set).
1742 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1743 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1744 Address This, DeleteOrMemberCallExpr E) {
1745 auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
1746 auto *D = E.dyn_cast<const CXXDeleteExpr *>();
1747 assert((CE != nullptr) ^ (D != nullptr));
1748 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1749 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1751 GlobalDecl GD(Dtor, DtorType);
1752 const CGFunctionInfo *FInfo =
1753 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
1754 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1755 CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
// Derive the static type of the destroyed object from whichever
// expression form we were given.
1759 ThisTy = CE->getObjectType();
1761 ThisTy = D->getDestroyedType();
// Virtual destructor calls never pass a VTT.
1764 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
1765 QualType(), nullptr);
// Emit the VTT (virtual table table) definition for RD.
1769 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1770 CodeGenVTables &VTables = CGM.getVTables();
1771 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1772 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
// Whether an available_externally copy of RD's vtable (as used for a base
// subobject) can be emitted safely in this TU.
1775 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
1776 const CXXRecordDecl *RD) const {
1777 // We don't emit available_externally vtables if we are in -fapple-kext mode
1778 // because kext mode does not permit devirtualization.
1779 if (CGM.getLangOpts().AppleKext)
1782 // If the vtable is hidden then it is not safe to emit an available_externally
1784 if (isVTableHidden(RD))
1787 if (CGM.getCodeGenOpts().ForceEmitVTables)
1790 // If we don't have any not emitted inline virtual function then we are safe
1791 // to emit an available_externally copy of vtable.
1792 // FIXME we can still emit a copy of the vtable if we
1793 // can emit definition of the inline functions.
1794 if (hasAnyUnusedVirtualInlineFunction(RD))
1797 // For a class with virtual bases, we must also be able to speculatively
1798 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
1799 // the vtable" and "can emit the VTT". For a base subobject, this means we
1800 // need to be able to emit non-virtual base vtables.
1801 if (RD->getNumVBases()) {
1802 for (const auto &B : RD->bases()) {
1803 auto *BRD = B.getType()->getAsCXXRecordDecl();
1804 assert(BRD && "no class for base specifier");
// Virtual and non-dynamic bases contribute no base vtable here.
1805 if (B.isVirtual() || !BRD->isDynamicClass())
1807 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
// Returns whether a complete-object vtable (and, implicitly, the VTT) for RD
// can be emitted speculatively in this translation unit: RD must qualify as
// a base class, and so must every dynamic virtual base.
1815 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
1816 if (!canSpeculativelyEmitVTableAsBaseClass(RD))
1819 // For a complete-object vtable (or more specifically, for the VTT), we need
1820 // to be able to speculatively emit the vtables of all dynamic virtual bases.
1821 for (const auto &B : RD->vbases()) {
1822 auto *BRD = B.getType()->getAsCXXRecordDecl();
1823 assert(BRD && "no class for base specifier");
1824 if (!BRD->isDynamicClass())
1826 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
// Shared implementation for this-pointer and return-value adjustments in
// thunks. Applies a constant non-virtual byte offset and, if
// VirtualAdjustment is nonzero, an additional offset loaded from the vtable
// at byte offset VirtualAdjustment. For a this-adjustment the non-virtual
// step is applied before the virtual one; for a return adjustment it is
// applied after. Returns the adjusted pointer cast back to InitialPtr's type.
1832 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
1834 int64_t NonVirtualAdjustment,
1835 int64_t VirtualAdjustment,
1836 bool IsReturnAdjustment) {
// Fast path: no adjustment at all.
1837 if (!NonVirtualAdjustment && !VirtualAdjustment)
1838 return InitialPtr.getPointer();
1840 Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
1842 // In a base-to-derived cast, the non-virtual adjustment is applied first.
1843 if (NonVirtualAdjustment && !IsReturnAdjustment) {
1844 V = CGF.Builder.CreateConstInBoundsByteGEP(V,
1845 CharUnits::fromQuantity(NonVirtualAdjustment));
1848 // Perform the virtual adjustment if we have one.
1849 llvm::Value *ResultPtr;
1850 if (VirtualAdjustment) {
1851 llvm::Type *PtrDiffTy =
1852 CGF.ConvertType(CGF.getContext().getPointerDiffType());
// Read the vtable pointer stored at the start of the object.
1854 Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
1855 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
1857 llvm::Value *OffsetPtr =
1858 CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
1860 OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
1862 // Load the adjustment offset from the vtable.
1863 llvm::Value *Offset =
1864 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1866 // Adjust our pointer.
1867 ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
1869 ResultPtr = V.getPointer();
1872 // In a derived-to-base conversion, the non-virtual adjustment is
1874 if (NonVirtualAdjustment && IsReturnAdjustment) {
1875 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
1876 NonVirtualAdjustment);
1879 // Cast back to the original type.
1880 return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
// Adjust 'This' at a thunk entry point. The virtual part of the adjustment
// comes from the Itanium vcall offset stored in the vtable
// (TA.Virtual.Itanium.VCallOffsetOffset).
1883 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
1885 const ThisAdjustment &TA) {
1886 return performTypeAdjustment(CGF, This, TA.NonVirtual,
1887 TA.Virtual.Itanium.VCallOffsetOffset,
1888 /*IsReturnAdjustment=*/false);
// Adjust a covariant thunk's return value. The virtual part of the
// adjustment comes from the Itanium vbase offset stored in the vtable
// (RA.Virtual.Itanium.VBaseOffsetOffset).
1892 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
1893 const ReturnAdjustment &RA) {
1894 return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
1895 RA.Virtual.Itanium.VBaseOffsetOffset,
1896 /*IsReturnAdjustment=*/true);
// ARM override of thunk return emission: delegates to the Itanium path for
// everything except destructors, whose thunk results are indeterminate under
// the ARM ABI, so an undef of the return slot's type is returned instead.
1899 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
1900 RValue RV, QualType ResultType) {
1901 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
1902 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
1904 // Destructor thunks in the ARM ABI have indeterminate results.
1905 llvm::Type *T = CGF.ReturnValue.getElementType();
1906 RValue Undef = RValue::get(llvm::UndefValue::get(T));
1907 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
1910 /************************** Array allocation cookies **************************/
// Size of the array-new cookie for the generic Itanium ABI:
// max(sizeof(size_t), alignof(elementType)).
1912 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1913 // The array cookie is a size_t; pad that up to the element alignment.
1914 // The cookie is actually right-justified in that space.
1915 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
1916 CGM.getContext().getTypeAlignInChars(elementType));
// Write the Itanium array-new cookie (the element count, right-justified in
// the cookie area) at NewPtr, optionally poisoning it for ASan, and return
// the address of the first array element past the cookie.
1919 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1921 llvm::Value *NumElements,
1922 const CXXNewExpr *expr,
1923 QualType ElementType) {
1924 assert(requiresArrayCookie(expr));
1926 unsigned AS = NewPtr.getAddressSpace();
1928 ASTContext &Ctx = getContext();
1929 CharUnits SizeSize = CGF.getSizeSize();
1931 // The size of the cookie.
1932 CharUnits CookieSize =
1933 std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
1934 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
1936 // Compute an offset to the cookie.
// The count lives in the last sizeof(size_t) bytes of the cookie area.
1937 Address CookiePtr = NewPtr;
1938 CharUnits CookieOffset = CookieSize - SizeSize;
1939 if (!CookieOffset.isZero())
1940 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
1942 // Write the number of elements into the appropriate slot.
1943 Address NumElementsPtr =
1944 CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
1945 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
1947 // Handle the array cookie specially in ASan.
// Only for address space 0 and either a replaceable global operator new or
// when poisoning of custom array cookies was explicitly requested.
1948 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
1949 (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
1950 CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
1951 // The store to the CookiePtr does not need to be instrumented.
1952 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
1953 llvm::FunctionType *FTy =
1954 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
1955 llvm::FunctionCallee F =
1956 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
1957 CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
1960 // Finally, compute a pointer to the actual data buffer by skipping
1961 // over the cookie completely.
1962 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
// Read back the element count from an Itanium array cookie at allocPtr.
// Under ASan (address space 0) the load is routed through
// __asan_load_cxx_array_cookie so the runtime can validate the shadow.
1965 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1967 CharUnits cookieSize) {
1968 // The element size is right-justified in the cookie.
1969 Address numElementsPtr = allocPtr;
1970 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
1971 if (!numElementsOffset.isZero())
1973 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
1975 unsigned AS = allocPtr.getAddressSpace();
1976 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1977 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
1978 return CGF.Builder.CreateLoad(numElementsPtr);
1979 // In asan mode emit a function call instead of a regular load and let the
1980 // run-time deal with it: if the shadow is properly poisoned return the
1981 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
1982 // We can't simply ignore this load using nosanitize metadata because
1983 // the metadata may be lost.
1984 llvm::FunctionType *FTy =
1985 llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
1986 llvm::FunctionCallee F =
1987 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
1988 return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
// Size of the array-new cookie under the ARM ABI:
// max(2 * sizeof(size_t), alignof(elementType)).
1991 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1992 // ARM says that the cookie is always:
1993 // struct array_cookie {
1994 // std::size_t element_size; // element_size != 0
1995 // std::size_t element_count;
1997 // But the base ABI doesn't give anything an alignment greater than
1998 // 8, so we can dismiss this as typical ABI-author blindness to
1999 // actual language complexity and round up to the element alignment.
2000 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2001 CGM.getContext().getTypeAlignInChars(elementType));
// Write the ARM two-word array cookie {element_size, element_count} at the
// start of the allocation and return the address of the first array element.
2004 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2006 llvm::Value *numElements,
2007 const CXXNewExpr *expr,
2008 QualType elementType) {
2009 assert(requiresArrayCookie(expr));
2011 // The cookie is always at the start of the buffer.
2012 Address cookie = newPtr;
2014 // The first element is the element size.
2015 cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2016 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2017 getContext().getTypeSizeInChars(elementType).getQuantity());
2018 CGF.Builder.CreateStore(elementSize, cookie);
2020 // The second element is the element count.
2021 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2022 CGF.Builder.CreateStore(numElements, cookie);
2024 // Finally, compute a pointer to the actual data buffer by skipping
2025 // over the cookie completely.
2026 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2027 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
// Read the element count out of an ARM array cookie: it is the second
// size_t word of the cookie, at offset sizeof(size_t) from allocPtr.
2030 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2032 CharUnits cookieSize) {
2033 // The number of elements is at offset sizeof(size_t) relative to
2034 // the allocated pointer.
2035 Address numElementsPtr
2036 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2038 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2039 return CGF.Builder.CreateLoad(numElementsPtr);
2042 /*********************** Static local initialization **************************/
// Returns a declaration of the __cxa_guard_acquire runtime function,
// marked nounwind.
2044 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2045 llvm::PointerType *GuardPtrTy) {
2046 // int __cxa_guard_acquire(__guard *guard_object);
2047 llvm::FunctionType *FTy =
2048 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2049 GuardPtrTy, /*isVarArg=*/false);
2050 return CGM.CreateRuntimeFunction(
2051 FTy, "__cxa_guard_acquire",
2052 llvm::AttributeList::get(CGM.getLLVMContext(),
2053 llvm::AttributeList::FunctionIndex,
2054 llvm::Attribute::NoUnwind));
// Returns a declaration of the __cxa_guard_release runtime function,
// marked nounwind.
2057 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2058 llvm::PointerType *GuardPtrTy) {
2059 // void __cxa_guard_release(__guard *guard_object);
2060 llvm::FunctionType *FTy =
2061 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2062 return CGM.CreateRuntimeFunction(
2063 FTy, "__cxa_guard_release",
2064 llvm::AttributeList::get(CGM.getLLVMContext(),
2065 llvm::AttributeList::FunctionIndex,
2066 llvm::Attribute::NoUnwind));
// Returns a declaration of the __cxa_guard_abort runtime function,
// marked nounwind.
2069 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2070 llvm::PointerType *GuardPtrTy) {
2071 // void __cxa_guard_abort(__guard *guard_object);
2072 llvm::FunctionType *FTy =
2073 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2074 return CGM.CreateRuntimeFunction(
2075 FTy, "__cxa_guard_abort",
2076 llvm::AttributeList::get(CGM.getLLVMContext(),
2077 llvm::AttributeList::FunctionIndex,
2078 llvm::Attribute::NoUnwind));
// EH-cleanup that calls __cxa_guard_abort on the given guard variable when
// emitted, releasing the guard if initialization terminates by exception.
2082 struct CallGuardAbort final : EHScopeStack::Cleanup {
2083 llvm::GlobalVariable *Guard;
2084 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2086 void Emit(CodeGenFunction &CGF, Flags flags) override {
2087 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2093 /// The ARM code here follows the Itanium code closely enough that we
2094 /// just special-case it at particular places.
/// Emits a once-only, optionally thread-safe, guarded dynamic initialization
/// for 'var', the IR global backing declaration 'D', using an ABI guard
/// variable and (when thread-safe) __cxa_guard_acquire/release/abort.
2095 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2097 llvm::GlobalVariable *var,
2098 bool shouldPerformInit) {
2099 CGBuilderTy &Builder = CGF.Builder;
2101 // Inline variables that weren't instantiated from variable templates have
2102 // partially-ordered initialization within their translation unit.
2103 bool NonTemplateInline =
2105 !isTemplateInstantiation(D.getTemplateSpecializationKind());
2107 // We only need to use thread-safe statics for local non-TLS variables and
2108 // inline variables; other global initialization is always single-threaded
2109 // or (through lazy dynamic loading in multiple threads) unsequenced.
2110 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2111 (D.isLocalVarDecl() || NonTemplateInline) &&
2114 // If we have a global variable with internal linkage and thread-safe statics
2115 // are disabled, we can just let the guard variable be of type i8.
2116 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2118 llvm::IntegerType *guardTy;
2119 CharUnits guardAlignment;
2120 if (useInt8GuardVariable) {
2121 guardTy = CGF.Int8Ty;
2122 guardAlignment = CharUnits::One();
2124 // Guard variables are 64 bits in the generic ABI and size width on ARM
2125 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2126 if (UseARMGuardVarABI) {
2127 guardTy = CGF.SizeTy;
2128 guardAlignment = CGF.getSizeAlign();
2130 guardTy = CGF.Int64Ty;
2131 guardAlignment = CharUnits::fromQuantity(
2132 CGM.getDataLayout().getABITypeAlignment(guardTy));
2135 llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
2137 // Create the guard variable if we don't already have it (as we
2138 // might if we're double-emitting this function body).
2139 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2141 // Mangle the name for the guard.
2142 SmallString<256> guardName;
2144 llvm::raw_svector_ostream out(guardName);
2145 getMangleContext().mangleStaticGuardVariable(&D, out);
2148 // Create the guard variable with a zero-initializer.
2149 // Just absorb linkage and visibility from the guarded variable.
2150 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2151 false, var->getLinkage(),
2152 llvm::ConstantInt::get(guardTy, 0),
2154 guard->setDSOLocal(var->isDSOLocal());
2155 guard->setVisibility(var->getVisibility());
2156 // If the variable is thread-local, so is its guard variable.
2157 guard->setThreadLocalMode(var->getThreadLocalMode());
2158 guard->setAlignment(guardAlignment.getQuantity());
2160 // The ABI says: "It is suggested that it be emitted in the same COMDAT
2161 // group as the associated data object." In practice, this doesn't work for
2162 // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2163 llvm::Comdat *C = var->getComdat();
2164 if (!D.isLocalVarDecl() && C &&
2165 (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2166 CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2167 guard->setComdat(C);
2168 // An inline variable's guard function is run from the per-TU
2169 // initialization function, not via a dedicated global ctor function, so
2170 // we can't put it in a comdat.
2171 if (!NonTemplateInline)
2172 CGF.CurFn->setComdat(C);
2173 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2174 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
// Cache the guard so re-emission of this function body reuses it.
2177 CGM.setStaticLocalDeclGuardAddress(&D, guard);
2180 Address guardAddr = Address(guard, guardAlignment);
2182 // Test whether the variable has completed initialization.
2184 // Itanium C++ ABI 3.3.2:
2185 // The following is pseudo-code showing how these functions can be used:
2186 // if (obj_guard.first_byte == 0) {
2187 // if ( __cxa_guard_acquire (&obj_guard) ) {
2189 // ... initialize the object ...;
2191 // __cxa_guard_abort (&obj_guard);
2194 // ... queue object destructor with __cxa_atexit() ...;
2195 // __cxa_guard_release (&obj_guard);
2199 // Load the first byte of the guard variable.
2200 llvm::LoadInst *LI =
2201 Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2204 // An implementation supporting thread-safety on multiprocessor
2205 // systems must also guarantee that references to the initialized
2206 // object do not occur before the load of the initialization flag.
2208 // In LLVM, we do this by marking the load Acquire.
2210 LI->setAtomic(llvm::AtomicOrdering::Acquire);
2212 // For ARM, we should only check the first bit, rather than the entire byte:
2214 // ARM C++ ABI 3.2.3.1:
2215 // To support the potential use of initialization guard variables
2216 // as semaphores that are the target of ARM SWP and LDREX/STREX
2217 // synchronizing instructions we define a static initialization
2218 // guard variable to be a 4-byte aligned, 4-byte word with the
2219 // following inline access protocol.
2220 // #define INITIALIZED 1
2221 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2222 // if (__cxa_guard_acquire(&obj_guard))
2226 // and similarly for ARM64:
2228 // ARM64 C++ ABI 3.2.2:
2229 // This ABI instead only specifies the value bit 0 of the static guard
2230 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2231 // variable is not initialized and 1 when it is.
2233 (UseARMGuardVarABI && !useInt8GuardVariable)
2234 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2236 llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2238 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2239 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2241 // Check if the first byte of the guard variable is zero.
2242 CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2243 CodeGenFunction::GuardKind::VariableGuard, &D);
2245 CGF.EmitBlock(InitCheckBlock);
2247 // Variables used when coping with thread-safe statics and exceptions.
2249 // Call __cxa_guard_acquire.
2251 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2253 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
// Only run the initializer if __cxa_guard_acquire returned nonzero.
2255 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2256 InitBlock, EndBlock);
2258 // Call __cxa_guard_abort along the exceptional edge.
2259 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2261 CGF.EmitBlock(InitBlock);
2264 // Emit the initializer and add a global destructor if appropriate.
2265 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2268 // Pop the guard-abort cleanup if we pushed one.
2269 CGF.PopCleanupBlock();
2271 // Call __cxa_guard_release. This cannot throw.
2272 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2273 guardAddr.getPointer());
// Non-thread-safe path: mark the guard initialized with a plain store.
2275 Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
2278 CGF.EmitBlock(EndBlock);
2281 /// Register a global destructor using __cxa_atexit.
/// For TLS destructors the runtime entry point is _tlv_atexit on Darwin and
/// __cxa_thread_atexit elsewhere. 'addr' may be null (dtor registered from a
/// constructor function); it is passed back to the destructor unchanged.
2282 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2283 llvm::FunctionCallee dtor,
2284 llvm::Constant *addr, bool TLS) {
2285 assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2286 "__cxa_atexit is disabled");
2287 const char *Name = "__cxa_atexit";
2289 const llvm::Triple &T = CGF.getTarget().getTriple();
2290 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2293 // We're assuming that the destructor function is something we can
2294 // reasonably call with the default CC. Go ahead and cast it to the
2296 llvm::Type *dtorTy =
2297 llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2299 // Preserve address space of addr.
2300 auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2301 auto AddrInt8PtrTy =
2302 AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
2304 // Create a variable that binds the atexit to this shared object.
// __dso_handle identifies this DSO so the runtime can run its dtors at
// dlclose/exit time; it is always hidden.
2305 llvm::Constant *handle =
2306 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2307 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2308 GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2310 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2311 llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
2312 llvm::FunctionType *atexitTy =
2313 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2315 // Fetch the actual function.
2316 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2317 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2318 fn->setDoesNotThrow();
2321 // addr is null when we are trying to register a dtor annotated with
2322 // __attribute__((destructor)) in a constructor function. Using null here is
2323 // okay because this argument is just passed back to the destructor
2325 addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2327 llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
2328 cast<llvm::Constant>(dtor.getCallee()), dtorTy),
2329 llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
2331 CGF.EmitNounwindRuntimeCall(atexit, args);
// For each destructor-priority bucket, synthesize a __GLOBAL_init_<priority>
// function that registers that bucket's destructors (via __cxa_atexit when
// enabled, otherwise atexit), and install it as a global constructor at the
// same priority so registration happens at program start-up.
2334 void CodeGenModule::registerGlobalDtorsWithAtExit() {
2335 for (const auto I : DtorsUsingAtExit) {
2336 int Priority = I.first;
2337 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2339 // Create a function that registers destructors that have the same priority.
2341 // Since constructor functions are run in non-descending order of their
2342 // priorities, destructors are registered in non-descending order of their
2343 // priorities, and since destructor functions are run in the reverse order
2344 // of their registration, destructor functions are run in non-ascending
2345 // order of their priorities.
2346 CodeGenFunction CGF(*this);
2347 std::string GlobalInitFnName =
2348 std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2349 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
2350 llvm::Function *GlobalInitFn = CreateGlobalInitOrDestructFunction(
2351 FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
// Create a matching AST-level FunctionDecl so StartFunction has a
// GlobalDecl to hang the synthesized body on.
2353 ASTContext &Ctx = getContext();
2354 QualType ReturnTy = Ctx.VoidTy;
2355 QualType FunctionTy = Ctx.getFunctionType(ReturnTy, llvm::None, {});
2356 FunctionDecl *FD = FunctionDecl::Create(
2357 Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
2358 &Ctx.Idents.get(GlobalInitFnName), FunctionTy, nullptr, SC_Static,
2360 CGF.StartFunction(GlobalDecl(FD), ReturnTy, GlobalInitFn,
2361 getTypes().arrangeNullaryFunction(), FunctionArgList(),
2362 SourceLocation(), SourceLocation());
2364 for (auto *Dtor : Dtors) {
2365 // Register the destructor function calling __cxa_atexit if it is
2366 // available. Otherwise fall back on calling atexit.
2367 if (getCodeGenOpts().CXAAtExit)
2368 emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2370 CGF.registerGlobalDtorWithAtExit(Dtor);
2373 CGF.FinishFunction();
2374 AddGlobalCtor(GlobalInitFn, Priority, nullptr);
2378 /// Register a global destructor as best as we know how.
/// Chooses between __cxa_atexit/__cxa_thread_atexit, an Apple-kext dtor
/// entry, and a plain atexit registration; does nothing for no_destroy
/// variables.
2379 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2380 llvm::FunctionCallee dtor,
2381 llvm::Constant *addr) {
// [[clang::no_destroy]] variables are never registered for destruction.
2382 if (D.isNoDestroy(CGM.getContext()))
2385 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2386 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2387 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2388 // We can always use __cxa_thread_atexit.
2389 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2390 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2392 // In Apple kexts, we want to add a global destructor entry.
2393 // FIXME: shouldn't this be guarded by some variable?
2394 if (CGM.getLangOpts().AppleKext) {
2395 // Generate a global destructor entry.
2396 return CGM.AddCXXDtorEntry(dtor, addr);
2399 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
// True when all references to thread_local variable VD must go through its
// thread wrapper: dynamic-TLS variables on Darwin targets.
2402 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2403 CodeGen::CodeGenModule &CGM) {
2404 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2405 // Darwin prefers to have references to thread local variables to go through
2406 // the thread wrapper instead of directly referencing the backing variable.
2407 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2408 CGM.getTarget().getTriple().isOSDarwin();
2411 /// Get the appropriate linkage for the wrapper function. This is essentially
2412 /// the weak form of the variable's linkage; every translation unit which needs
2413 /// the wrapper emits a copy, and we want the linker to merge them.
2414 static llvm::GlobalValue::LinkageTypes
2415 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2416 llvm::GlobalValue::LinkageTypes VarLinkage =
2417 CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2419 // For internal linkage variables, we don't need an external or weak wrapper.
2420 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2423 // If the thread wrapper is replaceable, give it appropriate linkage.
// Replaceable wrappers with non-linkonce/weak-ODR variable linkage get
// weak-ODR so other modules can interpose on them.
2424 if (isThreadWrapperReplaceable(VD, CGM))
2425 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2426 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2428 return llvm::GlobalValue::WeakODRLinkage;
// Returns (creating on first use) the thread wrapper function for
// thread_local variable VD. The wrapper returns a pointer to the (possibly
// reference-stripped) variable type; its linkage/visibility are derived from
// the variable, and Darwin replaceable wrappers use the CXX_FAST_TLS CC.
2432 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2434 // Mangle the name for the thread_local wrapper function.
2435 SmallString<256> WrapperName;
2437 llvm::raw_svector_ostream Out(WrapperName);
2438 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2441 // FIXME: If VD is a definition, we should regenerate the function attributes
2442 // before returning.
// Reuse an already-created wrapper for this mangled name.
2443 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2444 return cast<llvm::Function>(V);
2446 QualType RetQT = VD->getType();
2447 if (RetQT->isReferenceType())
2448 RetQT = RetQT.getNonReferenceType();
2450 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2451 getContext().getPointerType(RetQT), FunctionArgList());
2453 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2454 llvm::Function *Wrapper =
2455 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2456 WrapperName.str(), &CGM.getModule());
2458 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper);
2460 if (VD->hasDefinition())
2461 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2463 // Always resolve references to the wrapper at link time.
2464 if (!Wrapper->hasLocalLinkage())
2465 if (!isThreadWrapperReplaceable(VD, CGM) ||
2466 llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2467 llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2468 VD->getVisibility() == HiddenVisibility)
2469 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2471 if (isThreadWrapperReplaceable(VD, CGM)) {
2472 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2473 Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
// Emit the per-TU machinery for thread_local variables: a guarded __tls_init
// function for ordered initializers, per-variable init functions/aliases,
// and the thread wrapper bodies that run initialization and return the
// variable's address (or the referenced object's address for references).
2478 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2479 CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2480 ArrayRef<llvm::Function *> CXXThreadLocalInits,
2481 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2482 llvm::Function *InitFunc = nullptr;
2484 // Separate initializers into those with ordered (or partially-ordered)
2485 // initialization and those with unordered initialization.
// Template instantiations are unordered and looked up per variable later;
// everything else is run from the single ordered __tls_init.
2486 llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2487 llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2488 for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2489 if (isTemplateInstantiation(
2490 CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2491 UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2492 CXXThreadLocalInits[I];
2494 OrderedInits.push_back(CXXThreadLocalInits[I]);
2497 if (!OrderedInits.empty()) {
2498 // Generate a guarded initialization function.
2499 llvm::FunctionType *FTy =
2500 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2501 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2502 InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
// __tls_guard is a thread-local i8 flag guarding the ordered initializers.
2505 llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2506 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2507 llvm::GlobalVariable::InternalLinkage,
2508 llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2509 Guard->setThreadLocal(true);
2511 CharUnits GuardAlign = CharUnits::One();
2512 Guard->setAlignment(GuardAlign.getQuantity());
2514 CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
2515 InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
2516 // On Darwin platforms, use CXX_FAST_TLS calling convention.
2517 if (CGM.getTarget().getTriple().isOSDarwin()) {
2518 InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2519 InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2523 // Emit thread wrappers.
2524 for (const VarDecl *VD : CXXThreadLocals) {
2525 llvm::GlobalVariable *Var =
2526 cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2527 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
2529 // Some targets require that all access to thread local variables go through
2530 // the thread wrapper. This means that we cannot attempt to create a thread
2531 // wrapper or a thread helper.
2532 if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition()) {
2533 Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2537 // Mangle the name for the thread_local initialization function.
2538 SmallString<256> InitFnName;
2540 llvm::raw_svector_ostream Out(InitFnName);
2541 getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2544 llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2546 // If we have a definition for the variable, emit the initialization
2547 // function as an alias to the global Init function (if any). Otherwise,
2548 // produce a declaration of the initialization function.
2549 llvm::GlobalValue *Init = nullptr;
2550 bool InitIsInitFunc = false;
2551 if (VD->hasDefinition()) {
2552 InitIsInitFunc = true;
2553 llvm::Function *InitFuncToUse = InitFunc;
2554 if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2555 InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2557 Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2560 // Emit a weak global function referring to the initialization function.
2561 // This function will not exist if the TU defining the thread_local
2562 // variable in question does not need any dynamic initialization for
2563 // its thread_local variables.
2564 Init = llvm::Function::Create(InitFnTy,
2565 llvm::GlobalVariable::ExternalWeakLinkage,
2566 InitFnName.str(), &CGM.getModule());
2567 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2568 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
2569 cast<llvm::Function>(Init));
2573 Init->setVisibility(Var->getVisibility());
2574 Init->setDSOLocal(Var->isDSOLocal());
// Build the wrapper body: call the init function (directly, or guarded by
// a null check when it is only an extern-weak declaration), then return
// the variable's address.
2577 llvm::LLVMContext &Context = CGM.getModule().getContext();
2578 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2579 CGBuilderTy Builder(CGM, Entry);
2580 if (InitIsInitFunc) {
2582 llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
2583 if (isThreadWrapperReplaceable(VD, CGM)) {
2584 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2585 llvm::Function *Fn =
2586 cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
2587 Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2591 // Don't know whether we have an init function. Call it if it exists.
2592 llvm::Value *Have = Builder.CreateIsNotNull(Init);
2593 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2594 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2595 Builder.CreateCondBr(Have, InitBB, ExitBB);
2597 Builder.SetInsertPoint(InitBB);
2598 Builder.CreateCall(InitFnTy, Init);
2599 Builder.CreateBr(ExitBB);
2601 Builder.SetInsertPoint(ExitBB);
2604 // For a reference, the result of the wrapper function is a pointer to
2605 // the referenced object.
2606 llvm::Value *Val = Var;
2607 if (VD->getType()->isReferenceType()) {
2608 CharUnits Align = CGM.getContext().getDeclAlign(VD);
2609 Val = Builder.CreateAlignedLoad(Val, Align);
2611 if (Val->getType() != Wrapper->getReturnType())
2612 Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2613 Val, Wrapper->getReturnType(), "");
2614 Builder.CreateRet(Val);
// Form an lvalue for thread_local variable VD by calling its thread wrapper
// (which yields the variable's address) rather than referencing the backing
// global directly.
2618 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2620 QualType LValType) {
2621 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2622 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2624 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
// Match the wrapper's CC (may be CXX_FAST_TLS on Darwin).
2625 CallVal->setCallingConv(Wrapper->getCallingConv());
2628 if (VD->getType()->isReferenceType())
2629 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2631 LV = CGF.MakeAddrLValue(CallVal, LValType,
2632 CGF.getContext().getDeclAlign(VD));
2633 // FIXME: need setObjCGCLValueClass?
2637 /// Return whether the given global decl needs a VTT parameter, which it does
2638 /// if it's a base constructor or destructor with virtual bases.
2639 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
// GD must refer to a C++ method; the cast asserts this.
2640 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2642 // We don't have any virtual bases, just return early.
2643 if (!MD->getParent()->getNumVBases())
2646 // Check if we have a base constructor.
2647 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2650 // Check if we have a base destructor.
2651 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
/// ItaniumRTTIBuilder - Builds an Itanium-ABI RTTI descriptor (a
/// std::type_info-derived object, Itanium C++ ABI 2.9.5) for a single type,
/// accumulating the descriptor's constant fields in `Fields`.
2658 class ItaniumRTTIBuilder {
2659 CodeGenModule &CGM; // Per-module state.
2660 llvm::LLVMContext &VMContext;
2661 const ItaniumCXXABI &CXXABI; // Per-module state.
2663 /// Fields - The fields of the RTTI descriptor currently being built.
2664 SmallVector<llvm::Constant *, 16> Fields;
2666 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
2667 llvm::GlobalVariable *
2668 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
2670 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
2671 /// descriptor of the given type.
2672 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
2674 /// BuildVTablePointer - Build the vtable pointer for the given type.
2675 void BuildVTablePointer(const Type *Ty);
2677 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2678 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
2679 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
2681 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2682 /// classes with bases that do not satisfy the abi::__si_class_type_info
2683 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
2684 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
2686 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
2687 /// for pointer types.
2688 void BuildPointerTypeInfo(QualType PointeeTy);
2690 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
2691 /// type_info for an object type.
2692 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
2694 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
2695 /// struct, used for member pointer types.
2696 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
2699 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
2700 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
// Flag values below mirror the masks defined for abi::__pbase_type_info
// and abi::__vmi_class_type_info in Itanium C++ ABI 2.9.5.
2702 // Pointer type info flags.
2704 /// PTI_Const - Type has const qualifier.
2707 /// PTI_Volatile - Type has volatile qualifier.
2710 /// PTI_Restrict - Type has restrict qualifier.
2713 /// PTI_Incomplete - Type is incomplete.
2714 PTI_Incomplete = 0x8,
2716 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
2717 /// (in pointer to member).
2718 PTI_ContainingClassIncomplete = 0x10,
2720 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
2721 //PTI_TransactionSafe = 0x20,
2723 /// PTI_Noexcept - Pointee is noexcept function (C++1z).
2724 PTI_Noexcept = 0x40,
2727 // VMI type info flags.
2729 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
2730 VMI_NonDiamondRepeat = 0x1,
2732 /// VMI_DiamondShaped - Class is diamond shaped.
2733 VMI_DiamondShaped = 0x2
2736 // Base class type info flags.
2738 /// BCTI_Virtual - Base class is virtual.
2741 /// BCTI_Public - Base class is public.
2745 /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
2746 /// link to an existing RTTI descriptor if one already exists.
2747 llvm::Constant *BuildTypeInfo(QualType Ty);
2749 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
2750 llvm::Constant *BuildTypeInfo(
2752 llvm::GlobalVariable::LinkageTypes Linkage,
2753 llvm::GlobalValue::VisibilityTypes Visibility,
2754 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
// Emit (or replace) the global holding the mangled type-name string
// (the _ZTS* symbol) for Ty with the given linkage, and return it.
2758 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
2759 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
2760 SmallString<256> Name;
2761 llvm::raw_svector_ostream Out(Name);
2762 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
2764 // We know that the mangled name of the type starts at index 4 of the
2765 // mangled name of the typename, so we can just index into it in order to
2766 // get the mangled name of the type.
2767 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
// The name is a char array, so it gets char alignment.
2769 auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
2771 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
2772 Name, Init->getType(), Linkage, Align.getQuantity());
2774 GV->setInitializer(Init);
// Return a reference to the type_info object for Ty on the assumption that
// it is (or will be) emitted in some other translation unit: emit only an
// external declaration here, never a definition.
2780 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
2781 // Mangle the RTTI name.
2782 SmallString<256> Name;
2783 llvm::raw_svector_ostream Out(Name);
2784 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2786 // Look for an existing global.
2787 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
2790 // Create a new global variable.
2791 // Note for the future: If we would ever like to do deferred emission of
2792 // RTTI, check if emitting vtables opportunistically need any adjustment.
2794 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
2795 /*isConstant=*/true,
2796 llvm::GlobalValue::ExternalLinkage, nullptr,
// Propagate visibility/dllimport/dso_local from the record decl, if any.
2798 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
2799 CGM.setGVProperties(GV, RD);
2802 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
2805 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
2806 /// info for that type is defined in the standard library.
2807 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
2808 // Itanium C++ ABI 2.9.2:
2809 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
2810 // the run-time support library. Specifically, the run-time support
2811 // library should contain type_info objects for the types X, X* and
2812 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
2813 // unsigned char, signed char, short, unsigned short, int, unsigned int,
2814 // long, unsigned long, long long, unsigned long long, float, double,
2815 // long double, char16_t, char32_t, and the IEEE 754r decimal and
2816 // half-precision floating point types.
2818 // GCC also emits RTTI for __int128.
2819 // FIXME: We do not emit RTTI information for decimal types here.
2821 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
2822 switch (Ty->getKind()) {
// Fundamental types whose type_info lives in the C++ runtime library.
2823 case BuiltinType::Void:
2824 case BuiltinType::NullPtr:
2825 case BuiltinType::Bool:
2826 case BuiltinType::WChar_S:
2827 case BuiltinType::WChar_U:
2828 case BuiltinType::Char_U:
2829 case BuiltinType::Char_S:
2830 case BuiltinType::UChar:
2831 case BuiltinType::SChar:
2832 case BuiltinType::Short:
2833 case BuiltinType::UShort:
2834 case BuiltinType::Int:
2835 case BuiltinType::UInt:
2836 case BuiltinType::Long:
2837 case BuiltinType::ULong:
2838 case BuiltinType::LongLong:
2839 case BuiltinType::ULongLong:
2840 case BuiltinType::Half:
2841 case BuiltinType::Float:
2842 case BuiltinType::Double:
2843 case BuiltinType::LongDouble:
2844 case BuiltinType::Float16:
2845 case BuiltinType::Float128:
2846 case BuiltinType::Char8:
2847 case BuiltinType::Char16:
2848 case BuiltinType::Char32:
2849 case BuiltinType::Int128:
2850 case BuiltinType::UInt128:
// OpenCL and fixed-point builtin types: the runtime library does not
// provide type_info for these.
2853 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2854 case BuiltinType::Id:
2855 #include "clang/Basic/OpenCLImageTypes.def"
2856 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2857 case BuiltinType::Id:
2858 #include "clang/Basic/OpenCLExtensionTypes.def"
2859 case BuiltinType::OCLSampler:
2860 case BuiltinType::OCLEvent:
2861 case BuiltinType::OCLClkEvent:
2862 case BuiltinType::OCLQueue:
2863 case BuiltinType::OCLReserveID:
2864 case BuiltinType::ShortAccum:
2865 case BuiltinType::Accum:
2866 case BuiltinType::LongAccum:
2867 case BuiltinType::UShortAccum:
2868 case BuiltinType::UAccum:
2869 case BuiltinType::ULongAccum:
2870 case BuiltinType::ShortFract:
2871 case BuiltinType::Fract:
2872 case BuiltinType::LongFract:
2873 case BuiltinType::UShortFract:
2874 case BuiltinType::UFract:
2875 case BuiltinType::ULongFract:
2876 case BuiltinType::SatShortAccum:
2877 case BuiltinType::SatAccum:
2878 case BuiltinType::SatLongAccum:
2879 case BuiltinType::SatUShortAccum:
2880 case BuiltinType::SatUAccum:
2881 case BuiltinType::SatULongAccum:
2882 case BuiltinType::SatShortFract:
2883 case BuiltinType::SatFract:
2884 case BuiltinType::SatLongFract:
2885 case BuiltinType::SatUShortFract:
2886 case BuiltinType::SatUFract:
2887 case BuiltinType::SatULongFract:
// Dependent and placeholder types must have been resolved before RTTI
// emission; reaching them here is a frontend bug.
2890 case BuiltinType::Dependent:
2891 #define BUILTIN_TYPE(Id, SingletonId)
2892 #define PLACEHOLDER_TYPE(Id, SingletonId) \
2893 case BuiltinType::Id:
2894 #include "clang/AST/BuiltinTypes.def"
2895 llvm_unreachable("asking for RRTI for a placeholder type!");
2897 case BuiltinType::ObjCId:
2898 case BuiltinType::ObjCClass:
2899 case BuiltinType::ObjCSel:
2900 llvm_unreachable("FIXME: Objective-C types are unsupported!");
2903 llvm_unreachable("Invalid BuiltinType Kind!");
/// TypeInfoIsInStandardLibrary - Returns whether the type info for the given
/// pointer type is defined in the standard library. Per Itanium C++ ABI
/// 2.9.2, the runtime provides type_info for X* and X const* for each
/// fundamental type X, so this checks the pointee and its qualifiers.
2906 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
2907 QualType PointeeTy = PointerTy->getPointeeType();
2908 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
2912 // Check the qualifiers.
2913 Qualifiers Quals = PointeeTy.getQualifiers();
// const on the pointee is allowed (X const* is in the library); strip it
// before checking that no other qualifiers remain.
2914 Quals.removeConst();
2919 return TypeInfoIsInStandardLibrary(BuiltinTy);
2922 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
2923 /// information for the given type exists in the standard library.
///
/// Dispatches to the builtin-type and pointer-type overloads above.
2924 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
2925 // Type info for builtin types is defined in the standard library.
2926 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
2927 return TypeInfoIsInStandardLibrary(BuiltinTy);
2929 // Type info for some pointer types to builtin types is defined in the
2930 // standard library.
2931 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2932 return TypeInfoIsInStandardLibrary(PointerTy);
2937 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
2938 /// the given type exists somewhere else, and that we should not emit the type
2939 /// information in this translation unit. Assumes that it is not a
2940 /// standard-library type.
2941 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
2943 ASTContext &Context = CGM.getContext();
2945 // If RTTI is disabled, assume it might be disabled in the
2946 // translation unit that defines any potential key function, too.
2947 if (!Context.getLangOpts().RTTI) return false;
2949 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2950 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2951 if (!RD->hasDefinition())
2954 if (!RD->isDynamicClass())
2957 // FIXME: this may need to be reconsidered if the key function
2959 // N.B. We must always emit the RTTI data ourselves if there exists a key
2961 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
2963 // Don't import the RTTI but emit it locally.
2964 if (CGM.getTriple().isWindowsGNUEnvironment())
// Defer to the TU that emits the vtable: if the vtable is external, the
// RTTI descriptor normally is too (modulo dllimport special cases).
2967 if (CGM.getVTables().isVTableExternal(RD))
2968 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
2979 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
///
/// A record is complete only when a full definition has been seen.
2980 static bool IsIncompleteClassType(const RecordType *RecordTy) {
2981 return !RecordTy->getDecl()->isCompleteDefinition();
2984 /// ContainsIncompleteClassType - Returns whether the given type contains an
2985 /// incomplete class type. This is true if
2987 /// * The given type is an incomplete class type.
2988 /// * The given type is a pointer type whose pointee type contains an
2989 /// incomplete class type.
2990 /// * The given type is a member pointer type whose class is an incomplete
2992 /// * The given type is a member pointer type whose pointee type contains an
2993 /// incomplete class type.
2994 /// is an indirect or direct pointer to an incomplete class type.
2995 static bool ContainsIncompleteClassType(QualType Ty) {
2996 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2997 if (IsIncompleteClassType(RecordTy))
// Recurse through pointers: T** to an incomplete T also counts.
3001 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3002 return ContainsIncompleteClassType(PointerTy->getPointeeType());
3004 if (const MemberPointerType *MemberPointerTy =
3005 dyn_cast<MemberPointerType>(Ty)) {
3006 // Check if the class type is incomplete.
3007 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3008 if (IsIncompleteClassType(ClassType))
3011 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3017 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3018 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3019 // iff the base is)", according to Itanium C++ ABI, 2.9.5p6b. Such classes
// can use the compact abi::__si_class_type_info form.
3020 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3021 // Check the number of bases.
3022 if (RD->getNumBases() != 1)
3026 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3028 // Check that the base is not virtual.
3029 if (Base->isVirtual())
3032 // Check that the base is public.
3033 if (Base->getAccessSpecifier() != AS_public)
3036 // Check that the class is dynamic iff the base is.
// (An empty base is always at offset zero, so the dynamic-ness check is
// only needed for non-empty bases.)
3037 const CXXRecordDecl *BaseDecl =
3038 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
3039 if (!BaseDecl->isEmpty() &&
3040 BaseDecl->isDynamicClass() != RD->isDynamicClass())
// Pick the vtable of the __cxxabiv1 type_info class that matches Ty's type
// class and push its address point as the first field of the descriptor
// being built (the type_info object's own vptr).
3046 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3047 // abi::__class_type_info.
3048 static const char * const ClassTypeInfo =
3049 "_ZTVN10__cxxabiv117__class_type_infoE";
3050 // abi::__si_class_type_info.
3051 static const char * const SIClassTypeInfo =
3052 "_ZTVN10__cxxabiv120__si_class_type_infoE";
3053 // abi::__vmi_class_type_info.
3054 static const char * const VMIClassTypeInfo =
3055 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3057 const char *VTableName = nullptr;
3059 switch (Ty->getTypeClass()) {
3060 #define TYPE(Class, Base)
3061 #define ABSTRACT_TYPE(Class, Base)
3062 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3063 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3064 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3065 #include "clang/AST/TypeNodes.def"
3066 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3068 case Type::LValueReference:
3069 case Type::RValueReference:
3070 llvm_unreachable("References shouldn't get here");
3073 case Type::DeducedTemplateSpecialization:
3074 llvm_unreachable("Undeduced type shouldn't get here");
3077 llvm_unreachable("Pipe types shouldn't get here");
3080 // GCC treats vector and complex types as fundamental types.
3082 case Type::ExtVector:
3085 // FIXME: GCC treats block pointers as fundamental types?!
3086 case Type::BlockPointer:
3087 // abi::__fundamental_type_info.
3088 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3091 case Type::ConstantArray:
3092 case Type::IncompleteArray:
3093 case Type::VariableArray:
3094 // abi::__array_type_info.
3095 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3098 case Type::FunctionNoProto:
3099 case Type::FunctionProto:
3100 // abi::__function_type_info.
3101 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3105 // abi::__enum_type_info.
3106 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3109 case Type::Record: {
// Classes pick the plain/single-inheritance/multiple-inheritance form
// based on their base-class structure.
3110 const CXXRecordDecl *RD =
3111 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3113 if (!RD->hasDefinition() || !RD->getNumBases()) {
3114 VTableName = ClassTypeInfo;
3115 } else if (CanUseSingleInheritance(RD)) {
3116 VTableName = SIClassTypeInfo;
3118 VTableName = VMIClassTypeInfo;
3124 case Type::ObjCObject:
3125 // Ignore protocol qualifiers.
3126 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3128 // Handle id and Class.
3129 if (isa<BuiltinType>(Ty)) {
3130 VTableName = ClassTypeInfo;
3134 assert(isa<ObjCInterfaceType>(Ty));
3137 case Type::ObjCInterface:
3138 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3139 VTableName = SIClassTypeInfo;
3141 VTableName = ClassTypeInfo;
3145 case Type::ObjCObjectPointer:
3147 // abi::__pointer_type_info.
3148 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3151 case Type::MemberPointer:
3152 // abi::__pointer_to_member_type_info.
3153 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3157 llvm::Constant *VTable =
3158 CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
3159 CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3161 llvm::Type *PtrDiffTy =
3162 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3164 // The vtable address point is 2.
// (Per the Itanium ABI vtable layout, the address point follows the
// offset-to-top and RTTI entries, hence the offset of two slots.)
3165 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3167 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
3168 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3170 Fields.push_back(VTable);
3173 /// Return the linkage that the type info and type info name constants
3174 /// should have for the given type.
3175 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3177 // Itanium C++ ABI 2.9.5p7:
3178 // In addition, it and all of the intermediate abi::__pointer_type_info
3179 // structs in the chain down to the abi::__class_type_info for the
3180 // incomplete class type must be prevented from resolving to the
3181 // corresponding type_info structs for the complete class type, possibly
3182 // by making them local static objects. Finally, a dummy class RTTI is
3183 // generated for the incomplete type that will not resolve to the final
3184 // complete class RTTI (because the latter need not exist), possibly by
3185 // making it a local static object.
3186 if (ContainsIncompleteClassType(Ty))
3187 return llvm::GlobalValue::InternalLinkage;
// Otherwise derive the linkage from the language-level linkage of the type.
3189 switch (Ty->getLinkage()) {
3191 case InternalLinkage:
3192 case UniqueExternalLinkage:
3193 return llvm::GlobalValue::InternalLinkage;
3195 case VisibleNoLinkage:
3196 case ModuleInternalLinkage:
3198 case ExternalLinkage:
3199 // RTTI is not enabled, which means that this type info struct is going
3200 // to be used for exception handling. Give it linkonce_odr linkage.
3201 if (!CGM.getLangOpts().RTTI)
3202 return llvm::GlobalValue::LinkOnceODRLinkage;
3204 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3205 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3206 if (RD->hasAttr<WeakAttr>())
3207 return llvm::GlobalValue::WeakODRLinkage;
3208 if (CGM.getTriple().isWindowsItaniumEnvironment())
3209 if (RD->hasAttr<DLLImportAttr>() &&
3210 ShouldUseExternalRTTIDescriptor(CGM, Ty))
3211 return llvm::GlobalValue::ExternalLinkage;
3212 // MinGW always uses LinkOnceODRLinkage for type info.
3213 if (RD->isDynamicClass() &&
3217 .isWindowsGNUEnvironment())
// Dynamic classes share the linkage of their vtable.
3218 return CGM.getVTableLinkage(RD);
3221 return llvm::GlobalValue::LinkOnceODRLinkage;
3224 llvm_unreachable("Invalid linkage!");
// Entry point: compute linkage/visibility/DLL storage class for Ty's RTTI,
// reuse an existing or external descriptor when possible, and otherwise
// delegate to the four-argument overload to actually emit it.
3227 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3228 // We want to operate on the canonical type.
3229 Ty = Ty.getCanonicalType();
3231 // Check if we've already emitted an RTTI descriptor for this type.
3232 SmallString<256> Name;
3233 llvm::raw_svector_ostream Out(Name);
3234 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3236 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3237 if (OldGV && !OldGV->isDeclaration()) {
3238 assert(!OldGV->hasAvailableExternallyLinkage() &&
3239 "available_externally typeinfos not yet implemented");
3241 return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3244 // Check if there is already an external RTTI descriptor for this type.
3245 if (IsStandardLibraryRTTIDescriptor(Ty) ||
3246 ShouldUseExternalRTTIDescriptor(CGM, Ty))
3247 return GetAddrOfExternalRTTIDescriptor(Ty);
3249 // Emit the standard library with external linkage.
3250 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3252 // Give the type_info object and name the formal visibility of the
3254 llvm::GlobalValue::VisibilityTypes llvmVisibility;
3255 if (llvm::GlobalValue::isLocalLinkage(Linkage))
3256 // If the linkage is local, only default visibility makes sense.
3257 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3258 else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3259 ItaniumCXXABI::RUK_NonUniqueHidden)
3260 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3262 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3264 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3265 llvm::GlobalValue::DefaultStorageClass;
// On Windows Itanium, a dllexported class exports its RTTI too.
3266 if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3267 auto RD = Ty->getAsCXXRecordDecl();
3268 if (RD && RD->hasAttr<DLLExportAttr>())
3269 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3272 return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
// Emit the type_info object itself: its vptr, its name pointer, and any
// per-kind extra fields, as a global with the requested linkage,
// visibility, and DLL storage class. Returns the descriptor as an i8*.
3275 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3277 llvm::GlobalVariable::LinkageTypes Linkage,
3278 llvm::GlobalValue::VisibilityTypes Visibility,
3279 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3280 // Add the vtable pointer.
3281 BuildVTablePointer(cast<Type>(Ty));
3284 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3285 llvm::Constant *TypeNameField;
3287 // If we're supposed to demote the visibility, be sure to set a flag
3288 // to use a string comparison for type_info comparisons.
3289 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3290 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3291 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3292 // The flag is the sign bit, which on ARM64 is defined to be clear
3293 // for global pointers. This is very ARM64-specific.
3294 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3295 llvm::Constant *flag =
3296 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3297 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3299 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3301 TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3303 Fields.push_back(TypeNameField);
// Add any fields beyond the base std::type_info layout, depending on the
// kind of type_info class selected in BuildVTablePointer.
3305 switch (Ty->getTypeClass()) {
3306 #define TYPE(Class, Base)
3307 #define ABSTRACT_TYPE(Class, Base)
3308 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3309 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3310 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3311 #include "clang/AST/TypeNodes.def"
3312 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3314 // GCC treats vector types as fundamental types.
3317 case Type::ExtVector:
3319 case Type::BlockPointer:
3320 // Itanium C++ ABI 2.9.5p4:
3321 // abi::__fundamental_type_info adds no data members to std::type_info.
3324 case Type::LValueReference:
3325 case Type::RValueReference:
3326 llvm_unreachable("References shouldn't get here");
3329 case Type::DeducedTemplateSpecialization:
3330 llvm_unreachable("Undeduced type shouldn't get here");
3333 llvm_unreachable("Pipe type shouldn't get here");
3335 case Type::ConstantArray:
3336 case Type::IncompleteArray:
3337 case Type::VariableArray:
3338 // Itanium C++ ABI 2.9.5p5:
3339 // abi::__array_type_info adds no data members to std::type_info.
3342 case Type::FunctionNoProto:
3343 case Type::FunctionProto:
3344 // Itanium C++ ABI 2.9.5p5:
3345 // abi::__function_type_info adds no data members to std::type_info.
3349 // Itanium C++ ABI 2.9.5p5:
3350 // abi::__enum_type_info adds no data members to std::type_info.
3353 case Type::Record: {
3354 const CXXRecordDecl *RD =
3355 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3356 if (!RD->hasDefinition() || !RD->getNumBases()) {
3357 // We don't need to emit any fields.
3361 if (CanUseSingleInheritance(RD))
3362 BuildSIClassTypeInfo(RD);
3364 BuildVMIClassTypeInfo(RD);
3369 case Type::ObjCObject:
3370 case Type::ObjCInterface:
3371 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3374 case Type::ObjCObjectPointer:
3375 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3379 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3382 case Type::MemberPointer:
3383 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3387 // No fields, at least for the moment.
3391 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3393 SmallString<256> Name;
3394 llvm::raw_svector_ostream Out(Name);
3395 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3396 llvm::Module &M = CGM.getModule();
3397 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3398 llvm::GlobalVariable *GV =
3399 new llvm::GlobalVariable(M, Init->getType(),
3400 /*isConstant=*/true, Linkage, Init, Name);
3402 // If there's already an old global variable, replace it with the new one.
3404 GV->takeName(OldGV);
3405 llvm::Constant *NewPtr =
3406 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3407 OldGV->replaceAllUsesWith(NewPtr);
3408 OldGV->eraseFromParent();
// Weak symbols get a COMDAT so duplicate definitions across TUs fold.
3411 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3412 GV->setComdat(M.getOrInsertComdat(GV->getName()));
3415 CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
3416 GV->setAlignment(Align.getQuantity());
3418 // The Itanium ABI specifies that type_info objects must be globally
3419 // unique, with one exception: if the type is an incomplete class
3420 // type or a (possibly indirect) pointer to one. That exception
3421 // affects the general case of comparing type_info objects produced
3422 // by the typeid operator, which is why the comparison operators on
3423 // std::type_info generally use the type_info name pointers instead
3424 // of the object addresses. However, the language's built-in uses
3425 // of RTTI generally require class types to be complete, even when
3426 // manipulating pointers to those class types. This allows the
3427 // implementation of dynamic_cast to rely on address equality tests,
3428 // which is much faster.
3430 // All of this is to say that it's important that both the type_info
3431 // object and the type_info name be uniqued when weakly emitted.
3433 TypeName->setVisibility(Visibility);
3434 CGM.setDSOLocal(TypeName);
3436 GV->setVisibility(Visibility);
3437 CGM.setDSOLocal(GV);
3439 TypeName->setDLLStorageClass(DLLStorageClass);
3440 GV->setDLLStorageClass(DLLStorageClass);
3442 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3443 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3445 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3448 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3449 /// for the given Objective-C object type.
///
/// id/Class and root classes use plain __class_type_info (no extra fields);
/// a class with a superclass is treated as single inheritance and gets a
/// pointer to the superclass's type_info appended.
3450 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3452 const Type *T = OT->getBaseType().getTypePtr();
3453 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3455 // The builtin types are abi::__class_type_infos and don't require
3457 if (isa<BuiltinType>(T)) return;
3459 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3460 ObjCInterfaceDecl *Super = Class->getSuperClass();
3462 // Root classes are also __class_type_info.
3465 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3467 // Everything else is single inheritance.
3468 llvm::Constant *BaseTypeInfo =
3469 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3470 Fields.push_back(BaseTypeInfo);
3473 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3474 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
///
/// Precondition: CanUseSingleInheritance(RD) — a single, public,
/// non-virtual base at offset zero.
3475 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3476 // Itanium C++ ABI 2.9.5p6b:
3477 // It adds to abi::__class_type_info a single member pointing to the
3478 // type_info structure for the base type,
3479 llvm::Constant *BaseTypeInfo =
3480 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3481 Fields.push_back(BaseTypeInfo);
3485 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3486 /// a class hierarchy. Used by ComputeVMIClassTypeInfoFlags to detect
/// repeated (diamond and non-diamond) inheritance.
3488 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3489 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3493 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3494 /// abi::__vmi_class_type_info.
///
/// Recursively walks the hierarchy rooted at \p Base, recording every base
/// in \p Bases and OR-ing in VMI_DiamondShaped / VMI_NonDiamondRepeat flags
/// when a base is encountered more than once.
3496 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3501 const CXXRecordDecl *BaseDecl =
3502 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
3504 if (Base->isVirtual()) {
3505 // Mark the virtual base as seen.
3506 if (!Bases.VirtualBases.insert(BaseDecl).second) {
3507 // If this virtual base has been seen before, then the class is diamond
3509 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
// Seen before as a non-virtual base: repeated, but not diamond.
3511 if (Bases.NonVirtualBases.count(BaseDecl))
3512 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3515 // Mark the non-virtual base as seen.
3516 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3517 // If this non-virtual base has been seen before, then the class has non-
3518 // diamond shaped repeated inheritance.
3519 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3521 if (Bases.VirtualBases.count(BaseDecl))
3522 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
// Recurse into the base's own bases; flags cover indirect bases too.
3527 for (const auto &I : BaseDecl->bases())
3528 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
// Top-level driver: compute the __vmi_class_type_info flags for RD by
// walking all of its (direct and indirect) bases with a fresh SeenBases.
3533 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3538 for (const auto &I : RD->bases())
3539 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3544 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3545 /// classes with bases that do not satisfy the abi::__si_class_type_info
3546 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
///
/// Appends __flags, __base_count, and one __base_class_type_info entry per
/// direct proper base to the descriptor's fields.
3547 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3548 llvm::Type *UnsignedIntLTy =
3549 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3551 // Itanium C++ ABI 2.9.5p6c:
3552 // __flags is a word with flags describing details about the class
3553 // structure, which may be referenced by using the __flags_masks
3554 // enumeration. These flags refer to both direct and indirect bases.
3555 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3556 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3558 // Itanium C++ ABI 2.9.5p6c:
3559 // __base_count is a word with the number of direct proper base class
3560 // descriptions that follow.
3561 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3563 if (!RD->getNumBases())
3566 // Now add the base class descriptions.
3568 // Itanium C++ ABI 2.9.5p6c:
3569 // __base_info[] is an array of base class descriptions -- one for every
3570 // direct proper base. Each description is of the type:
3572 // struct abi::__base_class_type_info {
3574 // const __class_type_info *__base_type;
3575 // long __offset_flags;
3577 // enum __offset_flags_masks {
3578 // __virtual_mask = 0x1,
3579 // __public_mask = 0x2,
3580 // __offset_shift = 8
3584 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
3585 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
3587 // FIXME: Consider updating libc++abi to match, and extend this logic to all
3589 QualType OffsetFlagsTy = CGM.getContext().LongTy;
3590 const TargetInfo &TI = CGM.getContext().getTargetInfo();
3591 if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
3592 OffsetFlagsTy = CGM.getContext().LongLongTy;
3593 llvm::Type *OffsetFlagsLTy =
3594 CGM.getTypes().ConvertType(OffsetFlagsTy);
3596 for (const auto &Base : RD->bases()) {
3597 // The __base_type member points to the RTTI for the base type.
3598 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3600 const CXXRecordDecl *BaseDecl =
3601 cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
3603 int64_t OffsetFlags = 0;
3605 // All but the lower 8 bits of __offset_flags are a signed offset.
3606 // For a non-virtual base, this is the offset in the object of the base
3607 // subobject. For a virtual base, this is the offset in the virtual table of
3608 // the virtual base offset for the virtual base referenced (negative).
3610 if (Base.isVirtual())
3612 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3614 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3615 Offset = Layout.getBaseClassOffset(BaseDecl);
3618 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3620 // The low-order byte of __offset_flags contains flags, as given by the
3621 // masks from the enumeration __offset_flags_masks.
3622 if (Base.isVirtual())
3623 OffsetFlags |= BCTI_Virtual;
3624 if (Base.getAccessSpecifier() == AS_public)
3625 OffsetFlags |= BCTI_Public;
3627 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
3631 /// Compute the flags for a __pbase_type_info, and remove the corresponding
3632 /// pieces from \p Type.
// Shared by BuildPointerTypeInfo and BuildPointerToMemberTypeInfo: the
// returned flags describe cv-qualification (and noexcept-ness) of the
// pointee, and \p Type is stripped of those pieces so that the __pointee
// field can refer to the bare type.
3633 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
// NOTE(review): the declaration/initialization of 'Flags' (and the final
// 'return Flags;') are not visible in this listing — lines appear elided.
3636 if (Type.isConstQualified())
3637 Flags |= ItaniumRTTIBuilder::PTI_Const;
3638 if (Type.isVolatileQualified())
3639 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3640 if (Type.isRestrictQualified())
3641 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
// Drop the qualifiers just encoded so __pointee names the unqualified type.
3642 Type = Type.getUnqualifiedType();
3644 // Itanium C++ ABI 2.9.5p7:
3645 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
3646 // incomplete class type, the incomplete target type flag is set.
3647 if (ContainsIncompleteClassType(Type))
3648 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
// A noexcept function type gets PTI_Noexcept, and the exception spec is
// removed from \p Type so the pointee type_info is for the plain signature.
3650 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
3651 if (Proto->isNothrow()) {
3652 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
3653 Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
3660 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
3661 /// used for pointer types.
3662 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
3663 // Itanium C++ ABI 2.9.5p7:
3664 // __flags is a flag word describing the cv-qualification and other
3665 // attributes of the type pointed to
// Note: extractPBaseFlags also strips the encoded qualifiers from PointeeTy
// so the __pointee field below refers to the unqualified type.
3666 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
// Emit __flags as an 'unsigned int' field.
3668 llvm::Type *UnsignedIntLTy =
3669 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3670 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3672 // Itanium C++ ABI 2.9.5p7:
3673 // __pointee is a pointer to the std::type_info derivation for the
3674 // unqualified type being pointed to.
3675 llvm::Constant *PointeeTypeInfo =
3676 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3677 Fields.push_back(PointeeTypeInfo);
3680 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3681 /// struct, used for member pointer types.
// NOTE(review): the return-type line of this definition is not visible in
// this listing (the signature below starts mid-declaration).
3683 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
3684 QualType PointeeTy = Ty->getPointeeType();
3686 // Itanium C++ ABI 2.9.5p7:
3687 // __flags is a flag word describing the cv-qualification and other
3688 // attributes of the type pointed to.
3689 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
// Member pointers additionally record whether the containing class itself
// is incomplete.
3691 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
3692 if (IsIncompleteClassType(ClassType))
3693 Flags |= PTI_ContainingClassIncomplete;
3695 llvm::Type *UnsignedIntLTy =
3696 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3697 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3699 // Itanium C++ ABI 2.9.5p7:
3700 // __pointee is a pointer to the std::type_info derivation for the
3701 // unqualified type being pointed to.
3702 llvm::Constant *PointeeTypeInfo =
3703 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3704 Fields.push_back(PointeeTypeInfo);
3706 // Itanium C++ ABI 2.9.5p9:
3707 // __context is a pointer to an abi::__class_type_info corresponding to the
3708 // class type containing the member pointed to
3709 // (e.g., the "A" in "int A::*").
// NOTE(review): the 'Fields.push_back(' line that opens this call is not
// visible in this listing — the trailing ')' below pairs with it.
3711 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
// Return (building on demand) the RTTI descriptor constant for \p Ty.
3714 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
3715 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
// Emit the type_info objects for every fundamental type and, for each one,
// its pointer and pointer-to-const variants. Visibility and DLL storage
// class are taken from \p RD.
3718 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
3719 // Types added here must also be added to TypeInfoIsInStandardLibrary.
3720 QualType FundamentalTypes[] = {
3721 getContext().VoidTy, getContext().NullPtrTy,
3722 getContext().BoolTy, getContext().WCharTy,
3723 getContext().CharTy, getContext().UnsignedCharTy,
3724 getContext().SignedCharTy, getContext().ShortTy,
3725 getContext().UnsignedShortTy, getContext().IntTy,
3726 getContext().UnsignedIntTy, getContext().LongTy,
3727 getContext().UnsignedLongTy, getContext().LongLongTy,
3728 getContext().UnsignedLongLongTy, getContext().Int128Ty,
3729 getContext().UnsignedInt128Ty, getContext().HalfTy,
3730 getContext().FloatTy, getContext().DoubleTy,
3731 getContext().LongDoubleTy, getContext().Float128Ty,
3732 getContext().Char8Ty, getContext().Char16Ty,
3733 getContext().Char32Ty
// dllexport the descriptors only when RD itself carries DLLExportAttr.
3735 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3736 RD->hasAttr<DLLExportAttr>()
3737 ? llvm::GlobalValue::DLLExportStorageClass
3738 : llvm::GlobalValue::DefaultStorageClass;
3739 llvm::GlobalValue::VisibilityTypes Visibility =
3740 CodeGenModule::GetLLVMVisibility(RD->getVisibility());
// For each fundamental type T, emit type_info for T, T*, and const T*.
3741 for (const QualType &FundamentalType : FundamentalTypes) {
3742 QualType PointerType = getContext().getPointerType(FundamentalType);
3743 QualType PointerTypeConst = getContext().getPointerType(
3744 FundamentalType.withConst());
3745 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
3746 ItaniumRTTIBuilder(*this).BuildTypeInfo(
3747 Type, llvm::GlobalValue::ExternalLinkage,
3748 Visibility, DLLStorageClass);
3752 /// What sort of uniqueness rules should we use for the RTTI for the
// NOTE(review): this doc comment appears truncated in the listing.
3754 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
3755 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
// NOTE(review): the 'return RUK_Unique;' statements for the early-out
// branches below are not visible in this listing (lines elided).
3756 if (shouldRTTIBeUnique())
3759 // It's only necessary for linkonce_odr or weak_odr linkage.
3760 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
3761 Linkage != llvm::GlobalValue::WeakODRLinkage)
3764 // It's only necessary with default visibility.
3765 if (CanTy->getVisibility() != DefaultVisibility)
3768 // If we're not required to publish this symbol, hide it.
3769 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3770 return RUK_NonUniqueHidden;
3772 // If we're required to publish this symbol, as we might be under an
3773 // explicit instantiation, leave it with default visibility but
3774 // enable string-comparisons.
3775 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
3776 return RUK_NonUniqueVisible;
3779 // Find out how to codegen the complete destructor and constructor
// Emit: generate an independent definition; RAUW: emit the base structor
// and replace uses of the complete one with it; Alias: emit a true alias;
// COMDAT: emit into a shared comdat group (ELF/wasm only, see below).
3781 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
3783 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
3784 const CXXMethodDecl *MD) {
// Folding complete structors onto base structors is an opt-in optimization.
3785 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
3786 return StructorCodegen::Emit;
3788 // The complete and base structors are not equivalent if there are any virtual
3789 // bases, so emit separate functions.
3790 if (MD->getParent()->getNumVBases())
3791 return StructorCodegen::Emit;
3793 GlobalDecl AliasDecl;
3794 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
3795 AliasDecl = GlobalDecl(DD, Dtor_Complete);
// NOTE(review): the '} else {' between the dtor and ctor branches is not
// visible in this listing (line elided).
3797 const auto *CD = cast<CXXConstructorDecl>(MD);
3798 AliasDecl = GlobalDecl(CD, Ctor_Complete);
3800 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3802 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
3803 return StructorCodegen::RAUW;
3805 // FIXME: Should we allow available_externally aliases?
3806 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
3807 return StructorCodegen::RAUW;
3809 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
3810 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
3811 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
3812 CGM.getTarget().getTriple().isOSBinFormatWasm())
3813 return StructorCodegen::COMDAT;
3814 return StructorCodegen::Emit;
3817 return StructorCodegen::Alias;
// Emit \p AliasDecl as an llvm::GlobalAlias pointing at \p TargetDecl,
// replacing any pre-existing declaration with the same mangled name.
3820 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
3821 GlobalDecl AliasDecl,
3822 GlobalDecl TargetDecl) {
3823 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3825 StringRef MangledName = CGM.getMangledName(AliasDecl);
3826 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
// If a definition already exists under this name there is nothing to do.
// NOTE(review): the 'return;' for this early exit is not visible in this
// listing (line elided).
3827 if (Entry && !Entry->isDeclaration())
3830 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
3832 // Create the alias with no name.
3833 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
3835 // Constructors and destructors are always unnamed_addr.
3836 Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3838 // Switch any previous uses to the alias.
// NOTE(review): an 'if (Entry) {' guard around this replacement (and its
// matching '} else {' before the setName below) is not visible in this
// listing; the assert only makes sense when Entry is non-null.
3840 assert(Entry->getType() == Aliasee->getType() &&
3841 "declaration exists with different type")
3842 Alias->takeName(Entry);
3843 Entry->replaceAllUsesWith(Alias);
3844 Entry->eraseFromParent();
3846 Alias->setName(MangledName);
3849 // Finally, set up the alias with its proper name and attributes.
3850 CGM.SetCommonAttributes(AliasDecl, Alias);
// Emit the IR for one constructor/destructor variant of GD, folding the
// complete variant onto the base variant (alias/RAUW/comdat) when
// getCodegenToUse says that is legal and enabled.
3853 void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
3854 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
3855 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
// Exactly one of CD/DD is non-null: GD is either a ctor or a dtor.
3856 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
3858 StructorCodegen CGType = getCodegenToUse(CGM, MD);
// For the complete variant, try to avoid emitting a second body at all.
3860 if (CD ? GD.getCtorType() == Ctor_Complete
3861 : GD.getDtorType() == Dtor_Complete) {
3862 GlobalDecl BaseDecl;
// NOTE(review): the 'if (CD)' / 'else' wrappers around these two
// assignments are not visible in this listing (lines elided).
3864 BaseDecl = GD.getWithCtorType(Ctor_Base);
3866 BaseDecl = GD.getWithDtorType(Dtor_Base);
3868 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
3869 emitConstructorDestructorAlias(CGM, GD, BaseDecl);
// NOTE(review): a 'return;' closing this branch is not visible here.
3873 if (CGType == StructorCodegen::RAUW) {
3874 StringRef MangledName = CGM.getMangledName(GD);
3875 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
3876 CGM.addReplacement(MangledName, Aliasee);
3881 // The base destructor is equivalent to the base destructor of its
3882 // base class if there is exactly one non-virtual base class with a
3883 // non-trivial destructor, there are no fields with a non-trivial
3884 // destructor, and the body of the destructor is trivial.
3885 if (DD && GD.getDtorType() == Dtor_Base &&
3886 CGType != StructorCodegen::COMDAT &&
3887 !CGM.TryEmitBaseDestructorAsAlias(DD))
// NOTE(review): the body ('return;') of this condition is not visible here.
3890 // FIXME: The deleting destructor is equivalent to the selected operator
3892 // * either the delete is a destroying operator delete or the destructor
3893 // would be trivial if it weren't virtual,
3894 // * the conversion from the 'this' parameter to the first parameter of the
3895 // destructor is equivalent to a bitcast,
3896 // * the destructor does not have an implicit "this" return, and
3897 // * the operator delete has the same calling convention and IR function type
3898 // as the destructor.
3899 // In such cases we should try to emit the deleting dtor as an alias to the
3900 // selected 'operator delete'.
3902 llvm::Function *Fn = CGM.codegenCXXStructor(GD);
3904 if (CGType == StructorCodegen::COMDAT) {
3905 SmallString<256> Buffer;
3906 llvm::raw_svector_ostream Out(Buffer);
// Mangle the C5/D5 comdat name so all structor variants share one group.
// NOTE(review): the 'if (DD)' / 'else' wrappers around these two mangle
// calls, and the 'Fn->setComdat(C);' use of C, are not visible here.
3908 getMangleContext().mangleCXXDtorComdat(DD, Out);
3910 getMangleContext().mangleCXXCtorComdat(CD, Out);
3911 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
3914 CGM.maybeSetTrivialComdat(*MD, *Fn);
// Lazily declare the __cxa_begin_catch runtime entry point.
3918 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
3919 // void *__cxa_begin_catch(void*);
3920 llvm::FunctionType *FTy = llvm::FunctionType::get(
3921 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
3923 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
// Lazily declare the __cxa_end_catch runtime entry point.
3926 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
3927 // void __cxa_end_catch();
3928 llvm::FunctionType *FTy =
3929 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
3931 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
// Lazily declare the __cxa_get_exception_ptr runtime entry point.
3934 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
3935 // void *__cxa_get_exception_ptr(void*);
3936 llvm::FunctionType *FTy = llvm::FunctionType::get(
3937 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
3939 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
3943 /// A cleanup to call __cxa_end_catch. In many cases, the caught
3944 /// exception type lets us state definitively that the thrown exception
3945 /// type does not have a destructor. In particular:
3946 /// - Catch-alls tell us nothing, so we have to conservatively
3947 /// assume that the thrown exception might have a destructor.
3948 /// - Catches by reference behave according to their base types.
3949 /// - Catches of non-record types will only trigger for exceptions
3950 /// of non-record types, which never have destructors.
3951 /// - Catches of record types can trigger for arbitrary subclasses
3952 /// of the caught type, so we have to assume the actual thrown
3953 /// exception type might have a throwing destructor, even if the
3954 /// caught type's destructor is trivial or nothrow.
3955 struct CallEndCatch final : EHScopeStack::Cleanup {
3956 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
// NOTE(review): the 'bool MightThrow;' member declaration is not visible in
// this listing (line elided).
3959 void Emit(CodeGenFunction &CGF, Flags flags) override {
// NOTE(review): the 'if (!MightThrow) { ... return; }' wrapper that selects
// between these two call forms is not visible in this listing; the first
// call is the nounwind path, the second may unwind (invoke).
3961 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
3965 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
3970 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
3971 /// __cxa_end_catch.
3973 /// \param EndMightThrow - true if __cxa_end_catch might throw
// Returns the adjusted exception pointer produced by __cxa_begin_catch.
3974 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
// NOTE(review): the 'llvm::Value *Exn,' parameter line is not visible in
// this listing (line elided); 'Exn' is used below.
3976 bool EndMightThrow) {
3977 llvm::CallInst *call =
3978 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
3980 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
// NOTE(review): the 'return call;' is not visible in this listing.
3985 /// A "special initializer" callback for initializing a catch
3986 /// parameter during catch initialization.
// Dispatches on the catch type: by-reference catches bind the parameter to
// the (possibly adjusted) exception object; scalar/complex catches copy the
// value out; aggregate catches either memcpy or run the copy constructor.
// NOTE(review): this listing elides a number of lines inside this function
// (the 'Address ParamAddr' parameter, several 'return'/'break' statements,
// and closing braces); verify against the full source before editing.
3987 static void InitCatchParam(CodeGenFunction &CGF,
3988 const VarDecl &CatchParam,
3990 SourceLocation Loc) {
3991 // Load the exception from where the landing pad saved it.
3992 llvm::Value *Exn = CGF.getExceptionFromSlot();
3994 CanQualType CatchType =
3995 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
3996 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
3998 // If we're catching by reference, we can just cast the object
3999 // pointer to the appropriate pointer.
4000 if (isa<ReferenceType>(CatchType)) {
4001 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4002 bool EndCatchMightThrow = CaughtType->isRecordType();
4004 // __cxa_begin_catch returns the adjusted object pointer.
4005 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4007 // We have no way to tell the personality function that we're
4008 // catching by reference, so if we're catching a pointer,
4009 // __cxa_begin_catch will actually return that pointer by value.
4010 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4011 QualType PointeeType = PT->getPointeeType();
4013 // When catching by reference, generally we should just ignore
4014 // this by-value pointer and use the exception object instead.
4015 if (!PointeeType->isRecordType()) {
4017 // Exn points to the struct _Unwind_Exception header, which
4018 // we have to skip past in order to reach the exception data.
4019 unsigned HeaderSize =
4020 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4021 AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
4023 // However, if we're catching a pointer-to-record type that won't
4024 // work, because the personality function might have adjusted
4025 // the pointer. There's actually no way for us to fully satisfy
4026 // the language/ABI contract here: we can't use Exn because it
4027 // might have the wrong adjustment, but we can't use the by-value
4028 // pointer because it's off by a level of abstraction.
4030 // The current solution is to dump the adjusted pointer into an
4031 // alloca, which breaks language semantics (because changing the
4032 // pointer doesn't change the exception) but at least works.
4033 // The better solution would be to filter out non-exact matches
4034 // and rethrow them, but this is tricky because the rethrow
4035 // really needs to be catchable by other sites at this landing
4036 // pad. The best solution is to fix the personality function.
4038 // Pull the pointer for the reference type off.
// NOTE(review): the 'llvm::Type *PtrTy =' left-hand side of this statement
// is not visible in this listing (line elided).
4040 cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
4042 // Create the temporary and write the adjusted pointer into it.
// NOTE(review): the 'Address ExnPtrTmp =' left-hand side of this statement
// is not visible in this listing (line elided).
4044 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4045 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4046 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4048 // Bind the reference to the temporary.
4049 AdjustedExn = ExnPtrTmp.getPointer();
// Store the (possibly redirected) object pointer into the reference slot.
4053 llvm::Value *ExnCast =
4054 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4055 CGF.Builder.CreateStore(ExnCast, ParamAddr);
4059 // Scalars and complexes.
4060 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4061 if (TEK != TEK_Aggregate) {
4062 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4064 // If the catch type is a pointer type, __cxa_begin_catch returns
4065 // the pointer by value.
4066 if (CatchType->hasPointerRepresentation()) {
4067 llvm::Value *CastExn =
4068 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
// Objective-C ownership of the caught pointer decides how to store it.
// NOTE(review): fallthrough/'return' statements between these switch cases
// are not visible in this listing (lines elided).
4070 switch (CatchType.getQualifiers().getObjCLifetime()) {
4071 case Qualifiers::OCL_Strong:
4072 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4075 case Qualifiers::OCL_None:
4076 case Qualifiers::OCL_ExplicitNone:
4077 case Qualifiers::OCL_Autoreleasing:
4078 CGF.Builder.CreateStore(CastExn, ParamAddr);
4081 case Qualifiers::OCL_Weak:
4082 CGF.EmitARCInitWeak(ParamAddr, CastExn);
4085 llvm_unreachable("bad ownership qualifier!");
4088 // Otherwise, it returns a pointer into the exception object.
4090 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4091 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4093 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
4094 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
// NOTE(review): the 'switch (TEK)' and its case labels around the loads and
// stores below are not visible in this listing (lines elided).
4097 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4101 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4102 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4106 llvm_unreachable("evaluation kind filtered out!");
4108 llvm_unreachable("bad evaluation kind");
4111 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4112 auto catchRD = CatchType->getAsCXXRecordDecl();
4113 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4115 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4117 // Check for a copy expression. If we don't have a copy expression,
4118 // that means a trivial copy is okay.
4119 const Expr *copyExpr = CatchParam.getInit();
// NOTE(review): the 'if (!copyExpr) {' guard for the trivial-copy path is
// not visible in this listing (line elided).
4121 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4122 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4123 caughtExnAlignment);
4124 LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4125 LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4126 CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4130 // We have to call __cxa_get_exception_ptr to get the adjusted
4131 // pointer before copying.
4132 llvm::CallInst *rawAdjustedExn =
4133 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
4135 // Cast that to the appropriate type.
4136 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4137 caughtExnAlignment);
4139 // The copy expression is defined in terms of an OpaqueValueExpr.
4140 // Find it and map it to the adjusted expression.
4141 CodeGenFunction::OpaqueValueMapping
4142 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4143 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4145 // Call the copy ctor in a terminate scope.
4146 CGF.EHStack.pushTerminate();
4148 // Perform the copy construction.
4149 CGF.EmitAggExpr(copyExpr,
4150 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4151 AggValueSlot::IsNotDestructed,
4152 AggValueSlot::DoesNotNeedGCBarriers,
4153 AggValueSlot::IsNotAliased,
4154 AggValueSlot::DoesNotOverlap));
4156 // Leave the terminate scope.
4157 CGF.EHStack.popTerminate();
4159 // Undo the opaque value mapping.
4162 // Finally we can call __cxa_begin_catch.
4163 CallBeginCatch(CGF, Exn, true);
4166 /// Begins a catch statement by initializing the catch variable and
4167 /// calling __cxa_begin_catch.
4168 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4169 const CXXCatchStmt *S) {
4170 // We have to be very careful with the ordering of cleanups here:
4171 // C++ [except.throw]p4:
4172 // The destruction [of the exception temporary] occurs
4173 // immediately after the destruction of the object declared in
4174 // the exception-declaration in the handler.
4176 // So the precise ordering is:
4177 // 1. Construct catch variable.
4178 // 2. __cxa_begin_catch
4179 // 3. Enter __cxa_end_catch cleanup
4180 // 4. Enter dtor cleanup
4182 // We do this by using a slightly abnormal initialization process.
4183 // Delegation sequence:
4184 // - ExitCXXTryStmt opens a RunCleanupsScope
4185 // - EmitAutoVarAlloca creates the variable and debug info
4186 // - InitCatchParam initializes the variable from the exception
4187 // - CallBeginCatch calls __cxa_begin_catch
4188 // - CallBeginCatch enters the __cxa_end_catch cleanup
4189 // - EmitAutoVarCleanups enters the variable destructor cleanup
4190 // - EmitCXXTryStmt emits the code for the catch body
4191 // - EmitCXXTryStmt close the RunCleanupsScope
4193 VarDecl *CatchParam = S->getExceptionDecl();
// NOTE(review): the 'if (!CatchParam) {' guard and its 'return; }' around
// the catch-all path below are not visible in this listing (lines elided).
4195 llvm::Value *Exn = CGF.getExceptionFromSlot();
4196 CallBeginCatch(CGF, Exn, true);
// Named catch parameter: allocate it, initialize it from the exception,
// then register its destructor cleanup.
4201 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4202 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4203 CGF.EmitAutoVarCleanups(var);
4206 /// Get or define the following function:
4207 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
4208 /// This code is used only in C++.
4209 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4210 llvm::FunctionType *fnTy =
4211 llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4212 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4213 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4214 llvm::Function *fn =
4215 cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
// NOTE(review): an 'if (fn->empty()) {' guard (define the body only once)
// is not visible in this listing before the attribute setup below; the
// trailing 'return fnRef;' is also not visible (lines elided).
4217 fn->setDoesNotThrow();
4218 fn->setDoesNotReturn();
4220 // What we really want is to massively penalize inlining without
4221 // forbidding it completely. The difference between that and
4222 // 'noinline' is negligible.
4223 fn->addFnAttr(llvm::Attribute::NoInline);
4225 // Allow this function to be shared across translation units, but
4226 // we don't want it to turn into an exported symbol.
4227 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4228 fn->setVisibility(llvm::Function::HiddenVisibility);
4229 if (CGM.supportsCOMDAT())
4230 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4232 // Set up the function.
4233 llvm::BasicBlock *entry =
4234 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4235 CGBuilderTy builder(CGM, entry);
4237 // Pull the exception pointer out of the parameter list.
4238 llvm::Value *exn = &*fn->arg_begin();
4240 // Call __cxa_begin_catch(exn).
4241 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4242 catchCall->setDoesNotThrow();
4243 catchCall->setCallingConv(CGM.getRuntimeCC());
4245 // Call std::terminate().
4246 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4247 termCall->setDoesNotThrow();
4248 termCall->setDoesNotReturn();
4249 termCall->setCallingConv(CGM.getRuntimeCC());
4251 // std::terminate cannot return.
4252 builder.CreateUnreachable();
// NOTE(review): the return-type line and the 'llvm::Value *Exn' parameter
// line of this definition are not visible in this listing (lines elided).
4258 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4260 // In C++, we want to call __cxa_begin_catch() before terminating.
// NOTE(review): an 'if (Exn) {' guard around the first return below is not
// visible in this listing (line elided).
4262 assert(CGF.CGM.getLangOpts().CPlusPlus);
4263 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4265 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
// Load the vtable pointer (as i8**) from \p This and return it paired with
// the record it was loaded for.
4268 std::pair<llvm::Value *, const CXXRecordDecl *>
4269 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4270 const CXXRecordDecl *RD) {
4271 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4274 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4275 const CXXCatchStmt *C) {
4276 if (CGF.getTarget().hasFeature("exception-handling"))
4277 CGF.EHStack.pushCleanup<CatchRetScope>(
4278 NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4279 ItaniumCXXABI::emitBeginCatch(CGF, C);