1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This provides C++ code generation targeting the Itanium C++ ABI. The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
12 // https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13 // https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
15 // It also supports the closely-related ARM ABI, documented at:
16 // https://developer.arm.com/documentation/ihi0041/g/
18 //===----------------------------------------------------------------------===//
21 #include "CGCleanup.h"
22 #include "CGRecordLayout.h"
23 #include "CGVTables.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenModule.h"
26 #include "TargetInfo.h"
27 #include "clang/AST/Attr.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/StmtCXX.h"
30 #include "clang/AST/Type.h"
31 #include "clang/CodeGen/ConstantInitBuilder.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/ScopedPrinter.h"
39 using namespace clang;
40 using namespace CodeGen;
43 class ItaniumCXXABI : public CodeGen::CGCXXABI {
44 /// VTables - All the vtables which have been defined.
45 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
47 /// All the thread wrapper functions that have been used.
48 llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
// ABI-variant switches, fixed at construction time:
//  - UseARMMethodPtrABI: use the ARM encoding of member function pointers,
//    where the virtual bit lives in 'adj' and the this-adjustment is
//    'adj >> 1' (see EmitLoadOfMemberFunctionPointer below).
//  - UseARMGuardVarABI: use the ARM static-initialization guard protocol
//    (presumably consumed by EmitGuardedInit — not visible here; confirm).
//  - Use32BitVTableOffsetABI: only the low 32 bits of a virtual member
//    pointer's vtable offset are significant (set by AppleARM64CXXABI).
52 bool UseARMMethodPtrABI;
53 bool UseARMGuardVarABI;
54 bool Use32BitVTableOffsetABI;
// Covariant accessor: the Itanium ABI always mangles with an
// ItaniumMangleContext, so the cast is safe by construction.
56 ItaniumMangleContext &getMangleContext() {
57 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
// Defaults produce the generic Itanium configuration; subclasses opt into
// the ARM variants via the two flags.
61 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62 bool UseARMMethodPtrABI = false,
63 bool UseARMGuardVarABI = false) :
64 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65 UseARMGuardVarABI(UseARMGuardVarABI),
66 Use32BitVTableOffsetABI(false) { }
68 bool classifyReturnType(CGFunctionInfo &FI) const override;
// Records that C++ forbids copying must be passed indirectly, by address.
70 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71 // If C++ prohibits us from making a copy, pass by address.
72 if (!RD->canPassInRegisters())
// Distinguishes the complete-object and base-object variants of a
// constructor/destructor GlobalDecl. (Several case labels and returns are
// elided in this listing.)
77 bool isThisCompleteObject(GlobalDecl GD) const override {
78 // The Itanium ABI has separate complete-object vs. base-object
79 // variants of both constructors and destructors.
80 if (isa<CXXDestructorDecl>(GD.getDecl())) {
81 switch (GD.getDtorType()) {
90 llvm_unreachable("emitting dtor comdat as function?");
92 llvm_unreachable("bad dtor kind");
94 if (isa<CXXConstructorDecl>(GD.getDecl())) {
95 switch (GD.getCtorType()) {
102 case Ctor_CopyingClosure:
103 case Ctor_DefaultClosure:
104 llvm_unreachable("closure ctors in Itanium ABI?");
107 llvm_unreachable("emitting ctor comdat as function?");
// NOTE(review): this message says "dtor" but is on the constructor path —
// looks like a copy-paste of the message above.
109 llvm_unreachable("bad dtor kind");
// ----- Member pointer support -----
116 bool isZeroInitializable(const MemberPointerType *MPT) override;
118 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
121 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
124 llvm::Value *&ThisPtrForCall,
125 llvm::Value *MemFnPtr,
126 const MemberPointerType *MPT) override;
129 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
132 const MemberPointerType *MPT) override;
134 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
136 llvm::Value *Src) override;
137 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138 llvm::Constant *Src) override;
140 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
142 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144 CharUnits offset) override;
145 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147 CharUnits ThisAdjustment);
149 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150 llvm::Value *L, llvm::Value *R,
151 const MemberPointerType *MPT,
152 bool Inequality) override;
154 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
156 const MemberPointerType *MPT) override;
// ----- Exception handling and RTTI -----
158 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159 Address Ptr, QualType ElementType,
160 const CXXDestructorDecl *Dtor) override;
162 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
165 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
168 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169 llvm::Value *Exn) override;
171 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
// Itanium catch handlers are described by a plain RTTI descriptor with no
// extra flags (the 0 here).
174 getAddrOfCXXCatchHandlerType(QualType Ty,
175 QualType CatchHandlerType) override {
176 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
// ----- typeid / dynamic_cast -----
179 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
183 llvm::Type *StdTypeInfoPtrTy) override;
185 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186 QualType SrcRecordTy) override;
188 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
189 QualType SrcRecordTy, QualType DestTy,
190 QualType DestRecordTy,
191 llvm::BasicBlock *CastEnd) override;
193 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
194 QualType SrcRecordTy,
195 QualType DestTy) override;
197 bool EmitBadCastCall(CodeGenFunction &CGF) override;
200 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201 const CXXRecordDecl *ClassDecl,
202 const CXXRecordDecl *BaseClassDecl) override;
// ----- Constructors and destructors -----
204 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
206 AddedStructorArgCounts
207 buildStructorSignature(GlobalDecl GD,
208 SmallVectorImpl<CanQualType> &ArgTys) override;
210 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211 CXXDtorType DT) const override {
212 // Itanium does not emit any destructor variant as an inline thunk.
213 // Delegating may occur as an optimization, but all variants are either
214 // emitted with external linkage or as linkonce if they are inline and used.
218 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
220 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221 FunctionArgList &Params) override;
223 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
225 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226 const CXXConstructorDecl *D,
229 bool Delegating) override;
231 llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232 const CXXDestructorDecl *DD,
235 bool Delegating) override;
237 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238 CXXDtorType Type, bool ForVirtualBase,
239 bool Delegating, Address This,
240 QualType ThisTy) override;
// ----- VTables -----
242 void emitVTableDefinitions(CodeGenVTables &CGVT,
243 const CXXRecordDecl *RD) override;
245 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246 CodeGenFunction::VPtr Vptr) override;
248 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
// VTable address-point queries: plain, inside-structor, with-VTT, and
// constant-expression variants.
253 getVTableAddressPoint(BaseSubobject Base,
254 const CXXRecordDecl *VTableClass) override;
256 llvm::Value *getVTableAddressPointInStructor(
257 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
260 llvm::Value *getVTableAddressPointInStructorWithVTT(
261 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
265 getVTableAddressPointForConstExpr(BaseSubobject Base,
266 const CXXRecordDecl *VTableClass) override;
268 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269 CharUnits VPtrOffset) override;
271 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272 Address This, llvm::Type *Ty,
273 SourceLocation Loc) override;
275 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276 const CXXDestructorDecl *Dtor,
277 CXXDtorType DtorType, Address This,
278 DeleteOrMemberCallExpr E) override;
280 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
282 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
// Thunks emitted alongside a vtable get available_externally linkage so the
// optimizer may inline them; local-linkage thunks keep their linkage.
285 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286 bool ReturnAdjustment) override {
287 // Allow inlining of thunks by emitting them with available_externally
288 // linkage together with vtables when needed.
289 if (ForVTable && !Thunk->hasLocalLinkage())
290 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291 CGM.setGVProperties(Thunk, GD);
294 bool exportThunk() override { return true; }
296 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297 const ThisAdjustment &TA) override;
299 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300 const ReturnAdjustment &RA) override;
// The source object of an implicit copy constructor is its last argument
// under the Itanium convention.
302 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303 FunctionArgList &Args) const override {
304 assert(!Args.empty() && "expected the arglist to not be empty!");
305 return Args.size() - 1;
308 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
309 StringRef GetDeletedVirtualCallName() override
310 { return "__cxa_deleted_virtual"; }
// ----- Array cookies (operator new[] bookkeeping) -----
312 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313 Address InitializeArrayCookie(CodeGenFunction &CGF,
315 llvm::Value *NumElements,
316 const CXXNewExpr *expr,
317 QualType ElementType) override;
318 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
320 CharUnits cookieSize) override;
// ----- Static-local guards and global destructors -----
322 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323 llvm::GlobalVariable *DeclPtr,
324 bool PerformInit) override;
325 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326 llvm::FunctionCallee dtor,
327 llvm::Constant *addr) override;
// ----- Thread-local variable support -----
329 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
331 void EmitThreadLocalInitFuncs(
333 ArrayRef<const VarDecl *> CXXThreadLocals,
334 ArrayRef<llvm::Function *> CXXThreadLocalInits,
335 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
// Whether the variable may require a destructor call at shutdown: either it
// demonstrably needs destruction, or its (array element) class type is
// incomplete so we cannot prove it does not.
337 bool mayNeedDestruction(const VarDecl *VD) const {
338 if (VD->needsDestruction(getContext()))
341 // If the variable has an incomplete class type (or array thereof), it
342 // might need destruction.
343 const Type *T = VD->getType()->getBaseElementTypeUnsafe();
344 if (T->getAs<RecordType>() && T->isIncompleteType())
350 /// Determine whether we will definitely emit this variable with a constant
351 /// initializer, either because the language semantics demand it or because
352 /// we know that the initializer is a constant.
353 // For weak definitions, any initializer available in the current translation
354 // unit is not necessarily reflective of the initializer used; such
355 // initializers are ignored unless InspectInitForWeakDef is true.
357 isEmittedWithConstantInitializer(const VarDecl *VD,
358 bool InspectInitForWeakDef = false) const {
359 VD = VD->getMostRecentDecl();
360 if (VD->hasAttr<ConstInitAttr>())
363 // All later checks examine the initializer specified on the variable. If
364 // the variable is weak, such examination would not be correct.
365 if (!InspectInitForWeakDef &&
366 (VD->isWeak() || VD->hasAttr<SelectAnyAttr>()))
369 const VarDecl *InitDecl = VD->getInitializingDeclaration();
373 // If there's no initializer to run, this is constant initialization.
374 if (!InitDecl->hasInit())
377 // If we have the only definition, we don't need a thread wrapper if we
378 // will emit the value as a constant.
379 if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
380 return !mayNeedDestruction(VD) && InitDecl->evaluateValue();
382 // Otherwise, we need a thread wrapper unless we know that every
383 // translation unit will emit the value as a constant. We rely on the
384 // variable being constant-initialized in every translation unit if it's
385 // constant-initialized in any translation unit, which isn't actually
386 // guaranteed by the standard but is necessary for sanity.
387 return InitDecl->hasConstantInitialization();
// A thread_local variable needs a wrapper function unless it is constant-
// initialized and trivially destroyed.
390 bool usesThreadWrapperFunction(const VarDecl *VD) const override {
391 return !isEmittedWithConstantInitializer(VD) ||
392 mayNeedDestruction(VD);
394 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
395 QualType LValType) override;
397 bool NeedsVTTParameter(GlobalDecl GD) override;
399 /**************************** RTTI Uniqueness ******************************/
402 /// Returns true if the ABI requires RTTI type_info objects to be unique
403 /// across a program.
404 virtual bool shouldRTTIBeUnique() const { return true; }
407 /// What sort of unique-RTTI behavior should we use?
// (Enumerator names are elided in this listing; the three documented cases
// are: guaranteed-unique, non-unique/hidden, and non-unique/non-hidden.)
408 enum RTTIUniquenessKind {
409 /// We are guaranteeing, or need to guarantee, that the RTTI string
413 /// We are not guaranteeing uniqueness for the RTTI string, so we
414 /// can demote to hidden visibility but must use string comparisons.
417 /// We are not guaranteeing uniqueness for the RTTI string, so we
418 /// have to use string comparisons, but we also have to emit it with
419 /// non-hidden visibility.
423 /// Return the required visibility status for the given type and linkage in
426 classifyRTTIUniqueness(QualType CanTy,
427 llvm::GlobalValue::LinkageTypes Linkage) const;
428 friend class ItaniumRTTIBuilder;
430 void emitCXXStructor(GlobalDecl GD) override;
432 std::pair<llvm::Value *, const CXXRecordDecl *>
433 LoadVTablePtr(CodeGenFunction &CGF, Address This,
434 const CXXRecordDecl *RD) override;
// Scans the vtable layout for used, inline virtual functions that have not
// yet been emitted in this module (used to decide whether a vtable can be
// emitted speculatively).
437 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
438 const auto &VtableLayout =
439 CGM.getItaniumVTableContext().getVTableLayout(RD);
441 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
// Only used function-pointer slots are of interest.
443 if (!VtableComponent.isUsedFunctionPointerKind())
446 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
447 if (!Method->getCanonicalDecl()->isInlined())
450 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
451 auto *Entry = CGM.GetGlobalValue(Name);
452 // This checks if virtual inline function has already been emitted.
453 // Note that it is possible that this inline function would be emitted
454 // after trying to emit vtable speculatively. Because of this we do
455 // an extra pass after emitting all deferred vtables to find and emit
456 // these vtables opportunistically.
457 if (!Entry || Entry->isDeclaration())
// Whether any component of the class's vtable (its RTTI object, or a used
// virtual function without a definition) has hidden visibility.
463 bool isVTableHidden(const CXXRecordDecl *RD) const {
464 const auto &VtableLayout =
465 CGM.getItaniumVTableContext().getVTableLayout(RD);
467 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
468 if (VtableComponent.isRTTIKind()) {
469 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
470 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
472 } else if (VtableComponent.isUsedFunctionPointerKind()) {
473 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
474 if (Method->getVisibility() == Visibility::HiddenVisibility &&
475 !Method->isDefined())
// ARM variant of the Itanium ABI: ARM member-pointer encoding, ARM guard
// variables, this-returning structors, and its own array-cookie layout.
483 class ARMCXXABI : public ItaniumCXXABI {
485 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
486 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
487 /*UseARMGuardVarABI=*/true) {}
// Constructors and non-deleting destructors return 'this' on ARM.
489 bool HasThisReturn(GlobalDecl GD) const override {
490 return (isa<CXXConstructorDecl>(GD.getDecl()) || (
491 isa<CXXDestructorDecl>(GD.getDecl()) &&
492 GD.getDtorType() != Dtor_Deleting));
495 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
496 QualType ResTy) override;
498 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
499 Address InitializeArrayCookie(CodeGenFunction &CGF,
501 llvm::Value *NumElements,
502 const CXXNewExpr *expr,
503 QualType ElementType) override;
504 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
505 CharUnits cookieSize) override;
// Apple's arm64 flavor of the ARM ABI: 32-bit vtable offsets in virtual
// member function pointers, and relaxed RTTI uniqueness.
508 class AppleARM64CXXABI : public ARMCXXABI {
510 AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
511 Use32BitVTableOffsetABI = true;
514 // ARM64 libraries are prepared for non-unique RTTI.
515 bool shouldRTTIBeUnique() const override { return false; }
// Fuchsia: generic Itanium ABI plus this-returning structors.
518 class FuchsiaCXXABI final : public ItaniumCXXABI {
520 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
521 : ItaniumCXXABI(CGM) {}
// Constructors and non-deleting destructors return 'this'.
524 bool HasThisReturn(GlobalDecl GD) const override {
525 return isa<CXXConstructorDecl>(GD.getDecl()) ||
526 (isa<CXXDestructorDecl>(GD.getDecl()) &&
527 GD.getDtorType() != Dtor_Deleting);
// WebAssembly: ARM-style member pointers and guard variables, custom catch
// handling, this-returning structors.
531 class WebAssemblyCXXABI final : public ItaniumCXXABI {
533 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
534 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
535 /*UseARMGuardVarABI=*/true) {}
536 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
538 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
539 llvm::Value *Exn) override;
// Constructors and non-deleting destructors return 'this'.
542 bool HasThisReturn(GlobalDecl GD) const override {
543 return isa<CXXConstructorDecl>(GD.getDecl()) ||
544 (isa<CXXDestructorDecl>(GD.getDecl()) &&
545 GD.getDtorType() != Dtor_Deleting);
547 bool canCallMismatchedFunctionType() const override { return false; }
// IBM XL (AIX) flavor: sinit/sterm-based global init/teardown and a custom
// global-destructor registration scheme.
550 class XLCXXABI final : public ItaniumCXXABI {
552 explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
553 : ItaniumCXXABI(CGM) {}
555 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
556 llvm::FunctionCallee dtor,
557 llvm::Constant *addr) override;
559 bool useSinitAndSterm() const override { return true; }
// Emits the sterm finalizer that runs 'dtorStub' for the variable at 'addr'.
562 void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
563 llvm::Constant *addr);
// Factory: selects the concrete Itanium-family ABI implementation based on
// the target's C++ ABI kind.
567 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
568 switch (CGM.getContext().getCXXABIKind()) {
569 // For IR-generation purposes, there's no significant difference
570 // between the ARM and iOS ABIs.
571 case TargetCXXABI::GenericARM:
572 case TargetCXXABI::iOS:
573 case TargetCXXABI::WatchOS:
574 return new ARMCXXABI(CGM);
576 case TargetCXXABI::AppleARM64:
577 return new AppleARM64CXXABI(CGM);
579 case TargetCXXABI::Fuchsia:
580 return new FuchsiaCXXABI(CGM);
582 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
583 // include the other 32-bit ARM oddities: constructor/destructor return values
584 // and array cookies.
585 case TargetCXXABI::GenericAArch64:
586 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
587 /*UseARMGuardVarABI=*/true);
589 case TargetCXXABI::GenericMIPS:
590 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
592 case TargetCXXABI::WebAssembly:
593 return new WebAssemblyCXXABI(CGM);
595 case TargetCXXABI::XL:
596 return new XLCXXABI(CGM);
598 case TargetCXXABI::GenericItanium:
599 if (CGM.getContext().getTargetInfo().getTriple().getArch()
600 == llvm::Triple::le32) {
601 // For PNaCl, use ARM-style method pointers so that PNaCl code
602 // does not assume anything about the alignment of function pointers.
604 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
606 return new ItaniumCXXABI(CGM);
608 case TargetCXXABI::Microsoft:
609 llvm_unreachable("Microsoft ABI is not Itanium-based");
611 llvm_unreachable("bad ABI kind");
// Lowers a member pointer type: data member pointers become a single
// ptrdiff_t; member function pointers become { ptrdiff_t ptr, ptrdiff_t adj }.
615 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
616 if (MPT->isMemberDataPointer())
617 return CGM.PtrDiffTy;
618 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
621 /// In the Itanium and ARM ABIs, method pointers have the form:
622 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
624 /// In the Itanium ABI:
625 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero
626 /// - the this-adjustment is (memptr.adj)
627 /// - the virtual offset is (memptr.ptr - 1)
/// In the ARM ABI:
630 /// - method pointers are virtual if (memptr.adj & 1) is nonzero
631 /// - the this-adjustment is (memptr.adj >> 1)
632 /// - the virtual offset is (memptr.ptr)
633 /// ARM uses 'adj' for the virtual flag because Thumb functions
634 /// may be only single-byte aligned.
636 /// If the member is virtual, the adjusted 'this' pointer points
637 /// to a vtable pointer from which the virtual offset is applied.
639 /// If the member is non-virtual, memptr.ptr is the address of
640 /// the function to call.
641 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
642 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
643 llvm::Value *&ThisPtrForCall,
644 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
645 CGBuilderTy &Builder = CGF.Builder;
647 const FunctionProtoType *FPT =
648 MPT->getPointeeType()->getAs<FunctionProtoType>();
650 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
652 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
653 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
655 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
// Three-way control flow: virtual path, non-virtual path, and a merge block
// where the two candidate callees meet in a PHI.
657 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
658 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
659 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
661 // Extract memptr.adj, which is in the second field.
662 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
664 // Compute the true adjustment.
// On ARM the low bit of 'adj' is the virtual flag, so the real adjustment
// is 'adj >> 1'.
665 llvm::Value *Adj = RawAdj;
666 if (UseARMMethodPtrABI)
667 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
669 // Apply the adjustment and cast back to the original struct type
671 llvm::Value *This = ThisAddr.getPointer();
672 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
673 Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
674 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
675 ThisPtrForCall = This;
677 // Load the function pointer.
678 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
680 // If the LSB in the function pointer is 1, the function pointer points to
681 // a virtual function.
// On ARM the virtual flag is in 'adj' instead (see the comment above).
682 llvm::Value *IsVirtual;
683 if (UseARMMethodPtrABI)
684 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
686 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
687 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
688 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
690 // In the virtual path, the adjustment left 'This' pointing to the
691 // vtable of the correct base subobject. The "function pointer" is an
692 // offset within the vtable (+1 for the virtual flag on non-ARM).
693 CGF.EmitBlock(FnVirtual);
695 // Cast the adjusted this to a pointer to vtable pointer and load.
696 llvm::Type *VTableTy = Builder.getInt8PtrTy();
697 CharUnits VTablePtrAlign =
698 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
699 CGF.getPointerAlign());
700 llvm::Value *VTable =
701 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
704 // On ARM64, to reserve extra space in virtual member function pointers,
705 // we only pay attention to the low 32 bits of the offset.
706 llvm::Value *VTableOffset = FnAsInt;
707 if (!UseARMMethodPtrABI)
708 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
709 if (Use32BitVTableOffsetABI) {
710 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
711 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
714 // Check the address of the function pointer if CFI on member function
715 // pointers is enabled.
716 llvm::Constant *CheckSourceLocation;
717 llvm::Constant *CheckTypeDesc;
// Which LTO-driven checks/metadata apply to this load:
//   CFI member-function-call check, virtual function elimination (VFE),
//   and whole-program devirtualization (WPD) type tests.
718 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
719 CGM.HasHiddenLTOVisibility(RD);
720 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
721 CGM.HasHiddenLTOVisibility(RD);
722 bool ShouldEmitWPDInfo =
723 CGM.getCodeGenOpts().WholeProgramVTables &&
724 // Don't insert type tests if we are forcing public std visibility.
725 !CGM.HasLTOVisibilityPublicStd(RD);
726 llvm::Value *VirtualFn = nullptr;
729 CodeGenFunction::SanitizerScope SanScope(&CGF);
730 llvm::Value *TypeId = nullptr;
731 llvm::Value *CheckResult = nullptr;
733 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
734 // If doing CFI, VFE or WPD, we will need the metadata node to check
737 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
738 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
741 if (ShouldEmitVFEInfo) {
742 llvm::Value *VFPAddr =
743 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
745 // If doing VFE, load from the vtable with a type.checked.load intrinsic
746 // call. Note that we use the GEP to calculate the address to load from
747 // and pass 0 as the offset to the intrinsic. This is because every
748 // vtable slot of the correct type is marked with matching metadata, and
749 // we know that the load must be from one of these slots.
750 llvm::Value *CheckedLoad = Builder.CreateCall(
751 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
752 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
753 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
754 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
755 VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
758 // When not doing VFE, emit a normal load, as it allows more
759 // optimisations than type.checked.load.
760 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
761 llvm::Value *VFPAddr =
762 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
763 CheckResult = Builder.CreateCall(
764 CGM.getIntrinsic(llvm::Intrinsic::type_test),
765 {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
// Relative vtable layout stores offsets, not absolute pointers, so the
// slot is read with llvm.load.relative instead of a plain load.
768 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
769 VirtualFn = CGF.Builder.CreateCall(
770 CGM.getIntrinsic(llvm::Intrinsic::load_relative,
771 {VTableOffset->getType()}),
772 {VTable, VTableOffset});
773 VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
775 llvm::Value *VFPAddr =
776 CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
777 VFPAddr = CGF.Builder.CreateBitCast(
778 VFPAddr, FTy->getPointerTo()->getPointerTo());
779 VirtualFn = CGF.Builder.CreateAlignedLoad(
780 FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
// NOTE(review): "fuction" in the assert text below is a typo carried in
// the original source.
784 assert(VirtualFn && "Virtual fuction pointer not created!");
785 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
787 "Check result required but not created!");
789 if (ShouldEmitCFICheck) {
790 // If doing CFI, emit the check.
791 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
792 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
793 llvm::Constant *StaticData[] = {
794 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
// In trap mode, fail hard; otherwise emit the diagnostic check with the
// "all-vtables" type test as supporting data.
799 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
800 CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
802 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
803 CGM.getLLVMContext(),
804 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
805 llvm::Value *ValidVtable = Builder.CreateCall(
806 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
807 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
808 SanitizerHandler::CFICheckFail, StaticData,
809 {VTable, ValidVtable});
// The check emission may have moved the insert point; remember the actual
// predecessor block for the PHI at the end.
812 FnVirtual = Builder.GetInsertBlock();
814 } // End of sanitizer scope
816 CGF.EmitBranch(FnEnd);
818 // In the non-virtual path, the function pointer is actually a
820 CGF.EmitBlock(FnNonVirtual);
821 llvm::Value *NonVirtualFn =
822 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
824 // Check the function pointer if CFI on member function pointers is enabled.
825 if (ShouldEmitCFICheck) {
826 CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
827 if (RD->hasDefinition()) {
828 CodeGenFunction::SanitizerScope SanScope(&CGF);
830 llvm::Constant *StaticData[] = {
831 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
// A non-virtual member pointer may legitimately point into any of the
// most-derived bases, so OR together one type test per base.
836 llvm::Value *Bit = Builder.getFalse();
837 llvm::Value *CastedNonVirtualFn =
838 Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
839 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
840 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
841 getContext().getMemberPointerType(
842 MPT->getPointeeType(),
843 getContext().getRecordType(Base).getTypePtr()));
844 llvm::Value *TypeId =
845 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
847 llvm::Value *TypeTest =
848 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
849 {CastedNonVirtualFn, TypeId});
850 Bit = Builder.CreateOr(Bit, TypeTest);
853 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
854 SanitizerHandler::CFICheckFail, StaticData,
855 {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
857 FnNonVirtual = Builder.GetInsertBlock();
// Merge: the callee is whichever of the two candidate pointers was taken.
862 CGF.EmitBlock(FnEnd);
863 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
864 CalleePtr->addIncoming(VirtualFn, FnVirtual);
865 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
867 CGCallee Callee(FPT, CalleePtr);
871 /// Compute an l-value by applying the given pointer-to-member to a base
/// address: the member pointer is simply a byte offset from the base.
873 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
874 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
875 const MemberPointerType *MPT) {
876 assert(MemPtr->getType() == CGM.PtrDiffTy);
878 CGBuilderTy &Builder = CGF.Builder;
// Do the arithmetic on i8* so the offset is in bytes.
881 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
883 // Apply the offset, which we assume is non-null.
884 llvm::Value *Addr = Builder.CreateInBoundsGEP(
885 Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
887 // Cast the address to the appropriate pointer type, adopting the
888 // address space of the base pointer.
889 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
890 ->getPointerTo(Base.getAddressSpace())
891 return Builder.CreateBitCast(Addr, PType);
894 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
897 /// Bitcast conversions are always a no-op under Itanium.
899 /// Obligatory offset/adjustment diagram:
900 /// <-- offset --> <-- adjustment -->
901 /// |--------------------------|----------------------|--------------------|
902 /// ^Derived address point ^Base address point ^Member address point
904 /// So when converting a base member pointer to a derived member pointer,
905 /// we add the offset to the adjustment because the address point has
906 /// decreased; and conversely, when converting a derived MP to a base MP
907 /// we subtract the offset from the adjustment because the address point
910 /// The standard forbids (at compile time) conversion to and from
911 /// virtual bases, which is why we don't have to consider them here.
913 /// The standard forbids (at run time) casting a derived MP to a base
914 /// MP when the derived MP does not point to a member of the base.
915 /// This is why -1 is a reasonable choice for null data member
918 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
921 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
922 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
923 E->getCastKind() == CK_ReinterpretMemberPointer);
925 // Under Itanium, reinterprets don't require any additional processing.
926 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
928 // Use constant emission if we can.
929 if (isa<llvm::Constant>(src))
930 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
// A null adjustment means the cast requires no value change.
932 llvm::Constant *adj = getMemberPointerAdjustment(E);
933 if (!adj) return src;
935 CGBuilderTy &Builder = CGF.Builder;
936 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
938 const MemberPointerType *destTy =
939 E->getType()->castAs<MemberPointerType>();
941 // For member data pointers, this is just a matter of adding the
942 // offset if the source is non-null.
943 if (destTy->isMemberDataPointer()) {
946 dst = Builder.CreateNSWSub(src, adj, "adj");
948 dst = Builder.CreateNSWAdd(src, adj, "adj");
// Null data member pointers are all-ones; preserve them through the cast.
951 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
952 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
953 return Builder.CreateSelect(isNull, src, dst);
956 // The this-adjustment is left-shifted by 1 on ARM.
957 if (UseARMMethodPtrABI) {
958 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
960 adj = llvm::ConstantInt::get(adj->getType(), offset);
// Member function pointers adjust only the 'adj' field (index 1).
963 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
966 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
968 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
970 return Builder.CreateInsertValue(src, dstAdj, 1);
// Constant-folding counterpart of the IR-emitting overload above: performs
// the same member-pointer cast entirely with llvm::ConstantExpr nodes.
974 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
975                                            llvm::Constant *src) {
976   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
977          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
978          E->getCastKind() == CK_ReinterpretMemberPointer);
980   // Under Itanium, reinterprets don't require any additional processing.
981   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
983   // If the adjustment is trivial, we don't need to do anything.
984   llvm::Constant *adj = getMemberPointerAdjustment(E);
985   if (!adj) return src;
987   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
989   const MemberPointerType *destTy =
990     E->getType()->castAs<MemberPointerType>();
992   // For member data pointers, this is just a matter of adding the
993   // offset if the source is non-null.
994   if (destTy->isMemberDataPointer()) {
995     // null maps to null.
996     if (src->isAllOnesValue()) return src;
// Subtract for derived-to-base, add for base-to-derived.
999       return llvm::ConstantExpr::getNSWSub(src, adj);
1001       return llvm::ConstantExpr::getNSWAdd(src, adj);
1004   // The this-adjustment is left-shifted by 1 on ARM.
1005   if (UseARMMethodPtrABI) {
1006     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
1008     adj = llvm::ConstantInt::get(adj->getType(), offset);
// Member function pointers: rewrite only the adjustment field (index 1).
1011   llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
1012   llvm::Constant *dstAdj;
1013   if (isDerivedToBase)
1014     dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
1016     dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
1018   return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
// Build the null value for a member pointer of the given type: -1 (ptrdiff_t)
// for data members, and the {0, 0} pair for member function pointers.
1022 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
1023   // Itanium C++ ABI 2.3:
1024   //   A NULL pointer is represented as -1.
1025   if (MPT->isMemberDataPointer())
1026     return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
// Null member function pointer: both ptr and adj fields are zero.
1028   llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
1029   llvm::Constant *Values[2] = { Zero, Zero };
1030   return llvm::ConstantStruct::getAnon(Values);
// Build a constant pointer-to-data-member: simply the byte offset of the
// member within the class, as a ptrdiff_t.
1034 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
1036   // Itanium C++ ABI 2.3:
1037   //   A pointer to data member is an offset from the base address of
1038   //   the class object containing it, represented as a ptrdiff_t
1039   return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
// Build a constant pointer-to-member-function with no this-adjustment;
// delegates to BuildMemberPointer with a zero adjustment.
1043 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
1044   return BuildMemberPointer(MD, CharUnits::Zero());
// Build the {ptr, adj} constant for a pointer to member function MD with the
// given this-adjustment. For virtual functions ptr encodes a vtable offset
// (discriminated per the Itanium or ARM variant); for non-virtual functions
// it is the function address cast to ptrdiff_t.
1047 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
1048                                                   CharUnits ThisAdjustment) {
1049   assert(MD->isInstance() && "Member function must not be static!");
1051   CodeGenTypes &Types = CGM.getTypes();
1053   // Get the function pointer (or index if this is a virtual function).
1054   llvm::Constant *MemPtr[2];
1055   if (MD->isVirtual()) {
1056     uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
1057     uint64_t VTableOffset;
1058     if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1059       // Multiply by 4-byte relative offsets.
1060       VTableOffset = Index * 4;
// Classic layout: vtable entries are pointer-width, so scale by that.
1062       const ASTContext &Context = getContext();
1063       CharUnits PointerWidth = Context.toCharUnitsFromBits(
1064           Context.getTargetInfo().getPointerWidth(0));
1065       VTableOffset = Index * PointerWidth.getQuantity();
1068     if (UseARMMethodPtrABI) {
1069       // ARM C++ ABI 3.2.1:
1070       //   This ABI specifies that adj contains twice the this
1071       //   adjustment, plus 1 if the member function is virtual. The
1072       //   least significant bit of adj then makes exactly the same
1073       //   discrimination as the least significant bit of ptr does for
1075       MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1076       MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1077                                          2 * ThisAdjustment.getQuantity() + 1);
1079       // Itanium C++ ABI 2.3:
1080       //   For a virtual function, [the pointer field] is 1 plus the
1081       //   virtual table offset (in bytes) of the function,
1082       //   represented as a ptrdiff_t.
1083       MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
1084       MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1085                                          ThisAdjustment.getQuantity());
// Non-virtual case: ptr holds the function address itself.
1088     const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1090     // Check whether the function has a computable LLVM signature.
1091     if (Types.isFuncTypeConvertible(FPT)) {
1092       // The function has a computable LLVM signature; use the correct type.
1093       Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
1095       // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1096       // function type is incomplete.
1099     llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
// ARM doubles the adjustment so the low bit stays free as the virtual flag.
1101     MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
1102     MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1103                                        (UseARMMethodPtrABI ? 2 : 1) *
1104                                        ThisAdjustment.getQuantity());
1107   return llvm::ConstantStruct::getAnon(MemPtr);
// Lower an APValue member pointer: null values, member functions (via
// BuildMemberPointer) and data members (path adjustment + field offset).
1110 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1112   const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1113   const ValueDecl *MPD = MP.getMemberPointerDecl();
// NOTE(review): the null check on MPD appears elided here; on the null path
// we emit the ABI null representation.
1115     return EmitNullMemberPointer(MPT);
1117   CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1119   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1120     return BuildMemberPointer(MD, ThisAdjustment);
1122   CharUnits FieldOffset =
1123     getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1124   return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1127 /// The comparison algorithm is pretty easy: the member pointers are
1128 /// the same if they're either bitwise identical *or* both null.
1130 /// ARM is different here only because null-ness is more complicated.
1132 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
1135                                            const MemberPointerType *MPT,
1137   CGBuilderTy &Builder = CGF.Builder;
// For inequality, swap the predicate and the combining operators so the same
// structure below computes the De Morgan dual of the equality expression.
1139   llvm::ICmpInst::Predicate Eq;
1140   llvm::Instruction::BinaryOps And, Or;
1142     Eq = llvm::ICmpInst::ICMP_NE;
1143     And = llvm::Instruction::Or;
1144     Or = llvm::Instruction::And;
1146     Eq = llvm::ICmpInst::ICMP_EQ;
1147     And = llvm::Instruction::And;
1148     Or = llvm::Instruction::Or;
1151   // Member data pointers are easy because there's a unique null
1152   // value, so it just comes down to bitwise equality.
1153   if (MPT->isMemberDataPointer())
1154     return Builder.CreateICmp(Eq, L, R);
1156   // For member function pointers, the tautologies are more complex.
1157   // The Itanium tautology is:
1158   //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1159   // The ARM tautology is:
1160   //   (L == R) <==> (L.ptr == R.ptr &&
1161   //                  (L.adj == R.adj ||
1162   //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1163   // The inequality tautologies have exactly the same structure, except
1164   // applying De Morgan's laws.
1166   llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1167   llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1169   // This condition tests whether L.ptr == R.ptr.  This must always be
1170   // true for equality to hold.
1171   llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1173   // This condition, together with the assumption that L.ptr == R.ptr,
1174   // tests whether the pointers are both null.  ARM imposes an extra
1176   llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
1177   llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
1179   // This condition tests whether L.adj == R.adj.  If this isn't
1180   // true, the pointers are unequal unless they're both null.
1181   llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1182   llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1183   llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1185   // Null member function pointers on ARM clear the low bit of Adj,
1186   // so the zero condition has to check that neither low bit is set.
1187   if (UseARMMethodPtrABI) {
1188     llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
1190     // Compute (l.adj | r.adj) & 1 and test it against zero.
1191     llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1192     llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1193     llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1195     EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1198   // Tie together all our conditions.
1199   llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1200   Result = Builder.CreateBinOp(And, PtrEq, Result,
1201                                Inequality ? "memptr.ne" : "memptr.eq");
// Emit the boolean test "member pointer is not null": data members compare
// against -1; function members test ptr != 0 (plus the adj low bit on ARM).
1206 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1207                                           llvm::Value *MemPtr,
1208                                           const MemberPointerType *MPT) {
1209   CGBuilderTy &Builder = CGF.Builder;
1211   /// For member data pointers, this is just a check against -1.
1212   if (MPT->isMemberDataPointer()) {
1213     assert(MemPtr->getType() == CGM.PtrDiffTy);
1214     llvm::Value *NegativeOne =
1215       llvm::Constant::getAllOnesValue(MemPtr->getType());
1216     return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1219   // In Itanium, a member function pointer is not null if 'ptr' is not null.
1220   llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1222   llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1223   llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1225   // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1226   // (the virtual bit) is set.
1227   if (UseARMMethodPtrABI) {
1228     llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1229     llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1230     llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1231     llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1232                                                   "memptr.isvirtual");
1233     Result = Builder.CreateOr(Result, IsVirtual);
// Decide whether a C++ class return value must be passed indirectly: classes
// that cannot be passed in registers are returned via a hidden sret pointer.
1239 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1240   const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1244   // If C++ prohibits us from making a copy, return by address.
1245   if (!RD->canPassInRegisters()) {
1246     auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1247     FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1253 /// The Itanium ABI requires non-zero initialization only for data
1254 /// member pointers, for which '0' is a valid offset.
// Member function pointers have an all-zero null value, so memset(0) works;
// data member pointers need -1 and therefore are not zero-initializable.
1255 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1256   return MPT->isMemberFunctionPointer();
1259 /// The Itanium ABI always places an offset to the complete object
1260 /// at entry -2 in the vtable.
// Emit 'delete ptr' through a virtual destructor. When the global operator
// delete is used, the complete-object pointer is recovered via the
// offset-to-top vtable slot and a cleanup is pushed so deallocation happens
// even if the destructor throws.
1261 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1262                                             const CXXDeleteExpr *DE,
1264                                             QualType ElementType,
1265                                             const CXXDestructorDecl *Dtor) {
1266   bool UseGlobalDelete = DE->isGlobalDelete();
1267   if (UseGlobalDelete) {
1268     // Derive the complete-object pointer, which is what we need
1269     // to pass to the deallocation function.
1271     // Grab the vtable pointer as an intptr_t*.
1273       cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
1274     llvm::Value *VTable =
1275         CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1277     // Track back to entry -2 and pull out the offset there.
1278     llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1279         CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
1280     llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr, CGF.getPointerAlign());
1282     // Apply the offset.
1283     llvm::Value *CompletePtr =
1284       CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1286         CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
1288     // If we're supposed to call the global delete, make sure we do so
1289     // even if the destructor throws.
1290     CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1294   // FIXME: Provide a source location here even though there's no
1295   // CXXMemberCallExpr for dtor call.
// The deleting destructor handles deallocation itself unless we already
// arranged to call the global delete above.
1296   CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1297   EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1299   if (UseGlobalDelete)
1300     CGF.PopCleanupBlock();
// Emit a call to __cxa_rethrow for 'throw;'. When isNoReturn, the call is
// emitted as a noreturn runtime call; otherwise it may return normally.
1303 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1304   // void __cxa_rethrow();
1306   llvm::FunctionType *FTy =
1307     llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1309   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1312     CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1314     CGF.EmitRuntimeCallOrInvoke(Fn);
// Get (or declare) the __cxa_allocate_exception runtime entry point.
1317 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1318   // void *__cxa_allocate_exception(size_t thrown_size);
1320   llvm::FunctionType *FTy =
1321     llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1323   return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
// Get (or declare) the __cxa_throw runtime entry point.
1326 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1327   // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1328   //                  void (*dest) (void *));
// All three parameters are lowered as i8* here.
1330   llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1331   llvm::FunctionType *FTy =
1332     llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1334   return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
// Emit a 'throw expr': allocate the exception object with
// __cxa_allocate_exception, construct the thrown value into it, then call
// __cxa_throw with the value, its RTTI, and the destructor (or null).
1337 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1338   QualType ThrowType = E->getSubExpr()->getType();
1339   // Now allocate the exception object.
1340   llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1341   uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1343   llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1344   llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1345       AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
// Copy/construct the thrown value into the freshly allocated slot.
1347   CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1348   CGF.EmitAnyExprToExn(
1349       E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
1351   // Now throw the exception.
1352   llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1355   // The address of the destructor.  If the exception type has a
1356   // trivial destructor (or isn't a record), we just pass null.
1357   llvm::Constant *Dtor = nullptr;
1358   if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1359     CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1360     if (!Record->hasTrivialDestructor()) {
1361       CXXDestructorDecl *DtorD = Record->getDestructor();
1362       Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1363       Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1366   if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1368   llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1369   CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
// Get (or declare) the __dynamic_cast runtime function, marked
// nounwind+readonly so calls can be optimized.
1372 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1373   // void *__dynamic_cast(const void *sub,
1374   //                      const abi::__class_type_info *src,
1375   //                      const abi::__class_type_info *dst,
1376   //                      std::ptrdiff_t src2dst_offset);
1378   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1379   llvm::Type *PtrDiffTy =
1380     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1382   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1384   llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1386   // Mark the function as nounwind readonly.
1387   llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1388                                             llvm::Attribute::ReadOnly };
1389   llvm::AttributeList Attrs = llvm::AttributeList::get(
1390       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1392   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
// Get (or declare) the __cxa_bad_cast runtime function.
1395 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1396   // void __cxa_bad_cast();
1397   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1398   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1401 /// Compute the src2dst_offset hint as described in the
1402 /// Itanium C++ ABI [2.9.7]
// Returns the offset of Src within Dst when Src is a unique public
// non-virtual base, or one of the ABI sentinel values: -1 (virtual base on
// some path, no hint), -2 (Src not a public base), -3 (multiple public
// non-virtual paths).
1403 static CharUnits computeOffsetHint(ASTContext &Context,
1404                                    const CXXRecordDecl *Src,
1405                                    const CXXRecordDecl *Dst) {
1406   CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1407                      /*DetectVirtual=*/false);
1409   // If Dst is not derived from Src we can skip the whole computation below and
1410   // return that Src is not a public base of Dst.  Record all inheritance paths.
1411   if (!Dst->isDerivedFrom(Src, Paths))
1412     return CharUnits::fromQuantity(-2ULL);
1414   unsigned NumPublicPaths = 0;
1417   // Now walk all possible inheritance paths.
1418   for (const CXXBasePath &Path : Paths) {
1419     if (Path.Access != AS_public) // Ignore non-public inheritance.
1424     for (const CXXBasePathElement &PathElement : Path) {
1425       // If the path contains a virtual base class we can't give any hint.
1427       if (PathElement.Base->isVirtual())
1428         return CharUnits::fromQuantity(-1ULL);
1430       if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1433       // Accumulate the base class offsets.
1434       const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1435       Offset += L.getBaseClassOffset(
1436           PathElement.Base->getType()->getAsCXXRecordDecl());
1440   // -2: Src is not a public base of Dst.
1441   if (NumPublicPaths == 0)
1442     return CharUnits::fromQuantity(-2ULL);
1444   // -3: Src is a multiple public base type but never a virtual base type.
1445   if (NumPublicPaths > 1)
1446     return CharUnits::fromQuantity(-3ULL);
1448   // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1449   // Return the offset of Src from the origin of Dst.
// Get (or declare) the __cxa_bad_typeid runtime function.
1453 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1454   // void __cxa_bad_typeid();
1455   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1457   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
// Whether a typeid operand must be null-checked before the vtable load
// (the body/return expression is outside this view).
1460 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1461                                               QualType SrcRecordTy) {
// Emit a call to __cxa_bad_typeid and mark the point unreachable; used when
// typeid is applied to a null glvalue of polymorphic type.
1465 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1466   llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1467   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1468   Call->setDoesNotReturn();
1469   CGF.Builder.CreateUnreachable();
// Emit the polymorphic typeid: load the vtable pointer, then fetch the
// type_info slot stored just before the address point (-1 entry classically,
// a 32-bit relative entry at offset -4 in the relative-vtable layout).
1472 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1473                                        QualType SrcRecordTy,
1475                                        llvm::Type *StdTypeInfoPtrTy) {
1477       cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1478   llvm::Value *Value =
1479       CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1481   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1482     // Load the type info.
1483     Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
1484     Value = CGF.Builder.CreateCall(
1485         CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1486         {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
1488     // Setup to dereference again since this is a proxy we accessed.
1489     Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
1491     // Load the type info.
1493         CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
1495   return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
1496                                        CGF.getPointerAlign());
// Whether the __dynamic_cast call needs an explicit null check on its source
// pointer (the body/return expression is outside this view).
1499 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1500                                                        QualType SrcRecordTy) {
// Emit a checked dynamic_cast via the __dynamic_cast runtime call, passing
// source/destination RTTI and the src2dst offset hint. For reference casts,
// a null result branches to a __cxa_bad_cast block.
1504 llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1505     CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1506     QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1507   llvm::Type *PtrDiffLTy =
1508       CGF.ConvertType(CGF.getContext().getPointerDiffType());
1509   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1511   llvm::Value *SrcRTTI =
1512       CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1513   llvm::Value *DestRTTI =
1514       CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1516   // Compute the offset hint.
1517   const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1518   const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1519   llvm::Value *OffsetHint = llvm::ConstantInt::get(
1521       computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1523   // Emit the call to __dynamic_cast.
1524   llvm::Value *Value = ThisAddr.getPointer();
1525   Value = CGF.EmitCastToVoidPtr(Value);
1527   llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1528   Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1529   Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1531   /// C++ [expr.dynamic.cast]p9:
1532   ///   A failed cast to reference type throws std::bad_cast
1533   if (DestTy->isReferenceType()) {
1534     llvm::BasicBlock *BadCastBlock =
1535         CGF.createBasicBlock("dynamic_cast.bad_cast");
1537     llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1538     CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1540     CGF.EmitBlock(BadCastBlock);
1541     EmitBadCastCall(CGF);
// Emit dynamic_cast<void*>: read the offset-to-top slot at vtable entry -2
// (32-bit in the relative layout, ptrdiff_t classically) and add it to the
// object pointer to reach the most-derived object.
1547 llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1549                                                   QualType SrcRecordTy,
1551   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1553       cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1554   llvm::Value *OffsetToTop;
1555   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1556     // Get the vtable pointer.
1557     llvm::Value *VTable =
1558         CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);
1560     // Get the offset-to-top from the vtable.
1562         CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
1563     OffsetToTop = CGF.Builder.CreateAlignedLoad(
1564         CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
1566     llvm::Type *PtrDiffLTy =
1567         CGF.ConvertType(CGF.getContext().getPointerDiffType());
1569     // Get the vtable pointer.
1570     llvm::Value *VTable =
1571         CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
1573     // Get the offset-to-top from the vtable.
1575         CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
1576     OffsetToTop = CGF.Builder.CreateAlignedLoad(
1577         PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
1579   // Finally, add the offset to the pointer.
1580   llvm::Value *Value = ThisAddr.getPointer();
1581   Value = CGF.EmitCastToVoidPtr(Value);
1582   Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
1583   return CGF.Builder.CreateBitCast(Value, DestLTy);
// Emit a call to __cxa_bad_cast and mark the point unreachable; used for a
// failed dynamic_cast to reference type.
1586 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1587   llvm::FunctionCallee Fn = getBadCastFn(CGF);
1588   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1589   Call->setDoesNotReturn();
1590   CGF.Builder.CreateUnreachable();
// Load the dynamic offset of BaseClassDecl within ClassDecl from the vbase
// offset slot in the vtable (i32 in the relative layout, ptrdiff_t
// classically).
1595 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1597                                          const CXXRecordDecl *ClassDecl,
1598                                          const CXXRecordDecl *BaseClassDecl) {
1599   llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1600   CharUnits VBaseOffsetOffset =
1601       CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1603   llvm::Value *VBaseOffsetPtr =
1604     CGF.Builder.CreateConstGEP1_64(
1605         CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
1606         "vbase.offset.ptr");
1608   llvm::Value *VBaseOffset;
1609   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1611         CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
1612     VBaseOffset = CGF.Builder.CreateAlignedLoad(
1613         CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
1616     VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1617                                                CGM.PtrDiffTy->getPointerTo());
1618     VBaseOffset = CGF.Builder.CreateAlignedLoad(
1619         CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
// Emit the constructor variants required by the Itanium ABI: always the base
// constructor, and the complete constructor unless the class is abstract.
1624 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1625   // Just make sure we're in sync with TargetCXXABI.
1626   assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1628   // The constructor used for constructing this as a base class;
1629   // ignores virtual bases.
1630   CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1632   // The constructor used for constructing this as a complete class;
1633   // constructs the virtual bases, then calls the base constructor.
1634   if (!D->getParent()->isAbstract()) {
1635     // We don't need to emit the complete ctor if the class is abstract.
1636     CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
// Add the implicit VTT parameter (void**) after 'this' for base-variant
// constructors/destructors of classes with virtual bases.
1640 CGCXXABI::AddedStructorArgCounts
1641 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1642                                       SmallVectorImpl<CanQualType> &ArgTys) {
1643   ASTContext &Context = getContext();
1645   // All parameters are already in place except VTT, which goes after 'this'.
1646   // These are Clang types, so we don't need to worry about sret yet.
1648   // Check if we need to add a VTT parameter (which has type void **).
1649   if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1650                                              : GD.getDtorType() == Dtor_Base) &&
1651       cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1652     ArgTys.insert(ArgTys.begin() + 1,
1653                   Context.getPointerType(Context.VoidPtrTy));
1654     return AddedStructorArgCounts::prefix(1);
1656   return AddedStructorArgCounts{};
// Emit the destructor variants required by the Itanium ABI: base, complete,
// and (for virtual destructors, per the elided condition) deleting.
1659 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1660   // The destructor used for destructing this as a base class; ignores
1662   CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1664   // The destructor used for destructing this as a most-derived class;
1665   // call the base destructor and then destructs any virtual bases.
1666   CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1668   // The destructor in a virtual table is always a 'deleting'
1669   // destructor, which calls the complete destructor and then uses the
1670   // appropriate operator delete.
1672     CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
// Insert the implicit 'vtt' parameter declaration (after 'this') into the
// function argument list of structors that need a VTT.
1675 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1677                                               FunctionArgList &Params) {
1678   const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1679   assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1681   // Check if we need a VTT parameter as well.
1682   if (NeedsVTTParameter(CGF.CurGD)) {
1683     ASTContext &Context = getContext();
1685     // FIXME: avoid the fake decl
1686     QualType T = Context.getPointerType(Context.VoidPtrTy);
1687     auto *VTTDecl = ImplicitParamDecl::Create(
1688         Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1689         T, ImplicitParamDecl::CXXVTT);
1690     Params.insert(Params.begin() + 1, VTTDecl);
1691     getStructorImplicitParamDecl(CGF) = VTTDecl;
// Prologue for instance methods: bind 'this', load the 'vtt' parameter if
// present, and pre-store 'this' into the return slot for this-returning ABIs.
1695 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1696   // Naked functions have no prolog.
1697   if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1700   /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1701   /// adjustments are required, because they are all handled by thunks.
1702   setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1704   /// Initialize the 'vtt' slot if needed.
1705   if (getStructorImplicitParamDecl(CGF)) {
1706     getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1707         CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1710   /// If this is a function that the ABI specifies returns 'this', initialize
1711   /// the return slot to 'this' at the start of the function.
1713   /// Unlike the setting of return types, this is done within the ABI
1714   /// implementation instead of by clients of CGCXXABI because:
1715   /// 1) getThisValue is currently protected
1716   /// 2) in theory, an ABI could implement 'this' returns some other way;
1717   ///    HasThisReturn only specifies a contract, not the implementation
1718   if (HasThisReturn(CGF.CurGD))
1719     CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
// Compute the implicit arguments for a constructor call: the VTT pointer,
// inserted after 'this', when the callee variant needs one.
1722 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1723     CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1724     bool ForVirtualBase, bool Delegating) {
1725   if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1726     return AddedStructorArgs{};
1728   // Insert the implicit 'vtt' argument as the second argument.
1730       CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1731   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1732   return AddedStructorArgs::prefix({{VTT, VTTTy}});
// The implicit parameter of a destructor call under Itanium is the VTT (or
// null when none is needed — GetVTTParameter decides).
1735 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1736     CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1737     bool ForVirtualBase, bool Delegating) {
1738   GlobalDecl GD(DD, Type);
1739   return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
// Emit a direct destructor call, threading through the VTT implicit argument.
// Apple kext virtual destructors are dispatched through their special
// vtable-based mechanism instead of a direct callee.
1742 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1743                                        const CXXDestructorDecl *DD,
1744                                        CXXDtorType Type, bool ForVirtualBase,
1745                                        bool Delegating, Address This,
1747   GlobalDecl GD(DD, Type);
1749       getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1750   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1753   if (getContext().getLangOpts().AppleKext &&
1754       Type != Dtor_Base && DD->isVirtual())
1755     Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1757     Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1759   CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
// Define the vtable group for RD: build the initializer, set linkage/comdat/
// visibility, handle the __fundamental_type_info special case, attach type
// metadata for CFI/WPD, and emit the relative-layout alias when applicable.
1763 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1764                                           const CXXRecordDecl *RD) {
// Bail out if this vtable was already defined.
1765   llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1766   if (VTable->hasInitializer())
1769   ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1770   const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1771   llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1772   llvm::Constant *RTTI =
1773       CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1775   // Create and set the initializer.
1776   ConstantInitBuilder builder(CGM);
1777   auto components = builder.beginStruct();
1778   CGVT.createVTableInitializer(components, VTLayout, RTTI,
1779                                llvm::GlobalValue::isLocalLinkage(Linkage));
1780   components.finishAndSetAsInitializer(VTable);
1782   // Set the correct linkage.
1783   VTable->setLinkage(Linkage);
1785   if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1786     VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1788   // Set the right visibility.
1789   CGM.setGVProperties(VTable, RD);
1791   // If this is the magic class __cxxabiv1::__fundamental_type_info,
1792   // we will emit the typeinfo for the fundamental types. This is the
1793   // same behaviour as GCC.
1794   const DeclContext *DC = RD->getDeclContext();
1795   if (RD->getIdentifier() &&
1796       RD->getIdentifier()->isStr("__fundamental_type_info") &&
1797       isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1798       cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1799       DC->getParent()->isTranslationUnit())
1800     EmitFundamentalRTTIDescriptors(RD);
1802   // Always emit type metadata on non-available_externally definitions, and on
1803   // available_externally definitions if we are performing whole program
1804   // devirtualization. For WPD we need the type metadata on all vtable
1805   // definitions to ensure we associate derived classes with base classes
1806   // defined in headers but with a strong definition only in a shared library.
1807   if (!VTable->isDeclarationForLinker() ||
1808       CGM.getCodeGenOpts().WholeProgramVTables) {
1809     CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
1810     // For available_externally definitions, add the vtable to
1811     // @llvm.compiler.used so that it isn't deleted before whole program
1813     if (VTable->isDeclarationForLinker()) {
1814       assert(CGM.getCodeGenOpts().WholeProgramVTables);
1815       CGM.addCompilerUsedGlobal(VTable);
1819   if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
1820     CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
1823 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1824 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1825 if (Vptr.NearestVBase == nullptr)
1827 return NeedsVTTParameter(CGF.CurGD);
1830 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1831 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1832 const CXXRecordDecl *NearestVBase) {
1834 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1835 NeedsVTTParameter(CGF.CurGD)) {
1836 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1839 return getVTableAddressPoint(Base, VTableClass);
1843 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1844 const CXXRecordDecl *VTableClass) {
1845 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1847 // Find the appropriate vtable within the vtable group, and the address point
1848 // within that vtable.
1849 VTableLayout::AddressPointLocation AddressPoint =
1850 CGM.getItaniumVTableContext()
1851 .getVTableLayout(VTableClass)
1852 .getAddressPoint(Base);
1853 llvm::Value *Indices[] = {
1854 llvm::ConstantInt::get(CGM.Int32Ty, 0),
1855 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1856 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1859 return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1860 Indices, /*InBounds=*/true,
1861 /*InRangeIndex=*/1);
1864 // Check whether all the non-inline virtual methods for the class have the
1865 // specified attribute.
1866 template <typename T>
1867 static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1868 bool FoundNonInlineVirtualMethodWithAttr = false;
1869 for (const auto *D : RD->noload_decls()) {
1870 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1871 if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1872 FD->doesThisDeclarationHaveABody())
1874 if (!D->hasAttr<T>())
1876 FoundNonInlineVirtualMethodWithAttr = true;
1880 // We didn't find any non-inline virtual methods missing the attribute. We
1881 // will return true when we found at least one non-inline virtual with the
1882 // attribute. (This lets our caller know that the attribute needs to be
1883 // propagated up to the vtable.)
1884 return FoundNonInlineVirtualMethodWithAttr;
1887 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1888 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1889 const CXXRecordDecl *NearestVBase) {
1890 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1891 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1893 // Get the secondary vpointer index.
1894 uint64_t VirtualPointerIndex =
1895 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1898 llvm::Value *VTT = CGF.LoadCXXVTT();
1899 if (VirtualPointerIndex)
1900 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
1901 CGF.VoidPtrTy, VTT, VirtualPointerIndex);
1903 // And load the address point from the VTT.
1904 return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
1905 CGF.getPointerAlign());
1908 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1909 BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1910 return getVTableAddressPoint(Base, VTableClass);
1913 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1914 CharUnits VPtrOffset) {
1915 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1917 llvm::GlobalVariable *&VTable = VTables[RD];
1921 // Queue up this vtable for possible deferred emission.
1922 CGM.addDeferredVTable(RD);
1924 SmallString<256> Name;
1925 llvm::raw_svector_ostream Out(Name);
1926 getMangleContext().mangleCXXVTable(RD, Out);
1928 const VTableLayout &VTLayout =
1929 CGM.getItaniumVTableContext().getVTableLayout(RD);
1930 llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1932 // Use pointer alignment for the vtable. Otherwise we would align them based
1933 // on the size of the initializer which doesn't make sense as only single
1935 unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
1937 : CGM.getTarget().getPointerAlign(0);
1939 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1940 Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1941 getContext().toCharUnitsFromBits(PAlign).getQuantity());
1942 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1944 // In MS C++ if you have a class with virtual functions in which you are using
1945 // selective member import/export, then all virtual functions must be exported
1946 // unless they are inline, otherwise a link error will result. To match this
1947 // behavior, for such classes, we dllimport the vtable if it is defined
1948 // externally and all the non-inline virtual methods are marked dllimport, and
1949 // we dllexport the vtable if it is defined in this TU and all the non-inline
1950 // virtual methods are marked dllexport.
1951 if (CGM.getTarget().hasPS4DLLImportExport()) {
1952 if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
1953 if (CGM.getVTables().isVTableExternal(RD)) {
1954 if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
1955 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1957 if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
1958 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
1962 CGM.setGVProperties(VTable, RD);
1967 CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1971 SourceLocation Loc) {
1972 llvm::Type *TyPtr = Ty->getPointerTo();
1973 auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1974 llvm::Value *VTable = CGF.GetVTablePtr(
1975 This, TyPtr->getPointerTo(), MethodDecl->getParent());
1977 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1979 if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1980 VFunc = CGF.EmitVTableTypeCheckedLoad(
1981 MethodDecl->getParent(), VTable,
1982 VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1984 CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1986 llvm::Value *VFuncLoad;
1987 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1988 VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
1989 llvm::Value *Load = CGF.Builder.CreateCall(
1990 CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1991 {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
1992 VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
1995 CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
1996 llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1997 TyPtr, VTable, VTableIndex, "vfn");
1999 CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
2000 CGF.getPointerAlign());
2003 // Add !invariant.load md to virtual function load to indicate that
2004 // function didn't change inside vtable.
2005 // It's safe to add it without -fstrict-vtable-pointers, but it would not
2006 // help in devirtualization because it will only matter if we will have 2
2007 // the same virtual function loads from the same vtable load, which won't
2008 // happen without enabled devirtualization with -fstrict-vtable-pointers.
2009 if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
2010 CGM.getCodeGenOpts().StrictVTablePointers) {
2011 if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
2012 VFuncLoadInstr->setMetadata(
2013 llvm::LLVMContext::MD_invariant_load,
2014 llvm::MDNode::get(CGM.getLLVMContext(),
2015 llvm::ArrayRef<llvm::Metadata *>()));
2021 CGCallee Callee(GD, VFunc);
2025 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
2026 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
2027 Address This, DeleteOrMemberCallExpr E) {
2028 auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
2029 auto *D = E.dyn_cast<const CXXDeleteExpr *>();
2030 assert((CE != nullptr) ^ (D != nullptr));
2031 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2032 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2034 GlobalDecl GD(Dtor, DtorType);
2035 const CGFunctionInfo *FInfo =
2036 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2037 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
2038 CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
2042 ThisTy = CE->getObjectType();
2044 ThisTy = D->getDestroyedType();
2047 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
2048 QualType(), nullptr);
2052 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2053 CodeGenVTables &VTables = CGM.getVTables();
2054 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2055 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2058 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2059 const CXXRecordDecl *RD) const {
2060 // We don't emit available_externally vtables if we are in -fapple-kext mode
2061 // because kext mode does not permit devirtualization.
2062 if (CGM.getLangOpts().AppleKext)
2065 // If the vtable is hidden then it is not safe to emit an available_externally
2067 if (isVTableHidden(RD))
2070 if (CGM.getCodeGenOpts().ForceEmitVTables)
2073 // If we don't have any not emitted inline virtual function then we are safe
2074 // to emit an available_externally copy of vtable.
2075 // FIXME we can still emit a copy of the vtable if we
2076 // can emit definition of the inline functions.
2077 if (hasAnyUnusedVirtualInlineFunction(RD))
2080 // For a class with virtual bases, we must also be able to speculatively
2081 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2082 // the vtable" and "can emit the VTT". For a base subobject, this means we
2083 // need to be able to emit non-virtual base vtables.
2084 if (RD->getNumVBases()) {
2085 for (const auto &B : RD->bases()) {
2086 auto *BRD = B.getType()->getAsCXXRecordDecl();
2087 assert(BRD && "no class for base specifier");
2088 if (B.isVirtual() || !BRD->isDynamicClass())
2090 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2098 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2099 if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2102 // For a complete-object vtable (or more specifically, for the VTT), we need
2103 // to be able to speculatively emit the vtables of all dynamic virtual bases.
2104 for (const auto &B : RD->vbases()) {
2105 auto *BRD = B.getType()->getAsCXXRecordDecl();
2106 assert(BRD && "no class for base specifier");
2107 if (!BRD->isDynamicClass())
2109 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2115 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
2117 int64_t NonVirtualAdjustment,
2118 int64_t VirtualAdjustment,
2119 bool IsReturnAdjustment) {
2120 if (!NonVirtualAdjustment && !VirtualAdjustment)
2121 return InitialPtr.getPointer();
2123 Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
2125 // In a base-to-derived cast, the non-virtual adjustment is applied first.
2126 if (NonVirtualAdjustment && !IsReturnAdjustment) {
2127 V = CGF.Builder.CreateConstInBoundsByteGEP(V,
2128 CharUnits::fromQuantity(NonVirtualAdjustment));
2131 // Perform the virtual adjustment if we have one.
2132 llvm::Value *ResultPtr;
2133 if (VirtualAdjustment) {
2134 Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
2135 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
2137 llvm::Value *Offset;
2138 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
2139 CGF.Int8Ty, VTablePtr, VirtualAdjustment);
2140 if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
2141 // Load the adjustment offset from the vtable as a 32-bit int.
2143 CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
2145 CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
2146 CharUnits::fromQuantity(4));
2148 llvm::Type *PtrDiffTy =
2149 CGF.ConvertType(CGF.getContext().getPointerDiffType());
2152 CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
2154 // Load the adjustment offset from the vtable.
2155 Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
2156 CGF.getPointerAlign());
2158 // Adjust our pointer.
2159 ResultPtr = CGF.Builder.CreateInBoundsGEP(
2160 V.getElementType(), V.getPointer(), Offset);
2162 ResultPtr = V.getPointer();
2165 // In a derived-to-base conversion, the non-virtual adjustment is
2167 if (NonVirtualAdjustment && IsReturnAdjustment) {
2168 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
2169 NonVirtualAdjustment);
2172 // Cast back to the original type.
2173 return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
2176 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
2178 const ThisAdjustment &TA) {
2179 return performTypeAdjustment(CGF, This, TA.NonVirtual,
2180 TA.Virtual.Itanium.VCallOffsetOffset,
2181 /*IsReturnAdjustment=*/false);
2185 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2186 const ReturnAdjustment &RA) {
2187 return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
2188 RA.Virtual.Itanium.VBaseOffsetOffset,
2189 /*IsReturnAdjustment=*/true);
2192 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2193 RValue RV, QualType ResultType) {
2194 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2195 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2197 // Destructor thunks in the ARM ABI have indeterminate results.
2198 llvm::Type *T = CGF.ReturnValue.getElementType();
2199 RValue Undef = RValue::get(llvm::UndefValue::get(T));
2200 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2203 /************************** Array allocation cookies **************************/
2205 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2206 // The array cookie is a size_t; pad that up to the element alignment.
2207 // The cookie is actually right-justified in that space.
2208 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2209 CGM.getContext().getPreferredTypeAlignInChars(elementType));
2212 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2214 llvm::Value *NumElements,
2215 const CXXNewExpr *expr,
2216 QualType ElementType) {
2217 assert(requiresArrayCookie(expr));
2219 unsigned AS = NewPtr.getAddressSpace();
2221 ASTContext &Ctx = getContext();
2222 CharUnits SizeSize = CGF.getSizeSize();
2224 // The size of the cookie.
2225 CharUnits CookieSize =
2226 std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
2227 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2229 // Compute an offset to the cookie.
2230 Address CookiePtr = NewPtr;
2231 CharUnits CookieOffset = CookieSize - SizeSize;
2232 if (!CookieOffset.isZero())
2233 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2235 // Write the number of elements into the appropriate slot.
2236 Address NumElementsPtr =
2237 CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
2238 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2240 // Handle the array cookie specially in ASan.
2241 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2242 (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2243 CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2244 // The store to the CookiePtr does not need to be instrumented.
2245 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
2246 llvm::FunctionType *FTy =
2247 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2248 llvm::FunctionCallee F =
2249 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2250 CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
2253 // Finally, compute a pointer to the actual data buffer by skipping
2254 // over the cookie completely.
2255 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2258 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2260 CharUnits cookieSize) {
2261 // The element size is right-justified in the cookie.
2262 Address numElementsPtr = allocPtr;
2263 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2264 if (!numElementsOffset.isZero())
2266 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2268 unsigned AS = allocPtr.getAddressSpace();
2269 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2270 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2271 return CGF.Builder.CreateLoad(numElementsPtr);
2272 // In asan mode emit a function call instead of a regular load and let the
2273 // run-time deal with it: if the shadow is properly poisoned return the
2274 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2275 // We can't simply ignore this load using nosanitize metadata because
2276 // the metadata may be lost.
2277 llvm::FunctionType *FTy =
2278 llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
2279 llvm::FunctionCallee F =
2280 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2281 return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2284 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2285 // ARM says that the cookie is always:
2286 // struct array_cookie {
2287 // std::size_t element_size; // element_size != 0
2288 // std::size_t element_count;
2290 // But the base ABI doesn't give anything an alignment greater than
2291 // 8, so we can dismiss this as typical ABI-author blindness to
2292 // actual language complexity and round up to the element alignment.
2293 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2294 CGM.getContext().getTypeAlignInChars(elementType));
2297 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2299 llvm::Value *numElements,
2300 const CXXNewExpr *expr,
2301 QualType elementType) {
2302 assert(requiresArrayCookie(expr));
2304 // The cookie is always at the start of the buffer.
2305 Address cookie = newPtr;
2307 // The first element is the element size.
2308 cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2309 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2310 getContext().getTypeSizeInChars(elementType).getQuantity());
2311 CGF.Builder.CreateStore(elementSize, cookie);
2313 // The second element is the element count.
2314 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2315 CGF.Builder.CreateStore(numElements, cookie);
2317 // Finally, compute a pointer to the actual data buffer by skipping
2318 // over the cookie completely.
2319 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2320 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2323 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2325 CharUnits cookieSize) {
2326 // The number of elements is at offset sizeof(size_t) relative to
2327 // the allocated pointer.
2328 Address numElementsPtr
2329 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2331 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2332 return CGF.Builder.CreateLoad(numElementsPtr);
2335 /*********************** Static local initialization **************************/
2337 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2338 llvm::PointerType *GuardPtrTy) {
2339 // int __cxa_guard_acquire(__guard *guard_object);
2340 llvm::FunctionType *FTy =
2341 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2342 GuardPtrTy, /*isVarArg=*/false);
2343 return CGM.CreateRuntimeFunction(
2344 FTy, "__cxa_guard_acquire",
2345 llvm::AttributeList::get(CGM.getLLVMContext(),
2346 llvm::AttributeList::FunctionIndex,
2347 llvm::Attribute::NoUnwind));
2350 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2351 llvm::PointerType *GuardPtrTy) {
2352 // void __cxa_guard_release(__guard *guard_object);
2353 llvm::FunctionType *FTy =
2354 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2355 return CGM.CreateRuntimeFunction(
2356 FTy, "__cxa_guard_release",
2357 llvm::AttributeList::get(CGM.getLLVMContext(),
2358 llvm::AttributeList::FunctionIndex,
2359 llvm::Attribute::NoUnwind));
2362 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2363 llvm::PointerType *GuardPtrTy) {
2364 // void __cxa_guard_abort(__guard *guard_object);
2365 llvm::FunctionType *FTy =
2366 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2367 return CGM.CreateRuntimeFunction(
2368 FTy, "__cxa_guard_abort",
2369 llvm::AttributeList::get(CGM.getLLVMContext(),
2370 llvm::AttributeList::FunctionIndex,
2371 llvm::Attribute::NoUnwind));
2375 struct CallGuardAbort final : EHScopeStack::Cleanup {
2376 llvm::GlobalVariable *Guard;
2377 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2379 void Emit(CodeGenFunction &CGF, Flags flags) override {
2380 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2386 /// The ARM code here follows the Itanium code closely enough that we
2387 /// just special-case it at particular places.
2388 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2390 llvm::GlobalVariable *var,
2391 bool shouldPerformInit) {
2392 CGBuilderTy &Builder = CGF.Builder;
2394 // Inline variables that weren't instantiated from variable templates have
2395 // partially-ordered initialization within their translation unit.
2396 bool NonTemplateInline =
2398 !isTemplateInstantiation(D.getTemplateSpecializationKind());
2400 // We only need to use thread-safe statics for local non-TLS variables and
2401 // inline variables; other global initialization is always single-threaded
2402 // or (through lazy dynamic loading in multiple threads) unsequenced.
2403 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2404 (D.isLocalVarDecl() || NonTemplateInline) &&
2407 // If we have a global variable with internal linkage and thread-safe statics
2408 // are disabled, we can just let the guard variable be of type i8.
2409 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2411 llvm::IntegerType *guardTy;
2412 CharUnits guardAlignment;
2413 if (useInt8GuardVariable) {
2414 guardTy = CGF.Int8Ty;
2415 guardAlignment = CharUnits::One();
2417 // Guard variables are 64 bits in the generic ABI and size width on ARM
2418 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2419 if (UseARMGuardVarABI) {
2420 guardTy = CGF.SizeTy;
2421 guardAlignment = CGF.getSizeAlign();
2423 guardTy = CGF.Int64Ty;
2424 guardAlignment = CharUnits::fromQuantity(
2425 CGM.getDataLayout().getABITypeAlignment(guardTy));
2428 llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
2429 CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
2431 // Create the guard variable if we don't already have it (as we
2432 // might if we're double-emitting this function body).
2433 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2435 // Mangle the name for the guard.
2436 SmallString<256> guardName;
2438 llvm::raw_svector_ostream out(guardName);
2439 getMangleContext().mangleStaticGuardVariable(&D, out);
2442 // Create the guard variable with a zero-initializer.
2443 // Just absorb linkage and visibility from the guarded variable.
2444 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2445 false, var->getLinkage(),
2446 llvm::ConstantInt::get(guardTy, 0),
2448 guard->setDSOLocal(var->isDSOLocal());
2449 guard->setVisibility(var->getVisibility());
2450 // If the variable is thread-local, so is its guard variable.
2451 guard->setThreadLocalMode(var->getThreadLocalMode());
2452 guard->setAlignment(guardAlignment.getAsAlign());
2454 // The ABI says: "It is suggested that it be emitted in the same COMDAT
2455 // group as the associated data object." In practice, this doesn't work for
2456 // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2457 llvm::Comdat *C = var->getComdat();
2458 if (!D.isLocalVarDecl() && C &&
2459 (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2460 CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2461 guard->setComdat(C);
2462 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2463 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2466 CGM.setStaticLocalDeclGuardAddress(&D, guard);
2469 Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);
2471 // Test whether the variable has completed initialization.
2473 // Itanium C++ ABI 3.3.2:
2474 // The following is pseudo-code showing how these functions can be used:
2475 // if (obj_guard.first_byte == 0) {
2476 // if ( __cxa_guard_acquire (&obj_guard) ) {
2478 // ... initialize the object ...;
2480 // __cxa_guard_abort (&obj_guard);
2483 // ... queue object destructor with __cxa_atexit() ...;
2484 // __cxa_guard_release (&obj_guard);
2488 // Load the first byte of the guard variable.
2489 llvm::LoadInst *LI =
2490 Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2493 // An implementation supporting thread-safety on multiprocessor
2494 // systems must also guarantee that references to the initialized
2495 // object do not occur before the load of the initialization flag.
2497 // In LLVM, we do this by marking the load Acquire.
2499 LI->setAtomic(llvm::AtomicOrdering::Acquire);
2501 // For ARM, we should only check the first bit, rather than the entire byte:
2503 // ARM C++ ABI 3.2.3.1:
2504 // To support the potential use of initialization guard variables
2505 // as semaphores that are the target of ARM SWP and LDREX/STREX
2506 // synchronizing instructions we define a static initialization
2507 // guard variable to be a 4-byte aligned, 4-byte word with the
2508 // following inline access protocol.
2509 // #define INITIALIZED 1
2510 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2511 // if (__cxa_guard_acquire(&obj_guard))
2515 // and similarly for ARM64:
2517 // ARM64 C++ ABI 3.2.2:
2518 // This ABI instead only specifies the value bit 0 of the static guard
2519 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2520 // variable is not initialized and 1 when it is.
2522 (UseARMGuardVarABI && !useInt8GuardVariable)
2523 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2525 llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2527 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2528 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2530 // Check if the first byte of the guard variable is zero.
2531 CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2532 CodeGenFunction::GuardKind::VariableGuard, &D);
2534 CGF.EmitBlock(InitCheckBlock);
2536 // Variables used when coping with thread-safe statics and exceptions.
2538 // Call __cxa_guard_acquire.
2540 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2542 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2544 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2545 InitBlock, EndBlock);
2547 // Call __cxa_guard_abort along the exceptional edge.
2548 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2550 CGF.EmitBlock(InitBlock);
2553 // Emit the initializer and add a global destructor if appropriate.
2554 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2557 // Pop the guard-abort cleanup if we pushed one.
2558 CGF.PopCleanupBlock();
2560 // Call __cxa_guard_release. This cannot throw.
2561 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2562 guardAddr.getPointer());
2564 // Store 1 into the first byte of the guard variable after initialization is
2566 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2567 Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2570 CGF.EmitBlock(EndBlock);
2573 /// Register a global destructor using __cxa_atexit.
2574 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2575 llvm::FunctionCallee dtor,
2576 llvm::Constant *addr, bool TLS) {
2577 assert(!CGF.getTarget().getTriple().isOSAIX() &&
2578 "unexpected call to emitGlobalDtorWithCXAAtExit");
2579 assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2580 "__cxa_atexit is disabled");
2581 const char *Name = "__cxa_atexit";
2583 const llvm::Triple &T = CGF.getTarget().getTriple();
2584 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2587 // We're assuming that the destructor function is something we can
2588 // reasonably call with the default CC. Go ahead and cast it to the
2590 llvm::Type *dtorTy =
2591 llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2593 // Preserve address space of addr.
2594 auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2595 auto AddrInt8PtrTy =
2596 AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
2598 // Create a variable that binds the atexit to this shared object.
2599 llvm::Constant *handle =
2600 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2601 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2602 GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2604 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2605 llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
2606 llvm::FunctionType *atexitTy =
2607 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2609 // Fetch the actual function.
2610 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2611 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2612 fn->setDoesNotThrow();
2615 // addr is null when we are trying to register a dtor annotated with
2616 // __attribute__((destructor)) in a constructor function. Using null here is
2617 // okay because this argument is just passed back to the destructor
2619 addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2621 llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
2622 cast<llvm::Constant>(dtor.getCallee()), dtorTy),
2623 llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
2625 CGF.EmitNounwindRuntimeCall(atexit, args);
2628 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2630 // Create a function that registers/unregisters destructors that have the same
2632 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2633 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2634 FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2636 return GlobalInitOrCleanupFn;
// For each priority bucket in DtorsUsingAtExit, emit a
// __GLOBAL_cleanup_<priority> function. For every destructor registered at
// that priority (walked in reverse registration order), the function calls
// unatexit(); if unatexit reports the destructor was still registered
// (returns 0), the destructor is invoked directly. The cleanup function is
// then registered as a global dtor at the same priority.
2639 void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
2640 for (const auto &I : DtorsUsingAtExit) {
2641 int Priority = I.first;
2642 std::string GlobalCleanupFnName =
2643 std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);
2645 llvm::Function *GlobalCleanupFn =
2646 createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);
// Build the body of the cleanup function with a fresh CodeGenFunction.
2648 CodeGenFunction CGF(*this);
2649 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
2650 getTypes().arrangeNullaryFunction(), FunctionArgList(),
2651 SourceLocation(), SourceLocation());
// Compiler-generated code: give it an artificial debug location.
2652 auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2654 // Get the destructor function type, void(*)(void).
2655 llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
2656 llvm::Type *dtorTy = dtorFuncTy->getPointerTo();
2658 // Destructor functions are run/unregistered in non-ascending
2659 // order of their priorities.
2660 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2661 auto itv = Dtors.rbegin();
2662 while (itv != Dtors.rend()) {
2663 llvm::Function *Dtor = *itv;
2665 // We're assuming that the destructor function is something we can
2666 // reasonably call with the correct CC. Go ahead and cast it to the
2668 llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
2669 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
// unatexit() returning 0 means the dtor was still registered and must
// therefore be run here.
2670 llvm::Value *NeedsDestruct =
2671 CGF.Builder.CreateIsNull(V, "needs_destruct");
2673 llvm::BasicBlock *DestructCallBlock =
2674 CGF.createBasicBlock("destruct.call");
// The fall-through block doubles as the next iteration's unatexit call
// site, hence the different name for non-final iterations.
2675 llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
2676 (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
2677 // Check if unatexit returns a value of 0. If it does, jump to
2678 // DestructCallBlock, otherwise jump to EndBlock directly.
2679 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
2681 CGF.EmitBlock(DestructCallBlock);
2683 // Emit the call to casted Dtor.
2684 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
2685 // Make sure the call and the callee agree on calling convention.
2686 CI->setCallingConv(Dtor->getCallingConv());
2688 CGF.EmitBlock(EndBlock);
2693 CGF.FinishFunction();
// Run this cleanup function among the global dtors of the same priority.
2694 AddGlobalDtor(GlobalCleanupFn, Priority);
// For each priority bucket in DtorsUsingAtExit, emit a
// __GLOBAL_init_<priority> function that registers every destructor of that
// priority via __cxa_atexit (when enabled) or atexit, then install that
// function as a global ctor at the same priority. On targets using
// sinit/sterm (AIX-style), also emit the matching unatexit-based cleanup
// functions.
2698 void CodeGenModule::registerGlobalDtorsWithAtExit() {
2699 for (const auto &I : DtorsUsingAtExit) {
2700 int Priority = I.first;
2701 std::string GlobalInitFnName =
2702 std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2703 llvm::Function *GlobalInitFn =
2704 createGlobalInitOrCleanupFn(*this, GlobalInitFnName);
// Build the body of the init function with a fresh CodeGenFunction.
2706 CodeGenFunction CGF(*this);
2707 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
2708 getTypes().arrangeNullaryFunction(), FunctionArgList(),
2709 SourceLocation(), SourceLocation());
// Compiler-generated code: give it an artificial debug location.
2710 auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2712 // Since constructor functions are run in non-descending order of their
2713 // priorities, destructors are registered in non-descending order of their
2714 // priorities, and since destructor functions are run in the reverse order
2715 // of their registration, destructor functions are run in non-ascending
2716 // order of their priorities.
2717 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2718 for (auto *Dtor : Dtors) {
2719 // Register the destructor function calling __cxa_atexit if it is
2720 // available. Otherwise fall back on calling atexit.
2721 if (getCodeGenOpts().CXAAtExit) {
2722 emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2724 // Get the destructor function type, void(*)(void).
2725 llvm::Type *dtorTy =
2726 llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();
2728 // We're assuming that the destructor function is something we can
2729 // reasonably call with the correct CC. Go ahead and cast it to the
2731 CGF.registerGlobalDtorWithAtExit(
2732 llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
2736 CGF.FinishFunction();
// Run this registration function among the global ctors of this priority.
2737 AddGlobalCtor(GlobalInitFn, Priority, nullptr);
// On sinit/sterm targets, matching unregistration/cleanup functions are
// required as well.
2740 if (getCXXABI().useSinitAndSterm())
2741 unregisterGlobalDtorsWithUnAtExit();
2744 /// Register a global destructor as best as we know how.
/// Strategy, in order: skip entirely for [[no_destroy]] variables; use
/// __cxa_atexit / __cxa_thread_atexit when permitted; use the kext global
/// destructor list on Apple kexts; otherwise fall back to plain atexit.
2745 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2746 llvm::FunctionCallee dtor,
2747 llvm::Constant *addr) {
// [[clang::no_destroy]]: the variable's destructor is never run.
2748 if (D.isNoDestroy(CGM.getContext()))
2751 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2752 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2753 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2754 // We can always use __cxa_thread_atexit.
2755 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2756 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2758 // In Apple kexts, we want to add a global destructor entry.
2759 // FIXME: shouldn't this be guarded by some variable?
2760 if (CGM.getLangOpts().AppleKext) {
2761 // Generate a global destructor entry.
2762 return CGM.AddCXXDtorEntry(dtor, addr);
// Fallback: register via plain atexit.
2765 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
// Returns true if the thread wrapper for VD may be replaced at link/runtime
// (currently: dynamic-TLS variables on Darwin). Callers use this to pick
// linkage, visibility, and the CXX_FAST_TLS calling convention.
2768 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2769 CodeGen::CodeGenModule &CGM) {
2770 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2771 // Darwin prefers to have references to thread local variables to go through
2772 // the thread wrapper instead of directly referencing the backing variable.
2773 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2774 CGM.getTarget().getTriple().isOSDarwin();
2777 /// Get the appropriate linkage for the wrapper function. This is essentially
2778 /// the weak form of the variable's linkage; every translation unit which needs
2779 /// the wrapper emits a copy, and we want the linker to merge them.
2780 static llvm::GlobalValue::LinkageTypes
2781 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2782 llvm::GlobalValue::LinkageTypes VarLinkage =
2783 CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2785 // For internal linkage variables, we don't need an external or weak wrapper.
// NOTE(review): the return value for the local-linkage branch falls on a
// line not visible in this excerpt (presumably VarLinkage itself — confirm).
2786 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2789 // If the thread wrapper is replaceable, give it appropriate linkage.
2790 if (isThreadWrapperReplaceable(VD, CGM))
2791 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2792 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
// Default: weak ODR so copies from multiple TUs are merged by the linker.
2794 return llvm::GlobalValue::WeakODRLinkage;
// Find or create the thread_local wrapper function for VD (the function that
// runs any pending dynamic initialization and returns the address of the
// variable). Newly created wrappers are recorded in ThreadWrappers so
// EmitThreadLocalInitFuncs can emit their bodies later.
2798 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2800 // Mangle the name for the thread_local wrapper function.
2801 SmallString<256> WrapperName;
2803 llvm::raw_svector_ostream Out(WrapperName);
2804 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2807 // FIXME: If VD is a definition, we should regenerate the function attributes
2808 // before returning.
// Reuse a previously created wrapper if one exists in the module.
2809 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2810 return cast<llvm::Function>(V);
// The wrapper returns a pointer to the variable; strip any reference so the
// pointee type is the referenced type itself.
2812 QualType RetQT = VD->getType();
2813 if (RetQT->isReferenceType())
2814 RetQT = RetQT.getNonReferenceType();
2816 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2817 getContext().getPointerType(RetQT), FunctionArgList());
2819 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2820 llvm::Function *Wrapper =
2821 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2822 WrapperName.str(), &CGM.getModule());
// Weak-for-linker wrappers go in a COMDAT so duplicate copies are merged.
2824 if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
2825 Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
2827 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);
2829 // Always resolve references to the wrapper at link time.
2830 if (!Wrapper->hasLocalLinkage())
2831 if (!isThreadWrapperReplaceable(VD, CGM) ||
2832 llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2833 llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2834 VD->getVisibility() == HiddenVisibility)
2835 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
// Replaceable (Darwin dynamic-TLS) wrappers use the CXX_FAST_TLS CC and
// must not unwind.
2837 if (isThreadWrapperReplaceable(VD, CGM)) {
2838 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2839 Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
// Remember the wrapper so its body gets emitted in EmitThreadLocalInitFuncs.
2842 ThreadWrappers.push_back({VD, Wrapper});
// Emit the machinery for dynamic TLS initialization:
//   * a guarded __tls_init function covering all ordered initializers,
//   * a per-variable _ZTH* init function (alias or weak declaration),
//   * the body of each thread wrapper collected in ThreadWrappers, which
//     invokes the appropriate init function and returns the variable's
//     address (or the referenced object's address, for references).
2846 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2847 CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2848 ArrayRef<llvm::Function *> CXXThreadLocalInits,
2849 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2850 llvm::Function *InitFunc = nullptr;
2852 // Separate initializers into those with ordered (or partially-ordered)
2853 // initialization and those with unordered initialization.
2854 llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2855 llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2856 for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
// Template instantiations have unordered initialization; everything else
// is initialized in declaration order.
2857 if (isTemplateInstantiation(
2858 CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2859 UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2860 CXXThreadLocalInits[I];
2862 OrderedInits.push_back(CXXThreadLocalInits[I]);
2865 if (!OrderedInits.empty()) {
2866 // Generate a guarded initialization function.
2867 llvm::FunctionType *FTy =
2868 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2869 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2870 InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
// Per-thread guard byte ensuring __tls_init runs at most once per thread.
2873 llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2874 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2875 llvm::GlobalVariable::InternalLinkage,
2876 llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2877 Guard->setThreadLocal(true);
2878 Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());
2880 CharUnits GuardAlign = CharUnits::One();
2881 Guard->setAlignment(GuardAlign.getAsAlign());
2883 CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
2884 InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
2885 // On Darwin platforms, use CXX_FAST_TLS calling convention.
2886 if (CGM.getTarget().getTriple().isOSDarwin()) {
2887 InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2888 InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2892 // Create declarations for thread wrappers for all thread-local variables
2893 // with non-discardable definitions in this translation unit.
2894 for (const VarDecl *VD : CXXThreadLocals) {
2895 if (VD->hasDefinition() &&
2896 !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
2897 llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
2898 getOrCreateThreadLocalWrapper(VD, GV);
2902 // Emit all referenced thread wrappers.
2903 for (auto VDAndWrapper : ThreadWrappers) {
2904 const VarDecl *VD = VDAndWrapper.first;
2905 llvm::GlobalVariable *Var =
2906 cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2907 llvm::Function *Wrapper = VDAndWrapper.second;
2909 // Some targets require that all access to thread local variables go through
2910 // the thread wrapper. This means that we cannot attempt to create a thread
2911 // wrapper or a thread helper.
2912 if (!VD->hasDefinition()) {
2913 if (isThreadWrapperReplaceable(VD, CGM)) {
2914 Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2918 // If this isn't a TU in which this variable is defined, the thread
2919 // wrapper is discardable.
2920 if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
2921 Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage)
2924 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2926 // Mangle the name for the thread_local initialization function.
2927 SmallString<256> InitFnName;
2929 llvm::raw_svector_ostream Out(InitFnName);
2930 getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2933 llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2935 // If we have a definition for the variable, emit the initialization
2936 // function as an alias to the global Init function (if any). Otherwise,
2937 // produce a declaration of the initialization function.
2938 llvm::GlobalValue *Init = nullptr;
2939 bool InitIsInitFunc = false;
2940 bool HasConstantInitialization = false;
2941 if (!usesThreadWrapperFunction(VD)) {
2942 HasConstantInitialization = true;
2943 } else if (VD->hasDefinition()) {
2944 InitIsInitFunc = true;
2945 llvm::Function *InitFuncToUse = InitFunc;
// Unordered (template-instantiation) TLS vars use their own per-variable
// init function rather than the shared __tls_init.
2946 if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2947 InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2949 Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2952 // Emit a weak global function referring to the initialization function.
2953 // This function will not exist if the TU defining the thread_local
2954 // variable in question does not need any dynamic initialization for
2955 // its thread_local variables.
2956 Init = llvm::Function::Create(InitFnTy,
2957 llvm::GlobalVariable::ExternalWeakLinkage,
2958 InitFnName.str(), &CGM.getModule());
2959 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2960 CGM.SetLLVMFunctionAttributes(
2961 GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
// The init function mirrors the variable's visibility/DSO-locality.
2965 Init->setVisibility(Var->getVisibility());
2966 // Don't mark an extern_weak function DSO local on windows.
2967 if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
2968 Init->setDSOLocal(Var->isDSOLocal());
2971 llvm::LLVMContext &Context = CGM.getModule().getContext();
2973 // The linker on AIX is not happy with missing weak symbols. However,
2974 // other TUs will not know whether the initialization routine exists
2975 // so create an empty, init function to satisfy the linker.
2976 // This is needed whenever a thread wrapper function is not used, and
2977 // also when the symbol is weak.
2978 if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
2979 isEmittedWithConstantInitializer(VD, true) &&
2980 !mayNeedDestruction(VD)) {
2981 // Init should be null. If it were non-null, then the logic above would
2982 // either be defining the function to be an alias or declaring the
2983 // function with the expectation that the definition of the variable
2985 assert(Init == nullptr && "Expected Init to be null.");
2987 llvm::Function *Func = llvm::Function::Create(
2988 InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
2989 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2990 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
2991 cast<llvm::Function>(Func),
2993 // Create a function body that just returns
2994 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
2995 CGBuilderTy Builder(CGM, Entry);
2996 Builder.CreateRetVoid();
// Now emit the wrapper body itself.
2999 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
3000 CGBuilderTy Builder(CGM, Entry);
3001 if (HasConstantInitialization) {
3002 // No dynamic initialization to invoke.
3003 } else if (InitIsInitFunc) {
// The init function is known to exist; call it unconditionally.
3005 llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
3006 if (isThreadWrapperReplaceable(VD, CGM)) {
3007 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3008 llvm::Function *Fn =
3009 cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
3010 Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3013 } else if (CGM.getTriple().isOSAIX()) {
3014 // On AIX, except if constinit and also neither of class type or of
3015 // (possibly multi-dimensional) array of class type, thread_local vars
3016 // will have init routines regardless of whether they are
3017 // const-initialized. Since the routine is guaranteed to exist, we can
3018 // unconditionally call it without testing for its existance. This
3019 // avoids potentially unresolved weak symbols which the AIX linker
3020 // isn't happy with.
3021 Builder.CreateCall(InitFnTy, Init);
3023 // Don't know whether we have an init function. Call it if it exists.
3024 llvm::Value *Have = Builder.CreateIsNotNull(Init);
3025 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
3026 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
3027 Builder.CreateCondBr(Have, InitBB, ExitBB);
3029 Builder.SetInsertPoint(InitBB);
3030 Builder.CreateCall(InitFnTy, Init);
3031 Builder.CreateBr(ExitBB);
3033 Builder.SetInsertPoint(ExitBB);
3036 // For a reference, the result of the wrapper function is a pointer to
3037 // the referenced object.
3038 llvm::Value *Val = Var;
3039 if (VD->getType()->isReferenceType()) {
3040 CharUnits Align = CGM.getContext().getDeclAlign(VD);
3041 Val = Builder.CreateAlignedLoad(Var->getValueType(), Var, Align);
// Adjust pointer type/address space to match the declared return type.
3043 if (Val->getType() != Wrapper->getReturnType())
3044 Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
3045 Val, Wrapper->getReturnType(), "");
3046 Builder.CreateRet(Val);
// Produce an lvalue for a use of a thread_local variable: call the thread
// wrapper (which returns the variable's address after any needed dynamic
// initialization) and wrap the result in an LValue of the requested type.
3050 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3052 QualType LValType) {
3053 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3054 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3056 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
// Propagate the wrapper's CC (CXX_FAST_TLS on Darwin) to the call site.
3057 CallVal->setCallingConv(Wrapper->getCallingConv());
// For a reference, the wrapper already returned the referenced object's
// address, so natural alignment of the pointee applies; otherwise use the
// variable's declared alignment.
3060 if (VD->getType()->isReferenceType())
3061 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3063 LV = CGF.MakeAddrLValue(CallVal, LValType,
3064 CGF.getContext().getDeclAlign(VD));
3065 // FIXME: need setObjCGCLValueClass?
3069 /// Return whether the given global decl needs a VTT parameter, which it does
3070 /// if it's a base constructor or destructor with virtual bases.
3071 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3072 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3074 // We don't have any virtual bases, just return early.
3075 if (!MD->getParent()->getNumVBases())
3078 // Check if we have a base constructor.
3079 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3082 // Check if we have a base destructor.
// (Complete-object ctors/dtors construct their own VTT; only base variants
// receive it as a parameter.)
3083 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
// Helper that constructs Itanium-ABI RTTI (type_info) objects: it assembles
// the constant Fields of a descriptor, picks the right __cxxabiv1 vtable,
// and knows the flag encodings for pointer, VMI and base-class descriptors
// (Itanium C++ ABI section 2.9.5).
3090 class ItaniumRTTIBuilder {
3091 CodeGenModule &CGM; // Per-module state.
3092 llvm::LLVMContext &VMContext;
3093 const ItaniumCXXABI &CXXABI; // Per-module state.
3095 /// Fields - The fields of the RTTI descriptor currently being built.
3096 SmallVector<llvm::Constant *, 16> Fields;
3098 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
3099 llvm::GlobalVariable *
3100 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
3102 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
3103 /// descriptor of the given type.
3104 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
3106 /// BuildVTablePointer - Build the vtable pointer for the given type.
3107 void BuildVTablePointer(const Type *Ty);
3109 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3110 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3111 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
3113 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3114 /// classes with bases that do not satisfy the abi::__si_class_type_info
3115 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
3116 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
3118 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3119 /// for pointer types.
3120 void BuildPointerTypeInfo(QualType PointeeTy);
3122 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3123 /// type_info for an object type.
3124 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
3126 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3127 /// struct, used for member pointer types.
3128 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
3131 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
3132 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
3134 // Pointer type info flags.
// Values match abi::__pbase_type_info::__flags in the Itanium C++ ABI.
3136 /// PTI_Const - Type has const qualifier.
3139 /// PTI_Volatile - Type has volatile qualifier.
3142 /// PTI_Restrict - Type has restrict qualifier.
3145 /// PTI_Incomplete - Type is incomplete.
3146 PTI_Incomplete = 0x8,
3148 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
3149 /// (in pointer to member).
3150 PTI_ContainingClassIncomplete = 0x10,
3152 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
3153 //PTI_TransactionSafe = 0x20,
3155 /// PTI_Noexcept - Pointee is noexcept function (C++1z).
3156 PTI_Noexcept = 0x40,
3159 // VMI type info flags.
// Values match abi::__vmi_class_type_info::__flags.
3161 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
3162 VMI_NonDiamondRepeat = 0x1,
3164 /// VMI_DiamondShaped - Class is diamond shaped.
3165 VMI_DiamondShaped = 0x2
3168 // Base class type info flags.
// Values match abi::__base_class_type_info::__offset_flags.
3170 /// BCTI_Virtual - Base class is virtual.
3173 /// BCTI_Public - Base class is public.
3177 /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
3178 /// link to an existing RTTI descriptor if one already exists.
3179 llvm::Constant *BuildTypeInfo(QualType Ty);
3181 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
3182 llvm::Constant *BuildTypeInfo(
3184 llvm::GlobalVariable::LinkageTypes Linkage,
3185 llvm::GlobalValue::VisibilityTypes Visibility,
3186 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
// Emit the _ZTS* global holding the mangled name string for Ty, with the
// given linkage, and return it.
3190 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3191 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3192 SmallString<256> Name;
3193 llvm::raw_svector_ostream Out(Name);
3194 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3196 // We know that the mangled name of the type starts at index 4 of the
3197 // mangled name of the typename, so we can just index into it in order to
3198 // get the mangled name of the type.
3199 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3201 auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
// CreateOrReplaceCXXRuntimeVariable handles the case where a declaration
// with this name already exists in the module.
3203 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3204 Name, Init->getType(), Linkage, Align.getQuantity());
3206 GV->setInitializer(Init);
// Return an i8* constant referring to the (externally defined) _ZTI*
// type_info object for Ty, creating an external declaration if the module
// does not have one yet.
3212 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3213 // Mangle the RTTI name.
3214 SmallString<256> Name;
3215 llvm::raw_svector_ostream Out(Name);
3216 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3218 // Look for an existing global.
3219 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3222 // Create a new global variable.
3223 // Note for the future: If we would ever like to do deferred emission of
3224 // RTTI, check if emitting vtables opportunistically need any adjustment.
3226 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3227 /*isConstant=*/true,
3228 llvm::GlobalValue::ExternalLinkage, nullptr,
// Visibility/DLL storage class follow the record, when there is one.
3230 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3231 CGM.setGVProperties(GV, RD);
3232 // Import the typeinfo symbol when all non-inline virtual methods are
3234 if (CGM.getTarget().hasPS4DLLImportExport()) {
3235 if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
3236 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3237 CGM.setDSOLocal(GV);
3242 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3245 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3246 /// info for that type is defined in the standard library.
3247 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3248 // Itanium C++ ABI 2.9.2:
3249 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
3250 // the run-time support library. Specifically, the run-time support
3251 // library should contain type_info objects for the types X, X* and
3252 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3253 // unsigned char, signed char, short, unsigned short, int, unsigned int,
3254 // long, unsigned long, long long, unsigned long long, float, double,
3255 // long double, char16_t, char32_t, and the IEEE 754r decimal and
3256 // half-precision floating point types.
3258 // GCC also emits RTTI for __int128.
3259 // FIXME: We do not emit RTTI information for decimal types here.
3261 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3262 switch (Ty->getKind()) {
// Fundamental types: type_info provided by the C++ runtime library.
3263 case BuiltinType::Void:
3264 case BuiltinType::NullPtr:
3265 case BuiltinType::Bool:
3266 case BuiltinType::WChar_S:
3267 case BuiltinType::WChar_U:
3268 case BuiltinType::Char_U:
3269 case BuiltinType::Char_S:
3270 case BuiltinType::UChar:
3271 case BuiltinType::SChar:
3272 case BuiltinType::Short:
3273 case BuiltinType::UShort:
3274 case BuiltinType::Int:
3275 case BuiltinType::UInt:
3276 case BuiltinType::Long:
3277 case BuiltinType::ULong:
3278 case BuiltinType::LongLong:
3279 case BuiltinType::ULongLong:
3280 case BuiltinType::Half:
3281 case BuiltinType::Float:
3282 case BuiltinType::Double:
3283 case BuiltinType::LongDouble:
3284 case BuiltinType::Float16:
3285 case BuiltinType::Float128:
3286 case BuiltinType::Ibm128:
3287 case BuiltinType::Char8:
3288 case BuiltinType::Char16:
3289 case BuiltinType::Char32:
3290 case BuiltinType::Int128:
3291 case BuiltinType::UInt128:
// Extension/target-specific types: type_info is NOT in the standard
// library, so the compiler must emit it.
3294 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3295 case BuiltinType::Id:
3296 #include "clang/Basic/OpenCLImageTypes.def"
3297 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3298 case BuiltinType::Id:
3299 #include "clang/Basic/OpenCLExtensionTypes.def"
3300 case BuiltinType::OCLSampler:
3301 case BuiltinType::OCLEvent:
3302 case BuiltinType::OCLClkEvent:
3303 case BuiltinType::OCLQueue:
3304 case BuiltinType::OCLReserveID:
3305 #define SVE_TYPE(Name, Id, SingletonId) \
3306 case BuiltinType::Id:
3307 #include "clang/Basic/AArch64SVEACLETypes.def"
3308 #define PPC_VECTOR_TYPE(Name, Id, Size) \
3309 case BuiltinType::Id:
3310 #include "clang/Basic/PPCTypes.def"
3311 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3312 #include "clang/Basic/RISCVVTypes.def"
3313 case BuiltinType::ShortAccum:
3314 case BuiltinType::Accum:
3315 case BuiltinType::LongAccum:
3316 case BuiltinType::UShortAccum:
3317 case BuiltinType::UAccum:
3318 case BuiltinType::ULongAccum:
3319 case BuiltinType::ShortFract:
3320 case BuiltinType::Fract:
3321 case BuiltinType::LongFract:
3322 case BuiltinType::UShortFract:
3323 case BuiltinType::UFract:
3324 case BuiltinType::ULongFract:
3325 case BuiltinType::SatShortAccum:
3326 case BuiltinType::SatAccum:
3327 case BuiltinType::SatLongAccum:
3328 case BuiltinType::SatUShortAccum:
3329 case BuiltinType::SatUAccum:
3330 case BuiltinType::SatULongAccum:
3331 case BuiltinType::SatShortFract:
3332 case BuiltinType::SatFract:
3333 case BuiltinType::SatLongFract:
3334 case BuiltinType::SatUShortFract:
3335 case BuiltinType::SatUFract:
3336 case BuiltinType::SatULongFract:
3337 case BuiltinType::BFloat16:
// Placeholder/dependent types should never reach RTTI emission.
3340 case BuiltinType::Dependent:
3341 #define BUILTIN_TYPE(Id, SingletonId)
3342 #define PLACEHOLDER_TYPE(Id, SingletonId) \
3343 case BuiltinType::Id:
3344 #include "clang/AST/BuiltinTypes.def"
3345 llvm_unreachable("asking for RRTI for a placeholder type!");
3347 case BuiltinType::ObjCId:
3348 case BuiltinType::ObjCClass:
3349 case BuiltinType::ObjCSel:
3350 llvm_unreachable("FIXME: Objective-C types are unsupported!");
3353 llvm_unreachable("Invalid BuiltinType Kind!");
// Pointer overload: per Itanium C++ ABI 2.9.2, the runtime provides
// type_info for X* and X const* for each fundamental type X — so this holds
// only when the pointee is a builtin type with at most a const qualifier.
3356 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3357 QualType PointeeTy = PointerTy->getPointeeType();
3358 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3362 // Check the qualifiers.
// const is permitted (X const* is in the runtime); any other qualifier
// disqualifies the type. The rejection branch is on lines not visible here.
3363 Qualifiers Quals = PointeeTy.getQualifiers();
3364 Quals.removeConst();
3369 return TypeInfoIsInStandardLibrary(BuiltinTy);
3372 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3373 /// information for the given type exists in the standard library.
/// Dispatches to the BuiltinType / PointerType overloads above; all other
/// type classes are never runtime-provided.
3374 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3375 // Type info for builtin types is defined in the standard library.
3376 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3377 return TypeInfoIsInStandardLibrary(BuiltinTy);
3379 // Type info for some pointer types to builtin types is defined in the
3380 // standard library.
3381 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3382 return TypeInfoIsInStandardLibrary(PointerTy);
3387 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3388 /// the given type exists somewhere else, and that we should not emit the type
3389 /// information in this translation unit. Assumes that it is not a
3390 /// standard-library type.
3391 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3393 ASTContext &Context = CGM.getContext();
3395 // If RTTI is disabled, assume it might be disabled in the
3396 // translation unit that defines any potential key function, too.
3397 if (!Context.getLangOpts().RTTI) return false;
3399 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3400 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
// Incomplete classes can't have their RTTI defined elsewhere reliably.
3401 if (!RD->hasDefinition())
// Non-dynamic classes have no key function to anchor the RTTI.
3404 if (!RD->isDynamicClass())
3407 // FIXME: this may need to be reconsidered if the key function
3409 // N.B. We must always emit the RTTI data ourselves if there exists a key
3411 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3413 // Don't import the RTTI but emit it locally.
3414 if (CGM.getTriple().isWindowsGNUEnvironment())
// If the class's vtable is defined in another TU, its RTTI normally is
// too (PS4/PS5 DLL import/export is handled specially).
3417 if (CGM.getVTables().isVTableExternal(RD)) {
3418 if (CGM.getTarget().hasPS4DLLImportExport())
3421 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3432 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
/// True when the record has no complete definition visible.
3433 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3434 return !RecordTy->getDecl()->isCompleteDefinition();
3437 /// ContainsIncompleteClassType - Returns whether the given type contains an
3438 /// incomplete class type. This is true if
3440 /// * The given type is an incomplete class type.
3441 /// * The given type is a pointer type whose pointee type contains an
3442 /// incomplete class type.
3443 /// * The given type is a member pointer type whose class is an incomplete
3445 /// * The given type is a member pointer type whoise pointee type contains an
3446 /// incomplete class type.
3447 /// is an indirect or direct pointer to an incomplete class type.
3448 static bool ContainsIncompleteClassType(QualType Ty) {
3449 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3450 if (IsIncompleteClassType(RecordTy))
// Recurse through pointers: T** to an incomplete class still counts.
3454 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3455 return ContainsIncompleteClassType(PointerTy->getPointeeType());
3457 if (const MemberPointerType *MemberPointerTy =
3458 dyn_cast<MemberPointerType>(Ty)) {
3459 // Check if the class type is incomplete.
3460 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3461 if (IsIncompleteClassType(ClassType))
// Also recurse into the member pointer's pointee type.
3464 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3470 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3471 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3472 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
// Determines whether __si_class_type_info suffices or __vmi_class_type_info
// is required for this class's RTTI.
3473 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3474 // Check the number of bases.
3475 if (RD->getNumBases() != 1)
3479 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3481 // Check that the base is not virtual.
3482 if (Base->isVirtual())
3485 // Check that the base is public.
3486 if (Base->getAccessSpecifier() != AS_public)
3489 // Check that the class is dynamic iff the base is.
// (A dynamic base of a non-dynamic derived class, or vice versa, would put
// the base subobject at a nonzero offset.)
3491 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3492 if (!BaseDecl->isEmpty() &&
3493 BaseDecl->isDynamicClass() != RD->isDynamicClass())
// Selects the abi::*_type_info vtable symbol matching Ty's type class,
// computes its address point, and pushes it as the first field of the RTTI
// descriptor being built.
// NOTE(review): this numbered listing elides some original lines (braces,
// blank lines, and a few case labels), so the embedded numbering is sparse.
3499 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3500 // abi::__class_type_info.
3501 static const char * const ClassTypeInfo =
3502 "_ZTVN10__cxxabiv117__class_type_infoE";
3503 // abi::__si_class_type_info.
3504 static const char * const SIClassTypeInfo =
3505 "_ZTVN10__cxxabiv120__si_class_type_infoE";
3506 // abi::__vmi_class_type_info.
3507 static const char * const VMIClassTypeInfo =
3508 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3510 const char *VTableName = nullptr;
3512 switch (Ty->getTypeClass()) {
3513 #define TYPE(Class, Base)
3514 #define ABSTRACT_TYPE(Class, Base)
3515 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3516 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3517 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3518 #include "clang/AST/TypeNodes.inc"
3519 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3521 case Type::LValueReference:
3522 case Type::RValueReference:
3523 llvm_unreachable("References shouldn't get here");
3526 case Type::DeducedTemplateSpecialization:
3527 llvm_unreachable("Undeduced type shouldn't get here");
3530 llvm_unreachable("Pipe types shouldn't get here");
3534 // GCC treats vector and complex types as fundamental types.
3536 case Type::ExtVector:
3537 case Type::ConstantMatrix:
3540 // FIXME: GCC treats block pointers as fundamental types?!
3541 case Type::BlockPointer:
3542 // abi::__fundamental_type_info.
3543 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3546 case Type::ConstantArray:
3547 case Type::IncompleteArray:
3548 case Type::VariableArray:
3549 // abi::__array_type_info.
3550 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3553 case Type::FunctionNoProto:
3554 case Type::FunctionProto:
3555 // abi::__function_type_info.
3556 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3560 // abi::__enum_type_info.
3561 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3564 case Type::Record: {
3565 const CXXRecordDecl *RD =
3566 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
// A class with no definition or no bases uses the plain __class_type_info;
// single public non-virtual inheritance selects __si_class_type_info,
// anything else falls back to __vmi_class_type_info.
3568 if (!RD->hasDefinition() || !RD->getNumBases()) {
3569 VTableName = ClassTypeInfo;
3570 } else if (CanUseSingleInheritance(RD)) {
3571 VTableName = SIClassTypeInfo;
3573 VTableName = VMIClassTypeInfo;
3579 case Type::ObjCObject:
3580 // Ignore protocol qualifiers.
3581 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3583 // Handle id and Class.
3584 if (isa<BuiltinType>(Ty)) {
3585 VTableName = ClassTypeInfo;
3589 assert(isa<ObjCInterfaceType>(Ty));
3592 case Type::ObjCInterface:
3593 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3594 VTableName = SIClassTypeInfo;
3596 VTableName = ClassTypeInfo;
3600 case Type::ObjCObjectPointer:
3602 // abi::__pointer_type_info.
3603 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3606 case Type::MemberPointer:
3607 // abi::__pointer_to_member_type_info.
3608 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3612 llvm::Constant *VTable = nullptr;
3614 // Check if the alias exists. If it doesn't, then get or create the global.
3615 if (CGM.getItaniumVTableContext().isRelativeLayout())
3616 VTable = CGM.getModule().getNamedAlias(VTableName);
3618 VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
3620 CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3622 llvm::Type *PtrDiffTy =
3623 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3625 // The vtable address point is 2.
3626 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
3627 // The vtable address point is 8 bytes after its start:
3628 // 4 for the offset to top + 4 for the relative offset to rtti.
3629 llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3630 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3632 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
// Classic (non-relative) layout: skip two pointer-sized slots (offset-to-top
// and the RTTI pointer) to reach the address point.
3634 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3635 VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
3638 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3640 Fields.push_back(VTable);
// NOTE(review): this numbered listing elides some original lines (the second
// parameter line, some case labels and braces), so the numbering is sparse.
3643 /// Return the linkage that the type info and type info name constants
3644 /// should have for the given type.
3645 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3647 // Itanium C++ ABI 2.9.5p7:
3648 // In addition, it and all of the intermediate abi::__pointer_type_info
3649 // structs in the chain down to the abi::__class_type_info for the
3650 // incomplete class type must be prevented from resolving to the
3651 // corresponding type_info structs for the complete class type, possibly
3652 // by making them local static objects. Finally, a dummy class RTTI is
3653 // generated for the incomplete type that will not resolve to the final
3654 // complete class RTTI (because the latter need not exist), possibly by
3655 // making it a local static object.
3656 if (ContainsIncompleteClassType(Ty))
3657 return llvm::GlobalValue::InternalLinkage;
// Map the AST-level linkage of the type onto an LLVM linkage for the
// descriptor globals.
3659 switch (Ty->getLinkage()) {
3661 case InternalLinkage:
3662 case UniqueExternalLinkage:
3663 return llvm::GlobalValue::InternalLinkage;
3665 case VisibleNoLinkage:
3666 case ModuleInternalLinkage:
3668 case ExternalLinkage:
3669 // RTTI is not enabled, which means that this type info struct is going
3670 // to be used for exception handling. Give it linkonce_odr linkage.
3671 if (!CGM.getLangOpts().RTTI)
3672 return llvm::GlobalValue::LinkOnceODRLinkage;
3674 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3675 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
// Respect __attribute__((weak)) on the class itself.
3676 if (RD->hasAttr<WeakAttr>())
3677 return llvm::GlobalValue::WeakODRLinkage;
3678 if (CGM.getTriple().isWindowsItaniumEnvironment())
3679 if (RD->hasAttr<DLLImportAttr>() &&
3680 ShouldUseExternalRTTIDescriptor(CGM, Ty))
3681 return llvm::GlobalValue::ExternalLinkage;
3682 // MinGW always uses LinkOnceODRLinkage for type info.
// For dynamic classes (outside MinGW) the typeinfo follows the vtable's
// linkage so the two symbols are emitted consistently.
3683 if (RD->isDynamicClass() &&
3687 .isWindowsGNUEnvironment())
3688 return CGM.getVTableLinkage(RD);
3691 return llvm::GlobalValue::LinkOnceODRLinkage;
3694 llvm_unreachable("Invalid linkage!");
// Entry point: returns the address of the RTTI descriptor for Ty, reusing an
// already-emitted or external descriptor when possible, otherwise computing
// linkage/visibility/DLL storage and delegating to the 4-argument overload.
// NOTE(review): this numbered listing elides some original lines (braces and
// blank lines), so the embedded numbering is sparse.
3697 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3698 // We want to operate on the canonical type.
3699 Ty = Ty.getCanonicalType();
3701 // Check if we've already emitted an RTTI descriptor for this type.
3702 SmallString<256> Name;
3703 llvm::raw_svector_ostream Out(Name);
3704 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3706 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3707 if (OldGV && !OldGV->isDeclaration()) {
3708 assert(!OldGV->hasAvailableExternallyLinkage() &&
3709 "available_externally typeinfos not yet implemented");
3711 return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3714 // Check if there is already an external RTTI descriptor for this type.
3715 if (IsStandardLibraryRTTIDescriptor(Ty) ||
3716 ShouldUseExternalRTTIDescriptor(CGM, Ty))
3717 return GetAddrOfExternalRTTIDescriptor(Ty);
3719 // Emit the standard library with external linkage.
3720 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3722 // Give the type_info object and name the formal visibility of the
3724 llvm::GlobalValue::VisibilityTypes llvmVisibility;
3725 if (llvm::GlobalValue::isLocalLinkage(Linkage))
3726 // If the linkage is local, only default visibility makes sense.
3727 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3728 else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3729 ItaniumCXXABI::RUK_NonUniqueHidden)
3730 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3732 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
// Windows-Itanium: dllexport the descriptor when the class itself is
// dllexport-attributed.
3734 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3735 llvm::GlobalValue::DefaultStorageClass;
3736 if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3737 auto RD = Ty->getAsCXXRecordDecl();
3738 if (RD && RD->hasAttr<DLLExportAttr>())
3739 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3742 return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
// Worker overload: materializes the full RTTI descriptor global for Ty.
// Emits the vtable pointer and type-name fields, appends type-class-specific
// fields, then creates the global with the requested linkage/visibility/DLL
// storage and replaces any pre-existing declaration.
// NOTE(review): this numbered listing elides some original lines (braces,
// blank lines, `break;`s, and a few case labels), so numbering is sparse.
3745 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3747 llvm::GlobalVariable::LinkageTypes Linkage,
3748 llvm::GlobalValue::VisibilityTypes Visibility,
3749 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3750 // Add the vtable pointer.
3751 BuildVTablePointer(cast<Type>(Ty));
3754 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3755 llvm::Constant *TypeNameField;
3757 // If we're supposed to demote the visibility, be sure to set a flag
3758 // to use a string comparison for type_info comparisons.
3759 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3760 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3761 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3762 // The flag is the sign bit, which on ARM64 is defined to be clear
3763 // for global pointers. This is very ARM64-specific.
3764 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3765 llvm::Constant *flag =
3766 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3767 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3769 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3771 TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3773 Fields.push_back(TypeNameField);
// Append the fields specific to each abi::*_type_info subclass.
3775 switch (Ty->getTypeClass()) {
3776 #define TYPE(Class, Base)
3777 #define ABSTRACT_TYPE(Class, Base)
3778 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3779 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3780 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3781 #include "clang/AST/TypeNodes.inc"
3782 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3784 // GCC treats vector types as fundamental types.
3787 case Type::ExtVector:
3788 case Type::ConstantMatrix:
3790 case Type::BlockPointer:
3791 // Itanium C++ ABI 2.9.5p4:
3792 // abi::__fundamental_type_info adds no data members to std::type_info.
3795 case Type::LValueReference:
3796 case Type::RValueReference:
3797 llvm_unreachable("References shouldn't get here");
3800 case Type::DeducedTemplateSpecialization:
3801 llvm_unreachable("Undeduced type shouldn't get here");
3809 case Type::ConstantArray:
3810 case Type::IncompleteArray:
3811 case Type::VariableArray:
3812 // Itanium C++ ABI 2.9.5p5:
3813 // abi::__array_type_info adds no data members to std::type_info.
3816 case Type::FunctionNoProto:
3817 case Type::FunctionProto:
3818 // Itanium C++ ABI 2.9.5p5:
3819 // abi::__function_type_info adds no data members to std::type_info.
3823 // Itanium C++ ABI 2.9.5p5:
3824 // abi::__enum_type_info adds no data members to std::type_info.
3827 case Type::Record: {
3828 const CXXRecordDecl *RD =
3829 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3830 if (!RD->hasDefinition() || !RD->getNumBases()) {
3831 // We don't need to emit any fields.
3835 if (CanUseSingleInheritance(RD))
3836 BuildSIClassTypeInfo(RD);
3838 BuildVMIClassTypeInfo(RD);
3843 case Type::ObjCObject:
3844 case Type::ObjCInterface:
3845 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3848 case Type::ObjCObjectPointer:
3849 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3853 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3856 case Type::MemberPointer:
3857 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3861 // No fields, at least for the moment.
3865 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
// Re-mangle the name and create the descriptor global itself.
3867 SmallString<256> Name;
3868 llvm::raw_svector_ostream Out(Name);
3869 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3870 llvm::Module &M = CGM.getModule();
3871 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3872 llvm::GlobalVariable *GV =
3873 new llvm::GlobalVariable(M, Init->getType(),
3874 /*isConstant=*/true, Linkage, Init, Name);
3876 // Export the typeinfo in the same circumstances as the vtable is exported.
3877 auto GVDLLStorageClass = DLLStorageClass;
3878 if (CGM.getTarget().hasPS4DLLImportExport()) {
3879 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3880 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3881 if (RD->hasAttr<DLLExportAttr>() ||
3882 CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
3883 GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
3888 // If there's already an old global variable, replace it with the new one.
3890 GV->takeName(OldGV);
3891 llvm::Constant *NewPtr =
3892 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3893 OldGV->replaceAllUsesWith(NewPtr);
3894 OldGV->eraseFromParent();
3897 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3898 GV->setComdat(M.getOrInsertComdat(GV->getName()));
3901 CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
3902 GV->setAlignment(Align.getAsAlign());
3904 // The Itanium ABI specifies that type_info objects must be globally
3905 // unique, with one exception: if the type is an incomplete class
3906 // type or a (possibly indirect) pointer to one. That exception
3907 // affects the general case of comparing type_info objects produced
3908 // by the typeid operator, which is why the comparison operators on
3909 // std::type_info generally use the type_info name pointers instead
3910 // of the object addresses. However, the language's built-in uses
3911 // of RTTI generally require class types to be complete, even when
3912 // manipulating pointers to those class types. This allows the
3913 // implementation of dynamic_cast to rely on address equality tests,
3914 // which is much faster.
3916 // All of this is to say that it's important that both the type_info
3917 // object and the type_info name be uniqued when weakly emitted.
3919 TypeName->setVisibility(Visibility);
3920 CGM.setDSOLocal(TypeName);
3922 GV->setVisibility(Visibility);
3923 CGM.setDSOLocal(GV);
3925 TypeName->setDLLStorageClass(DLLStorageClass);
3926 GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
3930 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3931 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3933 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
// NOTE(review): this numbered listing elides some original lines (e.g. the
// early return when there is no superclass), so the numbering is sparse.
3936 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3937 /// for the given Objective-C object type.
3938 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3940 const Type *T = OT->getBaseType().getTypePtr();
3941 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3943 // The builtin types are abi::__class_type_infos and don't require
// extra fields (id / Class reach here as builtins).
3945 if (isa<BuiltinType>(T)) return;
3947 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3948 ObjCInterfaceDecl *Super = Class->getSuperClass();
3950 // Root classes are also __class_type_info.
3953 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3955 // Everything else is single inheritance.
// Append the superclass's type_info as the __si_class_type_info base field.
3956 llvm::Constant *BaseTypeInfo =
3957 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3958 Fields.push_back(BaseTypeInfo);
3961 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3962 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
3963 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3964 // Itanium C++ ABI 2.9.5p6b:
3965 // It adds to abi::__class_type_info a single member pointing to the
3966 // type_info structure for the base type,
3967 llvm::Constant *BaseTypeInfo =
3968 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3969 Fields.push_back(BaseTypeInfo);
// NOTE(review): the struct declaration line itself (presumably
// `struct SeenBases {` at the elided line 3975) is missing from this
// numbered listing; only the doc comment and the two member sets survive.
3973 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3974 /// a class hierarchy.
// Bases inherited along a non-virtual path.
3976 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
// Bases inherited along a virtual path.
3977 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3981 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3982 /// abi::__vmi_class_type_info.
3984 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3990 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3992 if (Base->isVirtual()) {
3993 // Mark the virtual base as seen.
3994 if (!Bases.VirtualBases.insert(BaseDecl).second) {
3995 // If this virtual base has been seen before, then the class is diamond
3997 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3999 if (Bases.NonVirtualBases.count(BaseDecl))
4000 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4003 // Mark the non-virtual base as seen.
4004 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
4005 // If this non-virtual base has been seen before, then the class has non-
4006 // diamond shaped repeated inheritance.
4007 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4009 if (Bases.VirtualBases.count(BaseDecl))
4010 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
4015 for (const auto &I : BaseDecl->bases())
4016 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
4021 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
4026 for (const auto &I : RD->bases())
4027 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
// NOTE(review): this numbered listing elides some original lines (blank
// lines, braces, and a few statements such as the else branch and the
// CharUnits Offset declaration), so the embedded numbering is sparse.
4032 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
4033 /// classes with bases that do not satisfy the abi::__si_class_type_info
4034 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
4035 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
4036 llvm::Type *UnsignedIntLTy =
4037 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4039 // Itanium C++ ABI 2.9.5p6c:
4040 // __flags is a word with flags describing details about the class
4041 // structure, which may be referenced by using the __flags_masks
4042 // enumeration. These flags refer to both direct and indirect bases.
4043 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
4044 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4046 // Itanium C++ ABI 2.9.5p6c:
4047 // __base_count is a word with the number of direct proper base class
4048 // descriptions that follow.
4049 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
// No bases: the descriptor ends after __flags/__base_count.
4051 if (!RD->getNumBases())
4054 // Now add the base class descriptions.
4056 // Itanium C++ ABI 2.9.5p6c:
4057 // __base_info[] is an array of base class descriptions -- one for every
4058 // direct proper base. Each description is of the type:
4060 // struct abi::__base_class_type_info {
4062 // const __class_type_info *__base_type;
4063 // long __offset_flags;
4065 // enum __offset_flags_masks {
4066 // __virtual_mask = 0x1,
4067 // __public_mask = 0x2,
4068 // __offset_shift = 8
4072 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4073 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4075 // FIXME: Consider updating libc++abi to match, and extend this logic to all
4077 QualType OffsetFlagsTy = CGM.getContext().LongTy;
4078 const TargetInfo &TI = CGM.getContext().getTargetInfo();
4079 if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
4080 OffsetFlagsTy = CGM.getContext().LongLongTy;
4081 llvm::Type *OffsetFlagsLTy =
4082 CGM.getTypes().ConvertType(OffsetFlagsTy);
4084 for (const auto &Base : RD->bases()) {
4085 // The __base_type member points to the RTTI for the base type.
4086 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4089 cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4091 int64_t OffsetFlags = 0;
4093 // All but the lower 8 bits of __offset_flags are a signed offset.
4094 // For a non-virtual base, this is the offset in the object of the base
4095 // subobject. For a virtual base, this is the offset in the virtual table of
4096 // the virtual base offset for the virtual base referenced (negative).
4098 if (Base.isVirtual())
4100 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4102 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4103 Offset = Layout.getBaseClassOffset(BaseDecl);
4106 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4108 // The low-order byte of __offset_flags contains flags, as given by the
4109 // masks from the enumeration __offset_flags_masks.
4110 if (Base.isVirtual())
4111 OffsetFlags |= BCTI_Virtual;
4112 if (Base.getAccessSpecifier() == AS_public)
4113 OffsetFlags |= BCTI_Public;
4115 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4119 /// Compute the flags for a __pbase_type_info, and remove the corresponding
4120 /// pieces from \p Type.
4121 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4124 if (Type.isConstQualified())
4125 Flags |= ItaniumRTTIBuilder::PTI_Const;
4126 if (Type.isVolatileQualified())
4127 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4128 if (Type.isRestrictQualified())
4129 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4130 Type = Type.getUnqualifiedType();
4132 // Itanium C++ ABI 2.9.5p7:
4133 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4134 // incomplete class type, the incomplete target type flag is set.
4135 if (ContainsIncompleteClassType(Type))
4136 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4138 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4139 if (Proto->isNothrow()) {
4140 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4141 Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4148 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4149 /// used for pointer types.
4150 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4151 // Itanium C++ ABI 2.9.5p7:
4152 // __flags is a flag word describing the cv-qualification and other
4153 // attributes of the type pointed to
4154 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4156 llvm::Type *UnsignedIntLTy =
4157 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4158 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4160 // Itanium C++ ABI 2.9.5p7:
4161 // __pointee is a pointer to the std::type_info derivation for the
4162 // unqualified type being pointed to.
4163 llvm::Constant *PointeeTypeInfo =
4164 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4165 Fields.push_back(PointeeTypeInfo);
4168 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4169 /// struct, used for member pointer types.
4171 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4172 QualType PointeeTy = Ty->getPointeeType();
4174 // Itanium C++ ABI 2.9.5p7:
4175 // __flags is a flag word describing the cv-qualification and other
4176 // attributes of the type pointed to.
4177 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4179 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4180 if (IsIncompleteClassType(ClassType))
4181 Flags |= PTI_ContainingClassIncomplete;
4183 llvm::Type *UnsignedIntLTy =
4184 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4185 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4187 // Itanium C++ ABI 2.9.5p7:
4188 // __pointee is a pointer to the std::type_info derivation for the
4189 // unqualified type being pointed to.
4190 llvm::Constant *PointeeTypeInfo =
4191 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4192 Fields.push_back(PointeeTypeInfo);
4194 // Itanium C++ ABI 2.9.5p9:
4195 // __context is a pointer to an abi::__class_type_info corresponding to the
4196 // class type containing the member pointed to
4197 // (e.g., the "A" in "int A::*").
4199 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4202 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4203 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
// Emits RTTI descriptors for every fundamental type, plus pointer and
// pointer-to-const variants of each, with external linkage.  The DLL storage
// class and visibility are taken from RD (the class triggering the emission).
// NOTE(review): this numbered listing elides a few original lines (e.g. the
// closing brace of the initializer list), so the embedded numbering is sparse.
4206 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4207 // Types added here must also be added to TypeInfoIsInStandardLibrary.
4208 QualType FundamentalTypes[] = {
4209 getContext().VoidTy, getContext().NullPtrTy,
4210 getContext().BoolTy, getContext().WCharTy,
4211 getContext().CharTy, getContext().UnsignedCharTy,
4212 getContext().SignedCharTy, getContext().ShortTy,
4213 getContext().UnsignedShortTy, getContext().IntTy,
4214 getContext().UnsignedIntTy, getContext().LongTy,
4215 getContext().UnsignedLongTy, getContext().LongLongTy,
4216 getContext().UnsignedLongLongTy, getContext().Int128Ty,
4217 getContext().UnsignedInt128Ty, getContext().HalfTy,
4218 getContext().FloatTy, getContext().DoubleTy,
4219 getContext().LongDoubleTy, getContext().Float128Ty,
4220 getContext().Char8Ty, getContext().Char16Ty,
4221 getContext().Char32Ty
4223 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4224 RD->hasAttr<DLLExportAttr>()
4225 ? llvm::GlobalValue::DLLExportStorageClass
4226 : llvm::GlobalValue::DefaultStorageClass;
4227 llvm::GlobalValue::VisibilityTypes Visibility =
4228 CodeGenModule::GetLLVMVisibility(RD->getVisibility());
// For each fundamental type T, emit typeinfo for T, T*, and const T*.
4229 for (const QualType &FundamentalType : FundamentalTypes) {
4230 QualType PointerType = getContext().getPointerType(FundamentalType);
4231 QualType PointerTypeConst = getContext().getPointerType(
4232 FundamentalType.withConst());
4233 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4234 ItaniumRTTIBuilder(*this).BuildTypeInfo(
4235 Type, llvm::GlobalValue::ExternalLinkage,
4236 Visibility, DLLStorageClass);
4240 /// What sort of uniqueness rules should we use for the RTTI for the
4242 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4243 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4244 if (shouldRTTIBeUnique())
4247 // It's only necessary for linkonce_odr or weak_odr linkage.
4248 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4249 Linkage != llvm::GlobalValue::WeakODRLinkage)
4252 // It's only necessary with default visibility.
4253 if (CanTy->getVisibility() != DefaultVisibility)
4256 // If we're not required to publish this symbol, hide it.
4257 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4258 return RUK_NonUniqueHidden;
4260 // If we're required to publish this symbol, as we might be under an
4261 // explicit instantiation, leave it with default visibility but
4262 // enable string-comparisons.
4263 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4264 return RUK_NonUniqueVisible;
4267 // Find out how to codegen the complete destructor and constructor
4269 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
4271 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4272 const CXXMethodDecl *MD) {
4273 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4274 return StructorCodegen::Emit;
4276 // The complete and base structors are not equivalent if there are any virtual
4277 // bases, so emit separate functions.
4278 if (MD->getParent()->getNumVBases())
4279 return StructorCodegen::Emit;
4281 GlobalDecl AliasDecl;
4282 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4283 AliasDecl = GlobalDecl(DD, Dtor_Complete);
4285 const auto *CD = cast<CXXConstructorDecl>(MD);
4286 AliasDecl = GlobalDecl(CD, Ctor_Complete);
4288 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4290 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4291 return StructorCodegen::RAUW;
4293 // FIXME: Should we allow available_externally aliases?
4294 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4295 return StructorCodegen::RAUW;
4297 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4298 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4299 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4300 CGM.getTarget().getTriple().isOSBinFormatWasm())
4301 return StructorCodegen::COMDAT;
4302 return StructorCodegen::Emit;
4305 return StructorCodegen::Alias;
// Emits TargetDecl's symbol as an alias named after AliasDecl, replacing any
// pre-existing declaration of that name.
// NOTE(review): this numbered listing elides some original lines (e.g. the
// `if (Entry)` / `else` around the replace-uses vs. set-name paths), so the
// embedded numbering is sparse.
4308 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
4309 GlobalDecl AliasDecl,
4310 GlobalDecl TargetDecl) {
4311 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4313 StringRef MangledName = CGM.getMangledName(AliasDecl);
4314 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
// A definition already exists under this name: nothing to do.
4315 if (Entry && !Entry->isDeclaration())
4318 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4320 // Create the alias with no name.
4321 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4323 // Constructors and destructors are always unnamed_addr.
4324 Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4326 // Switch any previous uses to the alias.
4328 assert(Entry->getType() == Aliasee->getType() &&
4329 "declaration exists with different type");
4330 Alias->takeName(Entry);
4331 Entry->replaceAllUsesWith(Alias);
4332 Entry->eraseFromParent();
4334 Alias->setName(MangledName);
4337 // Finally, set up the alias with its proper name and attributes.
4338 CGM.SetCommonAttributes(AliasDecl, Alias);
// Emits the constructor/destructor variant named by GD, possibly as an
// alias/RAUW/COMDAT sharing with the base variant per getCodegenToUse.
// NOTE(review): this numbered listing elides some original lines (braces,
// returns, and if/else keywords), so the embedded numbering is sparse.
4341 void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4342 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4343 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
// Exactly one of CD / DD is non-null.
4344 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4346 StructorCodegen CGType = getCodegenToUse(CGM, MD);
// Complete variants may be folded onto the base variant.
4348 if (CD ? GD.getCtorType() == Ctor_Complete
4349 : GD.getDtorType() == Dtor_Complete) {
4350 GlobalDecl BaseDecl;
4352 BaseDecl = GD.getWithCtorType(Ctor_Base);
4354 BaseDecl = GD.getWithDtorType(Dtor_Base);
4356 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4357 emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4361 if (CGType == StructorCodegen::RAUW) {
4362 StringRef MangledName = CGM.getMangledName(GD);
4363 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4364 CGM.addReplacement(MangledName, Aliasee);
4369 // The base destructor is equivalent to the base destructor of its
4370 // base class if there is exactly one non-virtual base class with a
4371 // non-trivial destructor, there are no fields with a non-trivial
4372 // destructor, and the body of the destructor is trivial.
4373 if (DD && GD.getDtorType() == Dtor_Base &&
4374 CGType != StructorCodegen::COMDAT &&
4375 !CGM.TryEmitBaseDestructorAsAlias(DD))
4378 // FIXME: The deleting destructor is equivalent to the selected operator
4380 // * either the delete is a destroying operator delete or the destructor
4381 // would be trivial if it weren't virtual,
4382 // * the conversion from the 'this' parameter to the first parameter of the
4383 // destructor is equivalent to a bitcast,
4384 // * the destructor does not have an implicit "this" return, and
4385 // * the operator delete has the same calling convention and IR function type
4386 // as the destructor.
4387 // In such cases we should try to emit the deleting dtor as an alias to the
4388 // selected 'operator delete'.
4390 llvm::Function *Fn = CGM.codegenCXXStructor(GD);
// COMDAT: both variants go into one group keyed by the C5/D5 comdat name.
4392 if (CGType == StructorCodegen::COMDAT) {
4393 SmallString<256> Buffer;
4394 llvm::raw_svector_ostream Out(Buffer);
4396 getMangleContext().mangleCXXDtorComdat(DD, Out);
4398 getMangleContext().mangleCXXCtorComdat(CD, Out);
4399 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4402 CGM.maybeSetTrivialComdat(*MD, *Fn);
4406 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4407 // void *__cxa_begin_catch(void*);
4408 llvm::FunctionType *FTy = llvm::FunctionType::get(
4409 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4411 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4414 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4415 // void __cxa_end_catch();
4416 llvm::FunctionType *FTy =
4417 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4419 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4422 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4423 // void *__cxa_get_exception_ptr(void*);
4424 llvm::FunctionType *FTy = llvm::FunctionType::get(
4425 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4427 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4431 /// A cleanup to call __cxa_end_catch. In many cases, the caught
4432 /// exception type lets us state definitively that the thrown exception
4433 /// type does not have a destructor. In particular:
4434 /// - Catch-alls tell us nothing, so we have to conservatively
4435 /// assume that the thrown exception might have a destructor.
4436 /// - Catches by reference behave according to their base types.
4437 /// - Catches of non-record types will only trigger for exceptions
4438 /// of non-record types, which never have destructors.
4439 /// - Catches of record types can trigger for arbitrary subclasses
4440 /// of the caught type, so we have to assume the actual thrown
4441 /// exception type might have a throwing destructor, even if the
4442 /// caught type's destructor is trivial or nothrow.
// EH-stack cleanup that emits the matching __cxa_end_catch when the catch
// handler is exited, either normally or during unwinding.
4443 struct CallEndCatch final : EHScopeStack::Cleanup {
4444 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
// NOTE(review): the 'bool MightThrow;' data-member declaration is not visible
// in this listing even though the constructor initializes it -- confirm
// against the upstream source.
4447 void Emit(CodeGenFunction &CGF, Flags flags) override {
// Cheap path: when the caught object provably has no throwing destructor,
// __cxa_end_catch is a plain nounwind call...
4449 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
// ...otherwise emit it as call-or-invoke so a throwing destructor can
// unwind into the enclosing landing pad.
// NOTE(review): the 'if (!MightThrow) { ...; return; }' selection between
// these two emissions appears to be missing from this listing.
4453 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4458 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4459 /// __cxa_end_catch.
4461 /// \param EndMightThrow - true if __cxa_end_catch might throw
// Emit the __cxa_begin_catch(Exn) call and push a CallEndCatch cleanup so the
// handler is properly closed on every exit path. EndMightThrow is forwarded to
// the cleanup to decide between nounwind-call and invoke emission.
4462 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
// NOTE(review): the 'llvm::Value *Exn,' parameter line appears to be missing
// from this listing (the body uses Exn) -- confirm upstream.
4464 bool EndMightThrow) {
4465 llvm::CallInst *call =
4466 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4468 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
// NOTE(review): the trailing 'return call;' (the adjusted exception object
// pointer) is not visible here -- confirm upstream.
4473 /// A "special initializer" callback for initializing a catch
4474 /// parameter during catch initialization.
// Initialize the catch-clause variable (the exception-declaration) from the
// in-flight exception, choosing the strategy by catch type:
//   - catch by reference: bind directly to the (adjusted) exception object;
//   - scalar/complex catch: begin-catch, then load/store the value;
//   - aggregate (record) catch: copy-construct into the variable, possibly
//     via __cxa_get_exception_ptr before __cxa_begin_catch.
// NOTE(review): the 'Address ParamAddr,' parameter line appears to be missing
// from this listing (the body stores through ParamAddr) -- confirm upstream.
4475 static void InitCatchParam(CodeGenFunction &CGF,
4476 const VarDecl &CatchParam,
4478 SourceLocation Loc) {
4479 // Load the exception from where the landing pad saved it.
4480 llvm::Value *Exn = CGF.getExceptionFromSlot();
4482 CanQualType CatchType =
4483 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4484 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4486 // If we're catching by reference, we can just cast the object
4487 // pointer to the appropriate pointer.
4488 if (isa<ReferenceType>(CatchType)) {
4489 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
// End-catch may run a destructor only if the referent is of record type.
4490 bool EndCatchMightThrow = CaughtType->isRecordType();
4492 // __cxa_begin_catch returns the adjusted object pointer.
4493 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4495 // We have no way to tell the personality function that we're
4496 // catching by reference, so if we're catching a pointer,
4497 // __cxa_begin_catch will actually return that pointer by value.
4498 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4499 QualType PointeeType = PT->getPointeeType();
4501 // When catching by reference, generally we should just ignore
4502 // this by-value pointer and use the exception object instead.
4503 if (!PointeeType->isRecordType()) {
4505 // Exn points to the struct _Unwind_Exception header, which
4506 // we have to skip past in order to reach the exception data.
4507 unsigned HeaderSize =
4508 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
// NOTE(review): the assignment target ('AdjustedExn =') for this GEP is not
// visible in this listing -- confirm upstream.
4510 CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);
4512 // However, if we're catching a pointer-to-record type that won't
4513 // work, because the personality function might have adjusted
4514 // the pointer. There's actually no way for us to fully satisfy
4515 // the language/ABI contract here: we can't use Exn because it
4516 // might have the wrong adjustment, but we can't use the by-value
4517 // pointer because it's off by a level of abstraction.
4519 // The current solution is to dump the adjusted pointer into an
4520 // alloca, which breaks language semantics (because changing the
4521 // pointer doesn't change the exception) but at least works.
4522 // The better solution would be to filter out non-exact matches
4523 // and rethrow them, but this is tricky because the rethrow
4524 // really needs to be catchable by other sites at this landing
4525 // pad. The best solution is to fix the personality function.
4527 // Pull the pointer for the reference type off.
// NOTE(review): the declaration ('llvm::Type *PtrTy =') for this expression
// is not visible in this listing -- confirm upstream.
4529 cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
4531 // Create the temporary and write the adjusted pointer into it.
// NOTE(review): the declaration ('Address ExnPtrTmp =') for this alloca is
// not visible in this listing -- confirm upstream.
4533 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4534 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4535 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4537 // Bind the reference to the temporary.
4538 AdjustedExn = ExnPtrTmp.getPointer();
// Store the (possibly redirected) object pointer into the reference variable.
4542 llvm::Value *ExnCast =
4543 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4544 CGF.Builder.CreateStore(ExnCast, ParamAddr);
// NOTE(review): an early 'return;' ending the by-reference path appears to be
// missing from this listing -- confirm upstream.
4548 // Scalars and complexes.
4549 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4550 if (TEK != TEK_Aggregate) {
// end-catch cannot throw for non-record types, hence 'false'.
4551 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4553 // If the catch type is a pointer type, __cxa_begin_catch returns
4554 // the pointer by value.
4555 if (CatchType->hasPointerRepresentation()) {
4556 llvm::Value *CastExn =
4557 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
// Under ARC, a caught Objective-C pointer must respect its ownership
// qualifier when stored into the catch variable.
4559 switch (CatchType.getQualifiers().getObjCLifetime()) {
4560 case Qualifiers::OCL_Strong:
4561 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
// NOTE(review): the fallthrough annotation and the per-case 'return;'
// terminators of this switch appear to be missing from this listing.
4564 case Qualifiers::OCL_None:
4565 case Qualifiers::OCL_ExplicitNone:
4566 case Qualifiers::OCL_Autoreleasing:
4567 CGF.Builder.CreateStore(CastExn, ParamAddr);
4570 case Qualifiers::OCL_Weak:
4571 CGF.EmitARCInitWeak(ParamAddr, CastExn);
4574 llvm_unreachable("bad ownership qualifier!");
4577 // Otherwise, it returns a pointer into the exception object.
4579 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4580 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4582 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
4583 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
// NOTE(review): the switch over TEK that selects between the complex and
// scalar copies below (and the closing '/*init*/ true);' argument of
// EmitStoreOfComplex) is not fully visible in this listing.
4586 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4590 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4591 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4595 llvm_unreachable("evaluation kind filtered out!");
4597 llvm_unreachable("bad evaluation kind");
// Aggregate path: the catch variable is a class object we must initialize
// from the exception object.
4600 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4601 auto catchRD = CatchType->getAsCXXRecordDecl();
4602 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4604 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4606 // Check for a copy expression. If we don't have a copy expression,
4607 // that means a trivial copy is okay.
4608 const Expr *copyExpr = CatchParam.getInit();
// NOTE(review): the 'if (!copyExpr) { ... return; }' trivial-copy guard
// separating the memcpy path below from the copy-constructor path appears to
// be missing from this listing -- confirm upstream.
4610 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4611 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4612 caughtExnAlignment);
4613 LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4614 LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4615 CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4619 // We have to call __cxa_get_exception_ptr to get the adjusted
4620 // pointer before copying.
4621 llvm::CallInst *rawAdjustedExn =
4622 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
4624 // Cast that to the appropriate type.
4625 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4626 caughtExnAlignment);
4628 // The copy expression is defined in terms of an OpaqueValueExpr.
4629 // Find it and map it to the adjusted expression.
4630 CodeGenFunction::OpaqueValueMapping
4631 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4632 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4634 // Call the copy ctor in a terminate scope.
4635 CGF.EHStack.pushTerminate();
4637 // Perform the copy construction.
4638 CGF.EmitAggExpr(copyExpr,
4639 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4640 AggValueSlot::IsNotDestructed,
4641 AggValueSlot::DoesNotNeedGCBarriers,
4642 AggValueSlot::IsNotAliased,
4643 AggValueSlot::DoesNotOverlap));
4645 // Leave the terminate scope.
4646 CGF.EHStack.popTerminate();
4648 // Undo the opaque value mapping.
// NOTE(review): the 'opaque.pop();' statement appears to be missing from this
// listing -- confirm upstream.
4651 // Finally we can call __cxa_begin_catch.
4652 CallBeginCatch(CGF, Exn, true);
4655 /// Begins a catch statement by initializing the catch variable and
4656 /// calling __cxa_begin_catch.
// Emit the entry of a catch handler: allocate and initialize the catch
// variable (if any), call __cxa_begin_catch, and enter the end-catch and
// destructor cleanups in the order required by [except.throw]p4.
4657 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4658 const CXXCatchStmt *S) {
4659 // We have to be very careful with the ordering of cleanups here:
4660 // C++ [except.throw]p4:
4661 // The destruction [of the exception temporary] occurs
4662 // immediately after the destruction of the object declared in
4663 // the exception-declaration in the handler.
4665 // So the precise ordering is:
4666 // 1. Construct catch variable.
4667 // 2. __cxa_begin_catch
4668 // 3. Enter __cxa_end_catch cleanup
4669 // 4. Enter dtor cleanup
4671 // We do this by using a slightly abnormal initialization process.
4672 // Delegation sequence:
4673 // - ExitCXXTryStmt opens a RunCleanupsScope
4674 // - EmitAutoVarAlloca creates the variable and debug info
4675 // - InitCatchParam initializes the variable from the exception
4676 // - CallBeginCatch calls __cxa_begin_catch
4677 // - CallBeginCatch enters the __cxa_end_catch cleanup
4678 // - EmitAutoVarCleanups enters the variable destructor cleanup
4679 // - EmitCXXTryStmt emits the code for the catch body
4680 // - EmitCXXTryStmt close the RunCleanupsScope
4682 VarDecl *CatchParam = S->getExceptionDecl();
// Catch-all / unnamed-catch path: just begin the catch and return.
// NOTE(review): the 'if (!CatchParam) { ... return; }' guard around the next
// two statements appears to be missing from this listing -- confirm upstream.
4684 llvm::Value *Exn = CGF.getExceptionFromSlot();
4685 CallBeginCatch(CGF, Exn, true);
// Named catch parameter: emit the local and initialize it from the exception.
4690 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4691 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4692 CGF.EmitAutoVarCleanups(var);
4695 /// Get or define the following function:
4696 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
4697 /// This code is used only in C++.
// Get or create the helper __clang_call_terminate(i8* %exn), which calls
// __cxa_begin_catch on the violating exception (so it is marked handled)
// and then std::terminate(). Emitted linkonce_odr/hidden so every TU can
// reference one shared copy without exporting a symbol.
4698 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4699 llvm::FunctionType *fnTy =
4700 llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4701 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4702 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4703 llvm::Function *fn =
4704 cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
// NOTE(review): the guard that only builds the body when the function is
// still empty (avoiding redefinition) is not visible in this listing --
// confirm upstream.
4706 fn->setDoesNotThrow();
4707 fn->setDoesNotReturn();
4709 // What we really want is to massively penalize inlining without
4710 // forbidding it completely. The difference between that and
4711 // 'noinline' is negligible.
4712 fn->addFnAttr(llvm::Attribute::NoInline);
4714 // Allow this function to be shared across translation units, but
4715 // we don't want it to turn into an exported symbol.
4716 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4717 fn->setVisibility(llvm::Function::HiddenVisibility);
4718 if (CGM.supportsCOMDAT())
4719 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4721 // Set up the function.
4722 llvm::BasicBlock *entry =
4723 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4724 CGBuilderTy builder(CGM, entry);
4726 // Pull the exception pointer out of the parameter list.
4727 llvm::Value *exn = &*fn->arg_begin();
4729 // Call __cxa_begin_catch(exn).
4730 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4731 catchCall->setDoesNotThrow();
4732 catchCall->setCallingConv(CGM.getRuntimeCC());
4734 // Call std::terminate().
4735 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4736 termCall->setDoesNotThrow();
4737 termCall->setDoesNotReturn();
4738 termCall->setCallingConv(CGM.getRuntimeCC());
4740 // std::terminate cannot return.
4741 builder.CreateUnreachable();
// NOTE(review): the trailing 'return fnRef;' is not visible in this listing
// -- confirm upstream.
// Terminate on a noexcept/unexpected-exception violation. When an exception
// pointer is available we route through __clang_call_terminate so the
// exception is begin-caught first; otherwise call std::terminate directly.
// NOTE(review): the return-type line ('llvm::CallInst *'), the 'llvm::Value
// *Exn' parameter line, and the 'if (Exn)' guard separating the two returns
// are not visible in this listing -- confirm upstream.
4747 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4749 // In C++, we want to call __cxa_begin_catch() before terminating.
4751 assert(CGF.CGM.getLangOpts().CPlusPlus);
4752 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4754 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
// Load the vtable pointer stored at the start of 'This' (as i8*) and return
// it together with the class it was loaded for.
4757 std::pair<llvm::Value *, const CXXRecordDecl *>
4758 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4759 const CXXRecordDecl *RD) {
4760 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
// Wasm EH variant of emitBeginCatch: when native exception handling is
// enabled, first push a cleanup that emits catchret from the current
// catchpad funclet, then defer to the common Itanium lowering.
4763 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4764 const CXXCatchStmt *C) {
4765 if (CGF.getTarget().hasFeature("exception-handling"))
4766 CGF.EHStack.pushCleanup<CatchRetScope>(
4767 NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4768 ItaniumCXXABI::emitBeginCatch(CGF, C);
// Wasm cannot easily begin-catch the violating exception inside its
// catch/catch_all structure, so fall back to the generic CGCXXABI behavior
// (plain std::terminate) instead of __clang_call_terminate.
// NOTE(review): the return-type and parameter lines of this definition are
// not visible in this listing -- confirm upstream.
4772 WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4774 // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
4775 // the violating exception to mark it handled, but it is currently hard to do
4776 // with wasm EH instruction structure with catch/catch_all, we just call
4777 // std::terminate and ignore the violating exception as in CGCXXABI.
4778 // TODO Consider code transformation that makes calling __clang_call_terminate
4780 return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
4783 /// Register a global destructor as best as we know how.
// AIX (XL ABI) global-destructor registration. TLS variables go through the
// AIX-specific __pt_atexit_np routine; ordinary globals are registered with
// atexit() and additionally get a sterm "finalizer" that can unregister the
// dtor (needed when the containing module may be unloaded).
4784 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4785 llvm::FunctionCallee Dtor,
4786 llvm::Constant *Addr) {
4787 if (D.getTLSKind() != VarDecl::TLS_None) {
4788 // atexit routine expects "int(*)(int,...)"
4789 llvm::FunctionType *FTy =
4790 llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
4791 llvm::PointerType *FpTy = FTy->getPointerTo();
4793 // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4794 llvm::FunctionType *AtExitTy =
4795 llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);
4797 // Fetch the actual function.
4798 llvm::FunctionCallee AtExit =
4799 CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
4801 // Create __dtor function for the var decl.
4802 llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
4804 // Register above __dtor with atexit().
4805 // First param is flags and must be 0, second param is function ptr
4806 llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
4807 CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
4809 // Cannot unregister TLS __dtor so done
// NOTE(review): the 'return;' (and closing brace) that terminate the TLS path
// here appear to be missing from this listing -- confirm upstream.
4813 // Create __dtor function for the var decl.
4814 llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
4816 // Register above __dtor with atexit().
4817 CGF.registerGlobalDtorWithAtExit(DtorStub);
4819 // Emit __finalize function to unregister __dtor and (as appropriate) call
4821 emitCXXStermFinalizer(D, DtorStub, Addr);
// Emit the AIX "sterm finalizer" for a global: a void() function that calls
// unatexit(dtorStub) and, if the dtor was still pending (unatexit returned 0),
// invokes the dtor stub directly. The finalizer is then recorded with the
// module either by init-priority, via llvm.global_dtors (for unordered
// template instantiations), or in the ordinary sterm list.
// NOTE(review): this definition continues past the end of this listing; the
// trailing lines (closing braces) are not visible here.
4824 void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
4825 llvm::Constant *addr) {
4826 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
4827 SmallString<256> FnName;
4829 llvm::raw_svector_ostream Out(FnName);
// Mangle the finalizer name from the variable so it is unique per variable.
4830 getMangleContext().mangleDynamicStermFinalizer(&D, Out);
4833 // Create the finalization action associated with a variable.
4834 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
4835 llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
4836 FTy, FnName.str(), FI, D.getLocation());
4838 CodeGenFunction CGF(CGM);
4840 CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
4841 FunctionArgList(), D.getLocation(),
4842 D.getInit()->getExprLoc());
4844 // The unatexit subroutine unregisters __dtor functions that were previously
4845 // registered by the atexit subroutine. If the referenced function is found,
4846 // the unatexit returns a value of 0, meaning that the cleanup is still
4847 // pending (and we should call the __dtor function).
4848 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub)
4850 llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
4852 llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
4853 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
4855 // Check if unatexit returns a value of 0. If it does, jump to
4856 // DestructCallBlock, otherwise jump to EndBlock directly.
4857 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
4859 CGF.EmitBlock(DestructCallBlock);
4861 // Emit the call to dtorStub.
4862 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
4864 // Make sure the call and the callee agree on calling convention.
4865 CI->setCallingConv(dtorStub->getCallingConv());
4867 CGF.EmitBlock(EndBlock);
4869 CGF.FinishFunction();
4871 if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
4872 CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
4873 IPA->getPriority());
4874 } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
4875 getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
4876 // According to C++ [basic.start.init]p2, class template static data
4877 // members (i.e., implicitly or explicitly instantiated specializations)
4878 // have unordered initialization. As a consequence, we can put them into
4879 // their own llvm.global_dtors entry.
// 65535 is the default (lowest) llvm.global_dtors priority.
4880 CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
// NOTE(review): the '} else {' introducing this final branch appears to be
// missing from this listing -- confirm upstream.
4882 CGM.AddCXXStermFinalizerEntry(StermFinalizer);