1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This provides C++ code generation targeting the Itanium C++ ABI. The class
11 // in this file generates structures that follow the Itanium C++ ABI, which is
13 // http://www.codesourcery.com/public/cxx-abi/abi.html
14 // http://www.codesourcery.com/public/cxx-abi/abi-eh.html
16 // It also supports the closely-related ARM ABI, documented at:
17 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
19 //===----------------------------------------------------------------------===//
22 #include "CGCleanup.h"
23 #include "CGRecordLayout.h"
24 #include "CGVTables.h"
25 #include "CodeGenFunction.h"
26 #include "CodeGenModule.h"
27 #include "ConstantBuilder.h"
28 #include "TargetInfo.h"
29 #include "clang/AST/Mangle.h"
30 #include "clang/AST/Type.h"
31 #include "clang/AST/StmtCXX.h"
32 #include "llvm/IR/CallSite.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
38 using namespace clang;
39 using namespace CodeGen;
// NOTE(review): this is an ELIDED listing — the leading numbers are original
// line numbers and are non-consecutive, so access specifiers, `return`
// statements, `case` labels, and closing braces are missing throughout.
// Do not treat any inline body below as complete.
//
// ItaniumCXXABI: the CGCXXABI implementation for the Itanium C++ ABI
// (and, via the UseARM* flags, its ARM-flavored variants).
42 class ItaniumCXXABI : public CodeGen::CGCXXABI {
43 /// VTables - All the vtables which have been defined.
44 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
// ABI-variant knobs, set once in the constructor (or by subclasses):
// - UseARMMethodPtrABI: ARM-style member function pointers (virtual bit in
//   'adj', this-adjustment left-shifted by 1).
// - UseARMGuardVarABI: ARM-style static-init guard variables.
// - Use32BitVTableOffsetABI: only the low 32 bits of a virtual member
//   pointer's vtable offset are significant (set by iOS64CXXABI).
47 bool UseARMMethodPtrABI;
48 bool UseARMGuardVarABI;
49 bool Use32BitVTableOffsetABI;
// Narrow the base class's mangler to the Itanium mangler this ABI requires.
51 ItaniumMangleContext &getMangleContext() {
52 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
56 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
57 bool UseARMMethodPtrABI = false,
58 bool UseARMGuardVarABI = false) :
59 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
60 UseARMGuardVarABI(UseARMGuardVarABI),
61 Use32BitVTableOffsetABI(false) { }
63 bool classifyReturnType(CGFunctionInfo &FI) const override;
65 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
66 // Structures with either a non-trivial destructor or a non-trivial
67 // copy constructor are always indirect.
68 // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
// NOTE(review): the return statements of this method are elided here.
70 if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor())
75 bool isThisCompleteObject(GlobalDecl GD) const override {
76 // The Itanium ABI has separate complete-object vs. base-object
77 // variants of both constructors and destructors.
// NOTE(review): the case labels and returns of both switches are elided;
// only the llvm_unreachable fall-throughs survive in this listing.
78 if (isa<CXXDestructorDecl>(GD.getDecl())) {
79 switch (GD.getDtorType()) {
88 llvm_unreachable("emitting dtor comdat as function?");
90 llvm_unreachable("bad dtor kind");
92 if (isa<CXXConstructorDecl>(GD.getDecl())) {
93 switch (GD.getCtorType()) {
100 case Ctor_CopyingClosure:
101 case Ctor_DefaultClosure:
102 llvm_unreachable("closure ctors in Itanium ABI?");
105 llvm_unreachable("emitting ctor comdat as function?");
107 llvm_unreachable("bad dtor kind");
// --- Member pointer support (definitions appear later in this file) ---
114 bool isZeroInitializable(const MemberPointerType *MPT) override;
116 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122 llvm::Value *&ThisPtrForCall,
123 llvm::Value *MemFnPtr,
124 const MemberPointerType *MPT) override;
127 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130 const MemberPointerType *MPT) override;
132 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
134 llvm::Value *Src) override;
135 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
136 llvm::Constant *Src) override;
138 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
140 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
141 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
142 CharUnits offset) override;
143 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
144 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
145 CharUnits ThisAdjustment);
147 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
148 llvm::Value *L, llvm::Value *R,
149 const MemberPointerType *MPT,
150 bool Inequality) override;
152 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
154 const MemberPointerType *MPT) override;
// --- Exception handling and RTTI ---
156 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
157 Address Ptr, QualType ElementType,
158 const CXXDestructorDecl *Dtor) override;
// Alignment of an exception object, taken from the target's notion of the
// __cxa_allocate_exception alignment.
160 CharUnits getAlignmentOfExnObject() {
161 unsigned Align = CGM.getContext().getTargetInfo().getExnObjectAlignment();
162 return CGM.getContext().toCharUnitsFromBits(Align);
165 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
166 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
168 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
171 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
172 llvm::Value *Exn) override;
174 void EmitFundamentalRTTIDescriptor(QualType Type, bool DLLExport);
175 void EmitFundamentalRTTIDescriptors(bool DLLExport);
176 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
// Itanium catch handlers use the plain RTTI descriptor with no flags.
178 getAddrOfCXXCatchHandlerType(QualType Ty,
179 QualType CatchHandlerType) override {
180 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
183 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
184 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
185 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
187 llvm::Type *StdTypeInfoPtrTy) override;
189 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
190 QualType SrcRecordTy) override;
192 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
193 QualType SrcRecordTy, QualType DestTy,
194 QualType DestRecordTy,
195 llvm::BasicBlock *CastEnd) override;
197 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
198 QualType SrcRecordTy,
199 QualType DestTy) override;
201 bool EmitBadCastCall(CodeGenFunction &CGF) override;
204 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
205 const CXXRecordDecl *ClassDecl,
206 const CXXRecordDecl *BaseClassDecl) override;
// --- Constructors / destructors ---
208 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
210 void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
211 SmallVectorImpl<CanQualType> &ArgTys) override;
213 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
214 CXXDtorType DT) const override {
215 // Itanium does not emit any destructor variant as an inline thunk.
216 // Delegating may occur as an optimization, but all variants are either
217 // emitted with external linkage or as linkonce if they are inline and used.
// NOTE(review): the `return false;` of this method is elided in this listing.
221 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
223 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
224 FunctionArgList &Params) override;
226 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
228 unsigned addImplicitConstructorArgs(CodeGenFunction &CGF,
229 const CXXConstructorDecl *D,
230 CXXCtorType Type, bool ForVirtualBase,
232 CallArgList &Args) override;
234 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
235 CXXDtorType Type, bool ForVirtualBase,
236 bool Delegating, Address This) override;
// --- VTables ---
238 void emitVTableDefinitions(CodeGenVTables &CGVT,
239 const CXXRecordDecl *RD) override;
241 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
242 CodeGenFunction::VPtr Vptr) override;
244 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249 getVTableAddressPoint(BaseSubobject Base,
250 const CXXRecordDecl *VTableClass) override;
252 llvm::Value *getVTableAddressPointInStructor(
253 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
254 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
256 llvm::Value *getVTableAddressPointInStructorWithVTT(
257 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
261 getVTableAddressPointForConstExpr(BaseSubobject Base,
262 const CXXRecordDecl *VTableClass) override;
264 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
265 CharUnits VPtrOffset) override;
267 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
268 Address This, llvm::Type *Ty,
269 SourceLocation Loc) override;
271 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
272 const CXXDestructorDecl *Dtor,
273 CXXDtorType DtorType,
275 const CXXMemberCallExpr *CE) override;
277 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
279 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
281 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
282 bool ReturnAdjustment) override {
283 // Allow inlining of thunks by emitting them with available_externally
284 // linkage together with vtables when needed.
285 if (ForVTable && !Thunk->hasLocalLinkage())
286 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
289 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
290 const ThisAdjustment &TA) override;
292 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
293 const ReturnAdjustment &RA) override;
// Under Itanium, the source of an implicit copy ctor is always the last arg.
295 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
296 FunctionArgList &Args) const override {
297 assert(!Args.empty() && "expected the arglist to not be empty!");
298 return Args.size() - 1;
// Itanium runtime entry points for pure/deleted virtual calls.
301 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
302 StringRef GetDeletedVirtualCallName() override
303 { return "__cxa_deleted_virtual"; }
// --- Array cookies ---
305 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
306 Address InitializeArrayCookie(CodeGenFunction &CGF,
308 llvm::Value *NumElements,
309 const CXXNewExpr *expr,
310 QualType ElementType) override;
311 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
313 CharUnits cookieSize) override;
// --- Static-local guards and global/thread-local destructors ---
315 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
316 llvm::GlobalVariable *DeclPtr,
317 bool PerformInit) override;
318 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
319 llvm::Constant *dtor, llvm::Constant *addr) override;
321 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
323 void EmitThreadLocalInitFuncs(
325 ArrayRef<const VarDecl *> CXXThreadLocals,
326 ArrayRef<llvm::Function *> CXXThreadLocalInits,
327 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
329 bool usesThreadWrapperFunction() const override { return true; }
330 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
331 QualType LValType) override;
333 bool NeedsVTTParameter(GlobalDecl GD) override;
335 /**************************** RTTI Uniqueness ******************************/
338 /// Returns true if the ABI requires RTTI type_info objects to be unique
339 /// across a program.
340 virtual bool shouldRTTIBeUnique() const { return true; }
343 /// What sort of unique-RTTI behavior should we use?
// NOTE(review): the enumerator names (RUK_Unique, RUK_NonUniqueHidden,
// RUK_NonUniqueVisible in upstream clang — TODO confirm) are elided here;
// only their doc comments remain.
344 enum RTTIUniquenessKind {
345 /// We are guaranteeing, or need to guarantee, that the RTTI string
349 /// We are not guaranteeing uniqueness for the RTTI string, so we
350 /// can demote to hidden visibility but must use string comparisons.
353 /// We are not guaranteeing uniqueness for the RTTI string, so we
354 /// have to use string comparisons, but we also have to emit it with
355 /// non-hidden visibility.
359 /// Return the required visibility status for the given type and linkage in
362 classifyRTTIUniqueness(QualType CanTy,
363 llvm::GlobalValue::LinkageTypes Linkage) const;
364 friend class ItaniumRTTIBuilder;
366 void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
// Scan RD's vtable for any used function-pointer slot whose canonical decl
// is inlined. NOTE(review): the `continue`/`return` statements of this loop
// are elided in this listing.
369 bool hasAnyVirtualInlineFunction(const CXXRecordDecl *RD) const {
370 const auto &VtableLayout =
371 CGM.getItaniumVTableContext().getVTableLayout(RD);
373 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
375 if (!VtableComponent.isUsedFunctionPointerKind())
378 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
379 if (Method->getCanonicalDecl()->isInlined())
// Detect a vtable that will be hidden: either its RTTI decl is hidden, or a
// used virtual function is hidden and not defined in this TU.
// NOTE(review): returns are elided in this listing.
385 bool isVTableHidden(const CXXRecordDecl *RD) const {
386 const auto &VtableLayout =
387 CGM.getItaniumVTableContext().getVTableLayout(RD);
389 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
390 if (VtableComponent.isRTTIKind()) {
391 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
392 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
394 } else if (VtableComponent.isUsedFunctionPointerKind()) {
395 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
396 if (Method->getVisibility() == Visibility::HiddenVisibility &&
397 !Method->isDefined())
// ARMCXXABI: the 32-bit ARM flavor of the Itanium ABI. Enables ARM-style
// member pointers and guard variables, makes ctors/dtors return 'this',
// and overrides the array-cookie layout.
// NOTE(review): elided listing — access specifiers and the closing `};`
// are missing from this view.
405 class ARMCXXABI : public ItaniumCXXABI {
407 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
408 ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
409 /* UseARMGuardVarABI = */ true) {}
// Constructors and non-deleting destructors return 'this' on ARM.
411 bool HasThisReturn(GlobalDecl GD) const override {
412 return (isa<CXXConstructorDecl>(GD.getDecl()) || (
413 isa<CXXDestructorDecl>(GD.getDecl()) &&
414 GD.getDtorType() != Dtor_Deleting));
417 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
418 QualType ResTy) override;
420 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
421 Address InitializeArrayCookie(CodeGenFunction &CGF,
423 llvm::Value *NumElements,
424 const CXXNewExpr *expr,
425 QualType ElementType) override;
426 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
427 CharUnits cookieSize) override;
// iOS64CXXABI: arm64 iOS variant — 32-bit vtable offsets in member function
// pointers and non-unique RTTI. NOTE(review): closing `};` elided from view.
430 class iOS64CXXABI : public ARMCXXABI {
432 iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
433 Use32BitVTableOffsetABI = true;
436 // ARM64 libraries are prepared for non-unique RTTI.
437 bool shouldRTTIBeUnique() const override { return false; }
// WebAssemblyCXXABI: Itanium with ARM-style member pointers/guards and
// this-returning ctors/dtors; calls through mismatched function types trap
// on wasm, hence canCallMismatchedFunctionType() == false.
// NOTE(review): closing `};` elided from view.
440 class WebAssemblyCXXABI final : public ItaniumCXXABI {
442 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
443 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
444 /*UseARMGuardVarABI=*/true) {}
447 bool HasThisReturn(GlobalDecl GD) const override {
448 return isa<CXXConstructorDecl>(GD.getDecl()) ||
449 (isa<CXXDestructorDecl>(GD.getDecl()) &&
450 GD.getDtorType() != Dtor_Deleting);
452 bool canCallMismatchedFunctionType() const override { return false; }
// Factory: pick the concrete Itanium-family ABI object for the target.
// Caller owns the returned object.
456 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
457 switch (CGM.getTarget().getCXXABI().getKind()) {
458 // For IR-generation purposes, there's no significant difference
459 // between the ARM and iOS ABIs.
460 case TargetCXXABI::GenericARM:
461 case TargetCXXABI::iOS:
462 case TargetCXXABI::WatchOS:
463 return new ARMCXXABI(CGM);
465 case TargetCXXABI::iOS64:
466 return new iOS64CXXABI(CGM);
468 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
469 // include the other 32-bit ARM oddities: constructor/destructor return values
470 // and array cookies.
471 case TargetCXXABI::GenericAArch64:
472 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
473 /* UseARMGuardVarABI = */ true);
475 case TargetCXXABI::GenericMIPS:
476 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true);
478 case TargetCXXABI::WebAssembly:
479 return new WebAssemblyCXXABI(CGM);
481 case TargetCXXABI::GenericItanium:
482 if (CGM.getContext().getTargetInfo().getTriple().getArch()
483 == llvm::Triple::le32) {
484 // For PNaCl, use ARM-style method pointers so that PNaCl code
485 // does not assume anything about the alignment of function
// NOTE(review): elided listing — the closing brace of this `if` is missing
// from this view.
487 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
488 /* UseARMGuardVarABI = */ false);
490 return new ItaniumCXXABI(CGM);
492 case TargetCXXABI::Microsoft:
493 llvm_unreachable("Microsoft ABI is not Itanium-based");
495 llvm_unreachable("bad ABI kind");
// Lower a member pointer type to its LLVM representation: data member
// pointers are a single ptrdiff_t offset; member function pointers are a
// { ptrdiff_t ptr, ptrdiff_t adj } pair (Itanium ABI 2.3).
// NOTE(review): the return type on the preceding (elided) line is not
// visible in this listing.
499 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
500 if (MPT->isMemberDataPointer())
501 return CGM.PtrDiffTy;
502 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, nullptr);
505 /// In the Itanium and ARM ABIs, method pointers have the form:
506 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
508 /// In the Itanium ABI:
509 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero
510 /// - the this-adjustment is (memptr.adj)
511 /// - the virtual offset is (memptr.ptr - 1)
514 /// - method pointers are virtual if (memptr.adj & 1) is nonzero
515 /// - the this-adjustment is (memptr.adj >> 1)
516 /// - the virtual offset is (memptr.ptr)
517 /// ARM uses 'adj' for the virtual flag because Thumb functions
518 /// may be only single-byte aligned.
520 /// If the member is virtual, the adjusted 'this' pointer points
521 /// to a vtable pointer from which the virtual offset is applied.
523 /// If the member is non-virtual, memptr.ptr is the address of
524 /// the function to call.
// Lower a call through a pointer-to-member-function: adjust 'this' by
// memptr.adj, then branch on the virtual bit (ptr LSB on Itanium, adj LSB
// on ARM) to either load the target out of the vtable or intptr-cast
// memptr.ptr directly. Produces the adjusted this in ThisPtrForCall and a
// CGCallee for the selected function pointer.
// NOTE(review): elided listing — `else` keywords, some closing braces, and
// the final `return` are missing from this view.
525 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
526 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
527 llvm::Value *&ThisPtrForCall,
528 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
529 CGBuilderTy &Builder = CGF.Builder;
531 const FunctionProtoType *FPT =
532 MPT->getPointeeType()->getAs<FunctionProtoType>();
533 const CXXRecordDecl *RD =
534 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
536 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
537 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
539 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
541 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
542 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
543 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
545 // Extract memptr.adj, which is in the second field.
546 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
548 // Compute the true adjustment.
// On ARM the adjustment is stored doubled (LSB is the virtual flag), so
// shift it back down.
549 llvm::Value *Adj = RawAdj;
550 if (UseARMMethodPtrABI)
551 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
553 // Apply the adjustment and cast back to the original struct type
// Adjustment is applied in bytes via an i8* GEP.
555 llvm::Value *This = ThisAddr.getPointer();
556 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
557 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
558 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
559 ThisPtrForCall = This;
561 // Load the function pointer.
562 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
564 // If the LSB in the function pointer is 1, the function pointer points to
565 // a virtual function.
// NOTE(review): the `else` between these two assignments is elided.
566 llvm::Value *IsVirtual;
567 if (UseARMMethodPtrABI)
568 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
570 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
571 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
572 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
574 // In the virtual path, the adjustment left 'This' pointing to the
575 // vtable of the correct base subobject. The "function pointer" is an
576 // offset within the vtable (+1 for the virtual flag on non-ARM).
577 CGF.EmitBlock(FnVirtual);
579 // Cast the adjusted this to a pointer to vtable pointer and load.
580 llvm::Type *VTableTy = Builder.getInt8PtrTy();
581 CharUnits VTablePtrAlign =
582 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
583 CGF.getPointerAlign());
584 llvm::Value *VTable =
585 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
588 // On ARM64, to reserve extra space in virtual member function pointers,
589 // we only pay attention to the low 32 bits of the offset.
590 llvm::Value *VTableOffset = FnAsInt;
591 if (!UseARMMethodPtrABI)
592 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
593 if (Use32BitVTableOffsetABI) {
594 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
595 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
597 VTable = Builder.CreateGEP(VTable, VTableOffset);
599 // Load the virtual function to call.
// NOTE(review): the load's name argument ("memptr.virtualfn" in upstream —
// TODO confirm) and the closing paren/semicolon are elided here.
600 VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
601 llvm::Value *VirtualFn =
602 Builder.CreateAlignedLoad(VTable, CGF.getPointerAlign(),
604 CGF.EmitBranch(FnEnd);
606 // In the non-virtual path, the function pointer is actually a
608 CGF.EmitBlock(FnNonVirtual);
609 llvm::Value *NonVirtualFn =
610 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
// Merge both paths with a PHI over the computed function pointer.
613 CGF.EmitBlock(FnEnd);
614 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
615 CalleePtr->addIncoming(VirtualFn, FnVirtual);
616 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
618 CGCallee Callee(FPT, CalleePtr);
622 /// Compute an l-value by applying the given pointer-to-member to a
// Apply a pointer-to-data-member (a byte offset, per Itanium ABI 2.3) to a
// base address: i8-GEP by MemPtr, then cast to the pointee type in the
// base's address space.
624 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
625 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
626 const MemberPointerType *MPT) {
627 assert(MemPtr->getType() == CGM.PtrDiffTy);
629 CGBuilderTy &Builder = CGF.Builder;
632 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
634 // Apply the offset, which we assume is non-null.
// NOTE(review): the `llvm::Value *Addr =` left-hand side of this call is on
// an elided line.
636 Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
638 // Cast the address to the appropriate pointer type, adopting the
639 // address space of the base pointer.
640 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
641 ->getPointerTo(Base.getAddressSpace());
642 return Builder.CreateBitCast(Addr, PType);
645 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
648 /// Bitcast conversions are always a no-op under Itanium.
650 /// Obligatory offset/adjustment diagram:
651 /// <-- offset --> <-- adjustment -->
652 /// |--------------------------|----------------------|--------------------|
653 /// ^Derived address point ^Base address point ^Member address point
655 /// So when converting a base member pointer to a derived member pointer,
656 /// we add the offset to the adjustment because the address point has
657 /// decreased; and conversely, when converting a derived MP to a base MP
658 /// we subtract the offset from the adjustment because the address point
661 /// The standard forbids (at compile time) conversion to and from
662 /// virtual bases, which is why we don't have to consider them here.
664 /// The standard forbids (at run time) casting a derived MP to a base
665 /// MP when the derived MP does not point to a member of the base.
666 /// This is why -1 is a reasonable choice for null data member
// IRGen overload: convert a member pointer value across a derived-to-base,
// base-to-derived, or reinterpret cast. Data member pointers get the offset
// added/subtracted (null, i.e. -1, is preserved via a select); function
// member pointers get the 'adj' field updated (doubled offset on ARM).
// NOTE(review): elided listing — the `const CastExpr *E` parameter line,
// `else` keywords, and some `llvm::Value *dst;` declarations are missing
// from this view.
669 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
672 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
673 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
674 E->getCastKind() == CK_ReinterpretMemberPointer);
676 // Under Itanium, reinterprets don't require any additional processing.
677 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
679 // Use constant emission if we can.
680 if (isa<llvm::Constant>(src))
681 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
// A null adjustment means the cast is a no-op on the representation.
683 llvm::Constant *adj = getMemberPointerAdjustment(E);
684 if (!adj) return src;
686 CGBuilderTy &Builder = CGF.Builder;
687 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
689 const MemberPointerType *destTy =
690 E->getType()->castAs<MemberPointerType>();
692 // For member data pointers, this is just a matter of adding the
693 // offset if the source is non-null.
694 if (destTy->isMemberDataPointer()) {
697 dst = Builder.CreateNSWSub(src, adj, "adj");
699 dst = Builder.CreateNSWAdd(src, adj, "adj");
// Null data member pointer is all-ones (-1); keep it null across the cast.
702 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
703 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
704 return Builder.CreateSelect(isNull, src, dst);
707 // The this-adjustment is left-shifted by 1 on ARM.
// NOTE(review): the `offset <<= 1;` between these lines is elided.
708 if (UseARMMethodPtrABI) {
709 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
711 adj = llvm::ConstantInt::get(adj->getType(), offset);
714 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
717 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
719 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
721 return Builder.CreateInsertValue(src, dstAdj, 1);
// Constant-folding overload of the member pointer conversion above: same
// semantics, but operates entirely on llvm::Constant via ConstantExpr.
// NOTE(review): elided listing — `else` keywords and the
// `isDerivedToBase`-selected branch headers are partially missing.
725 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
726 llvm::Constant *src) {
727 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
728 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
729 E->getCastKind() == CK_ReinterpretMemberPointer);
731 // Under Itanium, reinterprets don't require any additional processing.
732 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
734 // If the adjustment is trivial, we don't need to do anything.
735 llvm::Constant *adj = getMemberPointerAdjustment(E);
736 if (!adj) return src;
738 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
740 const MemberPointerType *destTy =
741 E->getType()->castAs<MemberPointerType>();
743 // For member data pointers, this is just a matter of adding the
744 // offset if the source is non-null.
745 if (destTy->isMemberDataPointer()) {
746 // null maps to null.
747 if (src->isAllOnesValue()) return src;
750 return llvm::ConstantExpr::getNSWSub(src, adj);
752 return llvm::ConstantExpr::getNSWAdd(src, adj);
755 // The this-adjustment is left-shifted by 1 on ARM.
// NOTE(review): the `offset <<= 1;` between these lines is elided.
756 if (UseARMMethodPtrABI) {
757 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
759 adj = llvm::ConstantInt::get(adj->getType(), offset);
762 llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
763 llvm::Constant *dstAdj;
765 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
767 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
769 return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
// Null member pointer constants: -1 for data member pointers, {0, 0} for
// member function pointers (Itanium ABI 2.3).
773 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
774 // Itanium C++ ABI 2.3:
775 // A NULL pointer is represented as -1.
776 if (MPT->isMemberDataPointer())
777 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
779 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
780 llvm::Constant *Values[2] = { Zero, Zero };
781 return llvm::ConstantStruct::getAnon(Values);
// Build a constant pointer-to-data-member: just the field's byte offset as
// a ptrdiff_t. NOTE(review): the `CharUnits offset` parameter line is
// elided in this listing.
785 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
787 // Itanium C++ ABI 2.3:
788 // A pointer to data member is an offset from the base address of
789 // the class object containing it, represented as a ptrdiff_t
790 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
// A plain &Class::method has no this-adjustment; delegate with zero.
794 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
795 return BuildMemberPointer(MD, CharUnits::Zero());
// Build the {ptr, adj} constant for a pointer to member function. Virtual
// methods encode a vtable byte offset (tagged +1 in ptr on Itanium, or with
// the LSB of a doubled adj on ARM); non-virtual methods encode the function
// address. ThisAdjustment is the base-path adjustment in bytes.
// NOTE(review): elided listing — `else` branches, the `llvm::Type *Ty;`
// declaration, and some closing braces are missing from this view.
798 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
799 CharUnits ThisAdjustment) {
800 assert(MD->isInstance() && "Member function must not be static!");
801 MD = MD->getCanonicalDecl();
803 CodeGenTypes &Types = CGM.getTypes();
805 // Get the function pointer (or index if this is a virtual function).
806 llvm::Constant *MemPtr[2];
807 if (MD->isVirtual()) {
808 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
// Vtable slots are pointer-sized; convert the slot index to a byte offset.
810 const ASTContext &Context = getContext();
811 CharUnits PointerWidth =
812 Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
813 uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
815 if (UseARMMethodPtrABI) {
816 // ARM C++ ABI 3.2.1:
817 // This ABI specifies that adj contains twice the this
818 // adjustment, plus 1 if the member function is virtual. The
819 // least significant bit of adj then makes exactly the same
820 // discrimination as the least significant bit of ptr does for
822 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
823 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
824 2 * ThisAdjustment.getQuantity() + 1);
826 // Itanium C++ ABI 2.3:
827 // For a virtual function, [the pointer field] is 1 plus the
828 // virtual table offset (in bytes) of the function,
829 // represented as a ptrdiff_t.
830 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
831 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
832 ThisAdjustment.getQuantity());
// Non-virtual path: emit the function's address directly.
835 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
837 // Check whether the function has a computable LLVM signature.
838 if (Types.isFuncTypeConvertible(FPT)) {
839 // The function has a computable LLVM signature; use the correct type.
840 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
842 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
843 // function type is incomplete.
// NOTE(review): the `Ty = CGM.PtrDiffTy;` fallback (per upstream — TODO
// confirm) is on an elided line.
846 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
848 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
849 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
850 (UseARMMethodPtrABI ? 2 : 1) *
851 ThisAdjustment.getQuantity());
854 return llvm::ConstantStruct::getAnon(MemPtr);
// Build a constant member pointer from an evaluated APValue: null if there
// is no decl, a function member pointer for methods, otherwise a data
// member pointer at the field's offset plus the path adjustment.
// NOTE(review): the `QualType MPType` parameter line and the null check
// guarding the EmitNullMemberPointer return are elided in this listing.
857 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
859 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
860 const ValueDecl *MPD = MP.getMemberPointerDecl();
862 return EmitNullMemberPointer(MPT);
864 CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
866 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
867 return BuildMemberPointer(MD, ThisAdjustment);
869 CharUnits FieldOffset =
870 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
871 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
874 /// The comparison algorithm is pretty easy: the member pointers are
875 /// the same if they're either bitwise identical *or* both null.
877 /// ARM is different here only because null-ness is more complicated.
// Compare two member pointers for (in)equality. Inequality is handled by
// flipping the predicate and swapping And/Or per De Morgan, so one code
// path serves both. NOTE(review): elided listing — the L/R/MPT parameter
// lines, the `if (Inequality)`/`else` headers, and the final `return` are
// missing from this view.
879 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
882 const MemberPointerType *MPT,
884 CGBuilderTy &Builder = CGF.Builder;
886 llvm::ICmpInst::Predicate Eq;
887 llvm::Instruction::BinaryOps And, Or;
// Inequality branch: invert the predicate and swap the combining ops.
889 Eq = llvm::ICmpInst::ICMP_NE;
890 And = llvm::Instruction::Or;
891 Or = llvm::Instruction::And;
// Equality branch: straight predicate and ops.
893 Eq = llvm::ICmpInst::ICMP_EQ;
894 And = llvm::Instruction::And;
895 Or = llvm::Instruction::Or;
898 // Member data pointers are easy because there's a unique null
899 // value, so it just comes down to bitwise equality.
900 if (MPT->isMemberDataPointer())
901 return Builder.CreateICmp(Eq, L, R);
903 // For member function pointers, the tautologies are more complex.
904 // The Itanium tautology is:
905 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
906 // The ARM tautology is:
907 // (L == R) <==> (L.ptr == R.ptr &&
908 // (L.adj == R.adj ||
909 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
910 // The inequality tautologies have exactly the same structure, except
911 // applying De Morgan's laws.
913 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
914 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
916 // This condition tests whether L.ptr == R.ptr. This must always be
917 // true for equality to hold.
918 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
920 // This condition, together with the assumption that L.ptr == R.ptr,
921 // tests whether the pointers are both null. ARM imposes an extra
923 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
924 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
926 // This condition tests whether L.adj == R.adj. If this isn't
927 // true, the pointers are unequal unless they're both null.
928 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
929 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
930 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
932 // Null member function pointers on ARM clear the low bit of Adj,
933 // so the zero condition has to check that neither low bit is set.
934 if (UseARMMethodPtrABI) {
935 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
937 // Compute (l.adj | r.adj) & 1 and test it against zero.
938 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
939 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
940 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
942 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
945 // Tie together all our conditions.
946 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
947 Result = Builder.CreateBinOp(And, PtrEq, Result,
948 Inequality ? "memptr.ne" : "memptr.eq");
// Emit the boolean test for "member pointer is not null".
// Data member pointers use -1 as the null value (0 is a valid field offset);
// function member pointers are null iff 'ptr' is null, except on ARM where
// a set virtual bit in 'adj' also makes the pointer non-null.
// NOTE(review): the listing elides some lines of this function (e.g. its
// final return); code below is reproduced verbatim from the elided dump.
953 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
955                                           const MemberPointerType *MPT) {
956 CGBuilderTy &Builder = CGF.Builder;
958 /// For member data pointers, this is just a check against -1.
959 if (MPT->isMemberDataPointer()) {
960 assert(MemPtr->getType() == CGM.PtrDiffTy);
961 llvm::Value *NegativeOne =
962 llvm::Constant::getAllOnesValue(MemPtr->getType());
963 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
966 // In Itanium, a member function pointer is not null if 'ptr' is not null.
967 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
969 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
970 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
972 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
973 // (the virtual bit) is set.
974 if (UseARMMethodPtrABI) {
975 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
976 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
977 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
978 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
980 Result = Builder.CreateOr(Result, IsVirtual);
// Decide whether a C++ class type must be returned indirectly (sret):
// under Itanium, types with a non-trivial destructor or copy constructor
// are passed/returned by invisible reference.
// NOTE(review): the listing elides lines of this function (e.g. the
// null-RD handling and the return statements) — confirm against upstream.
986 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
987 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
991 // Return indirectly if we have a non-trivial copy ctor or non-trivial dtor.
992 // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
994 if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) {
995 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
996 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1002 /// The Itanium ABI requires non-zero initialization only for data
1003 /// member pointers, for which '0' is a valid offset.
1004 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
// Function member pointers ARE zero-initializable (null == {0,0});
// data member pointers are not, since their null value is -1.
1005 return MPT->isMemberFunctionPointer();
1008 /// The Itanium ABI always places an offset to the complete object
1009 /// at entry -2 in the vtable.
// Emits a virtual 'delete expr': for a global delete, first recovers the
// complete-object pointer via the offset-to-top slot at vtable entry -2,
// pushes a cleanup that calls operator delete (so it runs even if the dtor
// throws), then emits the virtual destructor call.
// NOTE(review): the listing elides some lines (e.g. ClassDecl's declaration
// and the trailing cleanup-arg lines); code reproduced verbatim.
1010 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1011 const CXXDeleteExpr *DE,
1013 QualType ElementType,
1014 const CXXDestructorDecl *Dtor) {
1015 bool UseGlobalDelete = DE->isGlobalDelete();
1016 if (UseGlobalDelete) {
1017 // Derive the complete-object pointer, which is what we need
1018 // to pass to the deallocation function.
1020 // Grab the vtable pointer as an intptr_t*.
1022 cast<CXXRecordDecl>(ElementType->getAs<RecordType>()->getDecl());
1023 llvm::Value *VTable =
1024 CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1026 // Track back to entry -2 and pull out the offset there.
1027 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1028 VTable, -2, "complete-offset.ptr");
1029 llvm::Value *Offset =
1030 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1032 // Apply the offset.
1033 llvm::Value *CompletePtr =
1034 CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1035 CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
1037 // If we're supposed to call the global delete, make sure we do so
1038 // even if the destructor throws.
1039 CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1043 // FIXME: Provide a source location here even though there's no
1044 // CXXMemberCallExpr for dtor call.
1045 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1046 EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
1048 if (UseGlobalDelete)
// Pop the operator-delete cleanup pushed above, running it now.
1049 CGF.PopCleanupBlock();
// Emit a call to __cxa_rethrow for a 'throw;' with no operand.
// When isNoReturn, emits it as a noreturn call/invoke (no fallthrough);
// otherwise as an ordinary runtime call that may unwind.
1052 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1053 // void __cxa_rethrow();
1055 llvm::FunctionType *FTy =
1056 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
1058 llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
// NOTE(review): the branch on isNoReturn is elided in this listing;
// the two emission paths below are its then/else arms.
1061 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1063 CGF.EmitRuntimeCallOrInvoke(Fn);
// Return (creating if needed) the declaration of the Itanium EH runtime's
// exception-allocation entry point.
1066 static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
1067 // void *__cxa_allocate_exception(size_t thrown_size);
1069 llvm::FunctionType *FTy =
1070 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);
1072 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
// Return (creating if needed) the declaration of __cxa_throw.  All three
// parameters are lowered as i8* here; the destructor pointer is bitcast
// to i8* by the caller.
1075 static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
1076 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1077 // void (*dest) (void *));
1079 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1080 llvm::FunctionType *FTy =
1081 llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);
1083 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
// Emit a 'throw expr': allocate the exception object with
// __cxa_allocate_exception, copy-construct the operand into it, then call
// __cxa_throw with the object, its RTTI, and (for class types with a
// non-trivial destructor) the complete-object destructor, else null.
1086 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1087 QualType ThrowType = E->getSubExpr()->getType();
1088 // Now allocate the exception object.
1089 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1090 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1092 llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
1093 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1094 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1096 CharUnits ExnAlign = getAlignmentOfExnObject();
1097 CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
1099 // Now throw the exception.
1100 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1103 // The address of the destructor. If the exception type has a
1104 // trivial destructor (or isn't a record), we just pass null.
1105 llvm::Constant *Dtor = nullptr;
1106 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1107 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1108 if (!Record->hasTrivialDestructor()) {
1109 CXXDestructorDecl *DtorD = Record->getDestructor();
1110 Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
1111 Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1114 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1116 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
// __cxa_throw never returns; emit as noreturn call or invoke.
1117 CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
// Return the declaration of the ABI's __dynamic_cast runtime entry point,
// marked nounwind+readonly so the optimizer can CSE/hoist calls to it.
1120 static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1121 // void *__dynamic_cast(const void *sub,
1122 // const abi::__class_type_info *src,
1123 // const abi::__class_type_info *dst,
1124 // std::ptrdiff_t src2dst_offset);
1126 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1127 llvm::Type *PtrDiffTy =
1128 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1130 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1132 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1134 // Mark the function as nounwind readonly.
1135 llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1136 llvm::Attribute::ReadOnly };
1137 llvm::AttributeSet Attrs = llvm::AttributeSet::get(
1138 CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);
1140 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
// Return the declaration of __cxa_bad_cast, called on a failed
// dynamic_cast to a reference type.
1143 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1144 // void __cxa_bad_cast();
1145 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1146 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1149 /// \brief Compute the src2dst_offset hint as described in the
1150 /// Itanium C++ ABI [2.9.7]
// Hint values: >=0 is the unique public non-virtual base offset of Src in
// Dst; -1 means a virtual base appears on a path; -2 means Src is not a
// public base of Dst; -3 means Src appears as multiple public bases.
// NOTE(review): the listing elides some lines (e.g. Offset's declaration,
// NumPublicPaths increment, and the final return) — reproduced verbatim.
1151 static CharUnits computeOffsetHint(ASTContext &Context,
1152 const CXXRecordDecl *Src,
1153 const CXXRecordDecl *Dst) {
1154 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1155 /*DetectVirtual=*/false);
1157 // If Dst is not derived from Src we can skip the whole computation below and
1158 // return that Src is not a public base of Dst. Record all inheritance paths.
1159 if (!Dst->isDerivedFrom(Src, Paths))
1160 return CharUnits::fromQuantity(-2ULL);
1162 unsigned NumPublicPaths = 0;
1165 // Now walk all possible inheritance paths.
1166 for (const CXXBasePath &Path : Paths) {
1167 if (Path.Access != AS_public) // Ignore non-public inheritance.
1172 for (const CXXBasePathElement &PathElement : Path) {
1173 // If the path contains a virtual base class we can't give any hint.
1175 if (PathElement.Base->isVirtual())
1176 return CharUnits::fromQuantity(-1ULL);
1178 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1181 // Accumulate the base class offsets.
1182 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1183 Offset += L.getBaseClassOffset(
1184 PathElement.Base->getType()->getAsCXXRecordDecl());
1188 // -2: Src is not a public base of Dst.
1189 if (NumPublicPaths == 0)
1190 return CharUnits::fromQuantity(-2ULL);
1192 // -3: Src is a multiple public base type but never a virtual base type.
1193 if (NumPublicPaths > 1)
1194 return CharUnits::fromQuantity(-3ULL);
1196 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1197 // Return the offset of Src from the origin of Dst.
// Return the declaration of __cxa_bad_typeid, called when typeid is
// applied through a null glvalue.
1201 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1202 // void __cxa_bad_typeid();
1203 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1205 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
// Whether typeid on this expression needs an explicit null check before
// loading the vptr.  NOTE(review): the body (the return) is elided in
// this listing — cannot state the policy from here; confirm upstream.
1208 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1209 QualType SrcRecordTy) {
// Emit the failure path for typeid on a null glvalue: call
// __cxa_bad_typeid (which does not return) and terminate the block.
1213 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1214 llvm::Value *Fn = getBadTypeidFn(CGF);
1215 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1216 CGF.Builder.CreateUnreachable();
// Emit a polymorphic typeid: load the vptr and read the std::type_info
// pointer stored at vtable entry -1 (per the Itanium vtable layout).
1219 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1220 QualType SrcRecordTy,
1222 llvm::Type *StdTypeInfoPtrTy) {
// NOTE(review): ClassDecl's declaration line is elided in this listing.
1224 cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1225 llvm::Value *Value =
1226 CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1228 // Load the type info.
1229 Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1230 return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
// Whether the __dynamic_cast call needs a preceding null check on the
// source pointer.  NOTE(review): the body (the return) is elided in this
// listing — confirm the policy upstream.
1233 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1234 QualType SrcRecordTy) {
// Emit a checked dynamic_cast via the __dynamic_cast runtime call:
// gathers source/dest RTTI, the src2dst offset hint, performs the call,
// and — for reference casts — branches to __cxa_bad_cast on null result.
1238 llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1239 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1240 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1241 llvm::Type *PtrDiffLTy =
1242 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1243 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1245 llvm::Value *SrcRTTI =
1246 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1247 llvm::Value *DestRTTI =
1248 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1250 // Compute the offset hint.
1251 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1252 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
// The hint is a ptrdiff_t constant per Itanium ABI 2.9.7 (see
// computeOffsetHint above for the -1/-2/-3 encodings).
1253 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1255 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1257 // Emit the call to __dynamic_cast.
1258 llvm::Value *Value = ThisAddr.getPointer();
1259 Value = CGF.EmitCastToVoidPtr(Value);
1261 llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1262 Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1263 Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1265 /// C++ [expr.dynamic.cast]p9:
1266 /// A failed cast to reference type throws std::bad_cast
1267 if (DestTy->isReferenceType()) {
1268 llvm::BasicBlock *BadCastBlock =
1269 CGF.createBasicBlock("dynamic_cast.bad_cast");
1271 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1272 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1274 CGF.EmitBlock(BadCastBlock);
1275 EmitBadCastCall(CGF);
// Emit dynamic_cast<void*>: no runtime call needed — read the
// offset-to-top entry at vtable slot -2 and add it to the source pointer
// to recover the complete object.
1281 llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1283 QualType SrcRecordTy,
1285 llvm::Type *PtrDiffLTy =
1286 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1287 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
// NOTE(review): ClassDecl's declaration line is elided in this listing.
1290 cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1291 // Get the vtable pointer.
1292 llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
1295 // Get the offset-to-top from the vtable.
1296 llvm::Value *OffsetToTop =
1297 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1299 CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
1302 // Finally, add the offset to the pointer.
1303 llvm::Value *Value = ThisAddr.getPointer();
1304 Value = CGF.EmitCastToVoidPtr(Value);
1305 Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1307 return CGF.Builder.CreateBitCast(Value, DestLTy);
// Emit the failed-reference-dynamic_cast path: call __cxa_bad_cast
// (noreturn) and terminate the block.
1310 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1311 llvm::Value *Fn = getBadCastFn(CGF);
1312 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1313 CGF.Builder.CreateUnreachable();
// Load a virtual base offset at runtime: the vbase-offset-offset (a
// compile-time constant from the vtable layout) indexes off the vptr to
// a ptrdiff_t slot holding the actual displacement of the virtual base.
1318 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1320 const CXXRecordDecl *ClassDecl,
1321 const CXXRecordDecl *BaseClassDecl) {
1322 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1323 CharUnits VBaseOffsetOffset =
1324 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1327 llvm::Value *VBaseOffsetPtr =
1328 CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1329 "vbase.offset.ptr");
1330 VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1331 CGM.PtrDiffTy->getPointerTo());
1333 llvm::Value *VBaseOffset =
1334 CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
// Emit the Itanium constructor variants: always the base-object
// constructor (C2), plus the complete-object constructor (C1) unless the
// class is abstract (a complete object of it can never be constructed).
1340 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1341 // Just make sure we're in sync with TargetCXXABI.
1342 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1344 // The constructor used for constructing this as a base class;
1345 // ignores virtual bases.
1346 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1348 // The constructor used for constructing this as a complete class;
1349 // constructs the virtual bases, then calls the base constructor.
1350 if (!D->getParent()->isAbstract()) {
1351 // We don't need to emit the complete ctor if the class is abstract.
1352 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
// Adjust a constructor/destructor signature for this ABI: base-object
// variants of classes with virtual bases take a VTT (void**) as the
// second parameter, immediately after 'this'.
1357 ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
1358 SmallVectorImpl<CanQualType> &ArgTys) {
1359 ASTContext &Context = getContext();
1361 // All parameters are already in place except VTT, which goes after 'this'.
1362 // These are Clang types, so we don't need to worry about sret yet.
1364 // Check if we need to add a VTT parameter (which has type void **).
1365 if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0)
1366 ArgTys.insert(ArgTys.begin() + 1,
1367 Context.getPointerType(Context.VoidPtrTy));
// Emit the Itanium destructor variants: base (D2), complete (D1), and —
// for virtual destructors (NOTE(review): the guarding condition line is
// elided in this listing) — the deleting destructor (D0).
1370 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1371 // The destructor used for destructing this as a base class; ignores
1373 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1375 // The destructor used for destructing this as a most-derived class;
1376 // call the base destructor and then destructs any virtual bases.
1377 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1379 // The destructor in a virtual table is always a 'deleting'
1380 // destructor, which calls the complete destructor and then uses the
1381 // appropriate operator delete.
1383 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
// Add the implicit VTT parameter to the IR-level argument list of a
// structor that needs one, and record its decl for the prolog to load.
1386 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1388 FunctionArgList &Params) {
1389 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1390 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1392 // Check if we need a VTT parameter as well.
1393 if (NeedsVTTParameter(CGF.CurGD)) {
1394 ASTContext &Context = getContext();
1396 // FIXME: avoid the fake decl
1397 QualType T = Context.getPointerType(Context.VoidPtrTy);
1398 ImplicitParamDecl *VTTDecl
1399 = ImplicitParamDecl::Create(Context, nullptr, MD->getLocation(),
1400 &Context.Idents.get("vtt"), T);
// VTT goes right after 'this', mirroring buildStructorSignature.
1401 Params.insert(Params.begin() + 1, VTTDecl);
1402 getStructorImplicitParamDecl(CGF) = VTTDecl;
// Standard prolog for instance methods: materialize 'this', load the VTT
// if present, and pre-store 'this' into the return slot for ABIs where
// certain functions return 'this' (HasThisReturn).
1406 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1407 // Naked functions have no prolog.
1408 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1411 /// Initialize the 'this' slot.
1414 /// Initialize the 'vtt' slot if needed.
1415 if (getStructorImplicitParamDecl(CGF)) {
1416 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1417 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1420 /// If this is a function that the ABI specifies returns 'this', initialize
1421 /// the return slot to 'this' at the start of the function.
1423 /// Unlike the setting of return types, this is done within the ABI
1424 /// implementation instead of by clients of CGCXXABI because:
1425 /// 1) getThisValue is currently protected
1426 /// 2) in theory, an ABI could implement 'this' returns some other way;
1427 /// HasThisReturn only specifies a contract, not the implementation
1428 if (HasThisReturn(CGF.CurGD))
1429 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
// At a constructor call site, insert the VTT argument (if the callee
// variant needs one) as the second argument; returns the number of
// implicit args added (0 or 1).
1432 unsigned ItaniumCXXABI::addImplicitConstructorArgs(
1433 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1434 bool ForVirtualBase, bool Delegating, CallArgList &Args) {
1435 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1438 // Insert the implicit 'vtt' argument as the second argument.
// NOTE(review): the 'llvm::Value *VTT =' declaration line is elided here.
1440 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1441 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1442 Args.insert(Args.begin() + 1,
1443 CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
1444 return 1; // Added one arg.
// Emit a direct (non-virtual-dispatch) destructor call, passing the VTT
// when required.  Under -fapple-kext, virtual non-base destructor calls
// are routed through the kext vtable instead of a direct callee.
1447 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1448 const CXXDestructorDecl *DD,
1449 CXXDtorType Type, bool ForVirtualBase,
1450 bool Delegating, Address This) {
1451 GlobalDecl GD(DD, Type);
1452 llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1453 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
// NOTE(review): 'CGCallee Callee;' and the else-branch line are elided in
// this listing; the two assignments below are the kext / direct paths.
1456 if (getContext().getLangOpts().AppleKext &&
1457 Type != Dtor_Base && DD->isVirtual())
1458 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1461 CGCallee::forDirect(CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
1464 CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
1465 This.getPointer(), VTT, VTTTy,
// Emit the vtable definition for RD (if not already emitted): build the
// initializer from the vtable layout, set linkage/comdat/visibility and
// pointer alignment, handle the __cxxabiv1::__fundamental_type_info
// special case, and attach CFI/type metadata.
1469 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1470 const CXXRecordDecl *RD) {
1471 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1472 if (VTable->hasInitializer())
1475 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1476 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1477 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1478 llvm::Constant *RTTI =
1479 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1481 // Create and set the initializer.
1482 ConstantInitBuilder Builder(CGM);
1483 auto Components = Builder.beginStruct();
1484 CGVT.createVTableInitializer(Components, VTLayout, RTTI);
1485 Components.finishAndSetAsInitializer(VTable);
1487 // Set the correct linkage.
1488 VTable->setLinkage(Linkage);
1490 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1491 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1493 // Set the right visibility.
1494 CGM.setGlobalVisibility(VTable, RD);
1496 // Use pointer alignment for the vtable. Otherwise we would align them based
1497 // on the size of the initializer which doesn't make sense as only single
1499 unsigned PAlign = CGM.getTarget().getPointerAlign(0);
1500 VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());
1502 // If this is the magic class __cxxabiv1::__fundamental_type_info,
1503 // we will emit the typeinfo for the fundamental types. This is the
1504 // same behaviour as GCC.
1505 const DeclContext *DC = RD->getDeclContext();
1506 if (RD->getIdentifier() &&
1507 RD->getIdentifier()->isStr("__fundamental_type_info") &&
1508 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1509 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1510 DC->getParent()->isTranslationUnit())
1511 EmitFundamentalRTTIDescriptors(RD->hasAttr<DLLExportAttr>());
1513 if (!VTable->isDeclarationForLinker())
1514 CGM.EmitVTableTypeMetadata(VTable, VTLayout);
// Whether storing this vptr requires a runtime virtual-base offset:
// only when the vptr belongs to a virtual base and the current structor
// takes a VTT.  NOTE(review): the early-return line for the null
// NearestVBase case is elided in this listing.
1517 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1518 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1519 if (Vptr.NearestVBase == nullptr)
1521 return NeedsVTTParameter(CGF.CurGD);
// Pick the vtable address point to store during construction: structors
// with a VTT (classes with virtual bases) read it from the VTT; otherwise
// the constant address point into the class's own vtable is used.
1524 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1525 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1526 const CXXRecordDecl *NearestVBase) {
1528 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1529 NeedsVTTParameter(CGF.CurGD)) {
1530 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1533 return getVTableAddressPoint(Base, VTableClass);
// Compute the constant address point for Base within VTableClass's vtable
// group as an inbounds GEP constant expression (with InRangeIndex on the
// vtable index so the optimizer knows accesses stay within one vtable).
1537 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1538 const CXXRecordDecl *VTableClass) {
1539 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1541 // Find the appropriate vtable within the vtable group, and the address point
1542 // within that vtable.
1543 VTableLayout::AddressPointLocation AddressPoint =
1544 CGM.getItaniumVTableContext()
1545 .getVTableLayout(VTableClass)
1546 .getAddressPoint(Base);
1547 llvm::Value *Indices[] = {
1548 llvm::ConstantInt::get(CGM.Int32Ty, 0),
1549 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1550 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1553 return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1554 Indices, /*InBounds=*/true,
1555 /*InRangeIndex=*/1);
// Load the vtable address point for a subobject from the VTT: index the
// VTT by the secondary virtual pointer index and load the slot.
1558 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1559 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1560 const CXXRecordDecl *NearestVBase) {
1561 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1562 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1564 // Get the secondary vpointer index.
1565 uint64_t VirtualPointerIndex =
1566 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1569 llvm::Value *VTT = CGF.LoadCXXVTT();
1570 if (VirtualPointerIndex)
1571 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1573 // And load the address point from the VTT.
1574 return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
// Constant-expression flavor: under Itanium the address point is always a
// constant GEP, so simply delegate to getVTableAddressPoint.
1577 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1578 BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1579 return getVTableAddressPoint(Base, VTableClass);
// Get-or-create the global variable for RD's vtable (cached in VTables):
// mangles the name, creates an external declaration with unnamed_addr,
// queues deferred emission, and applies DLL storage classes.
1582 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1583 CharUnits VPtrOffset) {
1584 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1586 llvm::GlobalVariable *&VTable = VTables[RD];
// NOTE(review): the early return on a cache hit is elided in this listing.
1590 // Queue up this vtable for possible deferred emission.
1591 CGM.addDeferredVTable(RD);
1593 SmallString<256> Name;
1594 llvm::raw_svector_ostream Out(Name);
1595 getMangleContext().mangleCXXVTable(RD, Out);
1597 const VTableLayout &VTLayout =
1598 CGM.getItaniumVTableContext().getVTableLayout(RD);
1599 llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1601 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1602 Name, VTableType, llvm::GlobalValue::ExternalLinkage);
1603 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1605 if (RD->hasAttr<DLLImportAttr>())
1606 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1607 else if (RD->hasAttr<DLLExportAttr>())
1608 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
// Load a virtual function pointer for GD through This's vptr.  Uses the
// CFI checked-load intrinsic when enabled; otherwise emits type metadata
// for the vcall and a plain indexed load, optionally tagged
// !invariant.load under -fstrict-vtable-pointers at -O1+.
1613 CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1617 SourceLocation Loc) {
1618 GD = GD.getCanonicalDecl();
1619 Ty = Ty->getPointerTo()->getPointerTo();
1620 auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1621 llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
1623 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
// NOTE(review): 'llvm::Value *VFunc;' and the else line are elided in
// this listing; the two paths below assign it.
1625 if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1626 VFunc = CGF.EmitVTableTypeCheckedLoad(
1627 MethodDecl->getParent(), VTable,
1628 VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1630 CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1632 llvm::Value *VFuncPtr =
1633 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
1635 CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
1637 // Add !invariant.load md to virtual function load to indicate that
1638 // function didn't change inside vtable.
1639 // It's safe to add it without -fstrict-vtable-pointers, but it would not
1640 // help in devirtualization because it will only matter if we will have 2
1641 // the same virtual function loads from the same vtable load, which won't
1642 // happen without enabled devirtualization with -fstrict-vtable-pointers.
1643 if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1644 CGM.getCodeGenOpts().StrictVTablePointers)
1645 VFuncLoad->setMetadata(
1646 llvm::LLVMContext::MD_invariant_load,
1647 llvm::MDNode::get(CGM.getLLVMContext(),
1648 llvm::ArrayRef<llvm::Metadata *>()));
1652 CGCallee Callee(MethodDecl, VFunc);
// Emit a virtual destructor call (deleting or complete): look the callee
// up through the vtable and invoke it with no implicit extra argument.
1656 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1657 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1658 Address This, const CXXMemberCallExpr *CE) {
1659 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1660 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1662 const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
1663 Dtor, getFromDtorType(DtorType));
1664 llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
// NOTE(review): 'CGCallee Callee =' is elided in this listing.
1666 getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
1667 CE ? CE->getLocStart() : SourceLocation());
1669 CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
1670 This.getPointer(), /*ImplicitParam=*/nullptr,
1671 QualType(), CE, nullptr);
// Emit the VTT (virtual table table) for a class with virtual bases,
// using the same linkage as the class's vtable.
1675 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1676 CodeGenVTables &VTables = CGM.getVTables();
1677 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1678 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
// Whether it is safe to emit an available_externally copy of RD's vtable
// for devirtualization: never under -fapple-kext, and only when the class
// has no inline virtual functions and its vtable is not hidden.
1681 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
1682 // We don't emit available_externally vtables if we are in -fapple-kext mode
1683 // because kext mode does not permit devirtualization.
1684 if (CGM.getLangOpts().AppleKext)
1687 // If we don't have any inline virtual functions, and if vtable is not hidden,
1688 // then we are safe to emit available_externally copy of vtable.
1689 // FIXME we can still emit a copy of the vtable if we
1690 // can emit definition of the inline functions.
1691 return !hasAnyVirtualInlineFunction(RD) && !isVTableHidden(RD);
// Shared implementation of this-pointer and return-value adjustments for
// thunks.  Ordering differs by direction: base-to-derived ('this')
// applies the non-virtual delta before the virtual lookup; derived-to-base
// (return) applies it after.  The virtual part loads a ptrdiff_t from the
// vtable at VirtualAdjustment bytes from the vptr.
1693 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
1695 int64_t NonVirtualAdjustment,
1696 int64_t VirtualAdjustment,
1697 bool IsReturnAdjustment) {
1698 if (!NonVirtualAdjustment && !VirtualAdjustment)
1699 return InitialPtr.getPointer();
1701 Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
1703 // In a base-to-derived cast, the non-virtual adjustment is applied first.
1704 if (NonVirtualAdjustment && !IsReturnAdjustment) {
1705 V = CGF.Builder.CreateConstInBoundsByteGEP(V,
1706 CharUnits::fromQuantity(NonVirtualAdjustment));
1709 // Perform the virtual adjustment if we have one.
1710 llvm::Value *ResultPtr;
1711 if (VirtualAdjustment) {
1712 llvm::Type *PtrDiffTy =
1713 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1715 Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
1716 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
1718 llvm::Value *OffsetPtr =
1719 CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
1721 OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
1723 // Load the adjustment offset from the vtable.
1724 llvm::Value *Offset =
1725 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1727 // Adjust our pointer.
1728 ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
// NOTE(review): the '} else {' line is elided in this listing.
1730 ResultPtr = V.getPointer();
1733 // In a derived-to-base conversion, the non-virtual adjustment is
1735 if (NonVirtualAdjustment && IsReturnAdjustment) {
1736 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
1737 NonVirtualAdjustment);
1740 // Cast back to the original type.
1741 return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
// Thunk 'this' adjustment: the Itanium virtual component is a
// vcall-offset offset looked up through the vtable.
1744 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
1746 const ThisAdjustment &TA) {
1747 return performTypeAdjustment(CGF, This, TA.NonVirtual,
1748 TA.Virtual.Itanium.VCallOffsetOffset,
1749 /*IsReturnAdjustment=*/false);
// Thunk return-value adjustment: the virtual component is a vbase-offset
// offset; non-virtual delta is applied after the virtual lookup.
1753 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
1754 const ReturnAdjustment &RA) {
1755 return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
1756 RA.Virtual.Itanium.VBaseOffsetOffset,
1757 /*IsReturnAdjustment=*/true);
// ARM thunk epilogue: destructor thunks return an indeterminate value in
// the ARM ABI, so substitute undef; everything else defers to the base
// Itanium implementation.
1760 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
1761 RValue RV, QualType ResultType) {
1762 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
1763 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
1765 // Destructor thunks in the ARM ABI have indeterminate results.
1766 llvm::Type *T = CGF.ReturnValue.getElementType();
1767 RValue Undef = RValue::get(llvm::UndefValue::get(T));
1768 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
1771 /************************** Array allocation cookies **************************/
// Itanium cookie size: a size_t, rounded up to the element's alignment so
// the array data that follows stays correctly aligned.
1773 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1774 // The array cookie is a size_t; pad that up to the element alignment.
1775 // The cookie is actually right-justified in that space.
1776 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
1777 CGM.getContext().getTypeAlignInChars(elementType));
// Write the element count into the (right-justified) array cookie at the
// start of a new[] allocation and return the adjusted pointer to the
// element storage.  Under ASan (address space 0, replaceable global
// operator new) the cookie is additionally poisoned via the runtime.
1780 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1782 llvm::Value *NumElements,
1783 const CXXNewExpr *expr,
1784 QualType ElementType) {
1785 assert(requiresArrayCookie(expr));
1787 unsigned AS = NewPtr.getAddressSpace();
1789 ASTContext &Ctx = getContext();
1790 CharUnits SizeSize = CGF.getSizeSize();
1792 // The size of the cookie.
1793 CharUnits CookieSize =
1794 std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
1795 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
1797 // Compute an offset to the cookie.
1798 Address CookiePtr = NewPtr;
1799 CharUnits CookieOffset = CookieSize - SizeSize;
1800 if (!CookieOffset.isZero())
1801 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
1803 // Write the number of elements into the appropriate slot.
1804 Address NumElementsPtr =
1805 CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
1806 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
1808 // Handle the array cookie specially in ASan.
1809 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
1810 expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
1811 // The store to the CookiePtr does not need to be instrumented.
1812 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
1813 llvm::FunctionType *FTy =
1814 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
// NOTE(review): 'llvm::Constant *F =' is elided in this listing.
1816 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
1817 CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
1820 // Finally, compute a pointer to the actual data buffer by skipping
1821 // over the cookie completely.
1822 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
1825 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1827 CharUnits cookieSize) {
1828 // The element size is right-justified in the cookie.
1829 Address numElementsPtr = allocPtr;
1830 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
1831 if (!numElementsOffset.isZero())
1833 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
1835 unsigned AS = allocPtr.getAddressSpace();
1836 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1837 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
1838 return CGF.Builder.CreateLoad(numElementsPtr);
1839 // In asan mode emit a function call instead of a regular load and let the
1840 // run-time deal with it: if the shadow is properly poisoned return the
1841 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
1842 // We can't simply ignore this load using nosanitize metadata because
1843 // the metadata may be lost.
1844 llvm::FunctionType *FTy =
1845 llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
1847 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
1848 return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
1851 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1852 // ARM says that the cookie is always:
1853 // struct array_cookie {
1854 // std::size_t element_size; // element_size != 0
1855 // std::size_t element_count;
1857 // But the base ABI doesn't give anything an alignment greater than
1858 // 8, so we can dismiss this as typical ABI-author blindness to
1859 // actual language complexity and round up to the element alignment.
1860 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
1861 CGM.getContext().getTypeAlignInChars(elementType));
1864 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1866 llvm::Value *numElements,
1867 const CXXNewExpr *expr,
1868 QualType elementType) {
1869 assert(requiresArrayCookie(expr));
1871 // The cookie is always at the start of the buffer.
1872 Address cookie = newPtr;
1874 // The first element is the element size.
1875 cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
1876 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
1877 getContext().getTypeSizeInChars(elementType).getQuantity());
1878 CGF.Builder.CreateStore(elementSize, cookie);
1880 // The second element is the element count.
1881 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
1882 CGF.Builder.CreateStore(numElements, cookie);
1884 // Finally, compute a pointer to the actual data buffer by skipping
1885 // over the cookie completely.
1886 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
1887 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
1890 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1892 CharUnits cookieSize) {
1893 // The number of elements is at offset sizeof(size_t) relative to
1894 // the allocated pointer.
1895 Address numElementsPtr
1896 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
1898 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1899 return CGF.Builder.CreateLoad(numElementsPtr);
1902 /*********************** Static local initialization **************************/
1904 static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
1905 llvm::PointerType *GuardPtrTy) {
1906 // int __cxa_guard_acquire(__guard *guard_object);
1907 llvm::FunctionType *FTy =
1908 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
1909 GuardPtrTy, /*isVarArg=*/false);
1910 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
1911 llvm::AttributeSet::get(CGM.getLLVMContext(),
1912 llvm::AttributeSet::FunctionIndex,
1913 llvm::Attribute::NoUnwind));
1916 static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
1917 llvm::PointerType *GuardPtrTy) {
1918 // void __cxa_guard_release(__guard *guard_object);
1919 llvm::FunctionType *FTy =
1920 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1921 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
1922 llvm::AttributeSet::get(CGM.getLLVMContext(),
1923 llvm::AttributeSet::FunctionIndex,
1924 llvm::Attribute::NoUnwind));
1927 static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
1928 llvm::PointerType *GuardPtrTy) {
1929 // void __cxa_guard_abort(__guard *guard_object);
1930 llvm::FunctionType *FTy =
1931 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1932 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
1933 llvm::AttributeSet::get(CGM.getLLVMContext(),
1934 llvm::AttributeSet::FunctionIndex,
1935 llvm::Attribute::NoUnwind));
1939 struct CallGuardAbort final : EHScopeStack::Cleanup {
1940 llvm::GlobalVariable *Guard;
1941 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
1943 void Emit(CodeGenFunction &CGF, Flags flags) override {
1944 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
1950 /// The ARM code here follows the Itanium code closely enough that we
1951 /// just special-case it at particular places.
1952 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
1954 llvm::GlobalVariable *var,
1955 bool shouldPerformInit) {
1956 CGBuilderTy &Builder = CGF.Builder;
1958 // Inline variables that weren't instantiated from variable templates have
1959 // partially-ordered initialization within their translation unit.
1960 bool NonTemplateInline =
1962 !isTemplateInstantiation(D.getTemplateSpecializationKind());
1964 // We only need to use thread-safe statics for local non-TLS variables and
1965 // inline variables; other global initialization is always single-threaded
1966 // or (through lazy dynamic loading in multiple threads) unsequenced.
1967 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
1968 (D.isLocalVarDecl() || NonTemplateInline) &&
1971 // If we have a global variable with internal linkage and thread-safe statics
1972 // are disabled, we can just let the guard variable be of type i8.
1973 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
1975 llvm::IntegerType *guardTy;
1976 CharUnits guardAlignment;
1977 if (useInt8GuardVariable) {
1978 guardTy = CGF.Int8Ty;
1979 guardAlignment = CharUnits::One();
1981 // Guard variables are 64 bits in the generic ABI and size width on ARM
1982 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
1983 if (UseARMGuardVarABI) {
1984 guardTy = CGF.SizeTy;
1985 guardAlignment = CGF.getSizeAlign();
1987 guardTy = CGF.Int64Ty;
1988 guardAlignment = CharUnits::fromQuantity(
1989 CGM.getDataLayout().getABITypeAlignment(guardTy));
1992 llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
1994 // Create the guard variable if we don't already have it (as we
1995 // might if we're double-emitting this function body).
1996 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
1998 // Mangle the name for the guard.
1999 SmallString<256> guardName;
2001 llvm::raw_svector_ostream out(guardName);
2002 getMangleContext().mangleStaticGuardVariable(&D, out);
2005 // Create the guard variable with a zero-initializer.
2006 // Just absorb linkage and visibility from the guarded variable.
2007 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2008 false, var->getLinkage(),
2009 llvm::ConstantInt::get(guardTy, 0),
2011 guard->setVisibility(var->getVisibility());
2012 // If the variable is thread-local, so is its guard variable.
2013 guard->setThreadLocalMode(var->getThreadLocalMode());
2014 guard->setAlignment(guardAlignment.getQuantity());
2016 // The ABI says: "It is suggested that it be emitted in the same COMDAT
2017 // group as the associated data object." In practice, this doesn't work for
2018 // non-ELF object formats, so only do it for ELF.
2019 llvm::Comdat *C = var->getComdat();
2020 if (!D.isLocalVarDecl() && C &&
2021 CGM.getTarget().getTriple().isOSBinFormatELF()) {
2022 guard->setComdat(C);
2023 // An inline variable's guard function is run from the per-TU
2024 // initialization function, not via a dedicated global ctor function, so
2025 // we can't put it in a comdat.
2026 if (!NonTemplateInline)
2027 CGF.CurFn->setComdat(C);
2028 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2029 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2032 CGM.setStaticLocalDeclGuardAddress(&D, guard);
2035 Address guardAddr = Address(guard, guardAlignment);
2037 // Test whether the variable has completed initialization.
2039 // Itanium C++ ABI 3.3.2:
2040 // The following is pseudo-code showing how these functions can be used:
2041 // if (obj_guard.first_byte == 0) {
2042 // if ( __cxa_guard_acquire (&obj_guard) ) {
2044 // ... initialize the object ...;
2046 // __cxa_guard_abort (&obj_guard);
2049 // ... queue object destructor with __cxa_atexit() ...;
2050 // __cxa_guard_release (&obj_guard);
2054 // Load the first byte of the guard variable.
2055 llvm::LoadInst *LI =
2056 Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2059 // An implementation supporting thread-safety on multiprocessor
2060 // systems must also guarantee that references to the initialized
2061 // object do not occur before the load of the initialization flag.
2063 // In LLVM, we do this by marking the load Acquire.
2065 LI->setAtomic(llvm::AtomicOrdering::Acquire);
2067 // For ARM, we should only check the first bit, rather than the entire byte:
2069 // ARM C++ ABI 3.2.3.1:
2070 // To support the potential use of initialization guard variables
2071 // as semaphores that are the target of ARM SWP and LDREX/STREX
2072 // synchronizing instructions we define a static initialization
2073 // guard variable to be a 4-byte aligned, 4-byte word with the
2074 // following inline access protocol.
2075 // #define INITIALIZED 1
2076 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2077 // if (__cxa_guard_acquire(&obj_guard))
2081 // and similarly for ARM64:
2083 // ARM64 C++ ABI 3.2.2:
2084 // This ABI instead only specifies the value bit 0 of the static guard
2085 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2086 // variable is not initialized and 1 when it is.
2088 (UseARMGuardVarABI && !useInt8GuardVariable)
2089 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2091 llvm::Value *isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
2093 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2094 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2096 // Check if the first byte of the guard variable is zero.
2097 Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);
2099 CGF.EmitBlock(InitCheckBlock);
2101 // Variables used when coping with thread-safe statics and exceptions.
2103 // Call __cxa_guard_acquire.
2105 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2107 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2109 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2110 InitBlock, EndBlock);
2112 // Call __cxa_guard_abort along the exceptional edge.
2113 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2115 CGF.EmitBlock(InitBlock);
2118 // Emit the initializer and add a global destructor if appropriate.
2119 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2122 // Pop the guard-abort cleanup if we pushed one.
2123 CGF.PopCleanupBlock();
2125 // Call __cxa_guard_release. This cannot throw.
2126 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2127 guardAddr.getPointer());
2129 Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
2132 CGF.EmitBlock(EndBlock);
2135 /// Register a global destructor using __cxa_atexit.
2136 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2137 llvm::Constant *dtor,
2138 llvm::Constant *addr,
2140 const char *Name = "__cxa_atexit";
2142 const llvm::Triple &T = CGF.getTarget().getTriple();
2143 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2146 // We're assuming that the destructor function is something we can
2147 // reasonably call with the default CC. Go ahead and cast it to the
2149 llvm::Type *dtorTy =
2150 llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2152 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2153 llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
2154 llvm::FunctionType *atexitTy =
2155 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2157 // Fetch the actual function.
2158 llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2159 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
2160 fn->setDoesNotThrow();
2162 // Create a variable that binds the atexit to this shared object.
2163 llvm::Constant *handle =
2164 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2166 llvm::Value *args[] = {
2167 llvm::ConstantExpr::getBitCast(dtor, dtorTy),
2168 llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
2171 CGF.EmitNounwindRuntimeCall(atexit, args);
2174 /// Register a global destructor as best as we know how.
2175 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
2177 llvm::Constant *dtor,
2178 llvm::Constant *addr) {
2179 // Use __cxa_atexit if available.
2180 if (CGM.getCodeGenOpts().CXAAtExit)
2181 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2184 CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
2186 // In Apple kexts, we want to add a global destructor entry.
2187 // FIXME: shouldn't this be guarded by some variable?
2188 if (CGM.getLangOpts().AppleKext) {
2189 // Generate a global destructor entry.
2190 return CGM.AddCXXDtorEntry(dtor, addr);
2193 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2196 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2197 CodeGen::CodeGenModule &CGM) {
2198 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2199 // Darwin prefers to have references to thread local variables to go through
2200 // the thread wrapper instead of directly referencing the backing variable.
2201 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2202 CGM.getTarget().getTriple().isOSDarwin();
2205 /// Get the appropriate linkage for the wrapper function. This is essentially
2206 /// the weak form of the variable's linkage; every translation unit which needs
2207 /// the wrapper emits a copy, and we want the linker to merge them.
2208 static llvm::GlobalValue::LinkageTypes
2209 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2210 llvm::GlobalValue::LinkageTypes VarLinkage =
2211 CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);
2213 // For internal linkage variables, we don't need an external or weak wrapper.
2214 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2217 // If the thread wrapper is replaceable, give it appropriate linkage.
2218 if (isThreadWrapperReplaceable(VD, CGM))
2219 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2220 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2222 return llvm::GlobalValue::WeakODRLinkage;
2226 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2228 // Mangle the name for the thread_local wrapper function.
2229 SmallString<256> WrapperName;
2231 llvm::raw_svector_ostream Out(WrapperName);
2232 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2235 // FIXME: If VD is a definition, we should regenerate the function attributes
2236 // before returning.
2237 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2238 return cast<llvm::Function>(V);
2240 QualType RetQT = VD->getType();
2241 if (RetQT->isReferenceType())
2242 RetQT = RetQT.getNonReferenceType();
2244 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2245 getContext().getPointerType(RetQT), FunctionArgList());
2247 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2248 llvm::Function *Wrapper =
2249 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2250 WrapperName.str(), &CGM.getModule());
2252 CGM.SetLLVMFunctionAttributes(nullptr, FI, Wrapper);
2254 if (VD->hasDefinition())
2255 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2257 // Always resolve references to the wrapper at link time.
2258 if (!Wrapper->hasLocalLinkage() && !(isThreadWrapperReplaceable(VD, CGM) &&
2259 !llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) &&
2260 !llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage())))
2261 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2263 if (isThreadWrapperReplaceable(VD, CGM)) {
2264 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2265 Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2270 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2271 CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2272 ArrayRef<llvm::Function *> CXXThreadLocalInits,
2273 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2274 llvm::Function *InitFunc = nullptr;
2276 // Separate initializers into those with ordered (or partially-ordered)
2277 // initialization and those with unordered initialization.
2278 llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2279 llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2280 for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2281 if (isTemplateInstantiation(
2282 CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2283 UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2284 CXXThreadLocalInits[I];
2286 OrderedInits.push_back(CXXThreadLocalInits[I]);
2289 if (!OrderedInits.empty()) {
2290 // Generate a guarded initialization function.
2291 llvm::FunctionType *FTy =
2292 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2293 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2294 InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
2297 llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2298 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2299 llvm::GlobalVariable::InternalLinkage,
2300 llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2301 Guard->setThreadLocal(true);
2303 CharUnits GuardAlign = CharUnits::One();
2304 Guard->setAlignment(GuardAlign.getQuantity());
2306 CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, OrderedInits,
2307 Address(Guard, GuardAlign));
2308 // On Darwin platforms, use CXX_FAST_TLS calling convention.
2309 if (CGM.getTarget().getTriple().isOSDarwin()) {
2310 InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2311 InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2315 // Emit thread wrappers.
2316 for (const VarDecl *VD : CXXThreadLocals) {
2317 llvm::GlobalVariable *Var =
2318 cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2319 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
2321 // Some targets require that all access to thread local variables go through
2322 // the thread wrapper. This means that we cannot attempt to create a thread
2323 // wrapper or a thread helper.
2324 if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition()) {
2325 Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2329 // Mangle the name for the thread_local initialization function.
2330 SmallString<256> InitFnName;
2332 llvm::raw_svector_ostream Out(InitFnName);
2333 getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2336 // If we have a definition for the variable, emit the initialization
2337 // function as an alias to the global Init function (if any). Otherwise,
2338 // produce a declaration of the initialization function.
2339 llvm::GlobalValue *Init = nullptr;
2340 bool InitIsInitFunc = false;
2341 if (VD->hasDefinition()) {
2342 InitIsInitFunc = true;
2343 llvm::Function *InitFuncToUse = InitFunc;
2344 if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2345 InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2347 Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2350 // Emit a weak global function referring to the initialization function.
2351 // This function will not exist if the TU defining the thread_local
2352 // variable in question does not need any dynamic initialization for
2353 // its thread_local variables.
2354 llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2355 Init = llvm::Function::Create(FnTy,
2356 llvm::GlobalVariable::ExternalWeakLinkage,
2357 InitFnName.str(), &CGM.getModule());
2358 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2359 CGM.SetLLVMFunctionAttributes(nullptr, FI, cast<llvm::Function>(Init));
2363 Init->setVisibility(Var->getVisibility());
2365 llvm::LLVMContext &Context = CGM.getModule().getContext();
2366 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2367 CGBuilderTy Builder(CGM, Entry);
2368 if (InitIsInitFunc) {
2370 llvm::CallInst *CallVal = Builder.CreateCall(Init);
2371 if (isThreadWrapperReplaceable(VD, CGM))
2372 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2375 // Don't know whether we have an init function. Call it if it exists.
2376 llvm::Value *Have = Builder.CreateIsNotNull(Init);
2377 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2378 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2379 Builder.CreateCondBr(Have, InitBB, ExitBB);
2381 Builder.SetInsertPoint(InitBB);
2382 Builder.CreateCall(Init);
2383 Builder.CreateBr(ExitBB);
2385 Builder.SetInsertPoint(ExitBB);
2388 // For a reference, the result of the wrapper function is a pointer to
2389 // the referenced object.
2390 llvm::Value *Val = Var;
2391 if (VD->getType()->isReferenceType()) {
2392 CharUnits Align = CGM.getContext().getDeclAlign(VD);
2393 Val = Builder.CreateAlignedLoad(Val, Align);
2395 if (Val->getType() != Wrapper->getReturnType())
2396 Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2397 Val, Wrapper->getReturnType(), "");
2398 Builder.CreateRet(Val);
2402 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2404 QualType LValType) {
2405 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2406 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2408 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
2409 CallVal->setCallingConv(Wrapper->getCallingConv());
2412 if (VD->getType()->isReferenceType())
2413 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2415 LV = CGF.MakeAddrLValue(CallVal, LValType,
2416 CGF.getContext().getDeclAlign(VD));
2417 // FIXME: need setObjCGCLValueClass?
2421 /// Return whether the given global decl needs a VTT parameter, which it does
2422 /// if it's a base constructor or destructor with virtual bases.
2423 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2424 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2426 // We don't have any virtual bases, just return early.
2427 if (!MD->getParent()->getNumVBases())
2430 // Check if we have a base constructor.
2431 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2434 // Check if we have a base destructor.
2435 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2442 class ItaniumRTTIBuilder {
2443 CodeGenModule &CGM; // Per-module state.
2444 llvm::LLVMContext &VMContext;
2445 const ItaniumCXXABI &CXXABI; // Per-module state.
2447 /// Fields - The fields of the RTTI descriptor currently being built.
2448 SmallVector<llvm::Constant *, 16> Fields;
2450 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
2451 llvm::GlobalVariable *
2452 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
2454 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
2455 /// descriptor of the given type.
2456 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
2458 /// BuildVTablePointer - Build the vtable pointer for the given type.
2459 void BuildVTablePointer(const Type *Ty);
2461 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2462 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
2463 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
2465 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2466 /// classes with bases that do not satisfy the abi::__si_class_type_info
2467 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
2468 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
2470 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
2471 /// for pointer types.
2472 void BuildPointerTypeInfo(QualType PointeeTy);
2474 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
2475 /// type_info for an object type.
2476 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
2478 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
2479 /// struct, used for member pointer types.
2480 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
2483 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
2484 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
2486 // Pointer type info flags.
2488 /// PTI_Const - Type has const qualifier.
2491 /// PTI_Volatile - Type has volatile qualifier.
2494 /// PTI_Restrict - Type has restrict qualifier.
2497 /// PTI_Incomplete - Type is incomplete.
2498 PTI_Incomplete = 0x8,
2500 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
2501 /// (in pointer to member).
2502 PTI_ContainingClassIncomplete = 0x10,
2504 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
2505 //PTI_TransactionSafe = 0x20,
2507 /// PTI_Noexcept - Pointee is noexcept function (C++1z).
2508 PTI_Noexcept = 0x40,
2511 // VMI type info flags.
2513 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
2514 VMI_NonDiamondRepeat = 0x1,
2516 /// VMI_DiamondShaped - Class is diamond shaped.
2517 VMI_DiamondShaped = 0x2
2520 // Base class type info flags.
2522 /// BCTI_Virtual - Base class is virtual.
2525 /// BCTI_Public - Base class is public.
2529 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
2531 /// \param Force - true to force the creation of this RTTI value
2532 /// \param DLLExport - true to mark the RTTI value as DLLExport
2533 llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false,
2534 bool DLLExport = false);
2538 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
2539 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
2540 SmallString<256> Name;
2541 llvm::raw_svector_ostream Out(Name);
2542 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
2544 // We know that the mangled name of the type starts at index 4 of the
2545 // mangled name of the typename, so we can just index into it in order to
2546 // get the mangled name of the type.
2547 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
2550 llvm::GlobalVariable *GV =
2551 CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
2553 GV->setInitializer(Init);
2559 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
2560 // Mangle the RTTI name.
2561 SmallString<256> Name;
2562 llvm::raw_svector_ostream Out(Name);
2563 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2565 // Look for an existing global.
2566 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
2569 // Create a new global variable.
2570 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
2572 llvm::GlobalValue::ExternalLinkage, nullptr,
2574 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2575 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2576 if (RD->hasAttr<DLLImportAttr>())
2577 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
2581 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
2584 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
2585 /// info for that type is defined in the standard library.
2586 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
2587 // Itanium C++ ABI 2.9.2:
2588 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
2589 // the run-time support library. Specifically, the run-time support
2590 // library should contain type_info objects for the types X, X* and
2591 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
2592 // unsigned char, signed char, short, unsigned short, int, unsigned int,
2593 // long, unsigned long, long long, unsigned long long, float, double,
2594 // long double, char16_t, char32_t, and the IEEE 754r decimal and
2595 // half-precision floating point types.
2597 // GCC also emits RTTI for __int128.
2598 // FIXME: We do not emit RTTI information for decimal types here.
2600 // Types added here must also be added to EmitFundamentalRTTIDescriptors.
2601 switch (Ty->getKind()) {
2602 case BuiltinType::Void:
2603 case BuiltinType::NullPtr:
2604 case BuiltinType::Bool:
2605 case BuiltinType::WChar_S:
2606 case BuiltinType::WChar_U:
2607 case BuiltinType::Char_U:
2608 case BuiltinType::Char_S:
2609 case BuiltinType::UChar:
2610 case BuiltinType::SChar:
2611 case BuiltinType::Short:
2612 case BuiltinType::UShort:
2613 case BuiltinType::Int:
2614 case BuiltinType::UInt:
2615 case BuiltinType::Long:
2616 case BuiltinType::ULong:
2617 case BuiltinType::LongLong:
2618 case BuiltinType::ULongLong:
2619 case BuiltinType::Half:
2620 case BuiltinType::Float:
2621 case BuiltinType::Double:
2622 case BuiltinType::LongDouble:
2623 case BuiltinType::Float128:
2624 case BuiltinType::Char16:
2625 case BuiltinType::Char32:
2626 case BuiltinType::Int128:
2627 case BuiltinType::UInt128:
2630 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2631 case BuiltinType::Id:
2632 #include "clang/Basic/OpenCLImageTypes.def"
2633 case BuiltinType::OCLSampler:
2634 case BuiltinType::OCLEvent:
2635 case BuiltinType::OCLClkEvent:
2636 case BuiltinType::OCLQueue:
2637 case BuiltinType::OCLNDRange:
2638 case BuiltinType::OCLReserveID:
2641 case BuiltinType::Dependent:
2642 #define BUILTIN_TYPE(Id, SingletonId)
2643 #define PLACEHOLDER_TYPE(Id, SingletonId) \
2644 case BuiltinType::Id:
2645 #include "clang/AST/BuiltinTypes.def"
2646 llvm_unreachable("asking for RRTI for a placeholder type!");
2648 case BuiltinType::ObjCId:
2649 case BuiltinType::ObjCClass:
2650 case BuiltinType::ObjCSel:
2651 llvm_unreachable("FIXME: Objective-C types are unsupported!");
2654 llvm_unreachable("Invalid BuiltinType Kind!");
2657 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
2658 QualType PointeeTy = PointerTy->getPointeeType();
2659 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
2663 // Check the qualifiers.
2664 Qualifiers Quals = PointeeTy.getQualifiers();
2665 Quals.removeConst();
2670 return TypeInfoIsInStandardLibrary(BuiltinTy);
2673 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
2674 /// information for the given type exists in the standard library.
2675 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
2676 // Type info for builtin types is defined in the standard library.
2677 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
2678 return TypeInfoIsInStandardLibrary(BuiltinTy);
2680 // Type info for some pointer types to builtin types is defined in the
2681 // standard library.
2682 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2683 return TypeInfoIsInStandardLibrary(PointerTy);
2688 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
2689 /// the given type exists somewhere else, and that we should not emit the type
2690 /// information in this translation unit. Assumes that it is not a
2691 /// standard-library type.
2692 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
2694 ASTContext &Context = CGM.getContext();
2696 // If RTTI is disabled, assume it might be disabled in the
2697 // translation unit that defines any potential key function, too.
2698 if (!Context.getLangOpts().RTTI) return false;
2700 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2701 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2702 if (!RD->hasDefinition())
2705 if (!RD->isDynamicClass())
2708 // FIXME: this may need to be reconsidered if the key function
2710 // N.B. We must always emit the RTTI data ourselves if there exists a key
2712 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
2713 if (CGM.getVTables().isVTableExternal(RD))
2714 return IsDLLImport ? false : true;
2723 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
2724 static bool IsIncompleteClassType(const RecordType *RecordTy) {
2725 return !RecordTy->getDecl()->isCompleteDefinition();
2728 /// ContainsIncompleteClassType - Returns whether the given type contains an
2729 /// incomplete class type. This is true if
2731 /// * The given type is an incomplete class type.
2732 /// * The given type is a pointer type whose pointee type contains an
2733 /// incomplete class type.
2734 /// * The given type is a member pointer type whose class is an incomplete
2736 /// * The given type is a member pointer type whoise pointee type contains an
2737 /// incomplete class type.
2738 /// is an indirect or direct pointer to an incomplete class type.
2739 static bool ContainsIncompleteClassType(QualType Ty) {
2740 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2741 if (IsIncompleteClassType(RecordTy))
2745 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2746 return ContainsIncompleteClassType(PointerTy->getPointeeType());
2748 if (const MemberPointerType *MemberPointerTy =
2749 dyn_cast<MemberPointerType>(Ty)) {
2750 // Check if the class type is incomplete.
2751 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
2752 if (IsIncompleteClassType(ClassType))
2755 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
2761 // CanUseSingleInheritance - Return whether the given record decl has a "single,
2762 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
2763 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
2764 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
2765 // Check the number of bases.
2766 if (RD->getNumBases() != 1)
2770 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
2772 // Check that the base is not virtual.
2773 if (Base->isVirtual())
2776 // Check that the base is public.
2777 if (Base->getAccessSpecifier() != AS_public)
2780 // Check that the class is dynamic iff the base is.
2781 const CXXRecordDecl *BaseDecl =
2782 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
2783 if (!BaseDecl->isEmpty() &&
2784 BaseDecl->isDynamicClass() != RD->isDynamicClass())
2790 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
2791 // abi::__class_type_info.
2792 static const char * const ClassTypeInfo =
2793 "_ZTVN10__cxxabiv117__class_type_infoE";
2794 // abi::__si_class_type_info.
2795 static const char * const SIClassTypeInfo =
2796 "_ZTVN10__cxxabiv120__si_class_type_infoE";
2797 // abi::__vmi_class_type_info.
2798 static const char * const VMIClassTypeInfo =
2799 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
2801 const char *VTableName = nullptr;
2803 switch (Ty->getTypeClass()) {
2804 #define TYPE(Class, Base)
2805 #define ABSTRACT_TYPE(Class, Base)
2806 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
2807 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
2808 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2809 #include "clang/AST/TypeNodes.def"
2810 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
2812 case Type::LValueReference:
2813 case Type::RValueReference:
2814 llvm_unreachable("References shouldn't get here");
2817 llvm_unreachable("Undeduced auto type shouldn't get here");
2820 llvm_unreachable("Pipe types shouldn't get here");
2823 // GCC treats vector and complex types as fundamental types.
2825 case Type::ExtVector:
2828 // FIXME: GCC treats block pointers as fundamental types?!
2829 case Type::BlockPointer:
2830 // abi::__fundamental_type_info.
2831 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
2834 case Type::ConstantArray:
2835 case Type::IncompleteArray:
2836 case Type::VariableArray:
2837 // abi::__array_type_info.
2838 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
2841 case Type::FunctionNoProto:
2842 case Type::FunctionProto:
2843 // abi::__function_type_info.
2844 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
2848 // abi::__enum_type_info.
2849 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
2852 case Type::Record: {
2853 const CXXRecordDecl *RD =
2854 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
2856 if (!RD->hasDefinition() || !RD->getNumBases()) {
2857 VTableName = ClassTypeInfo;
2858 } else if (CanUseSingleInheritance(RD)) {
2859 VTableName = SIClassTypeInfo;
2861 VTableName = VMIClassTypeInfo;
2867 case Type::ObjCObject:
2868 // Ignore protocol qualifiers.
2869 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
2871 // Handle id and Class.
2872 if (isa<BuiltinType>(Ty)) {
2873 VTableName = ClassTypeInfo;
2877 assert(isa<ObjCInterfaceType>(Ty));
2880 case Type::ObjCInterface:
2881 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
2882 VTableName = SIClassTypeInfo;
2884 VTableName = ClassTypeInfo;
2888 case Type::ObjCObjectPointer:
2890 // abi::__pointer_type_info.
2891 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
2894 case Type::MemberPointer:
2895 // abi::__pointer_to_member_type_info.
2896 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
2900 llvm::Constant *VTable =
2901 CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
2903 llvm::Type *PtrDiffTy =
2904 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
2906 // The vtable address point is 2.
2907 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
2909 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
2910 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
2912 Fields.push_back(VTable);
2915 /// \brief Return the linkage that the type info and type info name constants
2916 /// should have for the given type.
2917 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
2919 // Itanium C++ ABI 2.9.5p7:
2920 // In addition, it and all of the intermediate abi::__pointer_type_info
2921 // structs in the chain down to the abi::__class_type_info for the
2922 // incomplete class type must be prevented from resolving to the
2923 // corresponding type_info structs for the complete class type, possibly
2924 // by making them local static objects. Finally, a dummy class RTTI is
2925 // generated for the incomplete type that will not resolve to the final
2926 // complete class RTTI (because the latter need not exist), possibly by
2927 // making it a local static object.
2928 if (ContainsIncompleteClassType(Ty))
2929 return llvm::GlobalValue::InternalLinkage;
2931 switch (Ty->getLinkage()) {
2933 case InternalLinkage:
2934 case UniqueExternalLinkage:
2935 return llvm::GlobalValue::InternalLinkage;
2937 case VisibleNoLinkage:
2938 case ExternalLinkage:
2939 // RTTI is not enabled, which means that this type info struct is going
2940 // to be used for exception handling. Give it linkonce_odr linkage.
2941 if (!CGM.getLangOpts().RTTI)
2942 return llvm::GlobalValue::LinkOnceODRLinkage;
2944 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
2945 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
2946 if (RD->hasAttr<WeakAttr>())
2947 return llvm::GlobalValue::WeakODRLinkage;
2948 if (CGM.getTriple().isWindowsItaniumEnvironment())
2949 if (RD->hasAttr<DLLImportAttr>())
2950 return llvm::GlobalValue::ExternalLinkage;
2951 if (RD->isDynamicClass()) {
2952 llvm::GlobalValue::LinkageTypes LT = CGM.getVTableLinkage(RD);
2953 // MinGW won't export the RTTI information when there is a key function.
2954 // Make sure we emit our own copy instead of attempting to dllimport it.
2955 if (RD->hasAttr<DLLImportAttr>() &&
2956 llvm::GlobalValue::isAvailableExternallyLinkage(LT))
2957 LT = llvm::GlobalValue::LinkOnceODRLinkage;
2962 return llvm::GlobalValue::LinkOnceODRLinkage;
2965 llvm_unreachable("Invalid linkage!");
2968 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force,
2970 // We want to operate on the canonical type.
2971 Ty = Ty.getCanonicalType();
2973 // Check if we've already emitted an RTTI descriptor for this type.
2974 SmallString<256> Name;
2975 llvm::raw_svector_ostream Out(Name);
2976 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2978 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
2979 if (OldGV && !OldGV->isDeclaration()) {
2980 assert(!OldGV->hasAvailableExternallyLinkage() &&
2981 "available_externally typeinfos not yet implemented");
2983 return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
2986 // Check if there is already an external RTTI descriptor for this type.
2987 bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
2988 if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
2989 return GetAddrOfExternalRTTIDescriptor(Ty);
2991 // Emit the standard library with external linkage.
2992 llvm::GlobalVariable::LinkageTypes Linkage;
2994 Linkage = llvm::GlobalValue::ExternalLinkage;
2996 Linkage = getTypeInfoLinkage(CGM, Ty);
2998 // Add the vtable pointer.
2999 BuildVTablePointer(cast<Type>(Ty));
3002 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3003 llvm::Constant *TypeNameField;
3005 // If we're supposed to demote the visibility, be sure to set a flag
3006 // to use a string comparison for type_info comparisons.
3007 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3008 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3009 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3010 // The flag is the sign bit, which on ARM64 is defined to be clear
3011 // for global pointers. This is very ARM64-specific.
3012 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3013 llvm::Constant *flag =
3014 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3015 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3017 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3019 TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3021 Fields.push_back(TypeNameField);
3023 switch (Ty->getTypeClass()) {
3024 #define TYPE(Class, Base)
3025 #define ABSTRACT_TYPE(Class, Base)
3026 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3027 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3028 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
3029 #include "clang/AST/TypeNodes.def"
3030 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3032 // GCC treats vector types as fundamental types.
3035 case Type::ExtVector:
3037 case Type::BlockPointer:
3038 // Itanium C++ ABI 2.9.5p4:
3039 // abi::__fundamental_type_info adds no data members to std::type_info.
3042 case Type::LValueReference:
3043 case Type::RValueReference:
3044 llvm_unreachable("References shouldn't get here");
3047 llvm_unreachable("Undeduced auto type shouldn't get here");
3050 llvm_unreachable("Pipe type shouldn't get here");
3052 case Type::ConstantArray:
3053 case Type::IncompleteArray:
3054 case Type::VariableArray:
3055 // Itanium C++ ABI 2.9.5p5:
3056 // abi::__array_type_info adds no data members to std::type_info.
3059 case Type::FunctionNoProto:
3060 case Type::FunctionProto:
3061 // Itanium C++ ABI 2.9.5p5:
3062 // abi::__function_type_info adds no data members to std::type_info.
3066 // Itanium C++ ABI 2.9.5p5:
3067 // abi::__enum_type_info adds no data members to std::type_info.
3070 case Type::Record: {
3071 const CXXRecordDecl *RD =
3072 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3073 if (!RD->hasDefinition() || !RD->getNumBases()) {
3074 // We don't need to emit any fields.
3078 if (CanUseSingleInheritance(RD))
3079 BuildSIClassTypeInfo(RD);
3081 BuildVMIClassTypeInfo(RD);
3086 case Type::ObjCObject:
3087 case Type::ObjCInterface:
3088 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3091 case Type::ObjCObjectPointer:
3092 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3096 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3099 case Type::MemberPointer:
3100 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3104 // No fields, at least for the moment.
3108 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3110 llvm::Module &M = CGM.getModule();
3111 llvm::GlobalVariable *GV =
3112 new llvm::GlobalVariable(M, Init->getType(),
3113 /*Constant=*/true, Linkage, Init, Name);
3115 // If there's already an old global variable, replace it with the new one.
3117 GV->takeName(OldGV);
3118 llvm::Constant *NewPtr =
3119 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3120 OldGV->replaceAllUsesWith(NewPtr);
3121 OldGV->eraseFromParent();
3124 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3125 GV->setComdat(M.getOrInsertComdat(GV->getName()));
3127 // The Itanium ABI specifies that type_info objects must be globally
3128 // unique, with one exception: if the type is an incomplete class
3129 // type or a (possibly indirect) pointer to one. That exception
3130 // affects the general case of comparing type_info objects produced
3131 // by the typeid operator, which is why the comparison operators on
3132 // std::type_info generally use the type_info name pointers instead
3133 // of the object addresses. However, the language's built-in uses
3134 // of RTTI generally require class types to be complete, even when
3135 // manipulating pointers to those class types. This allows the
3136 // implementation of dynamic_cast to rely on address equality tests,
3137 // which is much faster.
3139 // All of this is to say that it's important that both the type_info
3140 // object and the type_info name be uniqued when weakly emitted.
3142 // Give the type_info object and name the formal visibility of the
3144 llvm::GlobalValue::VisibilityTypes llvmVisibility;
3145 if (llvm::GlobalValue::isLocalLinkage(Linkage))
3146 // If the linkage is local, only default visibility makes sense.
3147 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3148 else if (RTTIUniqueness == ItaniumCXXABI::RUK_NonUniqueHidden)
3149 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3151 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3153 TypeName->setVisibility(llvmVisibility);
3154 GV->setVisibility(llvmVisibility);
3156 if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3157 auto RD = Ty->getAsCXXRecordDecl();
3158 if (DLLExport || (RD && RD->hasAttr<DLLExportAttr>())) {
3159 TypeName->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
3160 GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
3161 } else if (CGM.getLangOpts().RTTI && RD && RD->hasAttr<DLLImportAttr>()) {
3162 TypeName->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
3163 GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
3165 // Because the typename and the typeinfo are DLL import, convert them to
3166 // declarations rather than definitions. The initializers still need to
3167 // be constructed to calculate the type for the declarations.
3168 TypeName->setInitializer(nullptr);
3169 GV->setInitializer(nullptr);
3173 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3176 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3177 /// for the given Objective-C object type.
3178 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3180 const Type *T = OT->getBaseType().getTypePtr();
3181 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3183 // The builtin types are abi::__class_type_infos and don't require
3185 if (isa<BuiltinType>(T)) return;
3187 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3188 ObjCInterfaceDecl *Super = Class->getSuperClass();
3190 // Root classes are also __class_type_info.
3193 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3195 // Everything else is single inheritance.
3196 llvm::Constant *BaseTypeInfo =
3197 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3198 Fields.push_back(BaseTypeInfo);
3201 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3202 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
3203 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3204 // Itanium C++ ABI 2.9.5p6b:
3205 // It adds to abi::__class_type_info a single member pointing to the
3206 // type_info structure for the base type,
3207 llvm::Constant *BaseTypeInfo =
3208 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3209 Fields.push_back(BaseTypeInfo);
3213 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3214 /// a class hierarchy.
3216 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3217 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3221 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3222 /// abi::__vmi_class_type_info.
3224 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3229 const CXXRecordDecl *BaseDecl =
3230 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
3232 if (Base->isVirtual()) {
3233 // Mark the virtual base as seen.
3234 if (!Bases.VirtualBases.insert(BaseDecl).second) {
3235 // If this virtual base has been seen before, then the class is diamond
3237 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3239 if (Bases.NonVirtualBases.count(BaseDecl))
3240 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3243 // Mark the non-virtual base as seen.
3244 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3245 // If this non-virtual base has been seen before, then the class has non-
3246 // diamond shaped repeated inheritance.
3247 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3249 if (Bases.VirtualBases.count(BaseDecl))
3250 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3255 for (const auto &I : BaseDecl->bases())
3256 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3261 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3266 for (const auto &I : RD->bases())
3267 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3272 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3273 /// classes with bases that do not satisfy the abi::__si_class_type_info
3274 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
3275 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3276 llvm::Type *UnsignedIntLTy =
3277 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3279 // Itanium C++ ABI 2.9.5p6c:
3280 // __flags is a word with flags describing details about the class
3281 // structure, which may be referenced by using the __flags_masks
3282 // enumeration. These flags refer to both direct and indirect bases.
3283 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3284 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3286 // Itanium C++ ABI 2.9.5p6c:
3287 // __base_count is a word with the number of direct proper base class
3288 // descriptions that follow.
3289 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3291 if (!RD->getNumBases())
3294 // Now add the base class descriptions.
3296 // Itanium C++ ABI 2.9.5p6c:
3297 // __base_info[] is an array of base class descriptions -- one for every
3298 // direct proper base. Each description is of the type:
3300 // struct abi::__base_class_type_info {
3302 // const __class_type_info *__base_type;
3303 // long __offset_flags;
3305 // enum __offset_flags_masks {
3306 // __virtual_mask = 0x1,
3307 // __public_mask = 0x2,
3308 // __offset_shift = 8
3312 // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
3313 // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
3315 // FIXME: Consider updating libc++abi to match, and extend this logic to all
3317 QualType OffsetFlagsTy = CGM.getContext().LongTy;
3318 const TargetInfo &TI = CGM.getContext().getTargetInfo();
3319 if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
3320 OffsetFlagsTy = CGM.getContext().LongLongTy;
3321 llvm::Type *OffsetFlagsLTy =
3322 CGM.getTypes().ConvertType(OffsetFlagsTy);
3324 for (const auto &Base : RD->bases()) {
3325 // The __base_type member points to the RTTI for the base type.
3326 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3328 const CXXRecordDecl *BaseDecl =
3329 cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
3331 int64_t OffsetFlags = 0;
3333 // All but the lower 8 bits of __offset_flags are a signed offset.
3334 // For a non-virtual base, this is the offset in the object of the base
3335 // subobject. For a virtual base, this is the offset in the virtual table of
3336 // the virtual base offset for the virtual base referenced (negative).
3338 if (Base.isVirtual())
3340 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3342 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3343 Offset = Layout.getBaseClassOffset(BaseDecl);
3346 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3348 // The low-order byte of __offset_flags contains flags, as given by the
3349 // masks from the enumeration __offset_flags_masks.
3350 if (Base.isVirtual())
3351 OffsetFlags |= BCTI_Virtual;
3352 if (Base.getAccessSpecifier() == AS_public)
3353 OffsetFlags |= BCTI_Public;
3355 Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
3359 /// Compute the flags for a __pbase_type_info, and remove the corresponding
3360 /// pieces from \p Type.
3361 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
3364 if (Type.isConstQualified())
3365 Flags |= ItaniumRTTIBuilder::PTI_Const;
3366 if (Type.isVolatileQualified())
3367 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3368 if (Type.isRestrictQualified())
3369 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
3370 Type = Type.getUnqualifiedType();
3372 // Itanium C++ ABI 2.9.5p7:
3373 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
3374 // incomplete class type, the incomplete target type flag is set.
3375 if (ContainsIncompleteClassType(Type))
3376 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
3378 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
3379 if (Proto->isNothrow(Ctx)) {
3380 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
3381 Type = Ctx.getFunctionType(
3382 Proto->getReturnType(), Proto->getParamTypes(),
3383 Proto->getExtProtoInfo().withExceptionSpec(EST_None));
3390 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
3391 /// used for pointer types.
3392 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
3393 // Itanium C++ ABI 2.9.5p7:
3394 // __flags is a flag word describing the cv-qualification and other
3395 // attributes of the type pointed to
3396 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3398 llvm::Type *UnsignedIntLTy =
3399 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3400 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3402 // Itanium C++ ABI 2.9.5p7:
3403 // __pointee is a pointer to the std::type_info derivation for the
3404 // unqualified type being pointed to.
3405 llvm::Constant *PointeeTypeInfo =
3406 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3407 Fields.push_back(PointeeTypeInfo);
3410 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3411 /// struct, used for member pointer types.
3413 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
3414 QualType PointeeTy = Ty->getPointeeType();
3416 // Itanium C++ ABI 2.9.5p7:
3417 // __flags is a flag word describing the cv-qualification and other
3418 // attributes of the type pointed to.
3419 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3421 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
3422 if (IsIncompleteClassType(ClassType))
3423 Flags |= PTI_ContainingClassIncomplete;
3425 llvm::Type *UnsignedIntLTy =
3426 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3427 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3429 // Itanium C++ ABI 2.9.5p7:
3430 // __pointee is a pointer to the std::type_info derivation for the
3431 // unqualified type being pointed to.
3432 llvm::Constant *PointeeTypeInfo =
3433 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3434 Fields.push_back(PointeeTypeInfo);
3436 // Itanium C++ ABI 2.9.5p9:
3437 // __context is a pointer to an abi::__class_type_info corresponding to the
3438 // class type containing the member pointed to
3439 // (e.g., the "A" in "int A::*").
3441 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
3444 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
3445 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
3448 void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type,
3450 QualType PointerType = getContext().getPointerType(Type);
3451 QualType PointerTypeConst = getContext().getPointerType(Type.withConst());
3452 ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, /*Force=*/true, DLLExport);
3453 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, /*Force=*/true,
3455 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, /*Force=*/true,
3459 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(bool DLLExport) {
3460 // Types added here must also be added to TypeInfoIsInStandardLibrary.
3461 QualType FundamentalTypes[] = {
3462 getContext().VoidTy, getContext().NullPtrTy,
3463 getContext().BoolTy, getContext().WCharTy,
3464 getContext().CharTy, getContext().UnsignedCharTy,
3465 getContext().SignedCharTy, getContext().ShortTy,
3466 getContext().UnsignedShortTy, getContext().IntTy,
3467 getContext().UnsignedIntTy, getContext().LongTy,
3468 getContext().UnsignedLongTy, getContext().LongLongTy,
3469 getContext().UnsignedLongLongTy, getContext().Int128Ty,
3470 getContext().UnsignedInt128Ty, getContext().HalfTy,
3471 getContext().FloatTy, getContext().DoubleTy,
3472 getContext().LongDoubleTy, getContext().Float128Ty,
3473 getContext().Char16Ty, getContext().Char32Ty
3475 for (const QualType &FundamentalType : FundamentalTypes)
3476 EmitFundamentalRTTIDescriptor(FundamentalType, DLLExport);
3479 /// What sort of uniqueness rules should we use for the RTTI for the
3481 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
3482 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
3483 if (shouldRTTIBeUnique())
3486 // It's only necessary for linkonce_odr or weak_odr linkage.
3487 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
3488 Linkage != llvm::GlobalValue::WeakODRLinkage)
3491 // It's only necessary with default visibility.
3492 if (CanTy->getVisibility() != DefaultVisibility)
3495 // If we're not required to publish this symbol, hide it.
3496 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3497 return RUK_NonUniqueHidden;
3499 // If we're required to publish this symbol, as we might be under an
3500 // explicit instantiation, leave it with default visibility but
3501 // enable string-comparisons.
3502 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
3503 return RUK_NonUniqueVisible;
// Find out how to codegen the complete destructor and constructor
// variants: Emit them as separate bodies, RAUW one with the other via a
// deferred replacement, emit one as a GlobalAlias of the other, or emit
// both under a shared COMDAT (ELF only) — see getCodegenToUse below.
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
3510 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
3511 const CXXMethodDecl *MD) {
3512 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
3513 return StructorCodegen::Emit;
3515 // The complete and base structors are not equivalent if there are any virtual
3516 // bases, so emit separate functions.
3517 if (MD->getParent()->getNumVBases())
3518 return StructorCodegen::Emit;
3520 GlobalDecl AliasDecl;
3521 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
3522 AliasDecl = GlobalDecl(DD, Dtor_Complete);
3524 const auto *CD = cast<CXXConstructorDecl>(MD);
3525 AliasDecl = GlobalDecl(CD, Ctor_Complete);
3527 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3529 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
3530 return StructorCodegen::RAUW;
3532 // FIXME: Should we allow available_externally aliases?
3533 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
3534 return StructorCodegen::RAUW;
3536 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
3537 // Only ELF supports COMDATs with arbitrary names (C5/D5).
3538 if (CGM.getTarget().getTriple().isOSBinFormatELF())
3539 return StructorCodegen::COMDAT;
3540 return StructorCodegen::Emit;
3543 return StructorCodegen::Alias;
3546 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
3547 GlobalDecl AliasDecl,
3548 GlobalDecl TargetDecl) {
3549 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3551 StringRef MangledName = CGM.getMangledName(AliasDecl);
3552 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
3553 if (Entry && !Entry->isDeclaration())
3556 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
3558 // Create the alias with no name.
3559 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
3561 // Switch any previous uses to the alias.
3563 assert(Entry->getType() == Aliasee->getType() &&
3564 "declaration exists with different type");
3565 Alias->takeName(Entry);
3566 Entry->replaceAllUsesWith(Alias);
3567 Entry->eraseFromParent();
3569 Alias->setName(MangledName);
3572 // Finally, set up the alias with its proper name and attributes.
3573 CGM.setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
3576 void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
3577 StructorType Type) {
3578 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
3579 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
3581 StructorCodegen CGType = getCodegenToUse(CGM, MD);
3583 if (Type == StructorType::Complete) {
3584 GlobalDecl CompleteDecl;
3585 GlobalDecl BaseDecl;
3587 CompleteDecl = GlobalDecl(CD, Ctor_Complete);
3588 BaseDecl = GlobalDecl(CD, Ctor_Base);
3590 CompleteDecl = GlobalDecl(DD, Dtor_Complete);
3591 BaseDecl = GlobalDecl(DD, Dtor_Base);
3594 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
3595 emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
3599 if (CGType == StructorCodegen::RAUW) {
3600 StringRef MangledName = CGM.getMangledName(CompleteDecl);
3601 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
3602 CGM.addReplacement(MangledName, Aliasee);
3607 // The base destructor is equivalent to the base destructor of its
3608 // base class if there is exactly one non-virtual base class with a
3609 // non-trivial destructor, there are no fields with a non-trivial
3610 // destructor, and the body of the destructor is trivial.
3611 if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
3612 !CGM.TryEmitBaseDestructorAsAlias(DD))
3615 llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);
3617 if (CGType == StructorCodegen::COMDAT) {
3618 SmallString<256> Buffer;
3619 llvm::raw_svector_ostream Out(Buffer);
3621 getMangleContext().mangleCXXDtorComdat(DD, Out);
3623 getMangleContext().mangleCXXCtorComdat(CD, Out);
3624 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
3627 CGM.maybeSetTrivialComdat(*MD, *Fn);
3631 static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) {
3632 // void *__cxa_begin_catch(void*);
3633 llvm::FunctionType *FTy = llvm::FunctionType::get(
3634 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3636 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
3639 static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
3640 // void __cxa_end_catch();
3641 llvm::FunctionType *FTy =
3642 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
3644 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
3647 static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
3648 // void *__cxa_get_exception_ptr(void*);
3649 llvm::FunctionType *FTy = llvm::FunctionType::get(
3650 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3652 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
3656 /// A cleanup to call __cxa_end_catch. In many cases, the caught
3657 /// exception type lets us state definitively that the thrown exception
3658 /// type does not have a destructor. In particular:
3659 /// - Catch-alls tell us nothing, so we have to conservatively
3660 /// assume that the thrown exception might have a destructor.
3661 /// - Catches by reference behave according to their base types.
3662 /// - Catches of non-record types will only trigger for exceptions
3663 /// of non-record types, which never have destructors.
3664 /// - Catches of record types can trigger for arbitrary subclasses
3665 /// of the caught type, so we have to assume the actual thrown
3666 /// exception type might have a throwing destructor, even if the
3667 /// caught type's destructor is trivial or nothrow.
3668 struct CallEndCatch final : EHScopeStack::Cleanup {
3669 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
3672 void Emit(CodeGenFunction &CGF, Flags flags) override {
3674 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
3678 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
3683 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
3684 /// __cxa_end_catch.
3686 /// \param EndMightThrow - true if __cxa_end_catch might throw
3687 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
3689 bool EndMightThrow) {
3690 llvm::CallInst *call =
3691 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
3693 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
3698 /// A "special initializer" callback for initializing a catch
3699 /// parameter during catch initialization.
3700 static void InitCatchParam(CodeGenFunction &CGF,
3701 const VarDecl &CatchParam,
3703 SourceLocation Loc) {
3704 // Load the exception from where the landing pad saved it.
3705 llvm::Value *Exn = CGF.getExceptionFromSlot();
3707 CanQualType CatchType =
3708 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
3709 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
3711 // If we're catching by reference, we can just cast the object
3712 // pointer to the appropriate pointer.
3713 if (isa<ReferenceType>(CatchType)) {
3714 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
3715 bool EndCatchMightThrow = CaughtType->isRecordType();
3717 // __cxa_begin_catch returns the adjusted object pointer.
3718 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
3720 // We have no way to tell the personality function that we're
3721 // catching by reference, so if we're catching a pointer,
3722 // __cxa_begin_catch will actually return that pointer by value.
3723 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
3724 QualType PointeeType = PT->getPointeeType();
3726 // When catching by reference, generally we should just ignore
3727 // this by-value pointer and use the exception object instead.
3728 if (!PointeeType->isRecordType()) {
3730 // Exn points to the struct _Unwind_Exception header, which
3731 // we have to skip past in order to reach the exception data.
3732 unsigned HeaderSize =
3733 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
3734 AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
3736 // However, if we're catching a pointer-to-record type that won't
3737 // work, because the personality function might have adjusted
3738 // the pointer. There's actually no way for us to fully satisfy
3739 // the language/ABI contract here: we can't use Exn because it
3740 // might have the wrong adjustment, but we can't use the by-value
3741 // pointer because it's off by a level of abstraction.
3743 // The current solution is to dump the adjusted pointer into an
3744 // alloca, which breaks language semantics (because changing the
3745 // pointer doesn't change the exception) but at least works.
3746 // The better solution would be to filter out non-exact matches
3747 // and rethrow them, but this is tricky because the rethrow
3748 // really needs to be catchable by other sites at this landing
3749 // pad. The best solution is to fix the personality function.
3751 // Pull the pointer for the reference type off.
3753 cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
3755 // Create the temporary and write the adjusted pointer into it.
3757 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
3758 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3759 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
3761 // Bind the reference to the temporary.
3762 AdjustedExn = ExnPtrTmp.getPointer();
3766 llvm::Value *ExnCast =
3767 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
3768 CGF.Builder.CreateStore(ExnCast, ParamAddr);
3772 // Scalars and complexes.
3773 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
3774 if (TEK != TEK_Aggregate) {
3775 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
3777 // If the catch type is a pointer type, __cxa_begin_catch returns
3778 // the pointer by value.
3779 if (CatchType->hasPointerRepresentation()) {
3780 llvm::Value *CastExn =
3781 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
3783 switch (CatchType.getQualifiers().getObjCLifetime()) {
3784 case Qualifiers::OCL_Strong:
3785 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
3788 case Qualifiers::OCL_None:
3789 case Qualifiers::OCL_ExplicitNone:
3790 case Qualifiers::OCL_Autoreleasing:
3791 CGF.Builder.CreateStore(CastExn, ParamAddr);
3794 case Qualifiers::OCL_Weak:
3795 CGF.EmitARCInitWeak(ParamAddr, CastExn);
3798 llvm_unreachable("bad ownership qualifier!");
3801 // Otherwise, it returns a pointer into the exception object.
3803 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3804 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3806 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
3807 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
3810 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
3814 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
3815 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
3819 llvm_unreachable("evaluation kind filtered out!");
3821 llvm_unreachable("bad evaluation kind");
3824 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
3825 auto catchRD = CatchType->getAsCXXRecordDecl();
3826 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
3828 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3830 // Check for a copy expression. If we don't have a copy expression,
3831 // that means a trivial copy is okay.
3832 const Expr *copyExpr = CatchParam.getInit();
3834 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
3835 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
3836 caughtExnAlignment);
3837 CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
3841 // We have to call __cxa_get_exception_ptr to get the adjusted
3842 // pointer before copying.
3843 llvm::CallInst *rawAdjustedExn =
3844 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
3846 // Cast that to the appropriate type.
3847 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
3848 caughtExnAlignment);
3850 // The copy expression is defined in terms of an OpaqueValueExpr.
3851 // Find it and map it to the adjusted expression.
3852 CodeGenFunction::OpaqueValueMapping
3853 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
3854 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
3856 // Call the copy ctor in a terminate scope.
3857 CGF.EHStack.pushTerminate();
3859 // Perform the copy construction.
3860 CGF.EmitAggExpr(copyExpr,
3861 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
3862 AggValueSlot::IsNotDestructed,
3863 AggValueSlot::DoesNotNeedGCBarriers,
3864 AggValueSlot::IsNotAliased));
3866 // Leave the terminate scope.
3867 CGF.EHStack.popTerminate();
3869 // Undo the opaque value mapping.
3872 // Finally we can call __cxa_begin_catch.
3873 CallBeginCatch(CGF, Exn, true);
3876 /// Begins a catch statement by initializing the catch variable and
3877 /// calling __cxa_begin_catch.
3878 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
3879 const CXXCatchStmt *S) {
3880 // We have to be very careful with the ordering of cleanups here:
3881 // C++ [except.throw]p4:
3882 // The destruction [of the exception temporary] occurs
3883 // immediately after the destruction of the object declared in
3884 // the exception-declaration in the handler.
3886 // So the precise ordering is:
3887 // 1. Construct catch variable.
3888 // 2. __cxa_begin_catch
3889 // 3. Enter __cxa_end_catch cleanup
3890 // 4. Enter dtor cleanup
3892 // We do this by using a slightly abnormal initialization process.
3893 // Delegation sequence:
3894 // - ExitCXXTryStmt opens a RunCleanupsScope
3895 // - EmitAutoVarAlloca creates the variable and debug info
3896 // - InitCatchParam initializes the variable from the exception
3897 // - CallBeginCatch calls __cxa_begin_catch
3898 // - CallBeginCatch enters the __cxa_end_catch cleanup
3899 // - EmitAutoVarCleanups enters the variable destructor cleanup
3900 // - EmitCXXTryStmt emits the code for the catch body
3901 // - EmitCXXTryStmt close the RunCleanupsScope
3903 VarDecl *CatchParam = S->getExceptionDecl();
3905 llvm::Value *Exn = CGF.getExceptionFromSlot();
3906 CallBeginCatch(CGF, Exn, true);
3911 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
3912 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
3913 CGF.EmitAutoVarCleanups(var);
3916 /// Get or define the following function:
3917 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
3918 /// This code is used only in C++.
3919 static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
3920 llvm::FunctionType *fnTy =
3921 llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3922 llvm::Constant *fnRef =
3923 CGM.CreateRuntimeFunction(fnTy, "__clang_call_terminate",
3924 llvm::AttributeSet(), /*Local=*/true);
3926 llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
3927 if (fn && fn->empty()) {
3928 fn->setDoesNotThrow();
3929 fn->setDoesNotReturn();
3931 // What we really want is to massively penalize inlining without
3932 // forbidding it completely. The difference between that and
3933 // 'noinline' is negligible.
3934 fn->addFnAttr(llvm::Attribute::NoInline);
3936 // Allow this function to be shared across translation units, but
3937 // we don't want it to turn into an exported symbol.
3938 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
3939 fn->setVisibility(llvm::Function::HiddenVisibility);
3940 if (CGM.supportsCOMDAT())
3941 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
3943 // Set up the function.
3944 llvm::BasicBlock *entry =
3945 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
3946 CGBuilderTy builder(CGM, entry);
3948 // Pull the exception pointer out of the parameter list.
3949 llvm::Value *exn = &*fn->arg_begin();
3951 // Call __cxa_begin_catch(exn).
3952 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
3953 catchCall->setDoesNotThrow();
3954 catchCall->setCallingConv(CGM.getRuntimeCC());
3956 // Call std::terminate().
3957 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
3958 termCall->setDoesNotThrow();
3959 termCall->setDoesNotReturn();
3960 termCall->setCallingConv(CGM.getRuntimeCC());
3962 // std::terminate cannot return.
3963 builder.CreateUnreachable();
3970 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
3972 // In C++, we want to call __cxa_begin_catch() before terminating.
3974 assert(CGF.CGM.getLangOpts().CPlusPlus);
3975 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
3977 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());