//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Itanium C++ ABI.  The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
//  http://www.codesourcery.com/public/cxx-abi/abi.html
//  http://www.codesourcery.com/public/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
//
//===----------------------------------------------------------------------===//
22 #include "CGCleanup.h"
23 #include "CGRecordLayout.h"
24 #include "CGVTables.h"
25 #include "CodeGenFunction.h"
26 #include "CodeGenModule.h"
27 #include "TargetInfo.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/Type.h"
30 #include "clang/AST/StmtCXX.h"
31 #include "llvm/IR/CallSite.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/IR/Value.h"
37 using namespace clang;
38 using namespace CodeGen;
41 class ItaniumCXXABI : public CodeGen::CGCXXABI {
42 /// VTables - All the vtables which have been defined.
43 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
46 bool UseARMMethodPtrABI;
47 bool UseARMGuardVarABI;
49 ItaniumMangleContext &getMangleContext() {
50 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
54 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
55 bool UseARMMethodPtrABI = false,
56 bool UseARMGuardVarABI = false) :
57 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
58 UseARMGuardVarABI(UseARMGuardVarABI) { }
60 bool classifyReturnType(CGFunctionInfo &FI) const override;
62 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
63 // Structures with either a non-trivial destructor or a non-trivial
64 // copy constructor are always indirect.
65 // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
67 if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor())
72 bool isThisCompleteObject(GlobalDecl GD) const override {
73 // The Itanium ABI has separate complete-object vs. base-object
74 // variants of both constructors and destructors.
75 if (isa<CXXDestructorDecl>(GD.getDecl())) {
76 switch (GD.getDtorType()) {
85 llvm_unreachable("emitting dtor comdat as function?");
87 llvm_unreachable("bad dtor kind");
89 if (isa<CXXConstructorDecl>(GD.getDecl())) {
90 switch (GD.getCtorType()) {
97 case Ctor_CopyingClosure:
98 case Ctor_DefaultClosure:
99 llvm_unreachable("closure ctors in Itanium ABI?");
102 llvm_unreachable("emitting ctor comdat as function?");
104 llvm_unreachable("bad dtor kind");
111 bool isZeroInitializable(const MemberPointerType *MPT) override;
113 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
116 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
119 llvm::Value *&ThisPtrForCall,
120 llvm::Value *MemFnPtr,
121 const MemberPointerType *MPT) override;
124 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
127 const MemberPointerType *MPT) override;
129 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
131 llvm::Value *Src) override;
132 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
133 llvm::Constant *Src) override;
135 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
137 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
138 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
139 CharUnits offset) override;
140 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
141 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
142 CharUnits ThisAdjustment);
144 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
145 llvm::Value *L, llvm::Value *R,
146 const MemberPointerType *MPT,
147 bool Inequality) override;
149 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
151 const MemberPointerType *MPT) override;
153 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
154 Address Ptr, QualType ElementType,
155 const CXXDestructorDecl *Dtor) override;
157 /// Itanium says that an _Unwind_Exception has to be "double-word"
158 /// aligned (and thus the end of it is also so-aligned), meaning 16
159 /// bytes. Of course, that was written for the actual Itanium,
160 /// which is a 64-bit platform. Classically, the ABI doesn't really
161 /// specify the alignment on other platforms, but in practice
162 /// libUnwind declares the struct with __attribute__((aligned)), so
163 /// we assume that alignment here. (It's generally 16 bytes, but
164 /// some targets overwrite it.)
165 CharUnits getAlignmentOfExnObject() {
166 auto align = CGM.getContext().getTargetDefaultAlignForAttributeAligned();
167 return CGM.getContext().toCharUnitsFromBits(align);
170 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
171 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
173 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
176 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
177 llvm::Value *Exn) override;
179 void EmitFundamentalRTTIDescriptor(QualType Type);
180 void EmitFundamentalRTTIDescriptors();
181 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
183 getAddrOfCXXCatchHandlerType(QualType Ty,
184 QualType CatchHandlerType) override {
185 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
188 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
189 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
190 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
192 llvm::Type *StdTypeInfoPtrTy) override;
194 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
195 QualType SrcRecordTy) override;
197 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
198 QualType SrcRecordTy, QualType DestTy,
199 QualType DestRecordTy,
200 llvm::BasicBlock *CastEnd) override;
202 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
203 QualType SrcRecordTy,
204 QualType DestTy) override;
206 bool EmitBadCastCall(CodeGenFunction &CGF) override;
209 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
210 const CXXRecordDecl *ClassDecl,
211 const CXXRecordDecl *BaseClassDecl) override;
213 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
215 void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
216 SmallVectorImpl<CanQualType> &ArgTys) override;
218 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
219 CXXDtorType DT) const override {
220 // Itanium does not emit any destructor variant as an inline thunk.
221 // Delegating may occur as an optimization, but all variants are either
222 // emitted with external linkage or as linkonce if they are inline and used.
226 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
228 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
229 FunctionArgList &Params) override;
231 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
233 unsigned addImplicitConstructorArgs(CodeGenFunction &CGF,
234 const CXXConstructorDecl *D,
235 CXXCtorType Type, bool ForVirtualBase,
237 CallArgList &Args) override;
239 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
240 CXXDtorType Type, bool ForVirtualBase,
241 bool Delegating, Address This) override;
243 void emitVTableDefinitions(CodeGenVTables &CGVT,
244 const CXXRecordDecl *RD) override;
246 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
247 CodeGenFunction::VPtr Vptr) override;
249 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
254 getVTableAddressPoint(BaseSubobject Base,
255 const CXXRecordDecl *VTableClass) override;
257 llvm::Value *getVTableAddressPointInStructor(
258 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
259 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
261 llvm::Value *getVTableAddressPointInStructorWithVTT(
262 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
263 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
266 getVTableAddressPointForConstExpr(BaseSubobject Base,
267 const CXXRecordDecl *VTableClass) override;
269 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
270 CharUnits VPtrOffset) override;
272 llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
273 Address This, llvm::Type *Ty,
274 SourceLocation Loc) override;
276 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
277 const CXXDestructorDecl *Dtor,
278 CXXDtorType DtorType,
280 const CXXMemberCallExpr *CE) override;
282 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
284 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
286 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
287 bool ReturnAdjustment) override {
288 // Allow inlining of thunks by emitting them with available_externally
289 // linkage together with vtables when needed.
290 if (ForVTable && !Thunk->hasLocalLinkage())
291 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
294 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
295 const ThisAdjustment &TA) override;
297 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
298 const ReturnAdjustment &RA) override;
300 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
301 FunctionArgList &Args) const override {
302 assert(!Args.empty() && "expected the arglist to not be empty!");
303 return Args.size() - 1;
306 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
307 StringRef GetDeletedVirtualCallName() override
308 { return "__cxa_deleted_virtual"; }
310 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
311 Address InitializeArrayCookie(CodeGenFunction &CGF,
313 llvm::Value *NumElements,
314 const CXXNewExpr *expr,
315 QualType ElementType) override;
316 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
318 CharUnits cookieSize) override;
320 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
321 llvm::GlobalVariable *DeclPtr,
322 bool PerformInit) override;
323 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
324 llvm::Constant *dtor, llvm::Constant *addr) override;
326 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
328 void EmitThreadLocalInitFuncs(
330 ArrayRef<const VarDecl *> CXXThreadLocals,
331 ArrayRef<llvm::Function *> CXXThreadLocalInits,
332 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
334 bool usesThreadWrapperFunction() const override { return true; }
335 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
336 QualType LValType) override;
338 bool NeedsVTTParameter(GlobalDecl GD) override;
340 /**************************** RTTI Uniqueness ******************************/
343 /// Returns true if the ABI requires RTTI type_info objects to be unique
344 /// across a program.
345 virtual bool shouldRTTIBeUnique() const { return true; }
348 /// What sort of unique-RTTI behavior should we use?
349 enum RTTIUniquenessKind {
350 /// We are guaranteeing, or need to guarantee, that the RTTI string
354 /// We are not guaranteeing uniqueness for the RTTI string, so we
355 /// can demote to hidden visibility but must use string comparisons.
358 /// We are not guaranteeing uniqueness for the RTTI string, so we
359 /// have to use string comparisons, but we also have to emit it with
360 /// non-hidden visibility.
364 /// Return the required visibility status for the given type and linkage in
367 classifyRTTIUniqueness(QualType CanTy,
368 llvm::GlobalValue::LinkageTypes Linkage) const;
369 friend class ItaniumRTTIBuilder;
371 void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
374 bool hasAnyUsedVirtualInlineFunction(const CXXRecordDecl *RD) const {
375 const auto &VtableLayout =
376 CGM.getItaniumVTableContext().getVTableLayout(RD);
378 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
379 if (!VtableComponent.isUsedFunctionPointerKind())
382 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
383 if (Method->getCanonicalDecl()->isInlined())
389 bool isVTableHidden(const CXXRecordDecl *RD) const {
390 const auto &VtableLayout =
391 CGM.getItaniumVTableContext().getVTableLayout(RD);
393 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
394 if (VtableComponent.isRTTIKind()) {
395 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
396 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
398 } else if (VtableComponent.isUsedFunctionPointerKind()) {
399 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
400 if (Method->getVisibility() == Visibility::HiddenVisibility &&
401 !Method->isDefined())
409 class ARMCXXABI : public ItaniumCXXABI {
411 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
412 ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
413 /* UseARMGuardVarABI = */ true) {}
415 bool HasThisReturn(GlobalDecl GD) const override {
416 return (isa<CXXConstructorDecl>(GD.getDecl()) || (
417 isa<CXXDestructorDecl>(GD.getDecl()) &&
418 GD.getDtorType() != Dtor_Deleting));
421 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
422 QualType ResTy) override;
424 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
425 Address InitializeArrayCookie(CodeGenFunction &CGF,
427 llvm::Value *NumElements,
428 const CXXNewExpr *expr,
429 QualType ElementType) override;
430 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
431 CharUnits cookieSize) override;
434 class iOS64CXXABI : public ARMCXXABI {
436 iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {}
438 // ARM64 libraries are prepared for non-unique RTTI.
439 bool shouldRTTIBeUnique() const override { return false; }
442 class WebAssemblyCXXABI final : public ItaniumCXXABI {
444 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
445 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
446 /*UseARMGuardVarABI=*/true) {}
449 bool HasThisReturn(GlobalDecl GD) const override {
450 return isa<CXXConstructorDecl>(GD.getDecl()) ||
451 (isa<CXXDestructorDecl>(GD.getDecl()) &&
452 GD.getDtorType() != Dtor_Deleting);
457 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
458 switch (CGM.getTarget().getCXXABI().getKind()) {
459 // For IR-generation purposes, there's no significant difference
460 // between the ARM and iOS ABIs.
461 case TargetCXXABI::GenericARM:
462 case TargetCXXABI::iOS:
463 case TargetCXXABI::WatchOS:
464 return new ARMCXXABI(CGM);
466 case TargetCXXABI::iOS64:
467 return new iOS64CXXABI(CGM);
469 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
470 // include the other 32-bit ARM oddities: constructor/destructor return values
471 // and array cookies.
472 case TargetCXXABI::GenericAArch64:
473 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
474 /* UseARMGuardVarABI = */ true);
476 case TargetCXXABI::GenericMIPS:
477 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true);
479 case TargetCXXABI::WebAssembly:
480 return new WebAssemblyCXXABI(CGM);
482 case TargetCXXABI::GenericItanium:
483 if (CGM.getContext().getTargetInfo().getTriple().getArch()
484 == llvm::Triple::le32) {
485 // For PNaCl, use ARM-style method pointers so that PNaCl code
486 // does not assume anything about the alignment of function
488 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true,
489 /* UseARMGuardVarABI = */ false);
491 return new ItaniumCXXABI(CGM);
493 case TargetCXXABI::Microsoft:
494 llvm_unreachable("Microsoft ABI is not Itanium-based");
496 llvm_unreachable("bad ABI kind");
500 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
501 if (MPT->isMemberDataPointer())
502 return CGM.PtrDiffTy;
503 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, nullptr);
506 /// In the Itanium and ARM ABIs, method pointers have the form:
507 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
509 /// In the Itanium ABI:
510 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero
511 /// - the this-adjustment is (memptr.adj)
512 /// - the virtual offset is (memptr.ptr - 1)
515 /// - method pointers are virtual if (memptr.adj & 1) is nonzero
516 /// - the this-adjustment is (memptr.adj >> 1)
517 /// - the virtual offset is (memptr.ptr)
518 /// ARM uses 'adj' for the virtual flag because Thumb functions
519 /// may be only single-byte aligned.
521 /// If the member is virtual, the adjusted 'this' pointer points
522 /// to a vtable pointer from which the virtual offset is applied.
524 /// If the member is non-virtual, memptr.ptr is the address of
525 /// the function to call.
526 llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
527 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
528 llvm::Value *&ThisPtrForCall,
529 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
530 CGBuilderTy &Builder = CGF.Builder;
532 const FunctionProtoType *FPT =
533 MPT->getPointeeType()->getAs<FunctionProtoType>();
534 const CXXRecordDecl *RD =
535 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
537 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
538 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
540 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
542 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
543 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
544 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
546 // Extract memptr.adj, which is in the second field.
547 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
549 // Compute the true adjustment.
550 llvm::Value *Adj = RawAdj;
551 if (UseARMMethodPtrABI)
552 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
554 // Apply the adjustment and cast back to the original struct type
556 llvm::Value *This = ThisAddr.getPointer();
557 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
558 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
559 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
560 ThisPtrForCall = This;
562 // Load the function pointer.
563 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
565 // If the LSB in the function pointer is 1, the function pointer points to
566 // a virtual function.
567 llvm::Value *IsVirtual;
568 if (UseARMMethodPtrABI)
569 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
571 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
572 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
573 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
575 // In the virtual path, the adjustment left 'This' pointing to the
576 // vtable of the correct base subobject. The "function pointer" is an
577 // offset within the vtable (+1 for the virtual flag on non-ARM).
578 CGF.EmitBlock(FnVirtual);
580 // Cast the adjusted this to a pointer to vtable pointer and load.
581 llvm::Type *VTableTy = Builder.getInt8PtrTy();
582 CharUnits VTablePtrAlign =
583 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
584 CGF.getPointerAlign());
585 llvm::Value *VTable =
586 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
589 llvm::Value *VTableOffset = FnAsInt;
590 if (!UseARMMethodPtrABI)
591 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
592 VTable = Builder.CreateGEP(VTable, VTableOffset);
594 // Load the virtual function to call.
595 VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
596 llvm::Value *VirtualFn =
597 Builder.CreateAlignedLoad(VTable, CGF.getPointerAlign(),
599 CGF.EmitBranch(FnEnd);
601 // In the non-virtual path, the function pointer is actually a
603 CGF.EmitBlock(FnNonVirtual);
604 llvm::Value *NonVirtualFn =
605 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
608 CGF.EmitBlock(FnEnd);
609 llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2);
610 Callee->addIncoming(VirtualFn, FnVirtual);
611 Callee->addIncoming(NonVirtualFn, FnNonVirtual);
615 /// Compute an l-value by applying the given pointer-to-member to a
617 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
618 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
619 const MemberPointerType *MPT) {
620 assert(MemPtr->getType() == CGM.PtrDiffTy);
622 CGBuilderTy &Builder = CGF.Builder;
625 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
627 // Apply the offset, which we assume is non-null.
629 Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
631 // Cast the address to the appropriate pointer type, adopting the
632 // address space of the base pointer.
633 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
634 ->getPointerTo(Base.getAddressSpace());
635 return Builder.CreateBitCast(Addr, PType);
638 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
641 /// Bitcast conversions are always a no-op under Itanium.
643 /// Obligatory offset/adjustment diagram:
644 /// <-- offset --> <-- adjustment -->
645 /// |--------------------------|----------------------|--------------------|
646 /// ^Derived address point ^Base address point ^Member address point
648 /// So when converting a base member pointer to a derived member pointer,
649 /// we add the offset to the adjustment because the address point has
650 /// decreased; and conversely, when converting a derived MP to a base MP
651 /// we subtract the offset from the adjustment because the address point
654 /// The standard forbids (at compile time) conversion to and from
655 /// virtual bases, which is why we don't have to consider them here.
657 /// The standard forbids (at run time) casting a derived MP to a base
658 /// MP when the derived MP does not point to a member of the base.
659 /// This is why -1 is a reasonable choice for null data member
662 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
665 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
666 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
667 E->getCastKind() == CK_ReinterpretMemberPointer);
669 // Under Itanium, reinterprets don't require any additional processing.
670 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
672 // Use constant emission if we can.
673 if (isa<llvm::Constant>(src))
674 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
676 llvm::Constant *adj = getMemberPointerAdjustment(E);
677 if (!adj) return src;
679 CGBuilderTy &Builder = CGF.Builder;
680 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
682 const MemberPointerType *destTy =
683 E->getType()->castAs<MemberPointerType>();
685 // For member data pointers, this is just a matter of adding the
686 // offset if the source is non-null.
687 if (destTy->isMemberDataPointer()) {
690 dst = Builder.CreateNSWSub(src, adj, "adj");
692 dst = Builder.CreateNSWAdd(src, adj, "adj");
695 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
696 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
697 return Builder.CreateSelect(isNull, src, dst);
700 // The this-adjustment is left-shifted by 1 on ARM.
701 if (UseARMMethodPtrABI) {
702 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
704 adj = llvm::ConstantInt::get(adj->getType(), offset);
707 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
710 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
712 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
714 return Builder.CreateInsertValue(src, dstAdj, 1);
718 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
719 llvm::Constant *src) {
720 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
721 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
722 E->getCastKind() == CK_ReinterpretMemberPointer);
724 // Under Itanium, reinterprets don't require any additional processing.
725 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
727 // If the adjustment is trivial, we don't need to do anything.
728 llvm::Constant *adj = getMemberPointerAdjustment(E);
729 if (!adj) return src;
731 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
733 const MemberPointerType *destTy =
734 E->getType()->castAs<MemberPointerType>();
736 // For member data pointers, this is just a matter of adding the
737 // offset if the source is non-null.
738 if (destTy->isMemberDataPointer()) {
739 // null maps to null.
740 if (src->isAllOnesValue()) return src;
743 return llvm::ConstantExpr::getNSWSub(src, adj);
745 return llvm::ConstantExpr::getNSWAdd(src, adj);
748 // The this-adjustment is left-shifted by 1 on ARM.
749 if (UseARMMethodPtrABI) {
750 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
752 adj = llvm::ConstantInt::get(adj->getType(), offset);
755 llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
756 llvm::Constant *dstAdj;
758 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
760 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
762 return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
766 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
767 // Itanium C++ ABI 2.3:
768 // A NULL pointer is represented as -1.
769 if (MPT->isMemberDataPointer())
770 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
772 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
773 llvm::Constant *Values[2] = { Zero, Zero };
774 return llvm::ConstantStruct::getAnon(Values);
778 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
780 // Itanium C++ ABI 2.3:
781 // A pointer to data member is an offset from the base address of
782 // the class object containing it, represented as a ptrdiff_t
783 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
787 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
788 return BuildMemberPointer(MD, CharUnits::Zero());
791 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
792 CharUnits ThisAdjustment) {
793 assert(MD->isInstance() && "Member function must not be static!");
794 MD = MD->getCanonicalDecl();
796 CodeGenTypes &Types = CGM.getTypes();
798 // Get the function pointer (or index if this is a virtual function).
799 llvm::Constant *MemPtr[2];
800 if (MD->isVirtual()) {
801 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
803 const ASTContext &Context = getContext();
804 CharUnits PointerWidth =
805 Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
806 uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
808 if (UseARMMethodPtrABI) {
809 // ARM C++ ABI 3.2.1:
810 // This ABI specifies that adj contains twice the this
811 // adjustment, plus 1 if the member function is virtual. The
812 // least significant bit of adj then makes exactly the same
813 // discrimination as the least significant bit of ptr does for
815 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
816 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
817 2 * ThisAdjustment.getQuantity() + 1);
819 // Itanium C++ ABI 2.3:
820 // For a virtual function, [the pointer field] is 1 plus the
821 // virtual table offset (in bytes) of the function,
822 // represented as a ptrdiff_t.
823 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
824 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
825 ThisAdjustment.getQuantity());
828 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
830 // Check whether the function has a computable LLVM signature.
831 if (Types.isFuncTypeConvertible(FPT)) {
832 // The function has a computable LLVM signature; use the correct type.
833 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
835 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
836 // function type is incomplete.
839 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
841 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
842 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
843 (UseARMMethodPtrABI ? 2 : 1) *
844 ThisAdjustment.getQuantity());
847 return llvm::ConstantStruct::getAnon(MemPtr);
850 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
852 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
853 const ValueDecl *MPD = MP.getMemberPointerDecl();
855 return EmitNullMemberPointer(MPT);
857 CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
859 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
860 return BuildMemberPointer(MD, ThisAdjustment);
862 CharUnits FieldOffset =
863 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
864 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
867 /// The comparison algorithm is pretty easy: the member pointers are
868 /// the same if they're either bitwise identical *or* both null.
870 /// ARM is different here only because null-ness is more complicated.
872 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
875 const MemberPointerType *MPT,
877 CGBuilderTy &Builder = CGF.Builder;
879 llvm::ICmpInst::Predicate Eq;
880 llvm::Instruction::BinaryOps And, Or;
882 Eq = llvm::ICmpInst::ICMP_NE;
883 And = llvm::Instruction::Or;
884 Or = llvm::Instruction::And;
886 Eq = llvm::ICmpInst::ICMP_EQ;
887 And = llvm::Instruction::And;
888 Or = llvm::Instruction::Or;
891 // Member data pointers are easy because there's a unique null
892 // value, so it just comes down to bitwise equality.
893 if (MPT->isMemberDataPointer())
894 return Builder.CreateICmp(Eq, L, R);
896 // For member function pointers, the tautologies are more complex.
897 // The Itanium tautology is:
898 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
899 // The ARM tautology is:
900 // (L == R) <==> (L.ptr == R.ptr &&
901 // (L.adj == R.adj ||
902 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
903 // The inequality tautologies have exactly the same structure, except
904 // applying De Morgan's laws.
906 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
907 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
909 // This condition tests whether L.ptr == R.ptr. This must always be
910 // true for equality to hold.
911 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
913 // This condition, together with the assumption that L.ptr == R.ptr,
914 // tests whether the pointers are both null. ARM imposes an extra
916 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
917 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
919 // This condition tests whether L.adj == R.adj. If this isn't
920 // true, the pointers are unequal unless they're both null.
921 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
922 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
923 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
925 // Null member function pointers on ARM clear the low bit of Adj,
926 // so the zero condition has to check that neither low bit is set.
927 if (UseARMMethodPtrABI) {
928 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
930 // Compute (l.adj | r.adj) & 1 and test it against zero.
931 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
932 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
933 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
935 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
938 // Tie together all our conditions.
939 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
940 Result = Builder.CreateBinOp(And, PtrEq, Result,
941 Inequality ? "memptr.ne" : "memptr.eq");
// Emit the boolean conversion of a member pointer: true iff the member
// pointer is not the null member pointer for its kind.
// NOTE(review): the return-type line preceding this definition, and the
// trailing "return Result;" / closing brace, are missing from this extract.
946 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
948 const MemberPointerType *MPT) {
949 CGBuilderTy &Builder = CGF.Builder;
951 /// For member data pointers, this is just a check against -1.
952 if (MPT->isMemberDataPointer()) {
953 assert(MemPtr->getType() == CGM.PtrDiffTy);
954 llvm::Value *NegativeOne =
955 llvm::Constant::getAllOnesValue(MemPtr->getType());
956 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
959 // In Itanium, a member function pointer is not null if 'ptr' is not null.
960 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
962 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
963 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
965 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
966 // (the virtual bit) is set.
967 if (UseARMMethodPtrABI) {
968 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
969 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
970 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
971 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
973 Result = Builder.CreateOr(Result, IsVirtual);
// Decide whether a C++ class return value must be returned indirectly
// (via sret) under this ABI; sets FI's return info when it must.
979 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
980 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
// NOTE(review): RD is dereferenced below with no visible null check; a
// guard line (and the final returns / closing brace) appear to be missing
// from this extract.
984 // Return indirectly if we have a non-trivial copy ctor or non-trivial dtor.
985 // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
987 if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) {
988 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
989 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
995 /// The Itanium ABI requires non-zero initialization only for data
996 /// member pointers, for which '0' is a valid offset.
// Returns true when an all-zero bit pattern is a valid null value for this
// member-pointer type (i.e. for member *function* pointers).
997 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
998 return MPT->isMemberFunctionPointer();
1001 /// The Itanium ABI always places an offset to the complete object
1002 /// at entry -2 in the vtable.
// Emit a virtual `delete`: when the global operator delete is used, first
// derive the complete-object pointer via the vtable's offset-to-top slot
// and push a cleanup so the deallocation runs even if the dtor throws;
// then emit the virtual destructor call.
1003 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1004 const CXXDeleteExpr *DE,
1006 QualType ElementType,
1007 const CXXDestructorDecl *Dtor) {
1008 bool UseGlobalDelete = DE->isGlobalDelete();
1009 if (UseGlobalDelete) {
1010 // Derive the complete-object pointer, which is what we need
1011 // to pass to the deallocation function.
1013 // Grab the vtable pointer as an intptr_t*.
// NOTE(review): the left-hand side of the ClassDecl declaration appears to
// be on a line missing from this extract.
1015 cast<CXXRecordDecl>(ElementType->getAs<RecordType>()->getDecl());
1016 llvm::Value *VTable =
1017 CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1019 // Track back to entry -2 and pull out the offset there.
1020 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1021 VTable, -2, "complete-offset.ptr");
1022 llvm::Value *Offset =
1023 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1025 // Apply the offset.
1026 llvm::Value *CompletePtr =
1027 CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1028 CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
1030 // If we're supposed to call the global delete, make sure we do so
1031 // even if the destructor throws.
1032 CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1036 // FIXME: Provide a source location here even though there's no
1037 // CXXMemberCallExpr for dtor call.
1038 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1039 EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);
1041 if (UseGlobalDelete)
1042 CGF.PopCleanupBlock();
// Emit a call to the runtime's __cxa_rethrow to re-raise the currently
// handled exception.
1045 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1046 // void __cxa_rethrow();
1048 llvm::FunctionType *FTy =
1049 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false)
1051 llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
// NOTE(review): the "if (isNoReturn) ... else" control-flow lines selecting
// between these two emission forms are missing from this extract.
1054 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1056 CGF.EmitRuntimeCallOrInvoke(Fn);
// Return (creating if needed) the declaration of the runtime allocation
// entry point for exception objects.
1059 static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
1060 // void *__cxa_allocate_exception(size_t thrown_size);
1062 llvm::FunctionType *FTy =
1063 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);
1065 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
// Return (creating if needed) the declaration of __cxa_throw.  All three
// parameters are modeled as i8* at the IR level.
1068 static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
1069 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1070 // void (*dest) (void *));
1072 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1073 llvm::FunctionType *FTy =
1074 llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);
1076 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
// Emit a `throw` expression: allocate the exception object with
// __cxa_allocate_exception, copy-construct the thrown value into it, and
// raise it with __cxa_throw (passing the RTTI and, if the thrown type has a
// non-trivial destructor, its complete-object destructor).
1079 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1080 QualType ThrowType = E->getSubExpr()->getType();
1081 // Now allocate the exception object.
1082 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1083 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1085 llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
1086 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1087 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1089 CharUnits ExnAlign = getAlignmentOfExnObject();
1090 CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
1092 // Now throw the exception.
// NOTE(review): the continuation line carrying GetAddrOfRTTIDescriptor's
// second argument appears to be missing from this extract.
1093 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1096 // The address of the destructor. If the exception type has a
1097 // trivial destructor (or isn't a record), we just pass null.
1098 llvm::Constant *Dtor = nullptr;
1099 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1100 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1101 if (!Record->hasTrivialDestructor()) {
1102 CXXDestructorDecl *DtorD = Record->getDestructor();
1103 Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
1104 Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1107 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1109 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1110 CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
// Return (creating if needed) the declaration of the runtime's
// __dynamic_cast helper, marked nounwind + readonly so calls to it can be
// CSE'd/hoisted by the optimizer.
1113 static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1114 // void *__dynamic_cast(const void *sub,
1115 // const abi::__class_type_info *src,
1116 // const abi::__class_type_info *dst,
1117 // std::ptrdiff_t src2dst_offset);
1119 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1120 llvm::Type *PtrDiffTy =
1121 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1123 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1125 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1127 // Mark the function as nounwind readonly.
1128 llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1129 llvm::Attribute::ReadOnly };
1130 llvm::AttributeSet Attrs = llvm::AttributeSet::get(
1131 CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);
1133 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
// Return (creating if needed) the declaration of __cxa_bad_cast, the
// runtime hook that throws std::bad_cast.
1136 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1137 // void __cxa_bad_cast();
1138 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1139 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1142 /// \brief Compute the src2dst_offset hint as described in the
1143 /// Itanium C++ ABI [2.9.7]
// Walks every inheritance path from Dst up to Src and encodes the result
// as the __dynamic_cast hint: -1 (virtual base on a path), -2 (Src is not a
// public base), -3 (multiple public non-virtual paths), or the byte offset
// of the unique public non-virtual Src subobject within Dst.
1144 static CharUnits computeOffsetHint(ASTContext &Context,
1145 const CXXRecordDecl *Src,
1146 const CXXRecordDecl *Dst) {
1147 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1148 /*DetectVirtual=*/false);
1150 // If Dst is not derived from Src we can skip the whole computation below and
1151 // return that Src is not a public base of Dst. Record all inheritance paths.
1152 if (!Dst->isDerivedFrom(Src, Paths))
1153 return CharUnits::fromQuantity(-2ULL);
1155 unsigned NumPublicPaths = 0;
// NOTE(review): the declaration of the accumulated Offset, the `continue`
// for non-public paths, the NumPublicPaths increment, and the final
// `return Offset;` are on lines missing from this extract.
1158 // Now walk all possible inheritance paths.
1159 for (const CXXBasePath &Path : Paths) {
1160 if (Path.Access != AS_public) // Ignore non-public inheritance.
1165 for (const CXXBasePathElement &PathElement : Path) {
1166 // If the path contains a virtual base class we can't give any hint.
1168 if (PathElement.Base->isVirtual())
1169 return CharUnits::fromQuantity(-1ULL);
1171 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1174 // Accumulate the base class offsets.
1175 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1176 Offset += L.getBaseClassOffset(
1177 PathElement.Base->getType()->getAsCXXRecordDecl());
1181 // -2: Src is not a public base of Dst.
1182 if (NumPublicPaths == 0)
1183 return CharUnits::fromQuantity(-2ULL);
1185 // -3: Src is a multiple public base type but never a virtual base type.
1186 if (NumPublicPaths > 1)
1187 return CharUnits::fromQuantity(-3ULL);
1189 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1190 // Return the offset of Src from the origin of Dst.
// Return (creating if needed) the declaration of __cxa_bad_typeid, the
// runtime hook that throws std::bad_typeid.
1194 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1195 // void __cxa_bad_typeid();
1196 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1198 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
// Whether a typeid expression needs an explicit null check on its operand.
// NOTE(review): the body (return statement) and closing brace are on lines
// missing from this extract.
1201 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1202 QualType SrcRecordTy) {
// Emit a call to __cxa_bad_typeid followed by `unreachable`; the call is
// marked no-return since the runtime throws std::bad_typeid.
1206 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1207 llvm::Value *Fn = getBadTypeidFn(CGF);
1208 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1209 CGF.Builder.CreateUnreachable();
// Emit a polymorphic typeid: load the vtable pointer, step back one entry
// (the type_info slot precedes the address point), and load the pointer.
1212 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1213 QualType SrcRecordTy,
1215 llvm::Type *StdTypeInfoPtrTy) {
// NOTE(review): the left-hand side of the ClassDecl declaration appears to
// be on a line missing from this extract.
1217 cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1218 llvm::Value *Value =
1219 CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1221 // Load the type info.
1222 Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1223 return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
// Whether the __dynamic_cast call needs an explicit null check on its
// operand.  NOTE(review): the body (return statement) and closing brace
// are on lines missing from this extract.
1226 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1227 QualType SrcRecordTy) {
// Emit a checked dynamic_cast via the __dynamic_cast runtime call, passing
// source/destination RTTI plus the src2dst offset hint.  For reference
// targets, a null result branches to a bad_cast-throwing block.
1231 llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1232 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1233 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1234 llvm::Type *PtrDiffLTy =
1235 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1236 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1238 llvm::Value *SrcRTTI =
1239 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1240 llvm::Value *DestRTTI =
1241 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1243 // Compute the offset hint.
1244 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1245 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1246 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1248 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1250 // Emit the call to __dynamic_cast.
1251 llvm::Value *Value = ThisAddr.getPointer();
1252 Value = CGF.EmitCastToVoidPtr(Value);
1254 llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1255 Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1256 Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1258 /// C++ [expr.dynamic.cast]p9:
1259 /// A failed cast to reference type throws std::bad_cast
1260 if (DestTy->isReferenceType()) {
1261 llvm::BasicBlock *BadCastBlock =
1262 CGF.createBasicBlock("dynamic_cast.bad_cast");
1264 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1265 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1267 CGF.EmitBlock(BadCastBlock);
1268 EmitBadCastCall(CGF);
// Emit `dynamic_cast<void*>`: read the offset-to-top at vtable entry -2
// and add it to the source pointer to reach the complete object.
1274 llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1276 QualType SrcRecordTy,
1278 llvm::Type *PtrDiffLTy =
1279 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1280 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
// NOTE(review): the left-hand side of the ClassDecl declaration, and the
// assignment target of the aligned load below, appear to be on lines
// missing from this extract.
1283 cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
1284 // Get the vtable pointer.
1285 llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
1288 // Get the offset-to-top from the vtable.
1289 llvm::Value *OffsetToTop =
1290 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1292 CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
1295 // Finally, add the offset to the pointer.
1296 llvm::Value *Value = ThisAddr.getPointer();
1297 Value = CGF.EmitCastToVoidPtr(Value);
1298 Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1300 return CGF.Builder.CreateBitCast(Value, DestLTy);
// Emit a call to __cxa_bad_cast followed by `unreachable`; the call is
// marked no-return since the runtime throws std::bad_cast.
1303 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1304 llvm::Value *Fn = getBadCastFn(CGF);
1305 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1306 CGF.Builder.CreateUnreachable();
// Load the (dynamic) offset of a virtual base from the vbase-offset slot
// stored in the vtable, at the statically known vbase-offset-offset.
// NOTE(review): the return-type line of this definition and the final
// `return`/closing brace are missing from this extract.
1311 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1313 const CXXRecordDecl *ClassDecl,
1314 const CXXRecordDecl *BaseClassDecl) {
1315 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1316 CharUnits VBaseOffsetOffset =
1317 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1320 llvm::Value *VBaseOffsetPtr =
1321 CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1322 "vbase.offset.ptr");
1323 VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1324 CGM.PtrDiffTy->getPointerTo());
1326 llvm::Value *VBaseOffset =
1327 CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
// Emit the constructor variants required by the Itanium ABI: the base
// constructor always, and the complete constructor unless the class is
// abstract (it can never be the most-derived type).
1333 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1334 // Just make sure we're in sync with TargetCXXABI.
1335 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1337 // The constructor used for constructing this as a base class;
1338 // ignores virtual bases.
1339 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1341 // The constructor used for constructing this as a complete class;
1342 // constructs the virtual bases, then calls the base constructor.
1343 if (!D->getParent()->isAbstract()) {
1344 // We don't need to emit the complete ctor if the class is abstract.
1345 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
// Add ABI-specific implicit parameters to a constructor/destructor
// signature: base-object variants of classes with virtual bases take a VTT
// (void**) immediately after 'this'.
// NOTE(review): the return-type line of this definition and its closing
// brace are missing from this extract.
1350 ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
1351 SmallVectorImpl<CanQualType> &ArgTys) {
1352 ASTContext &Context = getContext();
1354 // All parameters are already in place except VTT, which goes after 'this'.
1355 // These are Clang types, so we don't need to worry about sret yet.
1357 // Check if we need to add a VTT parameter (which has type void **).
1358 if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0)
1359 ArgTys.insert(ArgTys.begin() + 1,
1360 Context.getPointerType(Context.VoidPtrTy));
// Emit the destructor variants required by the Itanium ABI: base,
// complete, and (for virtual destructors) deleting.
// NOTE(review): the `if (D->isVirtual())` guard for the deleting variant
// appears to be on a line missing from this extract.
1363 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1364 // The destructor used for destructing this as a base class; ignores
1366 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1368 // The destructor used for destructing this as a most-derived class;
1369 // call the base destructor and then destructs any virtual bases.
1370 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1372 // The destructor in a virtual table is always a 'deleting'
1373 // destructor, which calls the complete destructor and then uses the
1374 // appropriate operator delete.
1376 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
// Add the implicit VTT parameter to a structor's FunctionArgList when the
// current GlobalDecl requires one, recording the decl for later lookup in
// EmitInstanceFunctionProlog.
1379 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1381 FunctionArgList &Params) {
1382 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1383 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1385 // Check if we need a VTT parameter as well.
1386 if (NeedsVTTParameter(CGF.CurGD)) {
1387 ASTContext &Context = getContext();
1389 // FIXME: avoid the fake decl
1390 QualType T = Context.getPointerType(Context.VoidPtrTy);
1391 ImplicitParamDecl *VTTDecl
1392 = ImplicitParamDecl::Create(Context, nullptr, MD->getLocation(),
1393 &Context.Idents.get("vtt"), T);
1394 Params.insert(Params.begin() + 1, VTTDecl);
1395 getStructorImplicitParamDecl(CGF) = VTTDecl;
// Prolog for instance methods: load the VTT implicit parameter if one was
// added, and pre-store 'this' into the return slot for 'this'-returning
// functions.
1399 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1400 /// Initialize the 'this' slot.
1403 /// Initialize the 'vtt' slot if needed.
1404 if (getStructorImplicitParamDecl(CGF)) {
1405 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1406 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1409 /// If this is a function that the ABI specifies returns 'this', initialize
1410 /// the return slot to 'this' at the start of the function.
1412 /// Unlike the setting of return types, this is done within the ABI
1413 /// implementation instead of by clients of CGCXXABI because:
1414 /// 1) getThisValue is currently protected
1415 /// 2) in theory, an ABI could implement 'this' returns some other way;
1416 /// HasThisReturn only specifies a contract, not the implementation
1417 if (HasThisReturn(CGF.CurGD))
1418 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
// At a constructor call site, insert the VTT argument (if the callee
// variant needs one) right after 'this'; returns the number of implicit
// arguments added (0 or 1).
1421 unsigned ItaniumCXXABI::addImplicitConstructorArgs(
1422 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1423 bool ForVirtualBase, bool Delegating, CallArgList &Args) {
1424 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1427 // Insert the implicit 'vtt' argument as the second argument.
// NOTE(review): the `return 0;` for the no-VTT case and the left-hand side
// of the VTT value below appear to be on lines missing from this extract.
1429 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1430 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1431 Args.insert(Args.begin() + 1,
1432 CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
1433 return 1; // Added one arg.
// Emit a non-virtual destructor call, passing the VTT (when required) as
// the implicit second argument.  Under -fapple-kext the callee is resolved
// through the kext virtual-dispatch path instead.
1436 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1437 const CXXDestructorDecl *DD,
1438 CXXDtorType Type, bool ForVirtualBase,
1439 bool Delegating, Address This) {
1440 GlobalDecl GD(DD, Type);
1441 llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1442 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1444 llvm::Value *Callee = nullptr;
1445 if (getContext().getLangOpts().AppleKext)
1446 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
// NOTE(review): the `if (!Callee)` guard before the fallback below appears
// to be on a line missing from this extract.
1449 Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
1451 CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
1452 This.getPointer(), VTT, VTTTy, nullptr);
// Emit the vtable definition for RD: build the initializer from the vtable
// layout, set linkage/comdat/visibility/alignment, and special-case the
// magic __cxxabiv1::__fundamental_type_info class.
1455 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1456 const CXXRecordDecl *RD) {
1457 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
// Already emitted? Nothing to do.
1458 if (VTable->hasInitializer())
1461 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1462 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1463 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1464 llvm::Constant *RTTI =
1465 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1467 // Create and set the initializer.
1468 llvm::Constant *Init = CGVT.CreateVTableInitializer(
1469 RD, VTLayout.vtable_component_begin(), VTLayout.getNumVTableComponents(),
1470 VTLayout.vtable_thunk_begin(), VTLayout.getNumVTableThunks(), RTTI);
1471 VTable->setInitializer(Init);
1473 // Set the correct linkage.
1474 VTable->setLinkage(Linkage);
1476 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1477 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1479 // Set the right visibility.
1480 CGM.setGlobalVisibility(VTable, RD);
1482 // Use pointer alignment for the vtable. Otherwise we would align them based
1483 // on the size of the initializer which doesn't make sense as only single
1485 unsigned PAlign = CGM.getTarget().getPointerAlign(0);
1486 VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());
1488 // If this is the magic class __cxxabiv1::__fundamental_type_info,
1489 // we will emit the typeinfo for the fundamental types. This is the
1490 // same behaviour as GCC.
1491 const DeclContext *DC = RD->getDeclContext();
1492 if (RD->getIdentifier() &&
1493 RD->getIdentifier()->isStr("__fundamental_type_info") &&
1494 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1495 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1496 DC->getParent()->isTranslationUnit())
1497 EmitFundamentalRTTIDescriptors();
1499 CGM.EmitVTableBitSetEntries(VTable, VTLayout);
// Whether storing this vptr requires applying a virtual-base offset: only
// when the subobject sits inside a virtual base and the current structor
// takes a VTT.
1502 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1503 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1504 if (Vptr.NearestVBase == nullptr)
1506 return NeedsVTTParameter(CGF.CurGD);
// Select the vtable address point to install inside a constructor or
// destructor: through the VTT when the class has virtual bases (or the
// subobject is inside one), otherwise the statically known address point.
1509 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1510 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1511 const CXXRecordDecl *NearestVBase) {
1513 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1514 NeedsVTTParameter(CGF.CurGD)) {
1515 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1518 return getVTableAddressPoint(Base, VTableClass);
// Compute the constant address point inside the vtable group for the given
// base subobject, as an inbounds GEP into the vtable global.
// NOTE(review): the return-type line of this definition is missing from
// this extract.
1522 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1523 const CXXRecordDecl *VTableClass) {
1524 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1526 // Find the appropriate vtable within the vtable group.
1527 uint64_t AddressPoint = CGM.getItaniumVTableContext()
1528 .getVTableLayout(VTableClass)
1529 .getAddressPoint(Base);
1530 llvm::Value *Indices[] = {
1531 llvm::ConstantInt::get(CGM.Int64Ty, 0),
1532 llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint)
1535 return llvm::ConstantExpr::getInBoundsGetElementPtr(VTable->getValueType(),
// Load the vtable address point for a base subobject out of the VTT: index
// to the subobject's secondary virtual pointer slot and load it.
1539 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1540 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1541 const CXXRecordDecl *NearestVBase) {
1542 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1543 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1545 // Get the secondary vpointer index.
1546 uint64_t VirtualPointerIndex =
1547 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1550 llvm::Value *VTT = CGF.LoadCXXVTT();
1551 if (VirtualPointerIndex)
1552 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1554 // And load the address point from the VTT.
1555 return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
// Constant-expression variant: in the Itanium ABI the address point is
// always a constant, so just forward to getVTableAddressPoint.
1558 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1559 BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1560 return getVTableAddressPoint(Base, VTableClass);
// Get or create the global variable for RD's vtable (as an [N x i8*]
// array), caching it in the VTables map and queueing it for deferred
// emission on first creation.
1563 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1564 CharUnits VPtrOffset) {
1565 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1567 llvm::GlobalVariable *&VTable = VTables[RD];
// NOTE(review): the early-return for an already-cached VTable appears to
// be on a line missing from this extract.
1571 // Queue up this v-table for possible deferred emission.
1572 CGM.addDeferredVTable(RD);
1574 SmallString<256> Name;
1575 llvm::raw_svector_ostream Out(Name);
1576 getMangleContext().mangleCXXVTable(RD, Out);
1578 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1579 llvm::ArrayType *ArrayType = llvm::ArrayType::get(
1580 CGM.Int8PtrTy, VTContext.getVTableLayout(RD).getNumVTableComponents());
1582 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1583 Name, ArrayType, llvm::GlobalValue::ExternalLinkage);
1584 VTable->setUnnamedAddr(true);
1586 if (RD->hasAttr<DLLImportAttr>())
1587 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1588 else if (RD->hasAttr<DLLExportAttr>())
1589 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
// Load a virtual function pointer for GD from the object's vtable, with an
// optional CFI vcall check; the vtable is accessed as a pointer to the
// function-pointer type.
1594 llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1598 SourceLocation Loc) {
1599 GD = GD.getCanonicalDecl();
1600 Ty = Ty->getPointerTo()->getPointerTo();
1601 auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1602 llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
1604 if (CGF.SanOpts.has(SanitizerKind::CFIVCall))
1605 CGF.EmitVTablePtrCheckForCall(MethodDecl, VTable,
1606 CodeGenFunction::CFITCK_VCall, Loc);
1608 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1609 llvm::Value *VFuncPtr =
1610 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
1611 return CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
// Emit a virtual destructor call (deleting or complete variant) by loading
// the callee from the vtable and invoking it on 'This'.
1614 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1615 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1616 Address This, const CXXMemberCallExpr *CE) {
1617 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1618 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1620 const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
1621 Dtor, getFromDtorType(DtorType));
1622 llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1623 llvm::Value *Callee =
1624 getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
1625 CE ? CE->getLocStart() : SourceLocation());
1627 CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
1628 This.getPointer(), /*ImplicitParam=*/nullptr,
// Emit the VTT (virtual table table) definition for a class with virtual
// bases, with the same linkage as its vtable.
1633 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1634 CodeGenVTables &VTables = CGM.getVTables();
1635 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1636 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
// Whether an available_externally copy of RD's vtable may be emitted
// speculatively (never under -fapple-kext; otherwise only when no used
// inline virtuals exist and the vtable is not hidden).
1639 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
1640 // We don't emit available_externally vtables if we are in -fapple-kext mode
1641 // because kext mode does not permit devirtualization.
1642 if (CGM.getLangOpts().AppleKext)
1645 // If we don't have any inline virtual functions, and if vtable is not hidden,
1646 // then we are safe to emit available_externally copy of vtable.
1647 // FIXME we can still emit a copy of the vtable if we
1648 // can emit definition of the inline functions.
1649 return !hasAnyUsedVirtualInlineFunction(RD) && !isVTableHidden(RD);
// Shared implementation of this/return pointer adjustments for thunks:
// apply a non-virtual byte offset and/or a virtual offset loaded from the
// vtable.  For 'this' adjustments the non-virtual part is applied before
// the virtual part; for return adjustments, after.
1651 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
1653 int64_t NonVirtualAdjustment,
1654 int64_t VirtualAdjustment,
1655 bool IsReturnAdjustment) {
// Nothing to adjust: return the pointer unchanged.
1656 if (!NonVirtualAdjustment && !VirtualAdjustment)
1657 return InitialPtr.getPointer();
1659 Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
1661 // In a base-to-derived cast, the non-virtual adjustment is applied first.
1662 if (NonVirtualAdjustment && !IsReturnAdjustment) {
1663 V = CGF.Builder.CreateConstInBoundsByteGEP(V,
1664 CharUnits::fromQuantity(NonVirtualAdjustment));
1667 // Perform the virtual adjustment if we have one.
1668 llvm::Value *ResultPtr;
1669 if (VirtualAdjustment) {
1670 llvm::Type *PtrDiffTy =
1671 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1673 Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
1674 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
1676 llvm::Value *OffsetPtr =
1677 CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
1679 OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
1681 // Load the adjustment offset from the vtable.
1682 llvm::Value *Offset =
1683 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1685 // Adjust our pointer.
1686 ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
1688 ResultPtr = V.getPointer();
1691 // In a derived-to-base conversion, the non-virtual adjustment is
1693 if (NonVirtualAdjustment && IsReturnAdjustment) {
1694 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
1695 NonVirtualAdjustment);
1698 // Cast back to the original type.
1699 return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
// Apply a thunk's 'this' adjustment (non-virtual first, then the vcall
// offset from the vtable).
1702 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
1704 const ThisAdjustment &TA) {
1705 return performTypeAdjustment(CGF, This, TA.NonVirtual,
1706 TA.Virtual.Itanium.VCallOffsetOffset,
1707 /*IsReturnAdjustment=*/false);
// Apply a thunk's return-value adjustment (vbase offset from the vtable
// first, then the non-virtual part).  NOTE(review): the return-type line
// of this definition is missing from this extract.
1711 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
1712 const ReturnAdjustment &RA) {
1713 return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
1714 RA.Virtual.Itanium.VBaseOffsetOffset,
1715 /*IsReturnAdjustment=*/true);
// ARM thunk return: destructor thunks return an indeterminate value, so
// substitute undef; everything else defers to the Itanium behavior.
1718 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
1719 RValue RV, QualType ResultType) {
1720 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
1721 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
1723 // Destructor thunks in the ARM ABI have indeterminate results.
1724 llvm::Type *T = CGF.ReturnValue.getElementType();
1725 RValue Undef = RValue::get(llvm::UndefValue::get(T));
1726 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
1729 /************************** Array allocation cookies **************************/
// Size of the array-new cookie: a size_t, padded up to the element
// alignment (the count is right-justified within that space).
1731 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1732 // The array cookie is a size_t; pad that up to the element alignment.
1733 // The cookie is actually right-justified in that space.
1734 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
1735 CGM.getContext().getTypeAlignInChars(elementType));
// Write the array-new cookie (the element count, right-justified in the
// cookie space) at the start of the allocation and return the adjusted
// pointer to the element storage.  Under ASan the cookie slot is reported
// to the runtime instead of being instrumented directly.
1738 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1740 llvm::Value *NumElements,
1741 const CXXNewExpr *expr,
1742 QualType ElementType) {
1743 assert(requiresArrayCookie(expr));
1745 unsigned AS = NewPtr.getAddressSpace();
1747 ASTContext &Ctx = getContext();
1748 CharUnits SizeSize = CGF.getSizeSize();
1750 // The size of the cookie.
1751 CharUnits CookieSize =
1752 std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
1753 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
1755 // Compute an offset to the cookie.
1756 Address CookiePtr = NewPtr;
1757 CharUnits CookieOffset = CookieSize - SizeSize;
1758 if (!CookieOffset.isZero())
1759 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
1761 // Write the number of elements into the appropriate slot.
1762 Address NumElementsPtr =
1763 CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
1764 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
1766 // Handle the array cookie specially in ASan.
1767 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
1768 expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
1769 // The store to the CookiePtr does not need to be instrumented.
1770 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
1771 llvm::FunctionType *FTy =
1772 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
// NOTE(review): the left-hand side of the F declaration appears to be on a
// line missing from this extract.
1774 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
1775 CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
1778 // Finally, compute a pointer to the actual data buffer by skipping
1779 // over the cookie completely.
1780 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
// Read the element count back out of an array-new cookie.  Under ASan the
// load is replaced by a runtime call so a poisoned cookie yields 0 rather
// than a bogus destructor count.
1783 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1785 CharUnits cookieSize) {
1786 // The element size is right-justified in the cookie.
1787 Address numElementsPtr = allocPtr;
1788 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
1789 if (!numElementsOffset.isZero())
// NOTE(review): the assignment target of this byte-GEP appears to be on a
// line missing from this extract.
1791 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
1793 unsigned AS = allocPtr.getAddressSpace();
1794 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1795 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
1796 return CGF.Builder.CreateLoad(numElementsPtr);
1797 // In asan mode emit a function call instead of a regular load and let the
1798 // run-time deal with it: if the shadow is properly poisoned return the
1799 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
1800 // We can't simply ignore this load using nosanitize metadata because
1801 // the metadata may be lost.
1802 llvm::FunctionType *FTy =
1803 llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
1805 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
1806 return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
1809 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
1810 // ARM says that the cookie is always:
1811 // struct array_cookie {
1812 // std::size_t element_size; // element_size != 0
1813 // std::size_t element_count;
1815 // But the base ABI doesn't give anything an alignment greater than
1816 // 8, so we can dismiss this as typical ABI-author blindness to
1817 // actual language complexity and round up to the element alignment.
1818 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
1819 CGM.getContext().getTypeAlignInChars(elementType));
1822 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
1824 llvm::Value *numElements,
1825 const CXXNewExpr *expr,
1826 QualType elementType) {
1827 assert(requiresArrayCookie(expr));
1829 // The cookie is always at the start of the buffer.
1830 Address cookie = newPtr;
1832 // The first element is the element size.
1833 cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
1834 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
1835 getContext().getTypeSizeInChars(elementType).getQuantity());
1836 CGF.Builder.CreateStore(elementSize, cookie);
1838 // The second element is the element count.
1839 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
1840 CGF.Builder.CreateStore(numElements, cookie);
1842 // Finally, compute a pointer to the actual data buffer by skipping
1843 // over the cookie completely.
1844 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
1845 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
1848 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
1850 CharUnits cookieSize) {
1851 // The number of elements is at offset sizeof(size_t) relative to
1852 // the allocated pointer.
1853 Address numElementsPtr
1854 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
1856 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
1857 return CGF.Builder.CreateLoad(numElementsPtr);
1860 /*********************** Static local initialization **************************/
1862 static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
1863 llvm::PointerType *GuardPtrTy) {
1864 // int __cxa_guard_acquire(__guard *guard_object);
1865 llvm::FunctionType *FTy =
1866 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
1867 GuardPtrTy, /*isVarArg=*/false);
1868 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
1869 llvm::AttributeSet::get(CGM.getLLVMContext(),
1870 llvm::AttributeSet::FunctionIndex,
1871 llvm::Attribute::NoUnwind));
1874 static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
1875 llvm::PointerType *GuardPtrTy) {
1876 // void __cxa_guard_release(__guard *guard_object);
1877 llvm::FunctionType *FTy =
1878 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1879 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
1880 llvm::AttributeSet::get(CGM.getLLVMContext(),
1881 llvm::AttributeSet::FunctionIndex,
1882 llvm::Attribute::NoUnwind));
1885 static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
1886 llvm::PointerType *GuardPtrTy) {
1887 // void __cxa_guard_abort(__guard *guard_object);
1888 llvm::FunctionType *FTy =
1889 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
1890 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
1891 llvm::AttributeSet::get(CGM.getLLVMContext(),
1892 llvm::AttributeSet::FunctionIndex,
1893 llvm::Attribute::NoUnwind));
1897 struct CallGuardAbort final : EHScopeStack::Cleanup {
1898 llvm::GlobalVariable *Guard;
1899 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
1901 void Emit(CodeGenFunction &CGF, Flags flags) override {
1902 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
1908 /// The ARM code here follows the Itanium code closely enough that we
1909 /// just special-case it at particular places.
1910 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
1912 llvm::GlobalVariable *var,
1913 bool shouldPerformInit) {
1914 CGBuilderTy &Builder = CGF.Builder;
1916 // We only need to use thread-safe statics for local non-TLS variables;
1917 // global initialization is always single-threaded.
1918 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
1919 D.isLocalVarDecl() && !D.getTLSKind();
1921 // If we have a global variable with internal linkage and thread-safe statics
1922 // are disabled, we can just let the guard variable be of type i8.
1923 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
1925 llvm::IntegerType *guardTy;
1926 CharUnits guardAlignment;
1927 if (useInt8GuardVariable) {
1928 guardTy = CGF.Int8Ty;
1929 guardAlignment = CharUnits::One();
1931 // Guard variables are 64 bits in the generic ABI and size width on ARM
1932 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
1933 if (UseARMGuardVarABI) {
1934 guardTy = CGF.SizeTy;
1935 guardAlignment = CGF.getSizeAlign();
1937 guardTy = CGF.Int64Ty;
1938 guardAlignment = CharUnits::fromQuantity(
1939 CGM.getDataLayout().getABITypeAlignment(guardTy));
1942 llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
1944 // Create the guard variable if we don't already have it (as we
1945 // might if we're double-emitting this function body).
1946 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
1948 // Mangle the name for the guard.
1949 SmallString<256> guardName;
1951 llvm::raw_svector_ostream out(guardName);
1952 getMangleContext().mangleStaticGuardVariable(&D, out);
1955 // Create the guard variable with a zero-initializer.
1956 // Just absorb linkage and visibility from the guarded variable.
1957 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
1958 false, var->getLinkage(),
1959 llvm::ConstantInt::get(guardTy, 0),
1961 guard->setVisibility(var->getVisibility());
1962 // If the variable is thread-local, so is its guard variable.
1963 guard->setThreadLocalMode(var->getThreadLocalMode());
1964 guard->setAlignment(guardAlignment.getQuantity());
1966 // The ABI says: "It is suggested that it be emitted in the same COMDAT
1967 // group as the associated data object." In practice, this doesn't work for
1968 // non-ELF object formats, so only do it for ELF.
1969 llvm::Comdat *C = var->getComdat();
1970 if (!D.isLocalVarDecl() && C &&
1971 CGM.getTarget().getTriple().isOSBinFormatELF()) {
1972 guard->setComdat(C);
1973 CGF.CurFn->setComdat(C);
1974 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
1975 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
1978 CGM.setStaticLocalDeclGuardAddress(&D, guard);
1981 Address guardAddr = Address(guard, guardAlignment);
1983 // Test whether the variable has completed initialization.
1985 // Itanium C++ ABI 3.3.2:
1986 // The following is pseudo-code showing how these functions can be used:
1987 // if (obj_guard.first_byte == 0) {
1988 // if ( __cxa_guard_acquire (&obj_guard) ) {
1990 // ... initialize the object ...;
1992 // __cxa_guard_abort (&obj_guard);
1995 // ... queue object destructor with __cxa_atexit() ...;
1996 // __cxa_guard_release (&obj_guard);
2000 // Load the first byte of the guard variable.
2001 llvm::LoadInst *LI =
2002 Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2005 // An implementation supporting thread-safety on multiprocessor
2006 // systems must also guarantee that references to the initialized
2007 // object do not occur before the load of the initialization flag.
2009 // In LLVM, we do this by marking the load Acquire.
2011 LI->setAtomic(llvm::Acquire);
2013 // For ARM, we should only check the first bit, rather than the entire byte:
2015 // ARM C++ ABI 3.2.3.1:
2016 // To support the potential use of initialization guard variables
2017 // as semaphores that are the target of ARM SWP and LDREX/STREX
2018 // synchronizing instructions we define a static initialization
2019 // guard variable to be a 4-byte aligned, 4-byte word with the
2020 // following inline access protocol.
2021 // #define INITIALIZED 1
2022 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2023 // if (__cxa_guard_acquire(&obj_guard))
2027 // and similarly for ARM64:
2029 // ARM64 C++ ABI 3.2.2:
2030 // This ABI instead only specifies the value bit 0 of the static guard
2031 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2032 // variable is not initialized and 1 when it is.
2034 (UseARMGuardVarABI && !useInt8GuardVariable)
2035 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2037 llvm::Value *isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
2039 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2040 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2042 // Check if the first byte of the guard variable is zero.
2043 Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);
2045 CGF.EmitBlock(InitCheckBlock);
2047 // Variables used when coping with thread-safe statics and exceptions.
2049 // Call __cxa_guard_acquire.
2051 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2053 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2055 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2056 InitBlock, EndBlock);
2058 // Call __cxa_guard_abort along the exceptional edge.
2059 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2061 CGF.EmitBlock(InitBlock);
2064 // Emit the initializer and add a global destructor if appropriate.
2065 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2068 // Pop the guard-abort cleanup if we pushed one.
2069 CGF.PopCleanupBlock();
2071 // Call __cxa_guard_release. This cannot throw.
2072 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2073 guardAddr.getPointer());
2075 Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
2078 CGF.EmitBlock(EndBlock);
2081 /// Register a global destructor using __cxa_atexit.
2082 static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2083 llvm::Constant *dtor,
2084 llvm::Constant *addr,
2086 const char *Name = "__cxa_atexit";
2088 const llvm::Triple &T = CGF.getTarget().getTriple();
2089 Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2092 // We're assuming that the destructor function is something we can
2093 // reasonably call with the default CC. Go ahead and cast it to the
2095 llvm::Type *dtorTy =
2096 llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2098 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2099 llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
2100 llvm::FunctionType *atexitTy =
2101 llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2103 // Fetch the actual function.
2104 llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2105 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
2106 fn->setDoesNotThrow();
2108 // Create a variable that binds the atexit to this shared object.
2109 llvm::Constant *handle =
2110 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2112 llvm::Value *args[] = {
2113 llvm::ConstantExpr::getBitCast(dtor, dtorTy),
2114 llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
2117 CGF.EmitNounwindRuntimeCall(atexit, args);
2120 /// Register a global destructor as best as we know how.
2121 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
2123 llvm::Constant *dtor,
2124 llvm::Constant *addr) {
2125 // Use __cxa_atexit if available.
2126 if (CGM.getCodeGenOpts().CXAAtExit)
2127 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2130 CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
2132 // In Apple kexts, we want to add a global destructor entry.
2133 // FIXME: shouldn't this be guarded by some variable?
2134 if (CGM.getLangOpts().AppleKext) {
2135 // Generate a global destructor entry.
2136 return CGM.AddCXXDtorEntry(dtor, addr);
2139 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2142 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2143 CodeGen::CodeGenModule &CGM) {
2144 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2145 // Darwin prefers to have references to thread local variables to go through
2146 // the thread wrapper instead of directly referencing the backing variable.
2147 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2148 CGM.getTarget().getTriple().isOSDarwin();
2151 /// Get the appropriate linkage for the wrapper function. This is essentially
2152 /// the weak form of the variable's linkage; every translation unit which needs
2153 /// the wrapper emits a copy, and we want the linker to merge them.
2154 static llvm::GlobalValue::LinkageTypes
2155 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2156 llvm::GlobalValue::LinkageTypes VarLinkage =
2157 CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);
2159 // For internal linkage variables, we don't need an external or weak wrapper.
2160 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2163 // If the thread wrapper is replaceable, give it appropriate linkage.
2164 if (isThreadWrapperReplaceable(VD, CGM))
2165 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2166 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2168 return llvm::GlobalValue::WeakODRLinkage;
2172 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2174 // Mangle the name for the thread_local wrapper function.
2175 SmallString<256> WrapperName;
2177 llvm::raw_svector_ostream Out(WrapperName);
2178 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2181 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2182 return cast<llvm::Function>(V);
2184 llvm::Type *RetTy = Val->getType();
2185 if (VD->getType()->isReferenceType())
2186 RetTy = RetTy->getPointerElementType();
2188 llvm::FunctionType *FnTy = llvm::FunctionType::get(RetTy, false);
2189 llvm::Function *Wrapper =
2190 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2191 WrapperName.str(), &CGM.getModule());
2192 // Always resolve references to the wrapper at link time.
2193 if (!Wrapper->hasLocalLinkage() && !(isThreadWrapperReplaceable(VD, CGM) &&
2194 !llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) &&
2195 !llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage())))
2196 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2198 if (isThreadWrapperReplaceable(VD, CGM)) {
2199 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2200 Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2205 void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2206 CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2207 ArrayRef<llvm::Function *> CXXThreadLocalInits,
2208 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2209 llvm::Function *InitFunc = nullptr;
2210 if (!CXXThreadLocalInits.empty()) {
2211 // Generate a guarded initialization function.
2212 llvm::FunctionType *FTy =
2213 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2214 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2215 InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
2218 llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2219 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2220 llvm::GlobalVariable::InternalLinkage,
2221 llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2222 Guard->setThreadLocal(true);
2224 CharUnits GuardAlign = CharUnits::One();
2225 Guard->setAlignment(GuardAlign.getQuantity());
2227 CodeGenFunction(CGM)
2228 .GenerateCXXGlobalInitFunc(InitFunc, CXXThreadLocalInits,
2229 Address(Guard, GuardAlign));
2231 for (const VarDecl *VD : CXXThreadLocals) {
2232 llvm::GlobalVariable *Var =
2233 cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2235 // Some targets require that all access to thread local variables go through
2236 // the thread wrapper. This means that we cannot attempt to create a thread
2237 // wrapper or a thread helper.
2238 if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition())
2241 // Mangle the name for the thread_local initialization function.
2242 SmallString<256> InitFnName;
2244 llvm::raw_svector_ostream Out(InitFnName);
2245 getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2248 // If we have a definition for the variable, emit the initialization
2249 // function as an alias to the global Init function (if any). Otherwise,
2250 // produce a declaration of the initialization function.
2251 llvm::GlobalValue *Init = nullptr;
2252 bool InitIsInitFunc = false;
2253 if (VD->hasDefinition()) {
2254 InitIsInitFunc = true;
2256 Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2259 // Emit a weak global function referring to the initialization function.
2260 // This function will not exist if the TU defining the thread_local
2261 // variable in question does not need any dynamic initialization for
2262 // its thread_local variables.
2263 llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2264 Init = llvm::Function::Create(
2265 FnTy, llvm::GlobalVariable::ExternalWeakLinkage, InitFnName.str(),
2270 Init->setVisibility(Var->getVisibility());
2272 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
2273 llvm::LLVMContext &Context = CGM.getModule().getContext();
2274 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2275 CGBuilderTy Builder(CGM, Entry);
2276 if (InitIsInitFunc) {
2278 Builder.CreateCall(Init);
2280 // Don't know whether we have an init function. Call it if it exists.
2281 llvm::Value *Have = Builder.CreateIsNotNull(Init);
2282 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2283 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2284 Builder.CreateCondBr(Have, InitBB, ExitBB);
2286 Builder.SetInsertPoint(InitBB);
2287 Builder.CreateCall(Init);
2288 Builder.CreateBr(ExitBB);
2290 Builder.SetInsertPoint(ExitBB);
2293 // For a reference, the result of the wrapper function is a pointer to
2294 // the referenced object.
2295 llvm::Value *Val = Var;
2296 if (VD->getType()->isReferenceType()) {
2297 CharUnits Align = CGM.getContext().getDeclAlign(VD);
2298 Val = Builder.CreateAlignedLoad(Val, Align);
2300 if (Val->getType() != Wrapper->getReturnType())
2301 Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2302 Val, Wrapper->getReturnType(), "");
2303 Builder.CreateRet(Val);
2307 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2309 QualType LValType) {
2310 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2311 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2313 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
2314 if (isThreadWrapperReplaceable(VD, CGF.CGM))
2315 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2318 if (VD->getType()->isReferenceType())
2319 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2321 LV = CGF.MakeAddrLValue(CallVal, LValType,
2322 CGF.getContext().getDeclAlign(VD));
2323 // FIXME: need setObjCGCLValueClass?
2327 /// Return whether the given global decl needs a VTT parameter, which it does
2328 /// if it's a base constructor or destructor with virtual bases.
2329 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2330 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2332 // We don't have any virtual bases, just return early.
2333 if (!MD->getParent()->getNumVBases())
2336 // Check if we have a base constructor.
2337 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2340 // Check if we have a base destructor.
2341 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2348 class ItaniumRTTIBuilder {
2349 CodeGenModule &CGM; // Per-module state.
2350 llvm::LLVMContext &VMContext;
2351 const ItaniumCXXABI &CXXABI; // Per-module state.
2353 /// Fields - The fields of the RTTI descriptor currently being built.
2354 SmallVector<llvm::Constant *, 16> Fields;
2356 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
2357 llvm::GlobalVariable *
2358 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
2360 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
2361 /// descriptor of the given type.
2362 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
2364 /// BuildVTablePointer - Build the vtable pointer for the given type.
2365 void BuildVTablePointer(const Type *Ty);
2367 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2368 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
2369 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
2371 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2372 /// classes with bases that do not satisfy the abi::__si_class_type_info
2373 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
2374 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
2376 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
2377 /// for pointer types.
2378 void BuildPointerTypeInfo(QualType PointeeTy);
2380 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
2381 /// type_info for an object type.
2382 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
2384 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
2385 /// struct, used for member pointer types.
2386 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
2389 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
2390 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
2392 // Pointer type info flags.
2394 /// PTI_Const - Type has const qualifier.
2397 /// PTI_Volatile - Type has volatile qualifier.
2400 /// PTI_Restrict - Type has restrict qualifier.
2403 /// PTI_Incomplete - Type is incomplete.
2404 PTI_Incomplete = 0x8,
2406 /// PTI_ContainingClassIncomplete - Containing class is incomplete.
2407 /// (in pointer to member).
2408 PTI_ContainingClassIncomplete = 0x10
2411 // VMI type info flags.
2413 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
2414 VMI_NonDiamondRepeat = 0x1,
2416 /// VMI_DiamondShaped - Class is diamond shaped.
2417 VMI_DiamondShaped = 0x2
2420 // Base class type info flags.
2422 /// BCTI_Virtual - Base class is virtual.
2425 /// BCTI_Public - Base class is public.
2429 /// BuildTypeInfo - Build the RTTI type info struct for the given type.
2431 /// \param Force - true to force the creation of this RTTI value
2432 llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
2436 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
2437 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
2438 SmallString<256> Name;
2439 llvm::raw_svector_ostream Out(Name);
2440 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
2442 // We know that the mangled name of the type starts at index 4 of the
2443 // mangled name of the typename, so we can just index into it in order to
2444 // get the mangled name of the type.
2445 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
2448 llvm::GlobalVariable *GV =
2449 CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
2451 GV->setInitializer(Init);
2457 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
2458 // Mangle the RTTI name.
2459 SmallString<256> Name;
2460 llvm::raw_svector_ostream Out(Name);
2461 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2463 // Look for an existing global.
2464 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
2467 // Create a new global variable.
2468 GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
2470 llvm::GlobalValue::ExternalLinkage, nullptr,
2472 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2473 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2474 if (RD->hasAttr<DLLImportAttr>())
2475 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
2479 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
2482 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
2483 /// info for that type is defined in the standard library.
2484 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
2485 // Itanium C++ ABI 2.9.2:
2486 // Basic type information (e.g. for "int", "bool", etc.) will be kept in
2487 // the run-time support library. Specifically, the run-time support
2488 // library should contain type_info objects for the types X, X* and
2489 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
2490 // unsigned char, signed char, short, unsigned short, int, unsigned int,
2491 // long, unsigned long, long long, unsigned long long, float, double,
2492 // long double, char16_t, char32_t, and the IEEE 754r decimal and
2493 // half-precision floating point types.
2494 switch (Ty->getKind()) {
2495 case BuiltinType::Void:
2496 case BuiltinType::NullPtr:
2497 case BuiltinType::Bool:
2498 case BuiltinType::WChar_S:
2499 case BuiltinType::WChar_U:
2500 case BuiltinType::Char_U:
2501 case BuiltinType::Char_S:
2502 case BuiltinType::UChar:
2503 case BuiltinType::SChar:
2504 case BuiltinType::Short:
2505 case BuiltinType::UShort:
2506 case BuiltinType::Int:
2507 case BuiltinType::UInt:
2508 case BuiltinType::Long:
2509 case BuiltinType::ULong:
2510 case BuiltinType::LongLong:
2511 case BuiltinType::ULongLong:
2512 case BuiltinType::Half:
2513 case BuiltinType::Float:
2514 case BuiltinType::Double:
2515 case BuiltinType::LongDouble:
2516 case BuiltinType::Char16:
2517 case BuiltinType::Char32:
2518 case BuiltinType::Int128:
2519 case BuiltinType::UInt128:
2520 case BuiltinType::OCLImage1d:
2521 case BuiltinType::OCLImage1dArray:
2522 case BuiltinType::OCLImage1dBuffer:
2523 case BuiltinType::OCLImage2d:
2524 case BuiltinType::OCLImage2dArray:
2525 case BuiltinType::OCLImage2dDepth:
2526 case BuiltinType::OCLImage2dArrayDepth:
2527 case BuiltinType::OCLImage2dMSAA:
2528 case BuiltinType::OCLImage2dArrayMSAA:
2529 case BuiltinType::OCLImage2dMSAADepth:
2530 case BuiltinType::OCLImage2dArrayMSAADepth:
2531 case BuiltinType::OCLImage3d:
2532 case BuiltinType::OCLSampler:
2533 case BuiltinType::OCLEvent:
2534 case BuiltinType::OCLClkEvent:
2535 case BuiltinType::OCLQueue:
2536 case BuiltinType::OCLNDRange:
2537 case BuiltinType::OCLReserveID:
2540 case BuiltinType::Dependent:
2541 #define BUILTIN_TYPE(Id, SingletonId)
2542 #define PLACEHOLDER_TYPE(Id, SingletonId) \
2543 case BuiltinType::Id:
2544 #include "clang/AST/BuiltinTypes.def"
2545 llvm_unreachable("asking for RRTI for a placeholder type!");
2547 case BuiltinType::ObjCId:
2548 case BuiltinType::ObjCClass:
2549 case BuiltinType::ObjCSel:
2550 llvm_unreachable("FIXME: Objective-C types are unsupported!");
2553 llvm_unreachable("Invalid BuiltinType Kind!");
2556 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
2557 QualType PointeeTy = PointerTy->getPointeeType();
2558 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
2562 // Check the qualifiers.
2563 Qualifiers Quals = PointeeTy.getQualifiers();
2564 Quals.removeConst();
2569 return TypeInfoIsInStandardLibrary(BuiltinTy);
2572 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
2573 /// information for the given type exists in the standard library.
2574 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
2575 // Type info for builtin types is defined in the standard library.
2576 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
2577 return TypeInfoIsInStandardLibrary(BuiltinTy);
2579 // Type info for some pointer types to builtin types is defined in the
2580 // standard library.
2581 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2582 return TypeInfoIsInStandardLibrary(PointerTy);
2587 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
2588 /// the given type exists somewhere else, and that we should not emit the type
2589 /// information in this translation unit. Assumes that it is not a
2590 /// standard-library type.
2591 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
2593 ASTContext &Context = CGM.getContext();
2595 // If RTTI is disabled, assume it might be disabled in the
2596 // translation unit that defines any potential key function, too.
2597 if (!Context.getLangOpts().RTTI) return false;
2599 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2600 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
2601 if (!RD->hasDefinition())
2604 if (!RD->isDynamicClass())
2607 // FIXME: this may need to be reconsidered if the key function
2609 // N.B. We must always emit the RTTI data ourselves if there exists a key
2611 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
2612 if (CGM.getVTables().isVTableExternal(RD))
2613 return IsDLLImport ? false : true;
2622 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
2623 static bool IsIncompleteClassType(const RecordType *RecordTy) {
2624 return !RecordTy->getDecl()->isCompleteDefinition();
2627 /// ContainsIncompleteClassType - Returns whether the given type contains an
2628 /// incomplete class type. This is true if
2630 /// * The given type is an incomplete class type.
2631 /// * The given type is a pointer type whose pointee type contains an
2632 /// incomplete class type.
2633 /// * The given type is a member pointer type whose class is an incomplete
2635 /// * The given type is a member pointer type whoise pointee type contains an
2636 /// incomplete class type.
2637 /// is an indirect or direct pointer to an incomplete class type.
2638 static bool ContainsIncompleteClassType(QualType Ty) {
2639 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
2640 if (IsIncompleteClassType(RecordTy))
2644 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
2645 return ContainsIncompleteClassType(PointerTy->getPointeeType());
2647 if (const MemberPointerType *MemberPointerTy =
2648 dyn_cast<MemberPointerType>(Ty)) {
2649 // Check if the class type is incomplete.
2650 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
2651 if (IsIncompleteClassType(ClassType))
2654 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
2660 // CanUseSingleInheritance - Return whether the given record decl has a "single,
2661 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
2662 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
2663 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
2664 // Check the number of bases.
2665 if (RD->getNumBases() != 1)
2669 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
2671 // Check that the base is not virtual.
2672 if (Base->isVirtual())
2675 // Check that the base is public.
2676 if (Base->getAccessSpecifier() != AS_public)
2679 // Check that the class is dynamic iff the base is.
2680 const CXXRecordDecl *BaseDecl =
2681 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
2682 if (!BaseDecl->isEmpty() &&
2683 BaseDecl->isDynamicClass() != RD->isDynamicClass())
// BuildVTablePointer - Compute the vtable pointer (the first field of every
// Itanium type_info object) for the given type and push it onto Fields.
2689 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
2690 // abi::__class_type_info.
2691 static const char * const ClassTypeInfo =
2692 "_ZTVN10__cxxabiv117__class_type_infoE";
2693 // abi::__si_class_type_info.
2694 static const char * const SIClassTypeInfo =
2695 "_ZTVN10__cxxabiv120__si_class_type_infoE";
2696 // abi::__vmi_class_type_info.
2697 static const char * const VMIClassTypeInfo =
2698 "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
2700 const char *VTableName = nullptr;
// Select the mangled name of the C++ ABI library vtable that matches the
// dynamic type of this type's type_info object.
2702 switch (Ty->getTypeClass()) {
2703 #define TYPE(Class, Base)
2704 #define ABSTRACT_TYPE(Class, Base)
2705 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
2706 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
2707 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2708 #include "clang/AST/TypeNodes.def"
2709 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
2711 case Type::LValueReference:
2712 case Type::RValueReference:
2713 llvm_unreachable("References shouldn't get here");
2716 llvm_unreachable("Undeduced auto type shouldn't get here");
2719 llvm_unreachable("Pipe types shouldn't get here");
2722 // GCC treats vector and complex types as fundamental types.
2724 case Type::ExtVector:
2727 // FIXME: GCC treats block pointers as fundamental types?!
2728 case Type::BlockPointer:
2729 // abi::__fundamental_type_info.
2730 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
2733 case Type::ConstantArray:
2734 case Type::IncompleteArray:
2735 case Type::VariableArray:
2736 // abi::__array_type_info.
2737 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
2740 case Type::FunctionNoProto:
2741 case Type::FunctionProto:
2742 // abi::__function_type_info.
2743 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
2747 // abi::__enum_type_info.
2748 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
2751 case Type::Record: {
2752 const CXXRecordDecl *RD =
2753 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
// Classification mirrors BuildTypeInfo: no definition or no bases ->
// __class_type_info; single eligible base -> __si_class_type_info;
// otherwise __vmi_class_type_info.
2755 if (!RD->hasDefinition() || !RD->getNumBases()) {
2756 VTableName = ClassTypeInfo;
2757 } else if (CanUseSingleInheritance(RD)) {
2758 VTableName = SIClassTypeInfo;
2760 VTableName = VMIClassTypeInfo;
2766 case Type::ObjCObject:
2767 // Ignore protocol qualifiers.
2768 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
2770 // Handle id and Class.
2771 if (isa<BuiltinType>(Ty)) {
2772 VTableName = ClassTypeInfo;
2776 assert(isa<ObjCInterfaceType>(Ty));
2779 case Type::ObjCInterface:
2780 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
2781 VTableName = SIClassTypeInfo;
2783 VTableName = ClassTypeInfo;
2787 case Type::ObjCObjectPointer:
2789 // abi::__pointer_type_info.
2790 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
2793 case Type::MemberPointer:
2794 // abi::__pointer_to_member_type_info.
2795 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
// Declare (or reuse) the vtable global; its definition lives in the C++
// runtime library.
2799 llvm::Constant *VTable =
2800 CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
2802 llvm::Type *PtrDiffTy =
2803 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
2805 // The vtable address point is 2.
2806 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
2808 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
2809 VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
2811 Fields.push_back(VTable);
2814 /// \brief Return the linkage that the type info and type info name constants
2815 /// should have for the given type.
2816 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
2818 // Itanium C++ ABI 2.9.5p7:
2819 // In addition, it and all of the intermediate abi::__pointer_type_info
2820 // structs in the chain down to the abi::__class_type_info for the
2821 // incomplete class type must be prevented from resolving to the
2822 // corresponding type_info structs for the complete class type, possibly
2823 // by making them local static objects. Finally, a dummy class RTTI is
2824 // generated for the incomplete type that will not resolve to the final
2825 // complete class RTTI (because the latter need not exist), possibly by
2826 // making it a local static object.
2827 if (ContainsIncompleteClassType(Ty))
2828 return llvm::GlobalValue::InternalLinkage;
// Otherwise map the semantic linkage of the type onto an LLVM linkage.
2830 switch (Ty->getLinkage()) {
2832 case InternalLinkage:
2833 case UniqueExternalLinkage:
2834 return llvm::GlobalValue::InternalLinkage;
2836 case VisibleNoLinkage:
2837 case ExternalLinkage:
2838 if (!CGM.getLangOpts().RTTI) {
2839 // RTTI is not enabled, which means that this type info struct is going
2840 // to be used for exception handling. Give it linkonce_odr linkage.
2841 return llvm::GlobalValue::LinkOnceODRLinkage;
2844 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
2845 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
2846 if (RD->hasAttr<WeakAttr>())
2847 return llvm::GlobalValue::WeakODRLinkage;
// For dynamic classes, keep the RTTI's linkage in sync with the vtable's.
2848 if (RD->isDynamicClass()) {
2849 llvm::GlobalValue::LinkageTypes LT = CGM.getVTableLinkage(RD);
2850 // MinGW won't export the RTTI information when there is a key function.
2851 // Make sure we emit our own copy instead of attempting to dllimport it.
2852 if (RD->hasAttr<DLLImportAttr>() &&
2853 llvm::GlobalValue::isAvailableExternallyLinkage(LT))
2854 LT = llvm::GlobalValue::LinkOnceODRLinkage;
2859 return llvm::GlobalValue::LinkOnceODRLinkage;
2862 llvm_unreachable("Invalid linkage!");
// BuildTypeInfo - Build (or reuse) the type_info global for \p Ty and return
// it as an i8*. If \p Force is set, a descriptor is built even when an
// external one could otherwise be used.
2865 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
2866 // We want to operate on the canonical type.
2867 Ty = CGM.getContext().getCanonicalType(Ty);
2869 // Check if we've already emitted an RTTI descriptor for this type.
2870 SmallString<256> Name;
2871 llvm::raw_svector_ostream Out(Name);
2872 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2874 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
2875 if (OldGV && !OldGV->isDeclaration()) {
2876 assert(!OldGV->hasAvailableExternallyLinkage() &&
2877 "available_externally typeinfos not yet implemented");
2879 return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
2882 // Check if there is already an external RTTI descriptor for this type.
2883 bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
2884 if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
2885 return GetAddrOfExternalRTTIDescriptor(Ty);
2887 // Emit the standard library with external linkage.
2888 llvm::GlobalVariable::LinkageTypes Linkage;
2890 Linkage = llvm::GlobalValue::ExternalLinkage;
2892 Linkage = getTypeInfoLinkage(CGM, Ty);
2894 // Add the vtable pointer.
2895 BuildVTablePointer(cast<Type>(Ty));
// Add the name field, which points at the _ZTS type-name string.
2898 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
2899 llvm::Constant *TypeNameField;
2901 // If we're supposed to demote the visibility, be sure to set a flag
2902 // to use a string comparison for type_info comparisons.
2903 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
2904 CXXABI.classifyRTTIUniqueness(Ty, Linkage);
2905 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
2906 // The flag is the sign bit, which on ARM64 is defined to be clear
2907 // for global pointers. This is very ARM64-specific.
2908 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
2909 llvm::Constant *flag =
2910 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
2911 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
2913 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
2915 TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
2917 Fields.push_back(TypeNameField);
// Append the fields specific to the dynamic type of the type_info object
// (the classification parallels BuildVTablePointer's switch).
2919 switch (Ty->getTypeClass()) {
2920 #define TYPE(Class, Base)
2921 #define ABSTRACT_TYPE(Class, Base)
2922 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
2923 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
2924 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2925 #include "clang/AST/TypeNodes.def"
2926 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
2928 // GCC treats vector types as fundamental types.
2931 case Type::ExtVector:
2933 case Type::BlockPointer:
2934 // Itanium C++ ABI 2.9.5p4:
2935 // abi::__fundamental_type_info adds no data members to std::type_info.
2938 case Type::LValueReference:
2939 case Type::RValueReference:
2940 llvm_unreachable("References shouldn't get here");
2943 llvm_unreachable("Undeduced auto type shouldn't get here");
2946 llvm_unreachable("Pipe type shouldn't get here");
2948 case Type::ConstantArray:
2949 case Type::IncompleteArray:
2950 case Type::VariableArray:
2951 // Itanium C++ ABI 2.9.5p5:
2952 // abi::__array_type_info adds no data members to std::type_info.
2955 case Type::FunctionNoProto:
2956 case Type::FunctionProto:
2957 // Itanium C++ ABI 2.9.5p5:
2958 // abi::__function_type_info adds no data members to std::type_info.
2962 // Itanium C++ ABI 2.9.5p5:
2963 // abi::__enum_type_info adds no data members to std::type_info.
2966 case Type::Record: {
2967 const CXXRecordDecl *RD =
2968 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
2969 if (!RD->hasDefinition() || !RD->getNumBases()) {
2970 // We don't need to emit any fields.
2974 if (CanUseSingleInheritance(RD))
2975 BuildSIClassTypeInfo(RD);
2977 BuildVMIClassTypeInfo(RD);
2982 case Type::ObjCObject:
2983 case Type::ObjCInterface:
2984 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
2987 case Type::ObjCObjectPointer:
2988 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
2992 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
2995 case Type::MemberPointer:
2996 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3000 // No fields, at least for the moment.
// Materialize the accumulated fields as an anonymous constant struct.
3004 llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3006 llvm::Module &M = CGM.getModule();
3007 llvm::GlobalVariable *GV =
3008 new llvm::GlobalVariable(M, Init->getType(),
3009 /*Constant=*/true, Linkage, Init, Name);
3011 // If there's already an old global variable, replace it with the new one.
3013 GV->takeName(OldGV);
3014 llvm::Constant *NewPtr =
3015 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3016 OldGV->replaceAllUsesWith(NewPtr);
3017 OldGV->eraseFromParent();
// Weak-for-linker definitions go into a COMDAT so duplicate copies across
// translation units get deduplicated.
3020 if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3021 GV->setComdat(M.getOrInsertComdat(GV->getName()));
3023 // The Itanium ABI specifies that type_info objects must be globally
3024 // unique, with one exception: if the type is an incomplete class
3025 // type or a (possibly indirect) pointer to one. That exception
3026 // affects the general case of comparing type_info objects produced
3027 // by the typeid operator, which is why the comparison operators on
3028 // std::type_info generally use the type_info name pointers instead
3029 // of the object addresses. However, the language's built-in uses
3030 // of RTTI generally require class types to be complete, even when
3031 // manipulating pointers to those class types. This allows the
3032 // implementation of dynamic_cast to rely on address equality tests,
3033 // which is much faster.
3035 // All of this is to say that it's important that both the type_info
3036 // object and the type_info name be uniqued when weakly emitted.
3038 // Give the type_info object and name the formal visibility of the
3040 llvm::GlobalValue::VisibilityTypes llvmVisibility;
3041 if (llvm::GlobalValue::isLocalLinkage(Linkage))
3042 // If the linkage is local, only default visibility makes sense.
3043 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3044 else if (RTTIUniqueness == ItaniumCXXABI::RUK_NonUniqueHidden)
3045 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3047 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3048 TypeName->setVisibility(llvmVisibility);
3049 GV->setVisibility(llvmVisibility);
3051 return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3054 /// ComputeQualifierFlags - Compute the pointer type info flags from the
3055 /// given qualifier.
3056 static unsigned ComputeQualifierFlags(Qualifiers Quals) {
// Map each cv/restrict qualifier onto the corresponding
// abi::__pbase_type_info __flags bit.
3059 if (Quals.hasConst())
3060 Flags |= ItaniumRTTIBuilder::PTI_Const;
3061 if (Quals.hasVolatile())
3062 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3063 if (Quals.hasRestrict())
3064 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
3069 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3070 /// for the given Objective-C object type.
3071 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
// Only the base type matters for RTTI; protocol qualifiers are dropped.
3073 const Type *T = OT->getBaseType().getTypePtr();
3074 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3076 // The builtin types are abi::__class_type_infos and don't require
3078 if (isa<BuiltinType>(T)) return;
3080 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3081 ObjCInterfaceDecl *Super = Class->getSuperClass();
3083 // Root classes are also __class_type_info.
3086 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3088 // Everything else is single inheritance.
3089 llvm::Constant *BaseTypeInfo =
3090 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3091 Fields.push_back(BaseTypeInfo);
3094 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3095 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3096 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3097 // Itanium C++ ABI 2.9.5p6b:
3098 // It adds to abi::__class_type_info a single member pointing to the
3099 // type_info structure for the base type,
// RD has exactly one eligible base here (see CanUseSingleInheritance).
3100 llvm::Constant *BaseTypeInfo =
3101 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3102 Fields.push_back(BaseTypeInfo);
3106 /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3107 /// a class hierarchy.
// Consulted by ComputeVMIClassTypeInfoFlags to detect repeated and
// diamond-shaped inheritance.
3109 llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3110 llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3114 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3115 /// abi::__vmi_class_type_info.
3117 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3122 const CXXRecordDecl *BaseDecl =
3123 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl())
3125 if (Base->isVirtual()) {
3126 // Mark the virtual base as seen.
3127 if (!Bases.VirtualBases.insert(BaseDecl).second) {
3128 // If this virtual base has been seen before, then the class is diamond
3130 Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
// Also seen as a non-virtual base: non-diamond repeated inheritance.
3132 if (Bases.NonVirtualBases.count(BaseDecl))
3133 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3136 // Mark the non-virtual base as seen.
3137 if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3138 // If this non-virtual base has been seen before, then the class has non-
3139 // diamond shaped repeated inheritance.
3140 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
// Also seen as a virtual base: likewise a non-diamond repeat.
3142 if (Bases.VirtualBases.count(BaseDecl))
3143 Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
// Recurse: the __flags word describes direct and indirect bases alike.
3148 for (const auto &I : BaseDecl->bases())
3149 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
// Entry point: OR together the VMI flags contributed by every direct (and,
// recursively, indirect) base of RD.
3154 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3159 for (const auto &I : RD->bases())
3160 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3165 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3166 /// classes with bases that do not satisfy the abi::__si_class_type_info
3167 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3168 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3169 llvm::Type *UnsignedIntLTy =
3170 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3172 // Itanium C++ ABI 2.9.5p6c:
3173 // __flags is a word with flags describing details about the class
3174 // structure, which may be referenced by using the __flags_masks
3175 // enumeration. These flags refer to both direct and indirect bases.
3176 unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3177 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3179 // Itanium C++ ABI 2.9.5p6c:
3180 // __base_count is a word with the number of direct proper base class
3181 // descriptions that follow.
3182 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
// No bases: the descriptor ends after __flags and __base_count.
3184 if (!RD->getNumBases())
3187 llvm::Type *LongLTy =
3188 CGM.getTypes().ConvertType(CGM.getContext().LongTy);
3190 // Now add the base class descriptions.
3192 // Itanium C++ ABI 2.9.5p6c:
3193 // __base_info[] is an array of base class descriptions -- one for every
3194 // direct proper base. Each description is of the type:
3196 // struct abi::__base_class_type_info {
3198 // const __class_type_info *__base_type;
3199 // long __offset_flags;
3201 // enum __offset_flags_masks {
3202 // __virtual_mask = 0x1,
3203 // __public_mask = 0x2,
3204 // __offset_shift = 8
3207 for (const auto &Base : RD->bases()) {
3208 // The __base_type member points to the RTTI for the base type.
3209 Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3211 const CXXRecordDecl *BaseDecl =
3212 cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
3214 int64_t OffsetFlags = 0;
3216 // All but the lower 8 bits of __offset_flags are a signed offset.
3217 // For a non-virtual base, this is the offset in the object of the base
3218 // subobject. For a virtual base, this is the offset in the virtual table of
3219 // the virtual base offset for the virtual base referenced (negative).
3221 if (Base.isVirtual())
3223 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3225 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3226 Offset = Layout.getBaseClassOffset(BaseDecl);
// Shift the offset into the high bits; the low byte carries the flags.
3229 OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3231 // The low-order byte of __offset_flags contains flags, as given by the
3232 // masks from the enumeration __offset_flags_masks.
3233 if (Base.isVirtual())
3234 OffsetFlags |= BCTI_Virtual;
3235 if (Base.getAccessSpecifier() == AS_public)
3236 OffsetFlags |= BCTI_Public;
3238 Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags));
3242 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
3243 /// used for pointer types.
3244 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
// Strip qualifiers off the pointee; they are encoded in __flags instead.
3246 QualType UnqualifiedPointeeTy =
3247 CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
3249 // Itanium C++ ABI 2.9.5p7:
3250 // __flags is a flag word describing the cv-qualification and other
3251 // attributes of the type pointed to
3252 unsigned Flags = ComputeQualifierFlags(Quals);
3254 // Itanium C++ ABI 2.9.5p7:
3255 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
3256 // incomplete class type, the incomplete target type flag is set.
3257 if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
3258 Flags |= PTI_Incomplete;
3260 llvm::Type *UnsignedIntLTy =
3261 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3262 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3264 // Itanium C++ ABI 2.9.5p7:
3265 // __pointee is a pointer to the std::type_info derivation for the
3266 // unqualified type being pointed to.
3267 llvm::Constant *PointeeTypeInfo =
3268 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(UnqualifiedPointeeTy);
3269 Fields.push_back(PointeeTypeInfo);
3272 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3273 /// struct, used for member pointer types.
3275 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
3276 QualType PointeeTy = Ty->getPointeeType();
// Strip qualifiers off the pointee; they are encoded in __flags instead.
3279 QualType UnqualifiedPointeeTy =
3280 CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
3282 // Itanium C++ ABI 2.9.5p7:
3283 // __flags is a flag word describing the cv-qualification and other
3284 // attributes of the type pointed to.
3285 unsigned Flags = ComputeQualifierFlags(Quals);
3287 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
3289 // Itanium C++ ABI 2.9.5p7:
3290 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
3291 // incomplete class type, the incomplete target type flag is set.
3292 if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
3293 Flags |= PTI_Incomplete;
// The containing class being incomplete is flagged separately.
3295 if (IsIncompleteClassType(ClassType))
3296 Flags |= PTI_ContainingClassIncomplete;
3298 llvm::Type *UnsignedIntLTy =
3299 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3300 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3302 // Itanium C++ ABI 2.9.5p7:
3303 // __pointee is a pointer to the std::type_info derivation for the
3304 // unqualified type being pointed to.
3305 llvm::Constant *PointeeTypeInfo =
3306 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(UnqualifiedPointeeTy);
3307 Fields.push_back(PointeeTypeInfo);
3309 // Itanium C++ ABI 2.9.5p9:
3310 // __context is a pointer to an abi::__class_type_info corresponding to the
3311 // class type containing the member pointed to
3312 // (e.g., the "A" in "int A::*").
3314 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
// Public CGCXXABI entry point: build (or fetch) the RTTI descriptor for Ty.
3317 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
3318 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
// Emit the type_info for a fundamental type, together with the type_infos
// for "T*" and "const T*".
3321 void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type) {
3322 QualType PointerType = getContext().getPointerType(Type);
3323 QualType PointerTypeConst = getContext().getPointerType(Type.withConst());
// Pass Force=true so a descriptor is built even where an external one
// would normally be preferred.
3324 ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, true);
3325 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, true);
3326 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, true);
// Emit RTTI descriptors for the whole set of C++ fundamental types.
3329 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors() {
3330 QualType FundamentalTypes[] = {
3331 getContext().VoidTy, getContext().NullPtrTy,
3332 getContext().BoolTy, getContext().WCharTy,
3333 getContext().CharTy, getContext().UnsignedCharTy,
3334 getContext().SignedCharTy, getContext().ShortTy,
3335 getContext().UnsignedShortTy, getContext().IntTy,
3336 getContext().UnsignedIntTy, getContext().LongTy,
3337 getContext().UnsignedLongTy, getContext().LongLongTy,
3338 getContext().UnsignedLongLongTy, getContext().HalfTy,
3339 getContext().FloatTy, getContext().DoubleTy,
3340 getContext().LongDoubleTy, getContext().Char16Ty,
3341 getContext().Char32Ty,
3343 for (const QualType &FundamentalType : FundamentalTypes)
3344 EmitFundamentalRTTIDescriptor(FundamentalType);
3347 /// What sort of uniqueness rules should we use for the RTTI for the
3349 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
3350 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
// If this ABI guarantees unique RTTI objects, address comparison suffices.
3351 if (shouldRTTIBeUnique())
3354 // It's only necessary for linkonce_odr or weak_odr linkage.
3355 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
3356 Linkage != llvm::GlobalValue::WeakODRLinkage)
3359 // It's only necessary with default visibility.
3360 if (CanTy->getVisibility() != DefaultVisibility)
3363 // If we're not required to publish this symbol, hide it.
3364 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3365 return RUK_NonUniqueHidden;
3367 // If we're required to publish this symbol, as we might be under an
3368 // explicit instantiation, leave it with default visibility but
3369 // enable string-comparisons.
3370 assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
3371 return RUK_NonUniqueVisible;
3374 // Find out how to codegen the complete destructor and constructor
// Emit:   emit a separate body for the complete variant.
// RAUW:   don't emit it; redirect uses of the complete symbol to the base
//         variant (see emitCXXStructor's addReplacement path).
// Alias:  emit the complete variant as an alias of the base variant.
// COMDAT: emit both variants in one COMDAT group (ELF C5/D5 comdat).
3376 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
3378 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
3379 const CXXMethodDecl *MD) {
3380 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
3381 return StructorCodegen::Emit;
3383 // The complete and base structors are not equivalent if there are any virtual
3384 // bases, so emit separate functions.
3385 if (MD->getParent()->getNumVBases())
3386 return StructorCodegen::Emit;
3388 GlobalDecl AliasDecl;
3389 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
3390 AliasDecl = GlobalDecl(DD, Dtor_Complete);
3392 const auto *CD = cast<CXXConstructorDecl>(MD);
3393 AliasDecl = GlobalDecl(CD, Ctor_Complete);
3395 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3397 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
3398 return StructorCodegen::RAUW;
3400 // FIXME: Should we allow available_externally aliases?
3401 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
3402 return StructorCodegen::RAUW;
3404 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
3405 // Only ELF supports COMDATs with arbitrary names (C5/D5).
3406 if (CGM.getTarget().getTriple().isOSBinFormatELF())
3407 return StructorCodegen::COMDAT;
3408 return StructorCodegen::Emit;
3411 return StructorCodegen::Alias;
// Emit AliasDecl as a global alias whose aliasee is TargetDecl's function,
// replacing any pre-existing declaration under the alias's mangled name.
3414 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
3415 GlobalDecl AliasDecl,
3416 GlobalDecl TargetDecl) {
3417 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3419 StringRef MangledName = CGM.getMangledName(AliasDecl);
3420 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
// Nothing to do if a definition already exists under this name.
3421 if (Entry && !Entry->isDeclaration())
3424 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
3426 // Create the alias with no name.
3427 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
3429 // Switch any previous uses to the alias.
3431 assert(Entry->getType() == Aliasee->getType() &&
3432 "declaration exists with different type");
3433 Alias->takeName(Entry);
3434 Entry->replaceAllUsesWith(Alias);
3435 Entry->eraseFromParent();
3437 Alias->setName(MangledName);
3440 // Finally, set up the alias with its proper name and attributes.
3441 CGM.setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
// Emit one variant (complete or base) of a constructor or destructor,
// choosing between a real body, an alias, a RAUW replacement, or a COMDAT
// grouping per getCodegenToUse.
3444 void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
3445 StructorType Type) {
// Exactly one of CD/DD is non-null: ctors and dtors are handled uniformly.
3446 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
3447 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
3449 StructorCodegen CGType = getCodegenToUse(CGM, MD);
3451 if (Type == StructorType::Complete) {
3452 GlobalDecl CompleteDecl;
3453 GlobalDecl BaseDecl;
3455 CompleteDecl = GlobalDecl(CD, Ctor_Complete);
3456 BaseDecl = GlobalDecl(CD, Ctor_Base);
3458 CompleteDecl = GlobalDecl(DD, Dtor_Complete);
3459 BaseDecl = GlobalDecl(DD, Dtor_Base);
// The complete variant can be expressed in terms of the base variant:
// either as an alias of it, or by redirecting all uses to it.
3462 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
3463 emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
3467 if (CGType == StructorCodegen::RAUW) {
3468 StringRef MangledName = CGM.getMangledName(CompleteDecl);
3469 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
3470 CGM.addReplacement(MangledName, Aliasee);
3475 // The base destructor is equivalent to the base destructor of its
3476 // base class if there is exactly one non-virtual base class with a
3477 // non-trivial destructor, there are no fields with a non-trivial
3478 // destructor, and the body of the destructor is trivial.
3479 if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
3480 !CGM.TryEmitBaseDestructorAsAlias(DD))
3483 llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);
// For the COMDAT strategy, key the group on the C5/D5 (comdat) symbol so
// all variants of the structor are kept or discarded together.
3485 if (CGType == StructorCodegen::COMDAT) {
3486 SmallString<256> Buffer;
3487 llvm::raw_svector_ostream Out(Buffer);
3489 getMangleContext().mangleCXXDtorComdat(DD, Out);
3491 getMangleContext().mangleCXXCtorComdat(CD, Out);
3492 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
3495 CGM.maybeSetTrivialComdat(*MD, *Fn);
// Lazily declare the __cxa_begin_catch runtime entry point.
3499 static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) {
3500 // void *__cxa_begin_catch(void*);
3501 llvm::FunctionType *FTy = llvm::FunctionType::get(
3502 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3504 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
// Lazily declare the __cxa_end_catch runtime entry point.
3507 static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
3508 // void __cxa_end_catch();
3509 llvm::FunctionType *FTy =
3510 llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
3512 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
// Lazily declare the __cxa_get_exception_ptr runtime entry point.
3515 static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
3516 // void *__cxa_get_exception_ptr(void*);
3517 llvm::FunctionType *FTy = llvm::FunctionType::get(
3518 CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3520 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
3524 /// A cleanup to call __cxa_end_catch. In many cases, the caught
3525 /// exception type lets us state definitively that the thrown exception
3526 /// type does not have a destructor. In particular:
3527 /// - Catch-alls tell us nothing, so we have to conservatively
3528 /// assume that the thrown exception might have a destructor.
3529 /// - Catches by reference behave according to their base types.
3530 /// - Catches of non-record types will only trigger for exceptions
3531 /// of non-record types, which never have destructors.
3532 /// - Catches of record types can trigger for arbitrary subclasses
3533 /// of the caught type, so we have to assume the actual thrown
3534 /// exception type might have a throwing destructor, even if the
3535 /// caught type's destructor is trivial or nothrow.
3536 struct CallEndCatch final : EHScopeStack::Cleanup {
3537 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
// Emit the call either as a nounwind call or as a call/invoke that can
// unwind, depending on whether __cxa_end_catch might throw here.
3540 void Emit(CodeGenFunction &CGF, Flags flags) override {
3542 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
3546 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
3551 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
3552 /// __cxa_end_catch.
3554 /// \param EndMightThrow - true if __cxa_end_catch might throw
3555 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
3557 bool EndMightThrow) {
3558 llvm::CallInst *call =
3559 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
// Ensure __cxa_end_catch runs on both normal and EH exits from the catch.
3561 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
3566 /// A "special initializer" callback for initializing a catch
3567 /// parameter during catch initialization.
3568 static void InitCatchParam(CodeGenFunction &CGF,
3569 const VarDecl &CatchParam,
3571 SourceLocation Loc) {
3572 // Load the exception from where the landing pad saved it.
3573 llvm::Value *Exn = CGF.getExceptionFromSlot();
3575 CanQualType CatchType =
3576 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
3577 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
3579 // If we're catching by reference, we can just cast the object
3580 // pointer to the appropriate pointer.
3581 if (isa<ReferenceType>(CatchType)) {
3582 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
3583 bool EndCatchMightThrow = CaughtType->isRecordType();
3585 // __cxa_begin_catch returns the adjusted object pointer.
3586 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
3588 // We have no way to tell the personality function that we're
3589 // catching by reference, so if we're catching a pointer,
3590 // __cxa_begin_catch will actually return that pointer by value.
3591 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
3592 QualType PointeeType = PT->getPointeeType();
3594 // When catching by reference, generally we should just ignore
3595 // this by-value pointer and use the exception object instead.
3596 if (!PointeeType->isRecordType()) {
3598 // Exn points to the struct _Unwind_Exception header, which
3599 // we have to skip past in order to reach the exception data.
3600 unsigned HeaderSize =
3601 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
3602 AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
3604 // However, if we're catching a pointer-to-record type that won't
3605 // work, because the personality function might have adjusted
3606 // the pointer. There's actually no way for us to fully satisfy
3607 // the language/ABI contract here: we can't use Exn because it
3608 // might have the wrong adjustment, but we can't use the by-value
3609 // pointer because it's off by a level of abstraction.
3611 // The current solution is to dump the adjusted pointer into an
3612 // alloca, which breaks language semantics (because changing the
3613 // pointer doesn't change the exception) but at least works.
3614 // The better solution would be to filter out non-exact matches
3615 // and rethrow them, but this is tricky because the rethrow
3616 // really needs to be catchable by other sites at this landing
3617 // pad. The best solution is to fix the personality function.
3619 // Pull the pointer for the reference type off.
3621 cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
3623 // Create the temporary and write the adjusted pointer into it.
3625 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
3626 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3627 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
3629 // Bind the reference to the temporary.
3630 AdjustedExn = ExnPtrTmp.getPointer();
3634 llvm::Value *ExnCast =
3635 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
3636 CGF.Builder.CreateStore(ExnCast, ParamAddr);
3640 // Scalars and complexes.
3641 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
3642 if (TEK != TEK_Aggregate) {
3643 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
3645 // If the catch type is a pointer type, __cxa_begin_catch returns
3646 // the pointer by value.
3647 if (CatchType->hasPointerRepresentation()) {
3648 llvm::Value *CastExn =
3649 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
3651 switch (CatchType.getQualifiers().getObjCLifetime()) {
3652 case Qualifiers::OCL_Strong:
3653 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
3656 case Qualifiers::OCL_None:
3657 case Qualifiers::OCL_ExplicitNone:
3658 case Qualifiers::OCL_Autoreleasing:
3659 CGF.Builder.CreateStore(CastExn, ParamAddr);
3662 case Qualifiers::OCL_Weak:
3663 CGF.EmitARCInitWeak(ParamAddr, CastExn);
3666 llvm_unreachable("bad ownership qualifier!");
3669 // Otherwise, it returns a pointer into the exception object.
3671 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3672 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
3674 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
3675 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
3678 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
3682 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
3683 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
3687 llvm_unreachable("evaluation kind filtered out!");
3689 llvm_unreachable("bad evaluation kind");
3692 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
3693 auto catchRD = CatchType->getAsCXXRecordDecl();
3694 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
3696 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
3698 // Check for a copy expression. If we don't have a copy expression,
3699 // that means a trivial copy is okay.
3700 const Expr *copyExpr = CatchParam.getInit();
3702 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
3703 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
3704 caughtExnAlignment);
3705 CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
3709 // We have to call __cxa_get_exception_ptr to get the adjusted
3710 // pointer before copying.
3711 llvm::CallInst *rawAdjustedExn =
3712 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
3714 // Cast that to the appropriate type.
3715 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
3716 caughtExnAlignment);
3718 // The copy expression is defined in terms of an OpaqueValueExpr.
3719 // Find it and map it to the adjusted expression.
3720 CodeGenFunction::OpaqueValueMapping
3721 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
3722 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
3724 // Call the copy ctor in a terminate scope.
3725 CGF.EHStack.pushTerminate();
3727 // Perform the copy construction.
3728 CGF.EmitAggExpr(copyExpr,
3729 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
3730 AggValueSlot::IsNotDestructed,
3731 AggValueSlot::DoesNotNeedGCBarriers,
3732 AggValueSlot::IsNotAliased));
3734 // Leave the terminate scope.
3735 CGF.EHStack.popTerminate();
3737 // Undo the opaque value mapping.
3740 // Finally we can call __cxa_begin_catch.
3741 CallBeginCatch(CGF, Exn, true);
3744 /// Begins a catch statement by initializing the catch variable and
3745 /// calling __cxa_begin_catch.
3746 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
3747 const CXXCatchStmt *S) {
3748 // We have to be very careful with the ordering of cleanups here:
3749 // C++ [except.throw]p4:
3750 // The destruction [of the exception temporary] occurs
3751 // immediately after the destruction of the object declared in
3752 // the exception-declaration in the handler.
3754 // So the precise ordering is:
3755 // 1. Construct catch variable.
3756 // 2. __cxa_begin_catch
3757 // 3. Enter __cxa_end_catch cleanup
3758 // 4. Enter dtor cleanup
3760 // We do this by using a slightly abnormal initialization process.
3761 // Delegation sequence:
3762 // - ExitCXXTryStmt opens a RunCleanupsScope
3763 // - EmitAutoVarAlloca creates the variable and debug info
3764 // - InitCatchParam initializes the variable from the exception
3765 // - CallBeginCatch calls __cxa_begin_catch
3766 // - CallBeginCatch enters the __cxa_end_catch cleanup
3767 // - EmitAutoVarCleanups enters the variable destructor cleanup
3768 // - EmitCXXTryStmt emits the code for the catch body
3769 // - EmitCXXTryStmt close the RunCleanupsScope
3771 VarDecl *CatchParam = S->getExceptionDecl();
// NOTE(review): a catch-all handler (`catch (...)`) has no exception
// declaration; the upstream source guards the direct CallBeginCatch path
// below with `if (!CatchParam) { ...; return; }`. That guard appears to
// have been elided from this excerpt — confirm against upstream before
// treating the two paths below as unconditional.
3773 llvm::Value *Exn = CGF.getExceptionFromSlot();
3774 CallBeginCatch(CGF, Exn, true);
// Emit the catch variable itself (alloca + debug info), initialize it
// from the in-flight exception, and register its destructor cleanup.
3779 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
3780 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
3781 CGF.EmitAutoVarCleanups(var);
3784 /// Get or define the following function:
3785 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
3786 /// This code is used only in C++.
///
/// The helper calls __cxa_begin_catch on the exception object before
/// invoking std::terminate(), so the terminate handler observes the
/// exception as "caught".
3787 static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
3788 llvm::FunctionType *fnTy =
3789 llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
3790 llvm::Constant *fnRef =
3791 CGM.CreateRuntimeFunction(fnTy, "__clang_call_terminate");
// CreateRuntimeFunction may hand back an existing global (possibly not a
// plain llvm::Function); only emit a body if we got a Function that does
// not already have one, so repeated calls share a single definition.
3793 llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
3794 if (fn && fn->empty()) {
3795 fn->setDoesNotThrow();
3796 fn->setDoesNotReturn();
3798 // What we really want is to massively penalize inlining without
3799 // forbidding it completely. The difference between that and
3800 // 'noinline' is negligible.
3801 fn->addFnAttr(llvm::Attribute::NoInline);
3803 // Allow this function to be shared across translation units, but
3804 // we don't want it to turn into an exported symbol.
3805 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
3806 fn->setVisibility(llvm::Function::HiddenVisibility);
// Place the definition in a COMDAT group (where the object format
// supports it) so the linker keeps exactly one copy.
3807 if (CGM.supportsCOMDAT())
3808 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
3810 // Set up the function.
3811 llvm::BasicBlock *entry =
3812 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
3813 CGBuilderTy builder(CGM, entry);
3815 // Pull the exception pointer out of the parameter list.
3816 llvm::Value *exn = &*fn->arg_begin();
3818 // Call __cxa_begin_catch(exn).
3819 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
3820 catchCall->setDoesNotThrow();
3821 catchCall->setCallingConv(CGM.getRuntimeCC());
3823 // Call std::terminate().
3824 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
3825 termCall->setDoesNotThrow();
3826 termCall->setDoesNotReturn();
3827 termCall->setCallingConv(CGM.getRuntimeCC());
3829 // std::terminate cannot return.
3830 builder.CreateUnreachable();
/// Emit the terminate call used when an exception escapes past what a
/// handler expects.  NOTE(review): the return-type line, the `llvm::Value
/// *Exn` parameter line, and an `if (Exn)` guard around the first return
/// appear to have been elided from this excerpt — confirm against the
/// upstream source.
3837 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
3839 // In C++, we want to call __cxa_begin_catch() before terminating.
// __clang_call_terminate does exactly that: begin-catch on Exn, then
// std::terminate().  Only meaningful when C++ EH is in use.
3841 assert(CGF.CGM.getLangOpts().CPlusPlus);
3842 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
// Otherwise (presumably when no live exception pointer is available —
// confirm, guard elided here), fall back to calling std::terminate()
// directly.
3844 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());