//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;
using namespace CodeGen;

namespace {

/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
/// llvm::Type. Some of the lowering is straightforward, some is not. Here we
/// detail some of the complexities and weirdnesses.
/// * LLVM does not have unions - Unions can, in theory, be represented by any
///   llvm::Type with the correct size. We choose a field via a specific
///   heuristic and add padding if necessary.
/// * LLVM does not have bitfields - Bitfields are collected into contiguous
///   runs and allocated as a single storage type for the run. ASTRecordLayout
///   contains enough information to determine where the runs break. Microsoft
///   and Itanium follow different rules and use different codepaths.
/// * It is desired that, when possible, bitfields use the appropriate iN type
///   when lowered to llvm types. For example unsigned x : 24 gets lowered to
///   i24. This isn't always possible because i24 has a storage size of 32
///   bits, and if it is possible to use that extra byte of padding we must use
///   [3 x i8] instead of i24. The function clipTailPadding does this.
///   C++ examples that require clipping:
///     struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
///     struct A { int a : 24; }; // a must be clipped because a struct like B
///     could exist: struct B : A { char b; }; // b goes at offset 3
/// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
///   fields. The existing asserts suggest that LLVM assumes that *every* field
///   has an underlying storage type. Therefore empty structures containing
///   zero sized subobjects such as empty records or zero sized arrays still get
///   a zero sized (empty struct) storage type.
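///   (Illustrative: a record whose only member is a zero-length array, e.g.
///   struct { int x[0]; };, still receives the empty llvm struct type {}
///   rather than no storage type at all.)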
/// * Clang reads the complete type rather than the base type when generating
///   code to access fields. Bitfields in tail position with tail padding may
///   be clipped in the base class but not the complete class (we may discover
///   that the tail padding is not used in the complete class.) However,
///   because LLVM reads from the complete type it can generate incorrect code
///   if we do not clip the tail padding off of the bitfield in the complete
///   layout. This introduces a somewhat awkward extra unnecessary clip stage.
///   The location of the clip is stored internally as a sentinel of type
///   SCISSOR. If LLVM were updated to read base types (which it probably
///   should because locations of things such as VBases are bogus in the llvm
///   type anyway) then we could eliminate the SCISSOR.
/// * Itanium allows nearly empty primary virtual bases. These bases don't get
///   their own storage because they're laid out as part of another base or at
///   the beginning of the structure. Determining if a VBase actually gets
///   storage awkwardly involves a walk of all bases.
/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
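///
/// For illustration (target-dependent, not exhaustive): on a typical 64-bit
/// Itanium target,
///   struct S { char a; int b : 20; short c; };
/// first gives b's run i24 storage at byte offset 1; clipTailPadding then
/// rewrites that storage as [3 x i8] because c lives at byte 4, inside the
/// i24's 4-byte alloc size, so the final type is along the lines of
/// { i8, [3 x i8], i16, [2 x i8] }.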
struct CGRecordLowering {
  // MemberInfo is a helper structure that contains information about a record
  // member. In addition to the standard member types, there exists a
  // sentinel member type that ensures correct rounding.
  struct MemberInfo {
    CharUnits Offset;
    enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
    llvm::Type *Data;
    union {
      const FieldDecl *FD;
      const CXXRecordDecl *RD;
    };
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const FieldDecl *FD = nullptr)
        : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const CXXRecordDecl *RD)
        : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
    // MemberInfos are sorted so we define a < operator.
    bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
  };
  CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
  // Short helper routines.
  /// Constructs a MemberInfo instance from an offset and llvm::Type *.
  MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
    return MemberInfo(Offset, MemberInfo::Field, Data);
  }
  /// The Microsoft bitfield layout rule allocates discrete storage
  /// units of the field's formal type and only combines adjacent
  /// fields of the same formal type. We want to emit a layout with
  /// these discrete storage units instead of combining them into a
  /// continuous run.
  bool isDiscreteBitFieldABI() {
    return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
           D->isMsStruct(Context);
  }
  /// The Itanium base layout rule allows virtual bases to overlap
  /// other bases, which complicates layout in specific ways.
  ///
  /// Note specifically that the ms_struct attribute doesn't change this.
  bool isOverlappingVBaseABI() {
    return !Context.getTargetInfo().getCXXABI().isMicrosoft();
  }
  /// Wraps llvm::Type::getIntNTy with some implicit arguments.
  llvm::Type *getIntNType(uint64_t NumBits) {
    return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                 (unsigned)llvm::alignTo(NumBits, 8));
  }
  /// Gets an llvm type of size NumBytes and alignment 1.
  llvm::Type *getByteArrayType(CharUnits NumBytes) {
    assert(!NumBytes.isZero() && "Empty byte arrays aren't allowed.");
    llvm::Type *Type = llvm::Type::getInt8Ty(Types.getLLVMContext());
    return NumBytes == CharUnits::One() ? Type :
        (llvm::Type *)llvm::ArrayType::get(Type, NumBytes.getQuantity());
  }
  /// Gets the storage type for a field decl and handles storage
  /// for Itanium bitfields that are smaller than their declared type.
  llvm::Type *getStorageType(const FieldDecl *FD) {
    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
    if (!FD->isBitField()) return Type;
    if (isDiscreteBitFieldABI()) return Type;
    return getIntNType(std::min(FD->getBitWidthValue(Context),
                                (unsigned)Context.toBits(getSize(Type))));
  }
  /// Gets the llvm BaseSubobject type from a CXXRecordDecl.
  llvm::Type *getStorageType(const CXXRecordDecl *RD) {
    return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
  }
  CharUnits bitsToCharUnits(uint64_t BitOffset) {
    return Context.toCharUnitsFromBits(BitOffset);
  }
  CharUnits getSize(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
  }
  CharUnits getAlignment(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getABITypeAlignment(Type));
  }
  bool isZeroInitializable(const FieldDecl *FD) {
    return Types.isZeroInitializable(FD->getType());
  }
  bool isZeroInitializable(const RecordDecl *RD) {
    return Types.isZeroInitializable(RD);
  }
  void appendPaddingBytes(CharUnits Size) {
    if (!Size.isZero())
      FieldTypes.push_back(getByteArrayType(Size));
  }
  uint64_t getFieldBitOffset(const FieldDecl *FD) {
    return Layout.getFieldOffset(FD->getFieldIndex());
  }
  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
                       llvm::Type *StorageType);
  /// Lowers an ASTRecordLayout to a llvm type.
  void lower(bool NonVirtualBaseType);
  void lowerUnion();
  void accumulateFields();
  void accumulateBitFields(RecordDecl::field_iterator Field,
                           RecordDecl::field_iterator FieldEnd);
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  /// Recursively searches all of the bases to find out if a vbase is
  /// not the primary vbase of some base class.
  bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
  void calculateZeroInit();
  /// Lowers bitfield storage types to I8 arrays for bitfields with tail
  /// padding that is or can potentially be used.
  void clipTailPadding();
  /// Determines if we need a packed llvm struct.
  void determinePacked(bool NVBaseType);
  /// Inserts padding everywhere it's needed.
  void insertPadding();
  /// Fills out the structures that are ultimately consumed.
  void fillOutputFields();
  // Input memoization fields.
  CodeGenTypes &Types;
  const ASTContext &Context;
  const RecordDecl *D;
  const CXXRecordDecl *RD;
  const ASTRecordLayout &Layout;
  const llvm::DataLayout &DataLayout;
  // Helpful intermediate data-structures.
  std::vector<MemberInfo> Members;
  // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
  SmallVector<llvm::Type *, 16> FieldTypes;
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
  bool IsZeroInitializable : 1;
  bool IsZeroInitializableAsBase : 1;
  bool Packed : 1;
private:
  CGRecordLowering(const CGRecordLowering &) = delete;
  void operator =(const CGRecordLowering &) = delete;
};
} // namespace

CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
                                   bool Packed)
    : Types(Types), Context(Types.getContext()), D(D),
      RD(dyn_cast<CXXRecordDecl>(D)),
      Layout(Types.getContext().getASTRecordLayout(D)),
      DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
      IsZeroInitializableAsBase(true), Packed(Packed) {}

void CGRecordLowering::setBitFieldInfo(
    const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
  CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
  Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
  Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
  Info.Size = FD->getBitWidthValue(Context);
  Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
  Info.StorageOffset = StartOffset;
  if (Info.Size > Info.StorageSize)
    Info.Size = Info.StorageSize;
  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
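  // For example (illustrative): in a 32-bit storage unit, a bitfield with
  // Size 8 at little-endian Offset 0 becomes big-endian Offset 24
  // (32 - (0 + 8)); the same bits, just counted from the other end.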
  if (DataLayout.isBigEndian())
    Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
}

void CGRecordLowering::lower(bool NVBaseType) {
  // The lowering process implemented in this function takes a variety of
  // carefully ordered phases.
  // 1) Store all members (fields and bases) in a list and sort them by offset.
  // 2) Add a 1-byte capstone member at the Size of the structure.
  // 3) Clip bitfield storage members if their tail padding is or might be
  //    used by another field or base. The clipping process uses the capstone
  //    by treating it as another object that occurs after the record.
  // 4) Determine if the llvm-struct requires packing. It's important that
  //    this phase occur after clipping, because clipping changes the llvm
  //    type. This phase reads the offset of the capstone when determining
  //    packedness and updates the alignment of the capstone to be equal to
  //    the alignment of the record after doing so.
  // 5) Insert padding everywhere it is needed. This phase requires 'Packed'
  //    to have been computed and needs to know the alignment of the record
  //    in order to understand if explicit tail padding is needed.
  // 6) Remove the capstone, we don't need it anymore.
  // 7) Determine if this record can be zero-initialized. This phase could
  //    have been placed anywhere after phase 1.
  // 8) Format the complete list of members in a way that can be consumed by
  //    CodeGenTypes::ComputeRecordLayout.
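  // As an illustrative example: for struct X { char a; int b; } on a typical
  // 64-bit target, the sorted members are a@0 and b@4, an i8 capstone lands
  // at offset 8, no clipping or packing is needed, and the result is
  // { i8, i32 } with b's preceding padding supplied by natural alignment.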
  CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
  if (D->isUnion())
    return lowerUnion();
  accumulateFields();
  // RD implies C++.
  if (RD) {
    accumulateVPtrs();
    accumulateBases();
    if (Members.empty())
      return appendPaddingBytes(Size);
    accumulateVBases();
  }
  llvm::stable_sort(Members);
  Members.push_back(StorageInfo(Size, getIntNType(8)));
  clipTailPadding();
  determinePacked(NVBaseType);
  insertPadding();
  Members.pop_back();
  calculateZeroInit();
  fillOutputFields();
}

void CGRecordLowering::lowerUnion() {
  CharUnits LayoutSize = Layout.getSize();
  llvm::Type *StorageType = nullptr;
  bool SeenNamedMember = false;
  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
  // locate the "most appropriate" storage type. The heuristic for finding the
  // storage type isn't necessary; the first (non-0-length-bitfield) field's
  // type would work fine and be simpler, but it would be different from what
  // we've been doing and cause lit tests to change.
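  // For example (illustrative): in union { char c; int i; } the i32 for i
  // beats the i8 for c on alignment, so the union lowers to { i32 }.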
  for (const auto *Field : D->fields()) {
    if (Field->isBitField()) {
      if (Field->isZeroLengthBitField(Context))
        continue;
      llvm::Type *FieldType = getStorageType(Field);
      if (LayoutSize < getSize(FieldType))
        FieldType = getByteArrayType(LayoutSize);
      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
    }
    Fields[Field->getCanonicalDecl()] = 0;
    llvm::Type *FieldType = getStorageType(Field);
    // Compute zero-initializable status.
    // This union might not be zero initialized: it may contain a pointer to
    // data member which might have some exotic initialization sequence.
    // If this is the case, then we ought not to try and come up with a
    // "better" type; it might not be very easy to come up with a Constant
    // which correctly initializes it.
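    // (Illustrative: in the Itanium ABI the null pointer to data member is
    // represented as -1, so a union containing an int S::* member cannot be
    // zero-initialized with a simple all-zero constant.)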
    if (!SeenNamedMember) {
      SeenNamedMember = Field->getIdentifier();
      if (!SeenNamedMember)
        if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
          SeenNamedMember = FieldRD->findFirstNamedDataMember();
      if (SeenNamedMember && !isZeroInitializable(Field)) {
        IsZeroInitializable = IsZeroInitializableAsBase = false;
        StorageType = FieldType;
      }
    }
    // Because our union isn't zero initializable, we won't be getting a better
    // storage type.
    if (!IsZeroInitializable)
      continue;
    // Conditionally update our storage type if we've got a new "better" one.
    if (!StorageType ||
        getAlignment(FieldType) > getAlignment(StorageType) ||
        (getAlignment(FieldType) == getAlignment(StorageType) &&
         getSize(FieldType) > getSize(StorageType)))
      StorageType = FieldType;
  }
  // If we have no storage type just pad to the appropriate size and return.
  if (!StorageType)
    return appendPaddingBytes(LayoutSize);
  // If our storage size was bigger than our required size (can happen in the
  // case of packed bitfields on Itanium) then just use an I8 array.
  if (LayoutSize < getSize(StorageType))
    StorageType = getByteArrayType(LayoutSize);
  FieldTypes.push_back(StorageType);
  appendPaddingBytes(LayoutSize - getSize(StorageType));
  // Set packed if we need it.
  if (LayoutSize % getAlignment(StorageType))
    Packed = true;
}

void CGRecordLowering::accumulateFields() {
  for (RecordDecl::field_iterator Field = D->field_begin(),
                                  FieldEnd = D->field_end();
       Field != FieldEnd;) {
    if (Field->isBitField()) {
      RecordDecl::field_iterator Start = Field;
      // Iterate to gather the list of bitfields.
      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
      accumulateBitFields(Start, Field);
    } else if (!Field->isZeroSize(Context)) {
      Members.push_back(MemberInfo(
          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
          getStorageType(*Field), *Field));
      ++Field;
    } else {
      ++Field;
    }
  }
}

void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields. FieldEnd is
  // used as a special value to note that we don't have a current run. A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block. Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run. It's
  // used to determine if the ASTRecordLayout is treating these two bitfields
  // as contiguous. StartBitOffset is the offset of the beginning of the Run.
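  // For example (illustrative): in struct { int a : 3; int b : 5; char c; }
  // on Itanium, a and b form a single run with StartBitOffset 0 and Tail 8,
  // and share one i8 storage unit; c then breaks the run.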
  uint64_t StartBitOffset, Tail = 0;
  if (isDiscreteBitFieldABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type =
          Types.ConvertTypeForMem(Field->getType(), /*ForBitFields=*/true);
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record. This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and
      // remain there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }

  // Check if OffsetInRecord (the size in bits of the current run) is better
  // as a single field run. When OffsetInRecord has legal integer width, and
  // its bitfield offset is naturally aligned, it is better to make the
  // bitfield a separate storage component so that it can be accessed directly
  // with lower cost.
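  // For example (illustrative): under -ffine-grained-bitfield-accesses,
  // struct { int a : 32; int b : 16; } keeps a as its own naturally aligned
  // i32 unit rather than folding it into one wider shared run.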
  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
                                      uint64_t StartBitOffset) {
    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
      return false;
    if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
        !DataLayout.fitsInLegalInteger(OffsetInRecord))
      return false;
    // Make sure StartBitOffset is naturally aligned if it is treated as an
    // IType integer.
    if (StartBitOffset %
            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
        0)
      return false;
    return true;
  };

  // The start field is better as a single field run.
  bool StartFieldAsSingleRun = false;
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
                                                         StartBitOffset);
      }
      ++Field;
      continue;
    }

    // If the start field of a new run is better as a single run, or
    // if the current field (or consecutive fields) is better as a single run,
    // or
    // if the current field is a zero-width bitfield and either
    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
    // true, or
    // if the offset of the current field is inconsistent with the offset of
    // the previous field plus its width,
    // skip the block below and go ahead to emit the storage.
    // Otherwise, try to add bitfields to the run.
    if (!StartFieldAsSingleRun && Field != FieldEnd &&
        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }

    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all
    // of the bitfields in the run. Bitfields get the offset of their storage
    // but come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
    StartFieldAsSingleRun = false;
  }
}

void CGRecordLowering::accumulateBases() {
  // If we've got a primary virtual base, we need to add it with the bases.
  if (Layout.isPrimaryBaseVirtual()) {
    const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
                                 getStorageType(BaseDecl), BaseDecl));
  }
  // Accumulate the non-virtual bases.
  for (const auto &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;
    // Bases can be zero-sized even if not technically empty if they
    // contain only a trailing array member.
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (!BaseDecl->isEmpty() &&
        !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
      Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
          MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
  }
}

void CGRecordLowering::accumulateVPtrs() {
  if (Layout.hasOwnVFPtr())
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
        llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
            getPointerTo()->getPointerTo()));
  if (Layout.hasOwnVBPtr())
    Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
        llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
}

void CGRecordLowering::accumulateVBases() {
  CharUnits ScissorOffset = Layout.getNonVirtualSize();
  // In the Itanium ABI, it's possible to place a vbase at a dsize that is
  // smaller than the nvsize. Here we check to see if such a base is placed
  // before the nvsize and set the scissor offset to that, instead of the
  // nvsize.
  if (isOverlappingVBaseABI())
    for (const auto &Base : RD->vbases()) {
      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
      if (BaseDecl->isEmpty())
        continue;
      // If the vbase is a primary virtual base of some base, then it doesn't
      // get its own storage location but instead lives inside of that base.
      if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
        continue;
      ScissorOffset = std::min(ScissorOffset,
                               Layout.getVBaseClassOffset(BaseDecl));
    }
  Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
                               RD));
  for (const auto &Base : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (BaseDecl->isEmpty())
      continue;
    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    if (isOverlappingVBaseABI() &&
        Context.isNearlyEmpty(BaseDecl) &&
        !hasOwnStorage(RD, BaseDecl)) {
      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                   BaseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.
    if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
      Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
                                    getIntNType(32)));
    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
                                 getStorageType(BaseDecl), BaseDecl));
  }
}

bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
                                     const CXXRecordDecl *Query) {
  const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
  if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
    return false;
  for (const auto &Base : Decl->bases())
    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
      return false;
  return true;
}

void CGRecordLowering::calculateZeroInit() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
    if (Member->Kind == MemberInfo::Field) {
      if (!Member->FD || isZeroInitializable(Member->FD))
        continue;
      IsZeroInitializable = IsZeroInitializableAsBase = false;
    } else if (Member->Kind == MemberInfo::Base ||
               Member->Kind == MemberInfo::VBase) {
      if (isZeroInitializable(Member->RD))
        continue;
      IsZeroInitializable = false;
      if (Member->Kind == MemberInfo::Base)
        IsZeroInitializableAsBase = false;
    }
  }
}

void CGRecordLowering::clipTailPadding() {
  std::vector<MemberInfo>::iterator Prior = Members.begin();
  CharUnits Tail = getSize(Prior->Data);
  for (std::vector<MemberInfo>::iterator Member = Prior + 1,
                                         MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    // Only members with data and the scissor can cut into tail padding.
    if (!Member->Data && Member->Kind != MemberInfo::Scissor)
      continue;
    if (Member->Offset < Tail) {
      assert(Prior->Kind == MemberInfo::Field &&
             "Only storage fields have tail padding!");
      if (!Prior->FD || Prior->FD->isBitField())
        Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
            cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
      else {
        assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
               "should not have reused this field's tail padding");
        Prior->Data = getByteArrayType(
            Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).first);
      }
    }
    if (Member->Data)
      Prior = Member;
    Tail = Prior->Offset + getSize(Prior->Data);
  }
}

void CGRecordLowering::determinePacked(bool NVBaseType) {
  if (Packed)
    return;
  CharUnits Alignment = CharUnits::One();
  CharUnits NVAlignment = CharUnits::One();
  CharUnits NVSize =
      !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
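    // For example (illustrative): in struct __attribute__((packed))
    // { char c; int i; }, i falls at offset 1, not a multiple of i32's
    // alignment, so the whole llvm struct becomes packed: <{ i8, i32 }>.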
    if (Member->Offset % getAlignment(Member->Data))
      Packed = true;
    if (Member->Offset < NVSize)
      NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
    Alignment = std::max(Alignment, getAlignment(Member->Data));
  }
  // If the size of the record (the capstone's offset) is not a multiple of the
  // record's alignment, it must be packed.
  if (Members.back().Offset % Alignment)
    Packed = true;
  // If the non-virtual sub-object is not a multiple of the non-virtual
  // sub-object's alignment, it must be packed. We cannot have a packed
  // non-virtual sub-object and an unpacked complete object or vice versa.
  if (NVSize % NVAlignment)
    Packed = true;
  // Update the alignment of the sentinel.
  if (!Packed)
    Members.back().Data = getIntNType(Context.toBits(Alignment));
}

void CGRecordLowering::insertPadding() {
  std::vector<std::pair<CharUnits, CharUnits> > Padding;
  CharUnits Size = CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    CharUnits Offset = Member->Offset;
    assert(Offset >= Size);
    // Insert padding if we need to.
    if (Offset !=
        Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
      Padding.push_back(std::make_pair(Size, Offset - Size));
    Size = Offset + getSize(Member->Data);
  }
  if (Padding.empty())
    return;
  // Add the padding to the Members list and sort it.
  for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
           Pad = Padding.begin(), PadEnd = Padding.end();
       Pad != PadEnd; ++Pad)
    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
  llvm::stable_sort(Members);
}

void CGRecordLowering::fillOutputFields() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (Member->Data)
      FieldTypes.push_back(Member->Data);
    if (Member->Kind == MemberInfo::Field) {
      if (Member->FD)
        Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
      // A field without storage must be a bitfield.
      if (!Member->Data)
        setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
    } else if (Member->Kind == MemberInfo::Base)
      NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
    else if (Member->Kind == MemberInfo::VBase)
      VirtualBases[Member->RD] = FieldTypes.size() - 1;
  }
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        CharUnits StorageOffset) {
  // This function is vestigial from CGRecordLayoutBuilder days but is still
  // used in GCObjCRuntime.cpp. That usage has a "fixme" attached to it that
  // when addressed will allow for the removal of this function.
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
      CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}

std::unique_ptr<CGRecordLayout>
CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
  CGRecordLowering Builder(*this, D, /*Packed=*/false);

  Builder.lower(/*NonVirtualBaseType=*/false);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = nullptr;
  if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
    BaseTy = Ty;
    if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
      CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
      BaseBuilder.lower(/*NonVirtualBaseType=*/true);
      BaseTy = llvm::StructType::create(
          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
      addRecordTypeName(D, BaseTy, ".base");
      // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to
      // work on both of them with the same index.
      assert(Builder.Packed == BaseBuilder.Packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the struct *after* computing the base type. Filling in the body
  // signifies that the type is no longer opaque and record layout is complete,
  // but we may need to recursively lay out D while laying D out as a base
  // type.
  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  auto RL = std::make_unique<CGRecordLayout>(
      Ty, BaseTy, (bool)Builder.IsZeroInitializable,
      (bool)Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();

    uint64_t AlignedNonVirtualTypeSizeInBits =
        getContext().toBits(NonVirtualSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST = RL->getLLVMType();
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // Ignore zero-sized fields.
    if (FD->isZeroSize(getContext()))
      continue;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // same as what we would have gotten from general layout otherwise.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
               Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert(Info.StorageSize ==
             getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset
     << " Size:" << Size
     << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageOffset:" << StorageOffset.getQuantity() << ">";
}

LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}