1 //===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This contains code to emit Constant Expr nodes as LLVM code.
12 //===----------------------------------------------------------------------===//
14 #include "CodeGenFunction.h"
16 #include "CGObjCRuntime.h"
17 #include "CGRecordLayout.h"
18 #include "CodeGenModule.h"
19 #include "clang/AST/APValue.h"
20 #include "clang/AST/ASTContext.h"
21 #include "clang/AST/RecordLayout.h"
22 #include "clang/AST/StmtVisitor.h"
23 #include "clang/Basic/Builtins.h"
24 #include "llvm/IR/Constants.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/GlobalVariable.h"
28 using namespace clang;
29 using namespace CodeGen;
31 //===----------------------------------------------------------------------===//
33 //===----------------------------------------------------------------------===//
// Incrementally builds an llvm::Constant for a struct/union initializer.
// Fields are appended in address order; padding is inserted as needed, and
// the layout may be demoted to a packed LLVM struct if alignment cannot be
// satisfied otherwise. NOTE(review): several declaration lines (access
// specifiers, member fields such as CGM/CGF/Packed, first BuildStruct
// parameter list) are not visible in this view.
36 class ConstStructBuilder {
// Running size of the constant built so far, in characters.
41 CharUnits NextFieldOffsetInChars;
// Alignment of the LLVM struct being built (1 once packed).
42 CharUnits LLVMStructAlignment;
// The constants emitted so far, including undef padding elements.
43 SmallVector<llvm::Constant *, 32> Elements;
// Entry points: build a constant struct from an InitListExpr or an APValue.
45 static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
47 static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
48 const APValue &Value, QualType ValTy);
51 ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
52 : CGM(CGM), CGF(CGF), Packed(false),
53 NextFieldOffsetInChars(CharUnits::Zero()),
54 LLVMStructAlignment(CharUnits::One()) { }
// Append a non-bitfield member at the given bit offset.
56 void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
57 llvm::Constant *InitExpr);
// Append a raw constant at a character offset (used for fields and vptrs).
59 void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);
// Append a bitfield member; packs bits into byte-sized elements.
61 void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
62 llvm::ConstantInt *InitExpr);
// Append PadSize characters of undef padding.
64 void AppendPadding(CharUnits PadSize);
// Pad out to the record's total size.
66 void AppendTailPadding(CharUnits RecordSize);
// Rebuild Elements with explicit padding so a packed struct has the
// same layout; sets LLVMStructAlignment to 1.
68 void ConvertStructToPacked();
70 bool Build(InitListExpr *ILE);
71 void Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
72 const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
73 llvm::Constant *Finalize(QualType Ty);
// ABI alignment of a constant's type, or 1 if we've gone packed.
75 CharUnits getAlignment(const llvm::Constant *C) const {
76 if (Packed) return CharUnits::One();
77 return CharUnits::fromQuantity(
78 CGM.getDataLayout().getABITypeAlignment(C->getType()));
// Allocation size of a constant's type, in characters.
81 CharUnits getSizeInChars(const llvm::Constant *C) const {
82 return CharUnits::fromQuantity(
83 CGM.getDataLayout().getTypeAllocSize(C->getType()));
// Append a non-bitfield member: convert the AST layout's bit offset to
// characters and delegate to AppendBytes.
87 void ConstStructBuilder::
88 AppendField(const FieldDecl *Field, uint64_t FieldOffset,
89 llvm::Constant *InitCst) {
90 const ASTContext &Context = CGM.getContext();
// FieldOffset is in bits; all bookkeeping below is in characters.
92 CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
94 AppendBytes(FieldOffsetInChars, InitCst);
// Place InitCst at exactly FieldOffsetInChars, inserting undef padding as
// needed. If natural alignment would push the constant past the required
// offset, the whole struct is converted to packed so the offset can be hit
// exactly.
97 void ConstStructBuilder::
98 AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
100 assert(NextFieldOffsetInChars <= FieldOffsetInChars
101 && "Field offset mismatch!");
103 CharUnits FieldAlignment = getAlignment(InitCst);
105 // Round up the field offset to the alignment of the field type.
106 CharUnits AlignedNextFieldOffsetInChars =
107 NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
109 if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
110 // We need to append padding.
111 AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
113 assert(NextFieldOffsetInChars == FieldOffsetInChars &&
114 "Did not add enough padding!");
// Re-derive the aligned offset now that padding moved the cursor.
116 AlignedNextFieldOffsetInChars =
117 NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
// Natural alignment overshoots the required offset: only a packed
// layout can place the constant exactly where the AST layout wants it.
120 if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
121 assert(!Packed && "Alignment is wrong even with a packed struct!");
123 // Convert the struct to a packed struct.
124 ConvertStructToPacked();
126 // After we pack the struct, we may need to insert padding.
127 if (NextFieldOffsetInChars < FieldOffsetInChars) {
128 // We need to append padding.
129 AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
131 assert(NextFieldOffsetInChars == FieldOffsetInChars &&
132 "Did not add enough padding!");
// In a packed struct the constant lands exactly at the cursor.
134 AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
138 Elements.push_back(InitCst);
139 NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
140 getSizeInChars(InitCst);
// NOTE(review): the branch structure around these asserts is partially
// elided in this view; once packed, alignment must remain 1 byte.
143 assert(LLVMStructAlignment == CharUnits::One() &&
144 "Packed struct not byte-aligned!");
// Track the largest member alignment seen so far.
146 LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
// Append a bitfield member. Bits are packed into byte-sized (CharWidth)
// integer elements; a bitfield may share its first byte with the previous
// element, in which case the tail bits are OR'ed into it. Bit placement
// within a byte depends on the target's endianness.
149 void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
150 uint64_t FieldOffset,
151 llvm::ConstantInt *CI) {
152 const ASTContext &Context = CGM.getContext();
153 const uint64_t CharWidth = Context.getCharWidth();
154 uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
155 if (FieldOffset > NextFieldOffsetInBits) {
156 // We need to add padding.
// Round the gap up to a whole number of chars before padding.
157 CharUnits PadSize = Context.toCharUnitsFromBits(
158 llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBits,
159 Context.getTargetInfo().getCharAlign()));
161 AppendPadding(PadSize);
164 uint64_t FieldSize = Field->getBitWidthValue(Context);
166 llvm::APInt FieldValue = CI->getValue();
168 // Promote the size of FieldValue if necessary
169 // FIXME: This should never occur, but currently it can because initializer
170 // constants are cast to bool, and because clang is not enforcing bitfield
172 if (FieldSize > FieldValue.getBitWidth())
173 FieldValue = FieldValue.zext(FieldSize);
175 // Truncate the size of FieldValue to the bit field size.
176 if (FieldSize < FieldValue.getBitWidth())
177 FieldValue = FieldValue.trunc(FieldSize);
// Recompute after any padding was appended above.
179 NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
180 if (FieldOffset < NextFieldOffsetInBits) {
181 // Either part of the field or the entire field can go into the previous
183 assert(!Elements.empty() && "Elements can't be empty!");
185 unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset;
187 bool FitsCompletelyInPreviousByte =
188 BitsInPreviousByte >= FieldValue.getBitWidth();
190 llvm::APInt Tmp = FieldValue;
192 if (!FitsCompletelyInPreviousByte) {
193 unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
// Split the value: Tmp gets the bits going into the previous byte,
// FieldValue keeps the remainder. Which end goes first depends on
// endianness. NOTE(review): the else keyword between the two arms is
// elided in this view.
195 if (CGM.getDataLayout().isBigEndian()) {
196 Tmp = Tmp.lshr(NewFieldWidth);
197 Tmp = Tmp.trunc(BitsInPreviousByte);
199 // We want the remaining high bits.
200 FieldValue = FieldValue.trunc(NewFieldWidth);
202 Tmp = Tmp.trunc(BitsInPreviousByte);
204 // We want the remaining low bits.
205 FieldValue = FieldValue.lshr(BitsInPreviousByte);
206 FieldValue = FieldValue.trunc(NewFieldWidth);
// Widen to a full char and shift into position within the byte.
210 Tmp = Tmp.zext(CharWidth);
211 if (CGM.getDataLayout().isBigEndian()) {
212 if (FitsCompletelyInPreviousByte)
213 Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
215 Tmp = Tmp.shl(CharWidth - BitsInPreviousByte);
218 // 'or' in the bits that go into the previous byte.
219 llvm::Value *LastElt = Elements.back();
220 if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
221 Tmp |= Val->getValue();
223 assert(isa<llvm::UndefValue>(LastElt));
224 // If there is an undef field that we're adding to, it can either be a
225 // scalar undef (in which case, we just replace it with our field) or it
226 // is an array. If it is an array, we have to pull one byte off the
227 // array so that the other undef bytes stay around.
228 if (!isa<llvm::IntegerType>(LastElt->getType())) {
229 // The undef padding will be a multibyte array, create a new smaller
230 // padding and then an hole for our i8 to get plopped into.
231 assert(isa<llvm::ArrayType>(LastElt->getType()) &&
232 "Expected array padding of undefs");
233 llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
234 assert(AT->getElementType()->isIntegerTy(CharWidth) &&
235 AT->getNumElements() != 0 &&
236 "Expected non-empty array padding of undefs");
238 // Remove the padding array.
239 NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements());
242 // Add the padding back in two chunks.
243 AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1));
244 AppendPadding(CharUnits::One());
245 assert(isa<llvm::UndefValue>(Elements.back()) &&
246 Elements.back()->getType()->isIntegerTy(CharWidth) &&
247 "Padding addition didn't work right");
// Replace the trailing one-byte slot with the merged bits.
251 Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
// Done if the whole bitfield fit in the previous byte. NOTE(review):
// the return/continuation after this test is elided in this view.
253 if (FitsCompletelyInPreviousByte)
// Emit the remaining bits one full byte at a time.
257 while (FieldValue.getBitWidth() > CharWidth) {
260 if (CGM.getDataLayout().isBigEndian()) {
261 // We want the high bits.
263 FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
265 // We want the low bits.
266 Tmp = FieldValue.trunc(CharWidth);
268 FieldValue = FieldValue.lshr(CharWidth);
271 Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
272 ++NextFieldOffsetInChars;
274 FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth);
277 assert(FieldValue.getBitWidth() > 0 &&
278 "Should have at least one bit left!");
279 assert(FieldValue.getBitWidth() <= CharWidth &&
280 "Should not have more than a byte left!");
// Pad the final partial byte out to CharWidth; on big-endian targets the
// remaining bits occupy the high end of the byte.
282 if (FieldValue.getBitWidth() < CharWidth) {
283 if (CGM.getDataLayout().isBigEndian()) {
284 unsigned BitWidth = FieldValue.getBitWidth();
286 FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
288 FieldValue = FieldValue.zext(CharWidth);
291 // Append the last element.
292 Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
294 ++NextFieldOffsetInChars;
// Append PadSize characters of undef padding: a single i8 for one byte,
// otherwise an [N x i8] array. No-op for zero padding.
297 void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
298 if (PadSize.isZero())
301 llvm::Type *Ty = CGM.Int8Ty;
302 if (PadSize > CharUnits::One())
303 Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
// Undef, not zero: padding bytes carry no defined value.
305 llvm::Constant *C = llvm::UndefValue::get(Ty);
306 Elements.push_back(C);
307 assert(getAlignment(C) == CharUnits::One() &&
308 "Padding must have 1 byte alignment!");
310 NextFieldOffsetInChars += getSizeInChars(C);
// Pad the constant out to the record's total size.
313 void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
314 assert(NextFieldOffsetInChars <= RecordSize &&
317 AppendPadding(RecordSize - NextFieldOffsetInChars);
// Rebuild Elements as a packed layout: the implicit alignment padding a
// non-packed LLVM struct would insert is materialized as explicit undef
// elements, so every element lands at the same character offset as before.
// Afterwards the struct alignment is 1.
320 void ConstStructBuilder::ConvertStructToPacked() {
321 SmallVector<llvm::Constant *, 16> PackedElements;
322 CharUnits ElementOffsetInChars = CharUnits::Zero();
324 for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
325 llvm::Constant *C = Elements[i];
// Use the raw ABI alignment here (not getAlignment, which would return
// 1 once Packed is set): this reproduces the non-packed placement.
327 CharUnits ElementAlign = CharUnits::fromQuantity(
328 CGM.getDataLayout().getABITypeAlignment(C->getType()));
329 CharUnits AlignedElementOffsetInChars =
330 ElementOffsetInChars.RoundUpToAlignment(ElementAlign);
332 if (AlignedElementOffsetInChars > ElementOffsetInChars) {
333 // We need some padding.
335 AlignedElementOffsetInChars - ElementOffsetInChars;
337 llvm::Type *Ty = CGM.Int8Ty;
338 if (NumChars > CharUnits::One())
339 Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
341 llvm::Constant *Padding = llvm::UndefValue::get(Ty);
342 PackedElements.push_back(Padding);
343 ElementOffsetInChars += getSizeInChars(Padding);
346 PackedElements.push_back(C);
347 ElementOffsetInChars += getSizeInChars(C);
// The explicit-padding layout must occupy exactly the same size.
350 assert(ElementOffsetInChars == NextFieldOffsetInChars &&
351 "Packing the struct changed its size!");
353 Elements.swap(PackedElements);
354 LLVMStructAlignment = CharUnits::One();
// Build the constant from an InitListExpr: walk the record's fields in
// declaration order, pairing each initializable field with the next
// initializer in the list (missing initializers become null constants).
// NOTE(review): the lines setting Packed for union/bitfield members and the
// failure paths (returning false when an element can't be emitted) are
// elided in this view.
358 bool ConstStructBuilder::Build(InitListExpr *ILE) {
359 RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
360 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
362 unsigned FieldNo = 0;
363 unsigned ElementNo = 0;
365 for (RecordDecl::field_iterator Field = RD->field_begin(),
366 FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
367 // If this is a union, skip all the fields that aren't being initialized.
368 if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
371 // Don't emit anonymous bitfields, they just affect layout.
372 if (Field->isUnnamedBitfield())
375 // Get the initializer. A struct can include fields without initializers,
376 // we just use explicit null values for them.
377 llvm::Constant *EltInit;
378 if (ElementNo < ILE->getNumInits())
379 EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
380 Field->getType(), CGF);
382 EltInit = CGM.EmitNullConstant(Field->getType());
387 if (!Field->isBitField()) {
388 // Handle non-bitfield members.
// Field offsets come from the AST record layout, in bits.
389 AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit);
391 // Otherwise we have a bitfield.
392 AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
393 cast<llvm::ConstantInt>(EltInit));
// Helper record for sorting a class's direct bases by their offset within
// the layout, remembering each base's original index into the APValue.
// NOTE(review): the enclosing struct's header line is elided in this view.
402 BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
403 : Decl(Decl), Offset(Offset), Index(Index) {
406 const CXXRecordDecl *Decl;
// Sort key: address order of the base subobjects.
410 bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
// Build the constant from an evaluated APValue: emit the vtable pointer
// (unless this record is the primary base, which shares its vptr with the
// derived class), then the non-virtual bases in address order, then the
// fields. Offset is this subobject's position within the complete object.
414 void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
416 const CXXRecordDecl *VTableClass,
418 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
420 if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
421 // Add a vtable pointer, if we need one and it hasn't already been added.
422 if (CD->isDynamicClass() && !IsPrimaryBase) {
423 llvm::Constant *VTableAddressPoint =
424 CGM.getCXXABI().getVTableAddressPointForConstExpr(
425 BaseSubobject(CD, Offset), VTableClass);
426 AppendBytes(Offset, VTableAddressPoint);
429 // Accumulate and sort bases, in order to visit them in address order, which
430 // may not be the same as declaration order.
431 SmallVector<BaseInfo, 8> Bases;
432 Bases.reserve(CD->getNumBases());
434 for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
435 BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
436 assert(!Base->isVirtual() && "should not have virtual bases here");
437 const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
438 CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
439 Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
// stable_sort keeps declaration order for equal offsets (empty bases).
441 std::stable_sort(Bases.begin(), Bases.end());
443 for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
444 BaseInfo &Base = Bases[I];
446 bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
// Recurse into each base subobject at its absolute offset.
447 Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
448 VTableClass, Offset + Base.Offset);
452 unsigned FieldNo = 0;
// Field offsets from the layout are relative to this record; add the
// subobject's own offset (converted to bits) to make them absolute.
453 uint64_t OffsetBits = CGM.getContext().toBits(Offset);
455 for (RecordDecl::field_iterator Field = RD->field_begin(),
456 FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
457 // If this is a union, skip all the fields that aren't being initialized.
458 if (RD->isUnion() && Val.getUnionField() != *Field)
461 // Don't emit anonymous bitfields, they just affect layout.
462 if (Field->isUnnamedBitfield())
465 // Emit the value of the initializer.
466 const APValue &FieldValue =
467 RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
468 llvm::Constant *EltInit =
469 CGM.EmitConstantValueForMemory(FieldValue, Field->getType(), CGF);
470 assert(EltInit && "EmitConstantValue can't fail");
472 if (!Field->isBitField()) {
473 // Handle non-bitfield members.
474 AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit);
476 // Otherwise we have a bitfield.
477 AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
478 cast<llvm::ConstantInt>(EltInit));
// Finish the build: add tail padding (unless the record has a flexible
// array member making it larger than its declared size), convert to a
// packed struct if the natural layout would overshoot the record size, and
// produce the final llvm::ConstantStruct. If the builder's struct type is
// layout-identical to the converted AST type, the AST type is preferred.
483 llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
484 RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
485 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
487 CharUnits LayoutSizeInChars = Layout.getSize();
489 if (NextFieldOffsetInChars > LayoutSizeInChars) {
490 // If the struct is bigger than the size of the record type,
491 // we must have a flexible array member at the end.
492 assert(RD->hasFlexibleArrayMember() &&
493 "Must have flexible array member if struct is bigger than type!");
495 // No tail padding is necessary.
497 // Append tail padding if necessary.
498 CharUnits LLVMSizeInChars =
499 NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
501 if (LLVMSizeInChars != LayoutSizeInChars)
502 AppendTailPadding(LayoutSizeInChars);
// Recompute the rounded size after tail padding was appended.
505 NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
507 // Check if we need to convert the struct to a packed struct.
508 if (NextFieldOffsetInChars <= LayoutSizeInChars &&
509 LLVMSizeInChars > LayoutSizeInChars) {
510 assert(!Packed && "Size mismatch!");
512 ConvertStructToPacked();
513 assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
514 "Converting to packed did not help!");
518 NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
520 assert(LayoutSizeInChars == LLVMSizeInChars &&
521 "Tail padding mismatch!");
524 // Pick the type to use. If the type is layout identical to the ConvertType
525 // type then use it, otherwise use whatever the builder produced for us.
526 llvm::StructType *STy =
527 llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
529 llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty);
530 if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) {
531 if (ValSTy->isLayoutIdentical(STy))
535 llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements);
537 assert(NextFieldOffsetInChars.RoundUpToAlignment(getAlignment(Result)) ==
538 getSizeInChars(Result) && "Size mismatch!");
// Static entry point: build a constant struct from an InitListExpr.
// Returns null (via the elided failure path) if Build fails.
543 llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
544 CodeGenFunction *CGF,
546 ConstStructBuilder Builder(CGM, CGF);
548 if (!Builder.Build(ILE))
551 return Builder.Finalize(ILE->getType());
// Static entry point: build a constant struct from an evaluated APValue.
554 llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
555 CodeGenFunction *CGF,
558 ConstStructBuilder Builder(CGM, CGF);
// The top-level record is never a primary base; it is its own vtable class
// (CD is null for plain C structs).
560 const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
561 const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
562 Builder.Build(Val, RD, false, CD, CharUnits::Zero());
564 return Builder.Finalize(ValTy);
568 //===----------------------------------------------------------------------===//
570 //===----------------------------------------------------------------------===//
572 /// This class only needs to handle two cases:
573 /// 1) Literals (this is used by APValue emission to emit literals).
574 /// 2) Arrays, structs and unions (outside C++11 mode, we don't currently
575 /// constant fold these types).
// StmtVisitor that emits an llvm::Constant for literals and for
// array/struct/union initializer lists. Transparent wrapper expressions
// simply forward to their underlying subexpression.
576 class ConstExprEmitter :
577 public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
// CGF may be null when emitting at global (non-function) scope.
579 CodeGenFunction *CGF;
580 llvm::LLVMContext &VMContext;
582 ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
583 : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
586 //===--------------------------------------------------------------------===//
588 //===--------------------------------------------------------------------===//
// Fallback for statements with no specific handler. NOTE(review): the
// return (presumably null) is elided in this view.
590 llvm::Constant *VisitStmt(Stmt *S) {
// The visitors below are transparent: each forwards to the wrapped
// subexpression.
594 llvm::Constant *VisitParenExpr(ParenExpr *PE) {
595 return Visit(PE->getSubExpr());
599 VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
600 return Visit(PE->getReplacement());
603 llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
604 return Visit(GE->getResultExpr());
607 llvm::Constant *VisitChooseExpr(ChooseExpr *CE) {
608 return Visit(CE->getChosenSubExpr());
611 llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
612 return Visit(E->getInitializer());
// Emit a cast applied to a constant subexpression. A few cast kinds are
// handled directly (union cast, address-space cast, member-pointer
// conversions); no-op kinds pass the operand through; the large group at
// the bottom is left to Evaluate and returns null here (returns are
// partially elided in this view).
615 llvm::Constant *VisitCastExpr(CastExpr* E) {
616 Expr *subExpr = E->getSubExpr();
617 llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
618 if (!C) return nullptr;
620 llvm::Type *destType = ConvertType(E->getType());
622 switch (E->getCastKind()) {
// NOTE(review): the CK_ToUnion case label is elided in this view.
624 // GCC cast to union extension
625 assert(E->getType()->isUnionType() &&
626 "Destination type is not union type!");
628 // Build a struct with the union sub-element as the first member,
629 // and padded to the appropriate size
630 SmallVector<llvm::Constant*, 2> Elts;
631 SmallVector<llvm::Type*, 2> Types;
633 Types.push_back(C->getType());
634 unsigned CurSize = CGM.getDataLayout().getTypeAllocSize(C->getType());
635 unsigned TotalSize = CGM.getDataLayout().getTypeAllocSize(destType);
637 assert(CurSize <= TotalSize && "Union size mismatch!");
638 if (unsigned NumPadBytes = TotalSize - CurSize) {
639 llvm::Type *Ty = CGM.Int8Ty;
641 Ty = llvm::ArrayType::get(Ty, NumPadBytes);
643 Elts.push_back(llvm::UndefValue::get(Ty));
// Anonymous (literal) struct type wrapping the member plus padding.
647 llvm::StructType* STy =
648 llvm::StructType::get(C->getType()->getContext(), Types, false);
649 return llvm::ConstantStruct::get(STy, Elts);
652 case CK_AddressSpaceConversion:
653 return llvm::ConstantExpr::getAddrSpaceCast(C, destType);
// No-op casts: the operand constant is already the right value.
655 case CK_LValueToRValue:
656 case CK_AtomicToNonAtomic:
657 case CK_NonAtomicToAtomic:
659 case CK_ConstructorConversion:
662 case CK_Dependent: llvm_unreachable("saw dependent cast!");
664 case CK_BuiltinFnToFnPtr:
665 llvm_unreachable("builtin functions are handled elsewhere");
// Member-pointer conversions are ABI-specific; delegate to the C++ ABI.
667 case CK_ReinterpretMemberPointer:
668 case CK_DerivedToBaseMemberPointer:
669 case CK_BaseToDerivedMemberPointer:
670 return CGM.getCXXABI().EmitMemberPointerConversion(E, C);
672 // These will never be supported.
673 case CK_ObjCObjectLValueCast:
674 case CK_ARCProduceObject:
675 case CK_ARCConsumeObject:
676 case CK_ARCReclaimReturnedObject:
677 case CK_ARCExtendBlockObject:
678 case CK_CopyAndAutoreleaseBlockObject:
681 // These don't need to be handled here because Evaluate knows how to
682 // evaluate them in the cases where they can be folded.
686 case CK_LValueBitCast:
687 case CK_NullToMemberPointer:
688 case CK_UserDefinedConversion:
689 case CK_CPointerToObjCPointerCast:
690 case CK_BlockPointerToObjCPointerCast:
691 case CK_AnyPointerToBlockPointerCast:
692 case CK_ArrayToPointerDecay:
693 case CK_FunctionToPointerDecay:
694 case CK_BaseToDerived:
695 case CK_DerivedToBase:
696 case CK_UncheckedDerivedToBase:
697 case CK_MemberPointerToBoolean:
699 case CK_FloatingRealToComplex:
700 case CK_FloatingComplexToReal:
701 case CK_FloatingComplexToBoolean:
702 case CK_FloatingComplexCast:
703 case CK_FloatingComplexToIntegralComplex:
704 case CK_IntegralRealToComplex:
705 case CK_IntegralComplexToReal:
706 case CK_IntegralComplexToBoolean:
707 case CK_IntegralComplexCast:
708 case CK_IntegralComplexToFloatingComplex:
709 case CK_PointerToIntegral:
710 case CK_PointerToBoolean:
711 case CK_NullToPointer:
712 case CK_IntegralCast:
713 case CK_IntegralToPointer:
714 case CK_IntegralToBoolean:
715 case CK_IntegralToFloating:
716 case CK_FloatingToIntegral:
717 case CK_FloatingToBoolean:
718 case CK_FloatingCast:
719 case CK_ZeroToOCLEvent:
722 llvm_unreachable("Invalid CastKind");
// Default arguments, default member initializers, and materialized
// temporaries all forward to the underlying initializing expression.
725 llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
726 return Visit(DAE->getExpr());
729 llvm::Constant *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
730 // No need for a DefaultInitExprScope: we don't handle 'this' in a
731 // constant expression.
732 return Visit(DIE->getExpr());
735 llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
736 return Visit(E->GetTemporaryExpr());
// Emit a constant for an array initializer list. Explicit initializers are
// emitted element by element; remaining elements are filled with the array
// filler (or a null constant). If all elements are equal and zero, emits a
// ConstantAggregateZero. If element types differ from the converted element
// type, falls back to an anonymous packed-layout struct that is
// layout-compatible with the array.
739 llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
// String-literal init ("char s[] = \"...\"") is handled by the string path.
740 if (ILE->isStringLiteralInit())
741 return Visit(ILE->getInit(0));
743 llvm::ArrayType *AType =
744 cast<llvm::ArrayType>(ConvertType(ILE->getType()));
745 llvm::Type *ElemTy = AType->getElementType();
746 unsigned NumInitElements = ILE->getNumInits();
747 unsigned NumElements = AType->getNumElements();
749 // Initialising an array requires us to automatically
750 // initialise any elements that have not been initialised explicitly
751 unsigned NumInitableElts = std::min(NumInitElements, NumElements);
753 // Initialize remaining array elements.
754 // FIXME: This doesn't handle member pointers correctly!
755 llvm::Constant *fillC;
756 if (Expr *filler = ILE->getArrayFiller())
757 fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
759 fillC = llvm::Constant::getNullValue(ElemTy);
763 // Try to use a ConstantAggregateZero if we can.
764 if (fillC->isNullValue() && !NumInitableElts)
765 return llvm::ConstantAggregateZero::get(AType);
767 // Copy initializer elements.
768 std::vector<llvm::Constant*> Elts;
769 Elts.reserve(NumInitableElts + NumElements);
// Track whether any emitted element's type differs from ElemTy; if so,
// the result cannot be a plain ConstantArray.
771 bool RewriteType = false;
772 for (unsigned i = 0; i < NumInitableElts; ++i) {
773 Expr *Init = ILE->getInit(i);
774 llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
777 RewriteType |= (C->getType() != ElemTy);
781 RewriteType |= (fillC->getType() != ElemTy);
782 Elts.resize(NumElements, fillC);
785 // FIXME: Try to avoid packing the array
786 std::vector<llvm::Type*> Types;
787 Types.reserve(NumInitableElts + NumElements);
788 for (unsigned i = 0, e = Elts.size(); i < e; ++i)
789 Types.push_back(Elts[i]->getType());
790 llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
792 return llvm::ConstantStruct::get(SType, Elts);
795 return llvm::ConstantArray::get(AType, Elts);
// Struct/union initializer lists go through ConstStructBuilder.
798 llvm::Constant *EmitRecordInitialization(InitListExpr *ILE) {
799 return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
// Implicit value-initialization is a null constant of the target type.
802 llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
803 return CGM.EmitNullConstant(E->getType());
// Dispatch an init list to the array or record path by type.
806 llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
807 if (ILE->getType()->isArrayType())
808 return EmitArrayInitialization(ILE);
810 if (ILE->getType()->isRecordType())
811 return EmitRecordInitialization(ILE);
// A trivial constructor call can be constant-emitted: a trivial default
// constructor yields a null constant; a trivial copy/move constructor
// forwards to its single argument (the forwarding Visit is elided in this
// view). Non-trivial ctors/dtors bail out.
816 llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
817 if (!E->getConstructor()->isTrivial())
820 QualType Ty = E->getType();
822 // FIXME: We should not have to call getBaseElementType here.
823 const RecordType *RT =
824 CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
825 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
827 // If the class doesn't have a trivial destructor, we can't emit it as a
829 if (!RD->hasTrivialDestructor())
832 // Only copy and default constructors can be trivial.
835 if (E->getNumArgs()) {
836 assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
837 assert(E->getConstructor()->isCopyOrMoveConstructor() &&
838 "trivial ctor has argument but isn't a copy/move ctor");
840 Expr *Arg = E->getArg(0);
841 assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
842 "argument to copy ctor is of wrong type");
// Trivial default construction: zero/null-initialize the object.
847 return CGM.EmitNullConstant(Ty);
// String literals become constant data arrays (not pointers to globals).
850 llvm::Constant *VisitStringLiteral(StringLiteral *E) {
851 return CGM.GetConstantArrayFromStringLiteral(E);
854 llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
855 // This must be an @encode initializing an array in a static initializer.
856 // Don't emit it as the address of the string, emit the string data itself
857 // as an inline array.
859 CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
860 QualType T = E->getType();
// Look through typeof(expr) to find the underlying array type.
861 if (T->getTypeClass() == Type::TypeOfExpr)
862 T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
863 const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
865 // Resize the string to the right size, adding zeros at the end, or
866 // truncating as needed.
867 Str.resize(CAT->getSize().getZExtValue(), '\0');
// AddNull=false: the resize above already produced the exact length.
868 return llvm::ConstantDataArray::getString(VMContext, Str, false);
// __extension__ is transparent for constant emission.
871 llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
872 return Visit(E->getSubExpr());
// Convenience wrapper over the module's type converter.
876 llvm::Type *ConvertType(QualType T) {
877 return CGM.getTypes().ConvertType(T);
// Emit the address constant for an lvalue base: either a declaration
// (function, global/static variable, weakref) or one of the expression
// forms that denote an object with a stable address (string literals,
// compound literals, blocks, typeid, static-duration temporaries, ...).
// NOTE(review): several case labels, else keywords, and failure returns are
// elided in this view.
881 llvm::Constant *EmitLValue(APValue::LValueBase LVBase) {
882 if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
883 if (Decl->hasAttr<WeakRefAttr>())
884 return CGM.GetWeakRefReference(Decl);
885 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
886 return CGM.GetAddrOfFunction(FD);
887 if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
888 // We can never refer to a variable with local storage.
889 if (!VD->hasLocalStorage()) {
890 if (VD->isFileVarDecl() || VD->hasExternalStorage())
891 return CGM.GetAddrOfGlobalVar(VD);
// A constant-evaluated reference to a function-local static.
892 else if (VD->isLocalVarDecl())
893 return CGM.getOrCreateStaticVarDecl(
894 *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
// Otherwise the base is an expression; dispatch on its statement class.
900 Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
901 switch (E->getStmtClass()) {
903 case Expr::CompoundLiteralExprClass: {
904 // Note that due to the nature of compound literals, this is guaranteed
905 // to be the only use of the variable, so we just generate it here.
906 CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
907 llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
908 CLE->getType(), CGF);
909 // FIXME: "Leaked" on failure.
// Materialize the literal as an internal global and return its address.
911 C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
912 E->getType().isConstant(CGM.getContext()),
913 llvm::GlobalValue::InternalLinkage,
914 C, ".compoundliteral", nullptr,
915 llvm::GlobalVariable::NotThreadLocal,
916 CGM.getContext().getTargetAddressSpace(E->getType()));
919 case Expr::StringLiteralClass:
920 return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
921 case Expr::ObjCEncodeExprClass:
922 return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
923 case Expr::ObjCStringLiteralClass: {
924 ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
926 CGM.getObjCRuntime().GenerateConstantString(SL->getString());
927 return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
929 case Expr::PredefinedExprClass: {
930 unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
// Inside a function, __func__ et al. come from the function's lvalue;
// otherwise emit placeholder strings.
932 LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
933 return cast<llvm::Constant>(Res.getAddress());
934 } else if (Type == PredefinedExpr::PrettyFunction) {
935 return CGM.GetAddrOfConstantCString("top level", ".tmp");
938 return CGM.GetAddrOfConstantCString("", ".tmp");
940 case Expr::AddrLabelExprClass: {
941 assert(CGF && "Invalid address of label expression outside function.");
942 llvm::Constant *Ptr =
943 CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
944 return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
946 case Expr::CallExprClass: {
// Only the CFString/NSString maker builtins are constant-foldable calls.
947 CallExpr* CE = cast<CallExpr>(E);
948 unsigned builtin = CE->getBuiltinCallee();
950 Builtin::BI__builtin___CFStringMakeConstantString &&
952 Builtin::BI__builtin___NSStringMakeConstantString)
954 const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
955 const StringLiteral *Literal = cast<StringLiteral>(Arg);
957 Builtin::BI__builtin___NSStringMakeConstantString) {
958 return CGM.getObjCRuntime().GenerateConstantString(Literal);
960 // FIXME: need to deal with UCN conversion issues.
961 return CGM.GetAddrOfConstantCFString(Literal);
963 case Expr::BlockExprClass: {
// Global block literals are named after the enclosing function if any.
964 std::string FunctionName;
966 FunctionName = CGF->CurFn->getName();
968 FunctionName = "global";
970 return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
972 case Expr::CXXTypeidExprClass: {
973 CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
975 if (Typeid->isTypeOperand())
976 T = Typeid->getTypeOperand(CGM.getContext());
978 T = Typeid->getExprOperand()->getType();
979 return CGM.GetAddrOfRTTIDescriptor(T);
981 case Expr::CXXUuidofExprClass: {
982 return CGM.GetAddrOfUuidDescriptor(cast<CXXUuidofExpr>(E));
984 case Expr::MaterializeTemporaryExprClass: {
// Only static-storage-duration temporaries have a constant address.
985 MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(E);
986 assert(MTE->getStorageDuration() == SD_Static);
987 SmallVector<const Expr *, 2> CommaLHSs;
988 SmallVector<SubobjectAdjustment, 2> Adjustments;
989 const Expr *Inner = MTE->GetTemporaryExpr()
990 ->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
991 return CGM.GetAddrOfGlobalTemporary(MTE, Inner);
999 } // end anonymous namespace.
// Emit a constant initializer for a variable declaration, or null if the
// initializer is not a constant (failure returns are elided in this view).
// Fast path: a trivial default constructor means plain null-initialization.
// Otherwise try full constant evaluation, then fall back to the syntactic
// ConstExprEmitter (skipping references, which it would emit incorrectly).
1001 llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D,
1002 CodeGenFunction *CGF) {
1003 // Make a quick check if variable can be default NULL initialized
1004 // and avoid going through rest of code which may do, for c++11,
1005 // initialization of memory to all NULLs.
1006 if (!D.hasLocalStorage()) {
1007 QualType Ty = D.getType();
1008 if (Ty->isArrayType())
1009 Ty = Context.getBaseElementType(Ty);
1010 if (Ty->isRecordType())
1011 if (const CXXConstructExpr *E =
1012 dyn_cast_or_null<CXXConstructExpr>(D.getInit())) {
1013 const CXXConstructorDecl *CD = E->getConstructor();
1014 if (CD->isTrivial() && CD->isDefaultConstructor())
1015 return EmitNullConstant(D.getType());
// Preferred path: the frontend already evaluated the initializer.
1019 if (const APValue *Value = D.evaluateValue())
1020 return EmitConstantValueForMemory(*Value, D.getType(), CGF);
1022 // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
1023 // reference is a constant expression, and the reference binds to a temporary,
1024 // then constant initialization is performed. ConstExprEmitter will
1025 // incorrectly emit a prvalue constant in this case, and the calling code
1026 // interprets that as the (pointer) value of the reference, rather than the
1027 // desired value of the referee.
1028 if (D.getType()->isReferenceType())
1031 const Expr *E = D.getInit();
1032 assert(E && "No initializer to emit");
1034 llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
// An i1 result must be widened to the in-memory bool representation.
1035 if (C && C->getType()->isIntegerTy(1)) {
1036 llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
1037 C = llvm::ConstantExpr::getZExt(C, BoolTy);
// Emit expression E as a constant of type DestType, or null if E is not a
// constant expression. Reference destinations are evaluated as lvalues;
// everything else as rvalues. CGF may be null outside of a function.
1042 llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
1044 CodeGenFunction *CGF) {
1045 Expr::EvalResult Result;
1047 bool Success = false;
1049 if (DestType->isReferenceType())
1050 Success = E->EvaluateAsLValue(Result, Context);
1052 Success = E->EvaluateAsRValue(Result, Context);
1054 llvm::Constant *C = nullptr;
// Only trust the evaluated result when evaluation had no side effects.
1055 if (Success && !Result.HasSideEffects)
1056 C = EmitConstantValue(Result.Val, DestType, CGF);
// Otherwise fall back to the syntax-directed constant emitter.
1058 C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
// Widen i1 results to the in-memory boolean representation.
1060 if (C && C->getType()->isIntegerTy(1)) {
1061 llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
1062 C = llvm::ConstantExpr::getZExt(C, BoolTy);
// Lower an already-evaluated APValue of type DestType into an llvm::Constant.
// Dispatches on the APValue kind (lvalue, int, float, complex, vector,
// struct/union, array, member pointer, addr-label difference).
// NOTE(review): this view is elided — closing braces, case labels and some
// statements between the visible lines are not shown.
1067 llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
1069 CodeGenFunction *CGF) {
1070 // For an _Atomic-qualified constant, we may need to add tail padding.
1071 if (auto *AT = DestType->getAs<AtomicType>()) {
1072 QualType InnerType = AT->getValueType();
1073 auto *Inner = EmitConstantValue(Value, InnerType, CGF);
1075 uint64_t InnerSize = Context.getTypeSize(InnerType);
1076 uint64_t OuterSize = Context.getTypeSize(DestType);
1077 if (InnerSize == OuterSize)
1080 assert(InnerSize < OuterSize && "emitted over-large constant for atomic");
// Wrap the inner constant in an anonymous struct padded with zero bytes
// up to the atomic type's full size.
1081 llvm::Constant *Elts[] = {
1083 llvm::ConstantAggregateZero::get(
1084 llvm::ArrayType::get(Int8Ty, (OuterSize - InnerSize) / 8))
1086 return llvm::ConstantStruct::getAnon(Elts);
1089 switch (Value.getKind()) {
1090 case APValue::Uninitialized:
1091 llvm_unreachable("Constant expressions should be initialized.");
1092 case APValue::LValue: {
1093 llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
1094 llvm::Constant *Offset =
1095 llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());
1098 if (APValue::LValueBase LVBase = Value.getLValueBase()) {
1099 // An array can be represented as an lvalue referring to the base.
1100 if (isa<llvm::ArrayType>(DestTy)) {
1101 assert(Offset->isNullValue() && "offset on array initializer");
1102 return ConstExprEmitter(*this, CGF).Visit(
1103 const_cast<Expr*>(LVBase.get<const Expr*>()));
1106 C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase);
1108 // Apply offset if necessary.
// Offsets are applied as a byte-wise GEP through an i8* view of the base,
// then cast back to the base's pointer type.
1109 if (!Offset->isNullValue()) {
1110 unsigned AS = C->getType()->getPointerAddressSpace();
1111 llvm::Type *CharPtrTy = Int8Ty->getPointerTo(AS);
1112 llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, CharPtrTy);
1113 Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset);
1114 C = llvm::ConstantExpr::getPointerCast(Casted, C->getType());
1117 // Convert to the appropriate type; this could be an lvalue for
1119 if (isa<llvm::PointerType>(DestTy))
1120 return llvm::ConstantExpr::getPointerCast(C, DestTy);
1122 return llvm::ConstantExpr::getPtrToInt(C, DestTy);
1126 // Convert to the appropriate type; this could be an lvalue for
1128 if (isa<llvm::PointerType>(DestTy))
1129 return llvm::ConstantExpr::getIntToPtr(C, DestTy);
1131 // If the types don't match this should only be a truncate.
1132 if (C->getType() != DestTy)
1133 return llvm::ConstantExpr::getTrunc(C, DestTy);
1139 return llvm::ConstantInt::get(VMContext, Value.getInt());
1140 case APValue::ComplexInt: {
// Complex integers are emitted as a two-element {real, imag} struct.
1141 llvm::Constant *Complex[2];
1143 Complex[0] = llvm::ConstantInt::get(VMContext,
1144 Value.getComplexIntReal());
1145 Complex[1] = llvm::ConstantInt::get(VMContext,
1146 Value.getComplexIntImag());
1148 // FIXME: the target may want to specify that this is packed.
1149 llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
1150 Complex[1]->getType(),
1152 return llvm::ConstantStruct::get(STy, Complex);
1154 case APValue::Float: {
1155 const llvm::APFloat &Init = Value.getFloat();
// Without native half support, __fp16 constants are emitted as i16 bit
// patterns rather than as floating-point constants.
1156 if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf &&
1157 !Context.getLangOpts().NativeHalfType &&
1158 !Context.getLangOpts().HalfArgsAndReturns)
1159 return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
1161 return llvm::ConstantFP::get(VMContext, Init);
1163 case APValue::ComplexFloat: {
// Complex floats follow the same {real, imag} struct layout as above.
1164 llvm::Constant *Complex[2];
1166 Complex[0] = llvm::ConstantFP::get(VMContext,
1167 Value.getComplexFloatReal());
1168 Complex[1] = llvm::ConstantFP::get(VMContext,
1169 Value.getComplexFloatImag());
1171 // FIXME: the target may want to specify that this is packed.
1172 llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
1173 Complex[1]->getType(),
1175 return llvm::ConstantStruct::get(STy, Complex);
1177 case APValue::Vector: {
// Emit each vector element; elements are either ints or floats (branch
// selection lines are elided in this view).
1178 SmallVector<llvm::Constant *, 4> Inits;
1179 unsigned NumElts = Value.getVectorLength();
1181 for (unsigned i = 0; i != NumElts; ++i) {
1182 const APValue &Elt = Value.getVectorElt(i);
1184 Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
1186 Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
1188 return llvm::ConstantVector::get(Inits);
1190 case APValue::AddrLabelDiff: {
1191 const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
1192 const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
1193 llvm::Constant *LHS = EmitConstantExpr(LHSExpr, LHSExpr->getType(), CGF);
1194 llvm::Constant *RHS = EmitConstantExpr(RHSExpr, RHSExpr->getType(), CGF);
1196 // Compute difference
1197 llvm::Type *ResultType = getTypes().ConvertType(DestType);
1198 LHS = llvm::ConstantExpr::getPtrToInt(LHS, IntPtrTy);
1199 RHS = llvm::ConstantExpr::getPtrToInt(RHS, IntPtrTy);
1200 llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS);
1202 // LLVM is a bit sensitive about the exact format of the
1203 // address-of-label difference; make sure to truncate after
1205 return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType);
1207 case APValue::Struct:
1208 case APValue::Union:
1209 return ConstStructBuilder::BuildStruct(*this, CGF, Value, DestType);
1210 case APValue::Array: {
1211 const ArrayType *CAT = Context.getAsArrayType(DestType);
1212 unsigned NumElements = Value.getArraySize();
1213 unsigned NumInitElts = Value.getArrayInitializedElts();
1215 // Emit array filler, if there is one.
1216 llvm::Constant *Filler = nullptr;
1217 if (Value.hasArrayFiller())
1218 Filler = EmitConstantValueForMemory(Value.getArrayFiller(),
1219 CAT->getElementType(), CGF);
1221 // Emit initializer elements.
1222 llvm::Type *CommonElementType =
1223 getTypes().ConvertType(CAT->getElementType());
1225 // Try to use a ConstantAggregateZero if we can.
1226 if (Filler && Filler->isNullValue() && !NumInitElts) {
1227 llvm::ArrayType *AType =
1228 llvm::ArrayType::get(CommonElementType, NumElements);
1229 return llvm::ConstantAggregateZero::get(AType);
1232 std::vector<llvm::Constant*> Elts;
1233 Elts.reserve(NumElements);
1234 for (unsigned I = 0; I < NumElements; ++I) {
// Elements past the explicitly-initialized prefix use the filler.
1235 llvm::Constant *C = Filler;
1236 if (I < NumInitElts)
1237 C = EmitConstantValueForMemory(Value.getArrayInitializedElt(I),
1238 CAT->getElementType(), CGF);
1240 assert(Filler && "Missing filler for implicit elements of initializer");
// Track whether all elements share one LLVM type; if not, the array must
// be emitted as a packed struct instead.
1242 CommonElementType = C->getType();
1243 else if (C->getType() != CommonElementType)
1244 CommonElementType = nullptr;
1248 if (!CommonElementType) {
1249 // FIXME: Try to avoid packing the array
1250 std::vector<llvm::Type*> Types;
1251 Types.reserve(NumElements);
1252 for (unsigned i = 0, e = Elts.size(); i < e; ++i)
1253 Types.push_back(Elts[i]->getType());
1254 llvm::StructType *SType = llvm::StructType::get(VMContext, Types, true);
1255 return llvm::ConstantStruct::get(SType, Elts);
1258 llvm::ArrayType *AType =
1259 llvm::ArrayType::get(CommonElementType, NumElements);
1260 return llvm::ConstantArray::get(AType, Elts);
1262 case APValue::MemberPointer:
// Member pointer representation is ABI-specific; delegate to the C++ ABI.
1263 return getCXXABI().EmitMemberPointer(Value, DestType);
1265 llvm_unreachable("Unknown APValue kind");
// Like EmitConstantValue, but produces the in-memory representation: i1
// boolean results are zero-extended to the memory type of DestType.
1269 CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
1271 CodeGenFunction *CGF) {
1272 llvm::Constant *C = EmitConstantValue(Value, DestType, CGF);
1273 if (C->getType()->isIntegerTy(1)) {
1274 llvm::Type *BoolTy = getTypes().ConvertTypeForMem(DestType);
1275 C = llvm::ConstantExpr::getZExt(C, BoolTy);
// Return the address of a file-scope compound literal by emitting it as a
// global via the constant expression emitter's lvalue path.
1281 CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
1282 assert(E->isFileScope() && "not a file-scope compound literal expr");
1283 return ConstExprEmitter(*this, nullptr).EmitLValue(E);
// Emit the constant for a member pointer formed by &Class::member.
// Delegates to the C++ ABI: method pointers are ABI-specific; data member
// pointers are represented via the field's byte offset.
1287 CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
1288 // Member pointer constants always have a very particular form.
1289 const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
1290 const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();
1292 // A member function pointer.
1293 if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
1294 return getCXXABI().EmitMemberPointer(method);
1296 // Otherwise, a member data pointer.
1297 uint64_t fieldOffset = getContext().getFieldOffset(decl);
1298 CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
1299 return getCXXABI().EmitMemberDataPointer(type, chars);
// Forward declaration: EmitNullConstant (below) and EmitNullConstantForBase
// are mutually referential.
1302 static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
1303 llvm::Type *baseType,
1304 const CXXRecordDecl *base);
// Build the "null" constant for a C++ record: zero for everything except
// pointers-to-data-members, which have a non-zero null representation and
// must be emitted explicitly. asCompleteObject selects between the complete
// object layout (including virtual bases) and the base-subobject layout.
// NOTE(review): this view is elided — some `continue` statements and braces
// between the visible lines are not shown.
1306 static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
1307 const CXXRecordDecl *record,
1308 bool asCompleteObject) {
1309 const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
1310 llvm::StructType *structure =
1311 (asCompleteObject ? layout.getLLVMType()
1312 : layout.getBaseSubobjectLLVMType());
1314 unsigned numElements = structure->getNumElements();
1315 std::vector<llvm::Constant *> elements(numElements);
1317 // Fill in all the bases.
1318 for (const auto &I : record->bases()) {
1319 if (I.isVirtual()) {
1320 // Ignore virtual bases; if we're laying out for a complete
1321 // object, we'll lay these out later.
1325 const CXXRecordDecl *base =
1326 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
1328 // Ignore empty bases.
1329 if (base->isEmpty())
1332 unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
1333 llvm::Type *baseType = structure->getElementType(fieldIndex);
1334 elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
1337 // Fill in all the fields.
1338 for (const auto *Field : record->fields()) {
1339 // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
1340 // will fill in later.)
1341 if (!Field->isBitField()) {
1342 unsigned fieldIndex = layout.getLLVMFieldNo(Field);
1343 elements[fieldIndex] = CGM.EmitNullConstant(Field->getType());
1346 // For unions, stop after the first named field.
1347 if (record->isUnion() && Field->getDeclName())
1351 // Fill in the virtual bases, if we're working with the complete object.
1352 if (asCompleteObject) {
1353 for (const auto &I : record->vbases()) {
1354 const CXXRecordDecl *base =
1355 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
1357 // Ignore empty bases.
1358 if (base->isEmpty())
1361 unsigned fieldIndex = layout.getVirtualBaseIndex(base);
1363 // We might have already laid this field out.
// A virtual base may share a slot with a non-virtual base already emitted.
1364 if (elements[fieldIndex]) continue;
1366 llvm::Type *baseType = structure->getElementType(fieldIndex);
1367 elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
1371 // Now go through all other fields and zero them out.
// Any slot still unset (padding, bitfield storage, etc.) gets a plain zero.
1372 for (unsigned i = 0; i != numElements; ++i) {
1374 elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
1377 return llvm::ConstantStruct::get(structure, elements);
1380 /// Emit the null constant for a base subobject.
// Fast path: if the base has no pointer-to-data-member content, a plain
// zero of its LLVM type is the correct null value; otherwise recurse into
// the explicit record-null builder with the base-subobject layout.
1381 static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
1382 llvm::Type *baseType,
1383 const CXXRecordDecl *base) {
1384 const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
1386 // Just zero out bases that don't have any pointer to data members.
1387 if (baseLayout.isZeroInitializableAsBase())
1388 return llvm::Constant::getNullValue(baseType);
1390 // Otherwise, we can just use its null constant.
1391 return EmitNullConstant(CGM, base, /*asCompleteObject=*/false);
// Emit the null constant for an arbitrary type T. Trivially zero types get
// a plain null value; constant arrays replicate their element's null
// constant; C++ records go through the record-null builder; what remains
// must be a data member pointer, whose null value is ABI-defined.
1394 llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
1395 if (getTypes().isZeroInitializable(T))
1396 return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
1398 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
1399 llvm::ArrayType *ATy =
1400 cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
1402 QualType ElementTy = CAT->getElementType();
1404 llvm::Constant *Element = EmitNullConstant(ElementTy);
1405 unsigned NumElements = CAT->getSize().getZExtValue();
1407 if (Element->isNullValue())
1408 return llvm::ConstantAggregateZero::get(ATy);
// Non-zero element null constant: replicate it across the array.
1410 SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
1411 return llvm::ConstantArray::get(ATy, Array);
1414 if (const RecordType *RT = T->getAs<RecordType>()) {
1415 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1416 return ::EmitNullConstant(*this, RD, /*complete object*/ true);
1419 assert(T->isMemberPointerType() && "Should only see member pointers here!");
1420 assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
1421 "Should only see pointers to data members here!");
1423 return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
1427 CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
1428 return ::EmitNullConstant(*this, Record, false);