1 //===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This contains code to emit Constant Expr nodes as LLVM code.
12 //===----------------------------------------------------------------------===//
14 #include "CodeGenFunction.h"
16 #include "CGObjCRuntime.h"
17 #include "CGRecordLayout.h"
18 #include "CodeGenModule.h"
19 #include "clang/AST/APValue.h"
20 #include "clang/AST/ASTContext.h"
21 #include "clang/AST/RecordLayout.h"
22 #include "clang/AST/StmtVisitor.h"
23 #include "clang/Basic/Builtins.h"
24 #include "llvm/IR/Constants.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/GlobalVariable.h"
28 using namespace clang;
29 using namespace CodeGen;
31 //===----------------------------------------------------------------------===//
33 //===----------------------------------------------------------------------===//
// ConstStructBuilder incrementally builds the LLVM constant initializer for a
// struct or union: field constants plus explicit undef padding accumulate in
// `Elements`, while the running end offset and the struct's alignment are
// tracked so the final constant matches the AST record layout.
// NOTE(review): this listing omits some original lines (blank lines, access
// specifiers, closing braces) — compare against upstream before editing.
36 class ConstStructBuilder {
41 CharUnits NextFieldOffsetInChars;
42 CharUnits LLVMStructAlignment;
43 SmallVector<llvm::Constant *, 32> Elements;
// Entry points: build the constant either from an InitListExpr or from an
// already-evaluated APValue (second overload).
45 static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
47 static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
48 const APValue &Value, QualType ValTy);
// Start empty, unpacked, with a 1-byte alignment floor.
51 ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
52 : CGM(CGM), CGF(CGF), Packed(false),
53 NextFieldOffsetInChars(CharUnits::Zero()),
54 LLVMStructAlignment(CharUnits::One()) { }
56 void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
57 llvm::Constant *InitExpr);
58 void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);
// (The listing uses original line 59 for AppendBytes; kept verbatim below.)
59 void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);
// Append a non-bitfield member: convert the AST bit offset to chars and
// delegate to AppendBytes, which handles padding/packing.
87 void ConstStructBuilder::
88 AppendField(const FieldDecl *Field, uint64_t FieldOffset,
89 llvm::Constant *InitCst) {
90 const ASTContext &Context = CGM.getContext();
// FieldOffset is in bits; layout bookkeeping below is in CharUnits.
92 CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
94 AppendBytes(FieldOffsetInChars, InitCst);
// Place InitCst so it starts at FieldOffsetInChars: insert undef padding when
// the natural (ABI-aligned) next offset falls short, and convert the whole
// struct to a packed layout when alignment would overshoot the target offset.
97 void ConstStructBuilder::
98 AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
100 assert(NextFieldOffsetInChars <= FieldOffsetInChars
101 && "Field offset mismatch!");
103 CharUnits FieldAlignment = getAlignment(InitCst);
105 // Round up the field offset to the alignment of the field type.
106 CharUnits AlignedNextFieldOffsetInChars =
107 NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
109 if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
110 // We need to append padding.
111 AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
113 assert(NextFieldOffsetInChars == FieldOffsetInChars &&
114 "Did not add enough padding!");
// Re-derive the aligned offset now that padding moved NextFieldOffsetInChars.
116 AlignedNextFieldOffsetInChars =
117 NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
// Natural alignment would place the constant past its required offset: the
// only way to hit the exact offset is a packed (1-byte-aligned) struct.
120 if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
121 assert(!Packed && "Alignment is wrong even with a packed struct!");
123 // Convert the struct to a packed struct.
124 ConvertStructToPacked();
126 // After we pack the struct, we may need to insert padding.
127 if (NextFieldOffsetInChars < FieldOffsetInChars) {
128 // We need to append padding.
129 AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
131 assert(NextFieldOffsetInChars == FieldOffsetInChars &&
132 "Did not add enough padding!");
134 AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
138 Elements.push_back(InitCst);
139 NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
140 getSizeInChars(InitCst);
// NOTE(review): upstream guards this assert with `if (Packed)` — the guard
// line is not visible in this listing.
143 assert(LLVMStructAlignment == CharUnits::One() &&
144 "Packed struct not byte-aligned!");
146 LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
// Merge a bit-field's constant value (CI) into the byte stream at bit offset
// FieldOffset. Handles: padding up to the field, OR-ing leading bits into a
// partially-filled previous byte, then emitting whole bytes, with distinct
// bit-selection order for big- vs little-endian targets.
149 void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
150 uint64_t FieldOffset,
151 llvm::ConstantInt *CI) {
152 const ASTContext &Context = CGM.getContext();
153 const uint64_t CharWidth = Context.getCharWidth();
154 uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
155 if (FieldOffset > NextFieldOffsetInBits) {
156 // We need to add padding.
157 CharUnits PadSize = Context.toCharUnitsFromBits(
158 llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBits,
159 Context.getTargetInfo().getCharAlign()));
161 AppendPadding(PadSize);
164 uint64_t FieldSize = Field->getBitWidthValue(Context);
166 llvm::APInt FieldValue = CI->getValue();
168 // Promote the size of FieldValue if necessary
169 // FIXME: This should never occur, but currently it can because initializer
170 // constants are cast to bool, and because clang is not enforcing bitfield
172 if (FieldSize > FieldValue.getBitWidth())
173 FieldValue = FieldValue.zext(FieldSize);
175 // Truncate the size of FieldValue to the bit field size.
176 if (FieldSize < FieldValue.getBitWidth())
177 FieldValue = FieldValue.trunc(FieldSize);
179 NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
180 if (FieldOffset < NextFieldOffsetInBits) {
181 // Either part of the field or the entire field can go into the previous
183 assert(!Elements.empty() && "Elements can't be empty!");
185 unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset;
187 bool FitsCompletelyInPreviousByte =
188 BitsInPreviousByte >= FieldValue.getBitWidth();
190 llvm::APInt Tmp = FieldValue;
192 if (!FitsCompletelyInPreviousByte) {
193 unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
// Big-endian: the previous byte takes the HIGH bits of the field;
// little-endian (else path below): it takes the LOW bits.
195 if (CGM.getDataLayout().isBigEndian()) {
196 Tmp = Tmp.lshr(NewFieldWidth);
197 Tmp = Tmp.trunc(BitsInPreviousByte);
199 // We want the remaining high bits.
200 FieldValue = FieldValue.trunc(NewFieldWidth);
202 Tmp = Tmp.trunc(BitsInPreviousByte);
204 // We want the remaining low bits.
205 FieldValue = FieldValue.lshr(BitsInPreviousByte);
206 FieldValue = FieldValue.trunc(NewFieldWidth);
// Widen to a full char and shift into the correct bit position within it.
210 Tmp = Tmp.zext(CharWidth);
211 if (CGM.getDataLayout().isBigEndian()) {
212 if (FitsCompletelyInPreviousByte)
213 Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
215 Tmp = Tmp.shl(CharWidth - BitsInPreviousByte);
218 // 'or' in the bits that go into the previous byte.
219 llvm::Value *LastElt = Elements.back();
220 if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
221 Tmp |= Val->getValue();
223 assert(isa<llvm::UndefValue>(LastElt));
224 // If there is an undef field that we're adding to, it can either be a
225 // scalar undef (in which case, we just replace it with our field) or it
226 // is an array. If it is an array, we have to pull one byte off the
227 // array so that the other undef bytes stay around.
228 if (!isa<llvm::IntegerType>(LastElt->getType())) {
229 // The undef padding will be a multibyte array, create a new smaller
230 // padding and then an hole for our i8 to get plopped into.
231 assert(isa<llvm::ArrayType>(LastElt->getType()) &&
232 "Expected array padding of undefs");
233 llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
234 assert(AT->getElementType()->isIntegerTy(CharWidth) &&
235 AT->getNumElements() != 0 &&
236 "Expected non-empty array padding of undefs");
238 // Remove the padding array.
239 NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements());
242 // Add the padding back in two chunks.
243 AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1));
244 AppendPadding(CharUnits::One());
245 assert(isa<llvm::UndefValue>(Elements.back()) &&
246 Elements.back()->getType()->isIntegerTy(CharWidth) &&
247 "Padding addition didn't work right");
// Overwrite the final (scalar) byte with the merged bits.
251 Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
// NOTE(review): upstream has a `return;` under this condition; the statement
// line is not visible in this listing.
253 if (FitsCompletelyInPreviousByte)
// Emit the remaining value one whole byte at a time.
257 while (FieldValue.getBitWidth() > CharWidth) {
260 if (CGM.getDataLayout().isBigEndian()) {
261 // We want the high bits.
263 FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
265 // We want the low bits.
266 Tmp = FieldValue.trunc(CharWidth);
268 FieldValue = FieldValue.lshr(CharWidth);
271 Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
272 ++NextFieldOffsetInChars;
274 FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth);
277 assert(FieldValue.getBitWidth() > 0 &&
278 "Should have at least one bit left!");
279 assert(FieldValue.getBitWidth() <= CharWidth &&
280 "Should not have more than a byte left!");
// A final partial byte: position the leftover bits within a full char.
282 if (FieldValue.getBitWidth() < CharWidth) {
283 if (CGM.getDataLayout().isBigEndian()) {
284 unsigned BitWidth = FieldValue.getBitWidth();
286 FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
288 FieldValue = FieldValue.zext(CharWidth);
291 // Append the last element.
292 Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
294 ++NextFieldOffsetInChars;
// Append PadSize chars of undef padding (a single i8, or [N x i8] for more
// than one byte) and advance the running offset accordingly.
297 void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
298 if (PadSize.isZero())
301 llvm::Type *Ty = CGM.Int8Ty;
302 if (PadSize > CharUnits::One())
303 Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
305 llvm::Constant *C = llvm::UndefValue::get(Ty);
306 Elements.push_back(C);
// Padding must never raise LLVMStructAlignment.
307 assert(getAlignment(C) == CharUnits::One() &&
308 "Padding must have 1 byte alignment!");
310 NextFieldOffsetInChars += getSizeInChars(C);
// Pad from the current end offset out to the record's total size.
313 void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
314 assert(NextFieldOffsetInChars <= RecordSize &&
317 AppendPadding(RecordSize - NextFieldOffsetInChars);
// Rebuild Elements as a packed (1-byte-aligned) layout: the implicit padding
// that ABI alignment previously provided is re-inserted as explicit undef
// bytes so every element keeps its original offset.
320 void ConstStructBuilder::ConvertStructToPacked() {
321 SmallVector<llvm::Constant *, 16> PackedElements;
322 CharUnits ElementOffsetInChars = CharUnits::Zero();
324 for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
325 llvm::Constant *C = Elements[i];
// Use the raw ABI alignment here (not getAlignment(), which would already
// report 1 once Packed is set).
327 CharUnits ElementAlign = CharUnits::fromQuantity(
328 CGM.getDataLayout().getABITypeAlignment(C->getType()));
329 CharUnits AlignedElementOffsetInChars =
330 ElementOffsetInChars.RoundUpToAlignment(ElementAlign);
332 if (AlignedElementOffsetInChars > ElementOffsetInChars) {
333 // We need some padding.
335 AlignedElementOffsetInChars - ElementOffsetInChars;
337 llvm::Type *Ty = CGM.Int8Ty;
338 if (NumChars > CharUnits::One())
339 Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
341 llvm::Constant *Padding = llvm::UndefValue::get(Ty);
342 PackedElements.push_back(Padding);
343 ElementOffsetInChars += getSizeInChars(Padding);
346 PackedElements.push_back(C);
347 ElementOffsetInChars += getSizeInChars(C);
// Packing must be layout-neutral: same total size as before.
350 assert(ElementOffsetInChars == NextFieldOffsetInChars &&
351 "Packing the struct changed its size!");
353 Elements.swap(PackedElements);
354 LLVMStructAlignment = CharUnits::One();
// Build the constant from an InitListExpr: walk the record's fields in
// declaration order, pairing each with the next initializer (or a null
// constant when the list is short), and append at the AST layout offset.
// Returns false when a field's initializer cannot be emitted as a constant.
358 bool ConstStructBuilder::Build(InitListExpr *ILE) {
359 RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
360 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
362 unsigned FieldNo = 0;
363 unsigned ElementNo = 0;
365 for (RecordDecl::field_iterator Field = RD->field_begin(),
366 FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
367 // If this is a union, skip all the fields that aren't being initialized.
368 if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
371 // Don't emit anonymous bitfields, they just affect layout.
372 if (Field->isUnnamedBitfield())
375 // Get the initializer. A struct can include fields without initializers,
376 // we just use explicit null values for them.
377 llvm::Constant *EltInit;
378 if (ElementNo < ILE->getNumInits())
379 EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
380 Field->getType(), CGF);
// NOTE(review): the `else` line pairing with this fallback is not visible in
// this listing.
382 EltInit = CGM.EmitNullConstant(Field->getType());
387 if (!Field->isBitField()) {
388 // Handle non-bitfield members.
389 AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit);
391 // Otherwise we have a bitfield.
392 if (auto *CI = dyn_cast<llvm::ConstantInt>(EltInit)) {
393 AppendBitField(*Field, Layout.getFieldOffset(FieldNo), CI);
395 // We are trying to initialize a bitfield with a non-trivial constant,
396 // this must require run-time code.
// BaseInfo pairs a base class with its layout offset and its index in the
// declared base list; ordering by Offset lets bases be visited in address
// order. NOTE(review): the enclosing `struct BaseInfo {` line is not visible
// in this listing.
407 BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
408 : Decl(Decl), Offset(Offset), Index(Index) {
411 const CXXRecordDecl *Decl;
415 bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
// Build the constant from an already-evaluated APValue: emit the vtable
// pointer for dynamic classes (unless this subobject is the primary base,
// whose vptr the derived class provides), recurse into non-virtual bases in
// address order, then append each field's value at its layout offset
// (shifted by this subobject's BaseOffset).
419 void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
421 const CXXRecordDecl *VTableClass,
423 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
425 if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
426 // Add a vtable pointer, if we need one and it hasn't already been added.
427 if (CD->isDynamicClass() && !IsPrimaryBase) {
428 llvm::Constant *VTableAddressPoint =
429 CGM.getCXXABI().getVTableAddressPointForConstExpr(
430 BaseSubobject(CD, Offset), VTableClass);
431 AppendBytes(Offset, VTableAddressPoint);
434 // Accumulate and sort bases, in order to visit them in address order, which
435 // may not be the same as declaration order.
436 SmallVector<BaseInfo, 8> Bases;
437 Bases.reserve(CD->getNumBases());
439 for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
440 BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
441 assert(!Base->isVirtual() && "should not have virtual bases here");
442 const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
443 CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
444 Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
// stable_sort keeps declaration order among bases at equal offsets
// (e.g. empty bases).
446 std::stable_sort(Bases.begin(), Bases.end());
448 for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
449 BaseInfo &Base = Bases[I];
451 bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
452 Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
453 VTableClass, Offset + Base.Offset);
457 unsigned FieldNo = 0;
// Field offsets from the layout are relative to RD; add the subobject's
// offset (in bits) to place them in the complete object.
458 uint64_t OffsetBits = CGM.getContext().toBits(Offset);
460 for (RecordDecl::field_iterator Field = RD->field_begin(),
461 FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
462 // If this is a union, skip all the fields that aren't being initialized.
463 if (RD->isUnion() && Val.getUnionField() != *Field)
466 // Don't emit anonymous bitfields, they just affect layout.
467 if (Field->isUnnamedBitfield())
470 // Emit the value of the initializer.
471 const APValue &FieldValue =
472 RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
473 llvm::Constant *EltInit =
474 CGM.EmitConstantValueForMemory(FieldValue, Field->getType(), CGF);
475 assert(EltInit && "EmitConstantValue can't fail");
477 if (!Field->isBitField()) {
478 // Handle non-bitfield members.
479 AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit);
481 // Otherwise we have a bitfield.
482 AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
483 cast<llvm::ConstantInt>(EltInit));
// Finish the build: add tail padding (or verify a flexible-array overflow),
// pack if the aligned LLVM size would exceed the AST layout size, then emit
// a ConstantStruct — reusing the converted-from-AST struct type when the two
// layouts are identical.
488 llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
489 RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
490 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
492 CharUnits LayoutSizeInChars = Layout.getSize();
494 if (NextFieldOffsetInChars > LayoutSizeInChars) {
495 // If the struct is bigger than the size of the record type,
496 // we must have a flexible array member at the end.
497 assert(RD->hasFlexibleArrayMember() &&
498 "Must have flexible array member if struct is bigger than type!");
500 // No tail padding is necessary.
502 // Append tail padding if necessary.
503 CharUnits LLVMSizeInChars =
504 NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
506 if (LLVMSizeInChars != LayoutSizeInChars)
507 AppendTailPadding(LayoutSizeInChars);
// NOTE(review): upstream re-assigns LLVMSizeInChars here; the left-hand side
// of this expression is not visible in this listing.
510 NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
512 // Check if we need to convert the struct to a packed struct.
513 if (NextFieldOffsetInChars <= LayoutSizeInChars &&
514 LLVMSizeInChars > LayoutSizeInChars) {
515 assert(!Packed && "Size mismatch!");
517 ConvertStructToPacked();
518 assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
519 "Converting to packed did not help!");
523 NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
525 assert(LayoutSizeInChars == LLVMSizeInChars &&
526 "Tail padding mismatch!");
529 // Pick the type to use. If the type is layout identical to the ConvertType
530 // type then use it, otherwise use whatever the builder produced for us.
531 llvm::StructType *STy =
532 llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
534 llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty);
535 if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) {
536 if (ValSTy->isLayoutIdentical(STy))
540 llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements);
542 assert(NextFieldOffsetInChars.RoundUpToAlignment(getAlignment(Result)) ==
543 getSizeInChars(Result) && "Size mismatch!");
// Static entry point: build a constant from an InitListExpr, returning the
// finalized struct (Build's failure path is not visible in this listing).
548 llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
549 CodeGenFunction *CGF,
551 ConstStructBuilder Builder(CGM, CGF);
553 if (!Builder.Build(ILE))
556 return Builder.Finalize(ILE->getType());
// Static entry point: build a constant from an evaluated APValue. The most
// derived class (when C++) is passed as the vtable class; offset starts at 0.
559 llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
560 CodeGenFunction *CGF,
563 ConstStructBuilder Builder(CGM, CGF);
565 const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
566 const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
567 Builder.Build(Val, RD, false, CD, CharUnits::Zero());
569 return Builder.Finalize(ValTy);
573 //===----------------------------------------------------------------------===//
575 //===----------------------------------------------------------------------===//
577 /// This class only needs to handle two cases:
578 /// 1) Literals (this is used by APValue emission to emit literals).
579 /// 2) Arrays, structs and unions (outside C++11 mode, we don't currently
580 /// constant fold these types).
// ConstExprEmitter folds constant expressions by walking the AST directly.
// Per the comment above: it handles literals and (pre-C++11) aggregate
// initializers; everything else defers to Expr::Evaluate via EmitConstantExpr.
// NOTE(review): the CGM member declaration is not visible in this listing,
// though the constructor initializes it.
581 class ConstExprEmitter :
582 public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
584 CodeGenFunction *CGF;
585 llvm::LLVMContext &VMContext;
587 ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
588 : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
591 //===--------------------------------------------------------------------===//
593 //===--------------------------------------------------------------------===//
// Fallback: unknown statement kinds cannot be constant-folded here (the
// return value is not visible in this listing).
595 llvm::Constant *VisitStmt(Stmt *S) {
// The visitors below are pure unwrappers: each forwards to the expression
// that carries the actual value.
599 llvm::Constant *VisitParenExpr(ParenExpr *PE) {
600 return Visit(PE->getSubExpr());
604 VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
605 return Visit(PE->getReplacement());
608 llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
609 return Visit(GE->getResultExpr());
612 llvm::Constant *VisitChooseExpr(ChooseExpr *CE) {
613 return Visit(CE->getChosenSubExpr());
616 llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
617 return Visit(E->getInitializer());
// Emit a cast of a constant. Only a few cast kinds are materialized here
// (union casts, address-space conversions, no-op value casts, member-pointer
// conversions); most arithmetic casts are left to Expr::Evaluate.
620 llvm::Constant *VisitCastExpr(CastExpr* E) {
621 Expr *subExpr = E->getSubExpr();
622 llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
623 if (!C) return nullptr;
625 llvm::Type *destType = ConvertType(E->getType());
627 switch (E->getCastKind()) {
// NOTE(review): a `case CK_ToUnion:` label appears to be missing from this
// listing before the union-building code below.
629 // GCC cast to union extension
630 assert(E->getType()->isUnionType() &&
631 "Destination type is not union type!");
633 // Build a struct with the union sub-element as the first member,
634 // and padded to the appropriate size
635 SmallVector<llvm::Constant*, 2> Elts;
636 SmallVector<llvm::Type*, 2> Types;
638 Types.push_back(C->getType());
639 unsigned CurSize = CGM.getDataLayout().getTypeAllocSize(C->getType());
640 unsigned TotalSize = CGM.getDataLayout().getTypeAllocSize(destType);
642 assert(CurSize <= TotalSize && "Union size mismatch!");
643 if (unsigned NumPadBytes = TotalSize - CurSize) {
644 llvm::Type *Ty = CGM.Int8Ty;
646 Ty = llvm::ArrayType::get(Ty, NumPadBytes);
648 Elts.push_back(llvm::UndefValue::get(Ty));
652 llvm::StructType* STy =
653 llvm::StructType::get(C->getType()->getContext(), Types, false);
654 return llvm::ConstantStruct::get(STy, Elts);
657 case CK_AddressSpaceConversion:
658 return llvm::ConstantExpr::getAddrSpaceCast(C, destType);
// Value-preserving casts: the emitted constant is already correct.
660 case CK_LValueToRValue:
661 case CK_AtomicToNonAtomic:
662 case CK_NonAtomicToAtomic:
664 case CK_ConstructorConversion:
667 case CK_Dependent: llvm_unreachable("saw dependent cast!");
669 case CK_BuiltinFnToFnPtr:
670 llvm_unreachable("builtin functions are handled elsewhere");
672 case CK_ReinterpretMemberPointer:
673 case CK_DerivedToBaseMemberPointer:
674 case CK_BaseToDerivedMemberPointer:
675 return CGM.getCXXABI().EmitMemberPointerConversion(E, C);
677 // These will never be supported.
678 case CK_ObjCObjectLValueCast:
679 case CK_ARCProduceObject:
680 case CK_ARCConsumeObject:
681 case CK_ARCReclaimReturnedObject:
682 case CK_ARCExtendBlockObject:
683 case CK_CopyAndAutoreleaseBlockObject:
686 // These don't need to be handled here because Evaluate knows how to
687 // evaluate them in the cases where they can be folded.
691 case CK_LValueBitCast:
692 case CK_NullToMemberPointer:
693 case CK_UserDefinedConversion:
694 case CK_CPointerToObjCPointerCast:
695 case CK_BlockPointerToObjCPointerCast:
696 case CK_AnyPointerToBlockPointerCast:
697 case CK_ArrayToPointerDecay:
698 case CK_FunctionToPointerDecay:
699 case CK_BaseToDerived:
700 case CK_DerivedToBase:
701 case CK_UncheckedDerivedToBase:
702 case CK_MemberPointerToBoolean:
704 case CK_FloatingRealToComplex:
705 case CK_FloatingComplexToReal:
706 case CK_FloatingComplexToBoolean:
707 case CK_FloatingComplexCast:
708 case CK_FloatingComplexToIntegralComplex:
709 case CK_IntegralRealToComplex:
710 case CK_IntegralComplexToReal:
711 case CK_IntegralComplexToBoolean:
712 case CK_IntegralComplexCast:
713 case CK_IntegralComplexToFloatingComplex:
714 case CK_PointerToIntegral:
715 case CK_PointerToBoolean:
716 case CK_NullToPointer:
717 case CK_IntegralCast:
718 case CK_IntegralToPointer:
719 case CK_IntegralToBoolean:
720 case CK_IntegralToFloating:
721 case CK_FloatingToIntegral:
722 case CK_FloatingToBoolean:
723 case CK_FloatingCast:
724 case CK_ZeroToOCLEvent:
727 llvm_unreachable("Invalid CastKind");
// More unwrappers: default arguments/initializers and materialized
// temporaries all forward to the underlying initializer expression.
730 llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
731 return Visit(DAE->getExpr());
734 llvm::Constant *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
735 // No need for a DefaultInitExprScope: we don't handle 'this' in a
736 // constant expression.
737 return Visit(DIE->getExpr());
740 llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
741 return Visit(E->GetTemporaryExpr());
// Emit a constant for an array init list. Explicit initializers come first;
// remaining elements use the array filler (or null). When element constants
// end up with heterogeneous LLVM types, the result is emitted as an
// anonymous packed-layout struct instead of a ConstantArray.
744 llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
745 if (ILE->isStringLiteralInit())
746 return Visit(ILE->getInit(0));
748 llvm::ArrayType *AType =
749 cast<llvm::ArrayType>(ConvertType(ILE->getType()));
750 llvm::Type *ElemTy = AType->getElementType();
751 unsigned NumInitElements = ILE->getNumInits();
752 unsigned NumElements = AType->getNumElements();
754 // Initialising an array requires us to automatically
755 // initialise any elements that have not been initialised explicitly
756 unsigned NumInitableElts = std::min(NumInitElements, NumElements);
758 // Initialize remaining array elements.
759 // FIXME: This doesn't handle member pointers correctly!
760 llvm::Constant *fillC;
761 if (Expr *filler = ILE->getArrayFiller())
762 fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
764 fillC = llvm::Constant::getNullValue(ElemTy);
768 // Try to use a ConstantAggregateZero if we can.
769 if (fillC->isNullValue() && !NumInitableElts)
770 return llvm::ConstantAggregateZero::get(AType);
772 // Copy initializer elements.
773 std::vector<llvm::Constant*> Elts;
774 Elts.reserve(NumInitableElts + NumElements);
776 bool RewriteType = false;
777 for (unsigned i = 0; i < NumInitableElts; ++i) {
778 Expr *Init = ILE->getInit(i);
779 llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
// Track whether any element's LLVM type diverges from the array element type.
782 RewriteType |= (C->getType() != ElemTy);
786 RewriteType |= (fillC->getType() != ElemTy);
787 Elts.resize(NumElements, fillC);
790 // FIXME: Try to avoid packing the array
791 std::vector<llvm::Type*> Types;
792 Types.reserve(NumInitableElts + NumElements);
793 for (unsigned i = 0, e = Elts.size(); i < e; ++i)
794 Types.push_back(Elts[i]->getType());
795 llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
797 return llvm::ConstantStruct::get(SType, Elts);
800 return llvm::ConstantArray::get(AType, Elts);
// Struct/union init lists delegate to ConstStructBuilder.
803 llvm::Constant *EmitRecordInitialization(InitListExpr *ILE) {
804 return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
// Value-initialization is just the type's null constant.
807 llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
808 return CGM.EmitNullConstant(E->getType());
// Dispatch an init list on its type: arrays and records are handled;
// anything else falls through (tail of this method not visible here).
811 llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
812 if (ILE->getType()->isArrayType())
813 return EmitArrayInitialization(ILE);
815 if (ILE->getType()->isRecordType())
816 return EmitRecordInitialization(ILE);
// A CXXConstructExpr can only be folded when the constructor and destructor
// are trivial: a trivial default construction is a null constant, and a
// trivial copy/move forwards to the single argument (the forwarding Visit is
// not visible in this listing).
821 llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
822 if (!E->getConstructor()->isTrivial())
825 QualType Ty = E->getType();
827 // FIXME: We should not have to call getBaseElementType here.
828 const RecordType *RT =
829 CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
830 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
832 // If the class doesn't have a trivial destructor, we can't emit it as a
834 if (!RD->hasTrivialDestructor())
837 // Only copy and default constructors can be trivial.
840 if (E->getNumArgs()) {
841 assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
842 assert(E->getConstructor()->isCopyOrMoveConstructor() &&
843 "trivial ctor has argument but isn't a copy/move ctor");
845 Expr *Arg = E->getArg(0);
846 assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
847 "argument to copy ctor is of wrong type");
// Trivial default construction: zero-initialize.
852 return CGM.EmitNullConstant(Ty);
// String literal data emitted directly as an inline array constant.
855 llvm::Constant *VisitStringLiteral(StringLiteral *E) {
856 return CGM.GetConstantArrayFromStringLiteral(E);
859 llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
860 // This must be an @encode initializing an array in a static initializer.
861 // Don't emit it as the address of the string, emit the string data itself
862 // as an inline array.
864 CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
865 QualType T = E->getType();
866 if (T->getTypeClass() == Type::TypeOfExpr)
867 T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
868 const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
870 // Resize the string to the right size, adding zeros at the end, or
871 // truncating as needed.
872 Str.resize(CAT->getSize().getZExtValue(), '\0');
873 return llvm::ConstantDataArray::getString(VMContext, Str, false);
// GNU __extension__ is a no-op for constant emission.
876 llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
877 return Visit(E->getSubExpr());
// Convenience wrapper over CodeGenTypes' AST-to-LLVM type conversion.
881 llvm::Type *ConvertType(QualType T) {
882 return CGM.getTypes().ConvertType(T);
// Emit the address of an lvalue base as a constant. Declarations map to
// global addresses (functions, globals, promoted local statics, weak refs);
// expression bases are handled per statement class (compound literals get a
// private global, string/@encode/ObjC literals get their constant-string
// address, plus labels, typeid/uuidof, blocks, builtin constant strings, and
// static-duration materialized temporaries).
886 llvm::Constant *EmitLValue(APValue::LValueBase LVBase) {
887 if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
888 if (Decl->hasAttr<WeakRefAttr>())
889 return CGM.GetWeakRefReference(Decl);
890 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
891 return CGM.GetAddrOfFunction(FD);
892 if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
893 // We can never refer to a variable with local storage.
894 if (!VD->hasLocalStorage()) {
895 if (VD->isFileVarDecl() || VD->hasExternalStorage())
896 return CGM.GetAddrOfGlobalVar(VD);
// A non-file-scope static local: create (or reuse) its backing global.
897 else if (VD->isLocalVarDecl())
898 return CGM.getOrCreateStaticVarDecl(
899 *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
905 Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
906 switch (E->getStmtClass()) {
908 case Expr::CompoundLiteralExprClass: {
909 // Note that due to the nature of compound literals, this is guaranteed
910 // to be the only use of the variable, so we just generate it here.
911 CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
912 llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
913 CLE->getType(), CGF);
914 // FIXME: "Leaked" on failure.
916 C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
917 E->getType().isConstant(CGM.getContext()),
918 llvm::GlobalValue::InternalLinkage,
919 C, ".compoundliteral", nullptr,
920 llvm::GlobalVariable::NotThreadLocal,
921 CGM.getContext().getTargetAddressSpace(E->getType()));
924 case Expr::StringLiteralClass:
925 return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
926 case Expr::ObjCEncodeExprClass:
927 return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
928 case Expr::ObjCStringLiteralClass: {
929 ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
931 CGM.getObjCRuntime().GenerateConstantString(SL->getString());
932 return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
934 case Expr::PredefinedExprClass: {
935 unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
// Inside a function the predefined name is emitted via its LValue;
// otherwise fall back to fixed strings below.
937 LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
938 return cast<llvm::Constant>(Res.getAddress());
939 } else if (Type == PredefinedExpr::PrettyFunction) {
940 return CGM.GetAddrOfConstantCString("top level", ".tmp");
943 return CGM.GetAddrOfConstantCString("", ".tmp");
945 case Expr::AddrLabelExprClass: {
946 assert(CGF && "Invalid address of label expression outside function.");
947 llvm::Constant *Ptr =
948 CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
949 return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
951 case Expr::CallExprClass: {
952 CallExpr* CE = cast<CallExpr>(E);
953 unsigned builtin = CE->getBuiltinCallee();
// Only the CFString/NSString constant-string builtins are foldable here.
955 Builtin::BI__builtin___CFStringMakeConstantString &&
957 Builtin::BI__builtin___NSStringMakeConstantString)
959 const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
960 const StringLiteral *Literal = cast<StringLiteral>(Arg);
962 Builtin::BI__builtin___NSStringMakeConstantString) {
963 return CGM.getObjCRuntime().GenerateConstantString(Literal);
965 // FIXME: need to deal with UCN conversion issues.
966 return CGM.GetAddrOfConstantCFString(Literal);
968 case Expr::BlockExprClass: {
969 std::string FunctionName;
971 FunctionName = CGF->CurFn->getName();
973 FunctionName = "global";
975 return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
977 case Expr::CXXTypeidExprClass: {
978 CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
980 if (Typeid->isTypeOperand())
981 T = Typeid->getTypeOperand(CGM.getContext());
983 T = Typeid->getExprOperand()->getType();
984 return CGM.GetAddrOfRTTIDescriptor(T);
986 case Expr::CXXUuidofExprClass: {
987 return CGM.GetAddrOfUuidDescriptor(cast<CXXUuidofExpr>(E));
989 case Expr::MaterializeTemporaryExprClass: {
990 MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(E);
991 assert(MTE->getStorageDuration() == SD_Static);
992 SmallVector<const Expr *, 2> CommaLHSs;
993 SmallVector<SubobjectAdjustment, 2> Adjustments;
994 const Expr *Inner = MTE->GetTemporaryExpr()
995 ->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
996 return CGM.GetAddrOfGlobalTemporary(MTE, Inner);
1004 } // end anonymous namespace.
// Emit a constant initializer for variable D, or fail (failure returns not
// visible in this listing). Fast path: a trivial default-constructed record
// is just a null constant. Otherwise evaluate the initializer to an APValue;
// as a last resort fold the AST directly via ConstExprEmitter, widening an
// i1 result to the memory representation of bool.
1006 llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D,
1007 CodeGenFunction *CGF) {
1008 // Make a quick check if variable can be default NULL initialized
1009 // and avoid going through rest of code which may do, for c++11,
1010 // initialization of memory to all NULLs.
1011 if (!D.hasLocalStorage()) {
1012 QualType Ty = D.getType();
1013 if (Ty->isArrayType())
1014 Ty = Context.getBaseElementType(Ty);
1015 if (Ty->isRecordType())
1016 if (const CXXConstructExpr *E =
1017 dyn_cast_or_null<CXXConstructExpr>(D.getInit())) {
1018 const CXXConstructorDecl *CD = E->getConstructor();
1019 if (CD->isTrivial() && CD->isDefaultConstructor())
1020 return EmitNullConstant(D.getType());
1024 if (const APValue *Value = D.evaluateValue())
1025 return EmitConstantValueForMemory(*Value, D.getType(), CGF);
1027 // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
1028 // reference is a constant expression, and the reference binds to a temporary,
1029 // then constant initialization is performed. ConstExprEmitter will
1030 // incorrectly emit a prvalue constant in this case, and the calling code
1031 // interprets that as the (pointer) value of the reference, rather than the
1032 // desired value of the referee.
1033 if (D.getType()->isReferenceType())
1036 const Expr *E = D.getInit();
1037 assert(E && "No initializer to emit");
1039 llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
1040 if (C && C->getType()->isIntegerTy(1)) {
1041 llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
1042 C = llvm::ConstantExpr::getZExt(C, BoolTy);
// Try to emit expression E as an LLVM constant of type DestType.
// Returns null if E cannot be constant-folded (or has side effects).
1047 llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
1049 CodeGenFunction *CGF) {
1050 Expr::EvalResult Result;
1052 bool Success = false;
// References are initialized from the lvalue of the initializer;
// everything else folds to an rvalue.
1054 if (DestType->isReferenceType())
1055 Success = E->EvaluateAsLValue(Result, Context);
1057 Success = E->EvaluateAsRValue(Result, Context);
1059 llvm::Constant *C = nullptr;
// Side-effecting expressions must not be folded away, even if they
// evaluated successfully.
1060 if (Success && !Result.HasSideEffects)
1061 C = EmitConstantValue(Result.Val, DestType, CGF);
// Otherwise fall back to the syntactic ConstExprEmitter.
1063 C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
1065 if (C && C->getType()->isIntegerTy(1)) {
// Widen i1 bools to their in-memory type (see EmitConstantInit).
1066 llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
1067 C = llvm::ConstantExpr::getZExt(C, BoolTy);
// Lower an already-evaluated APValue to an llvm::Constant of type DestType.
// This is the main APValue -> LLVM constant translation; it dispatches on
// APValue kind (LValue, Int, Float, Complex*, Vector, Struct/Union, Array,
// MemberPointer, AddrLabelDiff).
1072 llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
1074 CodeGenFunction *CGF) {
1075 // For an _Atomic-qualified constant, we may need to add tail padding.
1076 if (auto *AT = DestType->getAs<AtomicType>()) {
1077 QualType InnerType = AT->getValueType();
// Emit the payload with the unqualified value type first.
1078 auto *Inner = EmitConstantValue(Value, InnerType, CGF);
1080 uint64_t InnerSize = Context.getTypeSize(InnerType);
1081 uint64_t OuterSize = Context.getTypeSize(DestType);
1082 if (InnerSize == OuterSize)
1085 assert(InnerSize < OuterSize && "emitted over-large constant for atomic");
// Pad with an i8 array so the constant fills the full atomic width.
1086 llvm::Constant *Elts[] = {
1088 llvm::ConstantAggregateZero::get(
1089 llvm::ArrayType::get(Int8Ty, (OuterSize - InnerSize) / 8))
// Anonymous struct { value, [N x i8] zero padding }.
1091 return llvm::ConstantStruct::getAnon(Elts);
1094 switch (Value.getKind()) {
1095 case APValue::Uninitialized:
1096 llvm_unreachable("Constant expressions should be initialized.");
1097 case APValue::LValue: {
1098 llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
// Byte offset of the lvalue from its base, as an i64.
1099 llvm::Constant *Offset =
1100 llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());
1103 if (APValue::LValueBase LVBase = Value.getLValueBase()) {
1104 // An array can be represented as an lvalue referring to the base.
1105 if (isa<llvm::ArrayType>(DestTy)) {
1106 assert(Offset->isNullValue() && "offset on array initializer");
1107 return ConstExprEmitter(*this, CGF).Visit(
1108 const_cast<Expr*>(LVBase.get<const Expr*>()));
// Emit the address of the base declaration or expression.
1111 C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase);
1113 // Apply offset if necessary.
1114 if (!Offset->isNullValue()) {
// GEP through an i8* in the base pointer's address space so the
// offset is interpreted in bytes, then cast back.
1115 unsigned AS = C->getType()->getPointerAddressSpace();
1116 llvm::Type *CharPtrTy = Int8Ty->getPointerTo(AS);
1117 llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, CharPtrTy);
1118 Casted = llvm::ConstantExpr::getGetElementPtr(Int8Ty, Casted, Offset);
1119 C = llvm::ConstantExpr::getPointerCast(Casted, C->getType());
1122 // Convert to the appropriate type; this could be an lvalue for
// a pointer destination, or an integer (e.g. uintptr_t) destination.
1124 if (isa<llvm::PointerType>(DestTy))
1125 return llvm::ConstantExpr::getPointerCast(C, DestTy);
1127 return llvm::ConstantExpr::getPtrToInt(C, DestTy);
// No base: the lvalue is a plain integer offset (e.g. (int*)0x1000).
1131 // Convert to the appropriate type; this could be an lvalue for
1133 if (isa<llvm::PointerType>(DestTy))
1134 return llvm::ConstantExpr::getIntToPtr(C, DestTy);
1136 // If the types don't match this should only be a truncate.
1137 if (C->getType() != DestTy)
1138 return llvm::ConstantExpr::getTrunc(C, DestTy);
// APValue::Int: emit the APSInt directly.
1144 return llvm::ConstantInt::get(VMContext, Value.getInt());
1145 case APValue::ComplexInt: {
// _Complex integer: struct of { real, imag }.
1146 llvm::Constant *Complex[2];
1148 Complex[0] = llvm::ConstantInt::get(VMContext,
1149 Value.getComplexIntReal());
1150 Complex[1] = llvm::ConstantInt::get(VMContext,
1151 Value.getComplexIntImag());
1153 // FIXME: the target may want to specify that this is packed.
1154 llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
1155 Complex[1]->getType(),
1157 return llvm::ConstantStruct::get(STy, Complex);
1159 case APValue::Float: {
1160 const llvm::APFloat &Init = Value.getFloat();
// Half is represented as an i16 bit pattern when the target has no
// native half type and half args/returns are not enabled.
1161 if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf &&
1162 !Context.getLangOpts().NativeHalfType &&
1163 !Context.getLangOpts().HalfArgsAndReturns)
1164 return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
1166 return llvm::ConstantFP::get(VMContext, Init);
1168 case APValue::ComplexFloat: {
// _Complex floating point: struct of { real, imag }.
1169 llvm::Constant *Complex[2];
1171 Complex[0] = llvm::ConstantFP::get(VMContext,
1172 Value.getComplexFloatReal());
1173 Complex[1] = llvm::ConstantFP::get(VMContext,
1174 Value.getComplexFloatImag());
1176 // FIXME: the target may want to specify that this is packed.
1177 llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
1178 Complex[1]->getType(),
1180 return llvm::ConstantStruct::get(STy, Complex);
1182 case APValue::Vector: {
1183 SmallVector<llvm::Constant *, 4> Inits;
1184 unsigned NumElts = Value.getVectorLength();
// Each vector element is either an integer or a float APValue.
1186 for (unsigned i = 0; i != NumElts; ++i) {
1187 const APValue &Elt = Value.getVectorElt(i);
1189 Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
1191 Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
1193 return llvm::ConstantVector::get(Inits);
1195 case APValue::AddrLabelDiff: {
// GNU &&label difference: emit both label addresses and subtract.
1196 const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
1197 const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
1198 llvm::Constant *LHS = EmitConstantExpr(LHSExpr, LHSExpr->getType(), CGF);
1199 llvm::Constant *RHS = EmitConstantExpr(RHSExpr, RHSExpr->getType(), CGF);
1201 // Compute difference
1202 llvm::Type *ResultType = getTypes().ConvertType(DestType);
1203 LHS = llvm::ConstantExpr::getPtrToInt(LHS, IntPtrTy);
1204 RHS = llvm::ConstantExpr::getPtrToInt(RHS, IntPtrTy);
1205 llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS);
1207 // LLVM is a bit sensitive about the exact format of the
1208 // address-of-label difference; make sure to truncate after
// the subtraction, not before.
1210 return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType);
1212 case APValue::Struct:
1213 case APValue::Union:
1214 return ConstStructBuilder::BuildStruct(*this, CGF, Value, DestType);
1215 case APValue::Array: {
1216 const ArrayType *CAT = Context.getAsArrayType(DestType);
1217 unsigned NumElements = Value.getArraySize();
1218 unsigned NumInitElts = Value.getArrayInitializedElts();
1220 // Emit array filler, if there is one.
1221 llvm::Constant *Filler = nullptr;
1222 if (Value.hasArrayFiller())
1223 Filler = EmitConstantValueForMemory(Value.getArrayFiller(),
1224 CAT->getElementType(), CGF);
1226 // Emit initializer elements.
1227 llvm::Type *CommonElementType =
1228 getTypes().ConvertType(CAT->getElementType());
1230 // Try to use a ConstantAggregateZero if we can.
1231 if (Filler && Filler->isNullValue() && !NumInitElts) {
1232 llvm::ArrayType *AType =
1233 llvm::ArrayType::get(CommonElementType, NumElements);
1234 return llvm::ConstantAggregateZero::get(AType);
1237 std::vector<llvm::Constant*> Elts;
1238 Elts.reserve(NumElements);
// Elements beyond NumInitElts take the filler constant.
1239 for (unsigned I = 0; I < NumElements; ++I) {
1240 llvm::Constant *C = Filler;
1241 if (I < NumInitElts)
1242 C = EmitConstantValueForMemory(Value.getArrayInitializedElt(I),
1243 CAT->getElementType(), CGF);
1245 assert(Filler && "Missing filler for implicit elements of initializer");
// Track whether every element lowered to the same LLVM type; if not,
// we cannot use an LLVM array type below.
1247 CommonElementType = C->getType();
1248 else if (C->getType() != CommonElementType)
1249 CommonElementType = nullptr;
1253 if (!CommonElementType) {
1254 // FIXME: Try to avoid packing the array
// Heterogeneous element types: emit a packed struct instead of an array.
1255 std::vector<llvm::Type*> Types;
1256 Types.reserve(NumElements);
1257 for (unsigned i = 0, e = Elts.size(); i < e; ++i)
1258 Types.push_back(Elts[i]->getType());
1259 llvm::StructType *SType = llvm::StructType::get(VMContext, Types, true);
1260 return llvm::ConstantStruct::get(SType, Elts);
1263 llvm::ArrayType *AType =
1264 llvm::ArrayType::get(CommonElementType, NumElements);
1265 return llvm::ConstantArray::get(AType, Elts);
1267 case APValue::MemberPointer:
// Member pointers have an ABI-specific representation.
1268 return getCXXABI().EmitMemberPointer(Value, DestType);
1270 llvm_unreachable("Unknown APValue kind");
// Like EmitConstantValue, but yields the in-memory representation: the only
// difference visible here is that i1 booleans are zero-extended to the
// destination's memory type.
1274 CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
1276 CodeGenFunction *CGF) {
1277 llvm::Constant *C = EmitConstantValue(Value, DestType, CGF);
1278 if (C->getType()->isIntegerTy(1)) {
1279 llvm::Type *BoolTy = getTypes().ConvertTypeForMem(DestType);
1280 C = llvm::ConstantExpr::getZExt(C, BoolTy);
// Return the address of a global backing a file-scope compound literal.
// Delegates to ConstExprEmitter::EmitLValue with no CodeGenFunction, since
// file-scope literals are emitted outside any function body.
1286 CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
1287 assert(E->isFileScope() && "not a file-scope compound literal expr");
1288 return ConstExprEmitter(*this, nullptr).EmitLValue(E);
// Emit the constant for a member-pointer expression of the form &Class::member.
// The sub-expression is required to be a DeclRefExpr (see the casts below).
1292 CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
1293 // Member pointer constants always have a very particular form.
1294 const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
1295 const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();
1297 // A member function pointer.
1298 if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
1299 return getCXXABI().EmitMemberPointer(method);
1301 // Otherwise, a member data pointer.
// Data member pointers are encoded from the field's byte offset; the
// exact representation is chosen by the C++ ABI object.
1302 uint64_t fieldOffset = getContext().getFieldOffset(decl);
1303 CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
1304 return getCXXABI().EmitMemberDataPointer(type, chars);
// Forward declaration; EmitNullConstant (below) and EmitNullConstantForBase
// are mutually recursive for nested base subobjects.
1307 static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
1308 llvm::Type *baseType,
1309 const CXXRecordDecl *base);
// Build the "null" constant for a C++ record that is not trivially
// zero-initializable (e.g. it contains member data pointers, whose null
// value is not all-zero-bits in some ABIs).  asCompleteObject selects the
// complete-object layout (including virtual bases) versus the
// base-subobject layout.
1311 static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
1312 const CXXRecordDecl *record,
1313 bool asCompleteObject) {
1314 const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
1315 llvm::StructType *structure =
1316 (asCompleteObject ? layout.getLLVMType()
1317 : layout.getBaseSubobjectLLVMType());
// One slot per LLVM struct element; unfilled slots are zeroed at the end.
1319 unsigned numElements = structure->getNumElements();
1320 std::vector<llvm::Constant *> elements(numElements);
1322 // Fill in all the bases.
1323 for (const auto &I : record->bases()) {
1324 if (I.isVirtual()) {
1325 // Ignore virtual bases; if we're laying out for a complete
1326 // object, we'll lay these out later.
1330 const CXXRecordDecl *base =
1331 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
1333 // Ignore empty bases.
1334 if (base->isEmpty())
1337 unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
1338 llvm::Type *baseType = structure->getElementType(fieldIndex);
1339 elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
1342 // Fill in all the fields.
1343 for (const auto *Field : record->fields()) {
1344 // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
1345 // will fill in later.)
1346 if (!Field->isBitField()) {
1347 unsigned fieldIndex = layout.getLLVMFieldNo(Field);
1348 elements[fieldIndex] = CGM.EmitNullConstant(Field->getType());
1351 // For unions, stop after the first named field.
1352 if (record->isUnion() && Field->getDeclName())
1356 // Fill in the virtual bases, if we're working with the complete object.
1357 if (asCompleteObject) {
1358 for (const auto &I : record->vbases()) {
1359 const CXXRecordDecl *base =
1360 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
1362 // Ignore empty bases.
1363 if (base->isEmpty())
1366 unsigned fieldIndex = layout.getVirtualBaseIndex(base);
1368 // We might have already laid this field out.
// (A virtual base can share a slot with a non-virtual base already
// emitted above.)
1369 if (elements[fieldIndex]) continue;
1371 llvm::Type *baseType = structure->getElementType(fieldIndex);
1372 elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
1376 // Now go through all other fields and zero them out.
// This covers padding, bitfield storage units, and any slot not filled in
// by the loops above.
1377 for (unsigned i = 0; i != numElements; ++i) {
1379 elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
1382 return llvm::ConstantStruct::get(structure, elements);
1385 /// Emit the null constant for a base subobject.
1386 static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
1387 llvm::Type *baseType,
1388 const CXXRecordDecl *base) {
1389 const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
1391 // Just zero out bases that don't have any pointer to data members.
1392 if (baseLayout.isZeroInitializableAsBase())
1393 return llvm::Constant::getNullValue(baseType);
1395 // Otherwise, we can just use its null constant.
// Recurse with the base-subobject layout (no virtual bases of `base`).
1396 return EmitNullConstant(CGM, base, /*asCompleteObject=*/false);
// Emit the null/zero constant for an arbitrary type T.  For most types this
// is an all-zero value; the exceptions handled below are arrays of, records
// containing, or direct uses of member data pointers, whose null
// representation is ABI-defined and may not be all-zero bits.
1399 llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
// Fast path: all-zero bits is the null value.
1400 if (getTypes().isZeroInitializable(T))
1401 return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
1403 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
1404 llvm::ArrayType *ATy =
1405 cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
1407 QualType ElementTy = CAT->getElementType();
// Replicate the (recursively emitted) element null constant.
1409 llvm::Constant *Element = EmitNullConstant(ElementTy);
1410 unsigned NumElements = CAT->getSize().getZExtValue();
1411 SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
1412 return llvm::ConstantArray::get(ATy, Array);
1415 if (const RecordType *RT = T->getAs<RecordType>()) {
// Only C++ records can be non-zero-initializable, hence the cast.
1416 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1417 return ::EmitNullConstant(*this, RD, /*complete object*/ true);
1420 assert(T->isMemberDataPointerType() &&
1421 "Should only see pointers to data members here!");
1423 return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
1427 CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
1428 return ::EmitNullConstant(*this, Record, false);