//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  /// We want to use 'dest' as the return slot except under two
  /// conditions:
  ///   - The destination slot requires garbage collection, so we
  ///     need to use the GC API.
  ///   - The destination slot is potentially aliased.
  bool shouldUseDestForReturnSlot() const {
    return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
  }

  ReturnValueSlot getReturnValueSlot() const {
    if (!shouldUseDestForReturnSlot())
      return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddress(), Dest.isVolatile(),
                           IsResultUnused);
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(QualType type, const LValue &src);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                     QualType elementType, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) {
    // For aggregates, we should always be able to emit the variable
    // as an l-value unless it's a reference.  This is due to the fact
    // that we can't actually ever see a normal l2r conversion on an
    // aggregate in C++, and in C there's no language standard
    // actively preventing us from listing variables in the captures
    // list of a block.
    if (E->getDecl()->getType()->isReferenceType()) {
      if (CodeGenFunction::ConstantEmission result
            = CGF.tryEmitAsConstant(E)) {
        EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
        return;
      }
    }

    EmitAggLoadOfLValue(E);
  }

  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  // case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, copy from there to the destination.
  assert(Dest.getPointer() != src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), src);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  AggValueSlot srcAgg =
    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
                            needsGC(type), AggValueSlot::IsAliased);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  CGF.EmitAggregateCopy(dest.getAddress(), src.getAddress(), type,
                        dest.isVolatile() || src.isVolatile());
}

/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart =
      Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd =
        Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}

/// \brief Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}

/// \brief Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType elementType, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
      Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV =
        CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = isTrivialFiller(filler);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    LValue elementLV =
        CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
    if (filler)
      EmitInitializationToLValue(filler, elementLV);
    else
      EmitNullInitializationToLValue(elementLV);

    // Move on to the next element.
    llvm::Value *nextElement =
        Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

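// An opaque value of aggregate type has already been bound to an l-value by
// the enclosing OpaqueValueMapping; just copy out of that mapping.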
void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind) {
  while (true) {
    op = op->IgnoreParens();
    if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
      if (castE->getCastKind() == kind)
        return castE->getSubExpr();
      if (castE->getCastKind() == CK_NoOp)
        continue;
    }
    return nullptr;
  }
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
        Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
             "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0,
                                        CharUnits());
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr =
      Builder.CreateStructGEP(atomicSlot.getAddress(), 0, CharUnits());
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    LLVM_FALLTHROUGH;

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLEvent:
  case CK_ZeroToOCLQueue:
  case CK_AddressSpaceConversion:
  case CK_IntToOCLSampler:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

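// For the comma operator, the result is the RHS; the LHS is emitted only for
// its side effects.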
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

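// A GNU statement expression of aggregate type yields the value of its last
// statement; emit the compound statement directly into the destination slot.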
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  // A non-volatile aggregate destination might have a volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

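// An inheriting constructor simply forwards its arguments to the inherited
// base-class constructor; emit that call with the destination slot's address.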
void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

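// Value-initialization of an aggregate reduces to zero-initializing the
// destination slot.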
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it
/// just handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer &&
           CGF.getTypes().isPointerZeroInitializable(E->getType());
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}

void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed() ?
                                                 AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->isTransparent())
    return Visit(E->getInit(0));

  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    QualType elementType =
        CGF.getContext().getAsArrayType(E->getType())->getElementType();

    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
    EmitArrayInit(Dest.getAddress(), AType, elementType, E);
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = nullptr;

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
    assert(E->getNumInits() >= CXXRD->getNumBases() &&
           "missing initializer for base class");
    for (auto &Base : CXXRD->bases()) {
      assert(!Base.isVirtual() && "should not see vbases here");
      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
          Dest.getAddress(), CXXRD, BaseRD,
          /*isBaseVirtual*/ false);
      AggValueSlot AggSlot =
          AggValueSlot::forAddr(V, Qualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased);
      CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);

      if (QualType::DestructionKind dtorKind =
              Base.getType().isDestructedType()) {
        CGF.pushDestroy(dtorKind, V, Base.getType());
        cleanups.push_back(CGF.EHStack.stable_begin());
      }
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really empty and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateAlignedLoad(
              CGF.Int8Ty,
              llvm::Constant::getNullValue(CGF.Int8PtrTy),
              CharUnits::One()); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
              dyn_cast<llvm::GetElementPtrInst>(LV.getPointer()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}

void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                                            llvm::Value *outerBegin) {
  // Emit the common subexpression.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());

  Address destPtr = EnsureSlot(E->getType()).getAddress();
  uint64_t numElements = E->getArraySize().getZExtValue();

  if (!numElements)
    return;

  // destPtr is an array*. Construct an elementType* by drilling down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = {zero, zero};
  llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getPointer(), indices,
                                                 "arrayinit.begin");

  // Prepare to special-case multidimensional array initialization: we avoid
  // emitting multiple destructor loops in that case.
  if (!outerBegin)
    outerBegin = begin;
  ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());

  QualType elementType =
      CGF.getContext().getAsArrayType(E->getType())->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);

  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

  // Jump into the body.
  CGF.EmitBlock(bodyBB);
  llvm::PHINode *index =
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
  index->addIncoming(zero, entryBB);
  llvm::Value *element = Builder.CreateInBoundsGEP(begin, index);

  // Prepare for a cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
    if (outerBegin->getType() != element->getType())
      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
                                       elementAlign,
                                       CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();
  } else {
    dtorKind = QualType::DK_none;
  }

  // Emit the actual filler expression.
  {
    // Temporaries created in an array initialization loop are destroyed
    // at the end of each iteration.
    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
    LValue elementLV =
        CGF.MakeAddrLValue(Address(element, elementAlign), elementType);

    if (InnerLoop) {
      // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
      auto elementSlot = AggValueSlot::forLValue(
          elementLV, AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased);
      AggExprEmitter(CGF, elementSlot, false)
          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
    } else
      EmitInitializationToLValue(E->getSubExpr(), elementLV);
  }

  // Move on to the next element.
  llvm::Value *nextIndex = Builder.CreateNUWAdd(
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
  index->addIncoming(nextIndex, Builder.GetInsertBlock());

  // Leave the loop if we're done.
  llvm::Value *done = Builder.CreateICmpEQ(
      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
      "arrayinit.done");
  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
  Builder.CreateCondBr(done, endBB, bodyBB);

  CGF.EmitBlock(endBB);

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind)
    CGF.DeactivateCleanupBlock(cleanup, index);
}

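// A designated initializer update rewrites part of an existing aggregate:
// emit the base object into the slot first, then re-emit the updated fields
// on top of it via the updater's InitListExpr.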
void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  EmitInitializationToLValue(E->getBase(), DestLV);
  VisitInitListExpr(E->getUpdater());
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
        while (ILEElement != CXXRD->getNumBases())
          NumNonZeroBytes +=
              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
      for (const auto *Field : SD->fields()) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
    return;

  // C++ objects with a user-declared constructor don't need zero'ing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                   .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16-bytes or smaller, prefer individual stores over memset.
  CharUnits Size = CGF.getContext().getTypeSizeInChars(E->getType());
  if (Size <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > Size)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());

  Address Loc = Slot.getAddress();
  Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
/// the value of the aggregate expression is not needed.  If VolatileDest is
/// true, DestPtr cannot be 0.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
}

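/// EmitAggExprToLValue - Emit the computation of the specified expression of
/// aggregate type into a freshly created temporary and return that temporary
/// as an l-value.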
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  Address Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(Address DestPtr,
                                        Address SrcPtr, QualType Ty,
                                        bool isVolatile,
                                        bool isAssignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment() ||
              Record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get data size info for this aggregate.  If this is an assignment,
  // don't copy the tail padding, because we might be assigning into a
  // base subobject where the tail padding is claimed.  Otherwise,
  // copying it is fine.
  std::pair<CharUnits, CharUnits> TypeInfo;
  if (isAssignment)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  llvm::Value *SizeVal = nullptr;
  if (TypeInfo.first.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType BaseEltTy;
      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
      TypeInfo = getContext().getTypeInfoDataSizeInChars(BaseEltTy);
      std::pair<CharUnits, CharUnits> LastElementTypeInfo;
      if (!isAssignment)
        LastElementTypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
      assert(!TypeInfo.first.isZero());
      SizeVal = Builder.CreateNUWMul(
          SizeVal,
          llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
      if (!isAssignment) {
        SizeVal = Builder.CreateNUWSub(
            SizeVal,
            llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
        SizeVal = Builder.CreateNUWAdd(
            SizeVal, llvm::ConstantInt::get(
                         SizeTy, LastElementTypeInfo.first.getQuantity()));
      }
    }
  }
  if (!SizeVal) {
    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
  }

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
  SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
}