//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace  {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IgnoreResult;

  ReturnValueSlot getReturnValueSlot() const {
    // If the destination slot requires garbage collection, we can't
    // use the real return value slot, because we have to use the GC
    // API.
    if (Dest.requiresGCollection()) return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
                 bool ignore)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IgnoreResult(ignore) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
  void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);

  void EmitGCMove(const Expr *E, RValue Src);

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);
  EmitFinalDestCopy(E, LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}
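
// Illustrative sketch (not part of the original source): under -fobjc-gc,
// a struct with an Objective-C object member must be copied with the GC
// runtime's collector-aware memmove, so hasObjectMember() distinguishes
// cases like these:
//
//   struct Pair  { id first; id second; };  // requires the GC memmove API
//   struct Point { int x, y; };             // plain data; memcpy is fine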

/// \brief Perform the final move to DestPtr if RequiresGCollection is set.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitGCMove(E, Result);
/// If GC doesn't interfere, this will cause the result to be emitted
/// directly into the return value slot.  If GC does interfere, a final
/// move will be performed.
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
  }
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
  assert(Src.isAggregate() && "value must be aggregate value!");

  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context (like an expression statement) that doesn't care
  // about the result.  C says that an lvalue-to-rvalue conversion is
  // performed in these cases; C++ says that it is not.  In either
  // case, we don't actually need to do anything unless the value is
  // volatile.
  if (Dest.isIgnored()) {
    if (!Src.isVolatileQualified() ||
        CGF.CGM.getLangOptions().CPlusPlus ||
        (IgnoreResult && Ignore))
      return;

    // If the source is volatile, we must read from it; to do that, we need
    // some place to put it.
    Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
  }

  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
  // from the source as well, as we can't eliminate it if either operand
  // is volatile, unless the copy has volatile for both source and destination.
  CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
                        Dest.isVolatile() | Src.isVolatileQualified());
}
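
// Illustrative example of the C/C++ difference handled above (not part of
// the original source): given "volatile struct S vs;", the statement "vs;"
// performs an lvalue-to-rvalue conversion, and thus a volatile load, in C
// but not in C++; that is why an ignored volatile source still forces a
// copy into a temporary in C.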

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");

  EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
                                            Src.isVolatileQualified()),
                    Ignore);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    // FIXME: This is a band-aid; the real problem appears to be in our handling
    // of assignments, where we store directly into the LHS without checking
    // whether anything in the RHS aliases.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}
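
// Illustrative example of the aliasing hazard mentioned above (not part of
// the original source):
//
//   struct S s = { 1, 2 };
//   s = (struct S){ s.b, s.a };  // the literal reads 's' while 's' is also
//                                // the destination being stored to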

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    if (Dest.isIgnored()) break;

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
    break;
  }

  case CK_GetObjCProperty: {
    LValue LV = CGF.EmitLValue(E->getSubExpr());
    assert(LV.isPropertyRef());
    RValue RV = CGF.EmitLoadOfPropertyRefLValue(LV, getReturnValueSlot());
    EmitGCMove(E, RV);
    break;
  }

  case CK_LValueToRValue: // hope for downstream optimization
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
    break;

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_AnyPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ObjCProduceObject:
  case CK_ObjCConsumeObject:
  case CK_ObjCReclaimReturnedObject:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}
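
// Illustrative example of CK_ToUnion, the GCC cast-to-union extension
// handled above (not part of the original source):
//
//   union U { int i; float f; };
//   union U u = (union U)42;  // initializes the 'i' member in place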

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType()->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitGCMove(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitGCMove(E, RV);
}

void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
  llvm_unreachable("direct property access not surrounded by "
                   "lvalue-to-rvalue cast");
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}
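
// Illustrative example (not part of the original source): a GNU statement
// expression of aggregate type,
//
//   struct S s = ({ struct S tmp = { 1, 2 }; tmp; });
//
// is emitted by handing the destination slot directly to EmitCompoundStmt.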

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E, LV);
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasAttr<BlocksAttr>() &&
          E->getRHS()->HasSideEffects(CGF.getContext())) {
        // When __block variable on LHS, the RHS must be evaluated first
        // as it may change the 'forwarding' field via call to Block_copy.
        LValue RHS = CGF.EmitLValue(E->getRHS());
        LValue LHS = CGF.EmitLValue(E->getLHS());
        bool GCollection = false;
        if (CGF.getContext().getLangOptions().getGCMode())
          GCollection = TypeRequiresGCollection(E->getLHS()->getType());
        Dest = AggValueSlot::forLValue(LHS, true, GCollection);
        EmitFinalDestCopy(E, RHS, true);
        return;
      }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special case property setters, otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    const ObjCPropertyRefExpr *RE = LHS.getPropertyRefExpr();
    QualType ArgType = RE->getSetterArgType();
    RValue Src;
    if (ArgType->isReferenceType())
      Src = CGF.EmitReferenceBindingToExpr(E->getRHS(), 0);
    else {
      AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
      CGF.EmitAggExpr(E->getRHS(), Slot);
      Src = Slot.asRValue();
    }
    CGF.EmitStoreThroughPropertyRefLValue(Src, LHS);
  } else {
    bool GCollection = false;
    if (CGF.getContext().getLangOptions().getGCMode())
      GCollection = TypeRequiresGCollection(E->getLHS()->getType());

    // Codegen the RHS so that it stores directly into the LHS.
    AggValueSlot LHSSlot = AggValueSlot::forLValue(LHS, true,
                                                   GCollection);
    CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
    EmitFinalDestCopy(E, LHS, true);
  }
}
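
// Illustrative example of the __block ordering case above (not part of the
// original source; 'f' is a hypothetical function that copies its block
// argument):
//
//   __block struct S s;
//   s = f(^{ use(&s); });  // f may call Block_copy, moving 's' to the heap
//                          // via its forwarding pointer, so the RHS must be
//                          // evaluated before the address of 's'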

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Save whether the destination's lifetime is externally managed.
  bool DestLifetimeManaged = Dest.isLifetimeExternallyManaged();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that its lifetime is externally managed.
  Dest.setLifetimeExternallyManaged(DestLifetimeManaged);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr(CGF.getContext()));
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}
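
// Illustrative example (not part of the original source):
//
//   struct S s = va_arg(ap, struct S);
//
// EmitVAArg returns the address of the argument within the va_list; targets
// that do not implement aggregate va_arg yet return null, which produces
// the unsupported-expression error above.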

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether its lifetime was externally managed.
  bool WasManaged = Dest.isLifetimeExternallyManaged();
  Dest = EnsureSlot(E->getType());
  Dest.setLifetimeExternallyManaged();

  Visit(E->getSubExpr());

  // Set up the temporary's destructor if its lifetime wasn't already
  // managed.
  if (!WasManaged)
    CGF.EmitCXXTemporary(E->getTemporary(), Dest.getAddr());
}
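
// Illustrative example (not part of the original source): in "g(S());" the
// S temporary is built directly in the argument slot, and EmitCXXTemporary
// schedules ~S() at the end of the full-expression unless the slot's
// lifetime is already managed by an enclosing emitter.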

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.EmitExprWithCleanups(E, Dest);
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it
/// just handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}

void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV, true, false,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (!CGF.hasAggregateLLVMType(type)) {
    // For non-aggregates, we can store zero.
    llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
    CGF.EmitStoreThroughLValue(RValue::get(null), lv);
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
      new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                               llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::Value *DestPtr = Dest.getAddr();

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    const llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    const llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    assert(NumInitElements <= NumArrayElements);

    QualType elementType = E->getType().getCanonicalType();
    elementType = CGF.getContext().getQualifiedType(
                    cast<ArrayType>(elementType)->getElementType(),
                    elementType.getQualifiers() + Dest.getQualifiers());

    // DestPtr is an array*.  Construct an elementType* by drilling
    // down a level.
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
    llvm::Value *indices[] = { zero, zero };
    llvm::Value *begin =
      Builder.CreateInBoundsGEP(DestPtr, indices, indices+2, "arrayinit.begin");

    // Exception safety requires us to destroy all the
    // already-constructed members if an initializer throws.
    // For that, we'll need an EH cleanup.
    QualType::DestructionKind dtorKind = elementType.isDestructedType();
    llvm::AllocaInst *endOfInit = 0;
    EHScopeStack::stable_iterator cleanup;
    if (CGF.needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                       "arrayinit.endOfInit");
      Builder.CreateStore(begin, endOfInit);
      CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                           CGF.getDestroyer(dtorKind));
      cleanup = CGF.EHStack.stable_begin();

    // Otherwise, remember that we didn't need a cleanup.
    } else {
      dtorKind = QualType::DK_none;
    }

    llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

    // The 'current element to initialize'.  The invariants on this
    // variable are complicated.  Essentially, after each iteration of
    // the loop, it points to the last initialized element, except
    // that it points to the beginning of the array before any
    // elements have been initialized.
    llvm::Value *element = begin;

    // Emit the explicit initializers.
    for (uint64_t i = 0; i != NumInitElements; ++i) {
      // Advance to the next element.
      if (i > 0) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

        // Tell the cleanup that it needs to destroy up to this
        // element.  TODO: some of these stores can be trivially
        // observed to be unnecessary.
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
    }

    // Check whether there's a non-trivial array-fill expression.
    // Note that this will be a CXXConstructExpr even if the element
    // type is an array (or array of array, etc.) of class type.
    Expr *filler = E->getArrayFiller();
    bool hasTrivialFiller = true;
    if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
      assert(cons->getConstructor()->isDefaultConstructor());
      hasTrivialFiller = cons->getConstructor()->isTrivial();
    }

    // Any remaining elements need to be zero-initialized, possibly
    // using the filler expression.  We can skip this if we're
    // emitting to zeroed memory.
    if (NumInitElements != NumArrayElements &&
        !(Dest.isZeroed() && hasTrivialFiller &&
          CGF.getTypes().isZeroInitializable(elementType))) {

      // Use an actual loop.  This is basically
      //   do { *array++ = filler; } while (array != end);

      // Advance to the start of the rest of the array.
      if (NumInitElements) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      // Compute the end of the array.
      llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

      llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
      llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

      // Jump into the body.
      CGF.EmitBlock(bodyBB);
      llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
      currentElement->addIncoming(element, entryBB);

      // Emit the actual filler expression.
      LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);

      // Move on to the next element.
      llvm::Value *nextElement =
        Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

      // Tell the EH cleanup that we finished with the last element.
      if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

      // Leave the loop if we're done.
      llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                               "arrayinit.done");
      llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
      Builder.CreateCondBr(done, endBB, bodyBB);
      currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

      CGF.EmitBlock(endBB);
    }

    // Leave the partial-array cleanup if we entered one.
    if (dtorKind) CGF.DeactivateCleanupBlock(cleanup);

    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really empty and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                   FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  llvm::SmallVector<EHScopeStack::stable_iterator, 16> cleanups;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
         fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    // FIXME: volatility
    LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1]);
}
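
// Illustrative example (not part of the original source): for
//
//   int a[8] = { 1, 2 };
//
// the code above stores the two explicit elements, then either relies on
// memory that is already zeroed or emits the filler loop for a[2..7].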

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getContext().Target.getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}
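
// Illustrative example (not part of the original source): for
//
//   struct S { int a, b, c, d; } s = { 1 };
//
// only 'a' contributes, so GetNumNonZeroBytesInInit reports 4 of 16 bytes
// as non-zero, which lets CheckAggExprForMemSetUse below choose a memset
// for sufficiently large, mostly-zero initializers.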

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;

  // C++ objects with a user-declared constructor don't need zero'ing.
  if (CGF.getContext().getLangOptions().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                   .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());

  Loc = CGF.Builder.CreateBitCast(Loc, BP);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity(), false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the given slot; note that if the slot
/// is ignored, the value of the aggregate expression is not needed.
///
/// \param IgnoreResult - true if the resulting value isn't used.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
                                  bool IgnoreResult) {
  assert(E && hasAggregateLLVMType(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, false));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  const llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");

  const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  const llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       TypeInfo.second.getQuantity(), isVolatile);
}
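
// Illustrative example (not part of the original source): for
//
//   struct S { int x[10]; } a, b;
//   a = b;
//
// the code above emits a single llvm.memcpy of 40 bytes rather than ten
// scalar loads and stores; with Objective-C GC enabled and an object member
// present, it would instead call the collector-aware memmove.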