//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

namespace {
struct MemberCallInfo {
  RequiredArgs ReqArgs;
  // Number of prefix arguments for the call. Ignores the `this` pointer.
  unsigned PrefixSize;
};
}

static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args, CallArgList *RtlArgs) {
  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isInstance() &&
         "Trying to emit a member or operator call expr on a static method!");
  ASTContext &C = CGF.getContext();

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
  Args.add(RValue::get(This),
           RD ? C.getPointerType(C.getTypeDeclType(RD)) : C.VoidPtrTy);

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size(), MD);
  unsigned PrefixSize = Args.size() - 1;

  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return {required, PrefixSize};
}
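
// Illustrative example (not from the original source): for a plain member
// call such as
//
//   struct S { int f(int); };
//   s.f(42);
//
// the argument list built above starts with 'this' (&s) followed by 42. Any
// implicit ABI-mandated argument such as a VTT would be added right after
// 'this' and counted by PrefixSize, which deliberately ignores the 'this'
// pointer itself.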

RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, const CGCallee &Callee,
    ReturnValueSlot ReturnValue,
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
    const CallExpr *CE, CallArgList *RtlArgs) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
  return EmitCall(FnInfo, Callee, ReturnValue, Args);
}

RValue CodeGenFunction::EmitCXXDestructorCall(
    const CXXDestructorDecl *DD, const CGCallee &Callee, llvm::Value *This,
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE,
    StructorType Type) {
  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, DD, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args, nullptr);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(DD, Type),
                  Callee, ReturnValueSlot(), Args);
}

RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
    const CXXPseudoDestructorExpr *E) {
  QualType DestroyedType = E->getDestroyedType();
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
    // Automatic Reference Counting:
    //   If the pseudo-expression names a retainable object with weak or
    //   strong lifetime, the object shall be released.
    Expr *BaseExpr = E->getBase();
    Address BaseValue = Address::invalid();
    Qualifiers BaseQuals;

    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
    if (E->isArrow()) {
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress();
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }

    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
      break;
    }
  } else {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
}
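
// Illustrative example (not from the original source): a pseudo-destructor
// call on a scalar type only evaluates its base expression, e.g.
//
//   typedef int I;
//   void f(I *p) { p->~I(); }   // no destruction code is emitted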

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD), MD);
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
                    ReturnValue);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall &&
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return
      // type of MD and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }
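
  // Illustrative example (not from the original source): a covariant case
  // the code above declines to devirtualize:
  //
  //   struct B { virtual B *self(); };
  //   struct D final : B { D *self() override; };   // covariant return
  //
  // A devirtualized call to D::self may require adjusting the returned
  // pointer back to B*, so the virtual call is kept instead.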

  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
  CallArgList RtlArgStorage;
  CallArgList *RtlArgs = nullptr;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      RtlArgs = &RtlArgStorage;
      EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                   drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                   /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
    }
  }
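
  // Illustrative example (not from the original source): given
  //
  //   struct S { S &operator=(const S &); };
  //   S &lhs(); const S &rhs();
  //   lhs() = rhs();
  //
  // C++17 requires rhs() to be evaluated before lhs(), which is why the
  // argument is emitted right-to-left here, before the object expression.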

  Address This = Address::invalid();
  if (IsArrow)
    This = EmitPointerWithAlignment(Base);
  else
    This = EmitLValue(Base).getAddress();

  if (MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion())) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(nullptr);

    if (!MD->getParent()->mayInsertExtraPadding()) {
      if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
        // We don't like to generate the trivial copy/move assignment operator
        // when it isn't necessary; just produce the proper effect here.
        LValue RHS = isa<CXXOperatorCallExpr>(CE)
                         ? MakeNaturalAlignAddrLValue(
                               (*RtlArgs)[0].RV.getScalarVal(),
                               (*(CE->arg_begin() + 1))->getType())
                         : EmitLValue(*CE->arg_begin());
        EmitAggregateAssign(This, RHS.getAddress(), CE->getType());
        return RValue::get(This.getPointer());
      }

      if (isa<CXXConstructorDecl>(MD) &&
          cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
        // Trivial move and copy ctor are the same.
        assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
        Address RHS = EmitLValue(*CE->arg_begin()).getAddress();
        EmitAggregateCopy(This, RHS, (*CE->arg_begin())->getType());
        return RValue::get(This.getPointer());
      }
      llvm_unreachable("unknown trivial member function");
    }
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        Dtor, StructorType::Complete);
  else if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        Ctor, StructorType::Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object
  //   that is not of type X, or of a type derived from X, the behavior is
  //   undefined.
  SourceLocation CallLoc;
  ASTContext &C = getContext();
  if (CE)
    CallLoc = CE->getExprLoc();

  SanitizerSet SkippedChecks;
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }
  EmitTypeCheck(
      isa<CXXConstructorDecl>(CalleeDecl) ? CodeGenFunction::TCK_ConstructorCall
                                          : CodeGenFunction::TCK_MemberCall,
      CallLoc, This.getPointer(), C.getRecordType(CalleeDecl->getParent()),
      /*Alignment=*/CharUnits::Zero(), SkippedChecks);

  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
  // 'CalleeDecl' instead.

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(
          *this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE));
    } else {
      CGCallee Callee;
      if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee = CGCallee::forDirect(
            CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty),
            Dtor);
      else {
        const CXXDestructorDecl *DDtor =
            cast<CXXDestructorDecl>(DevirtualizedMethod);
        Callee = CGCallee::forDirect(
            CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty),
            DDtor);
      }
      EmitCXXMemberOrOperatorCall(
          CalleeDecl, Callee, ReturnValue, This.getPointer(),
          /*ImplicitParam=*/nullptr, QualType(), CE, nullptr);
    }
    return RValue::get(nullptr);
  }

  CGCallee Callee;
  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGCallee::forDirect(
        CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty),
        Ctor);
  } else if (UseVirtualCall) {
    Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty,
                                                       CE->getLocStart());
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable = GetVTablePtr(This, Int8PtrTy, MD->getParent());
      EmitVTablePtrCheckForCall(MD->getParent(), VTable, CFITCK_NVCall,
                                CE->getLocStart());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), MD);
    else
      Callee = CGCallee::forDirect(
          CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
          DevirtualizedMethod);
  }

  if (MD->isVirtual()) {
    This = CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
        *this, CalleeDecl, This, UseVirtualCall);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
      MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
      MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Emit the 'this' pointer.
  Address This = Address::invalid();
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitPointerWithAlignment(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
                QualType(MPT->getClass(), 0));

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Ask the ABI to load the callee. Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  CGCallee Callee =
      CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                                      ThisPtrForCall, MemFnPtr,
                                                      MPT);

  CallArgList Args;

  QualType ThisType =
      getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required =
      RequiredArgs::forPrototypePlus(FPT, 1, /*FD=*/nullptr);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
                                                      /*PrefixSize=*/0),
                  Callee, ReturnValue, Args);
}
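
// Illustrative example (not from the original source): the function above
// handles calls through pointers to member functions, e.g.
//
//   struct S { int f(int); };
//   int call(S &s, int (S::*pmf)(int)) { return (s.*pmf)(1); }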

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0));
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            Address DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits NVSize = Layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present; they are initialized by the most derived class before calling the
  // constructor.
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);

  // Each store is split by the existence of a vbptr.
  CharUnits VBPtrWidth = CGF.getPointerSize();
  std::vector<CharUnits> VBPtrOffsets =
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
    // Stop before we hit any virtual base pointers located in virtual bases.
    if (VBPtrOffset >= NVSize)
      break;
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
    CharUnits LastStoreOffset = LastStore.first;
    CharUnits LastStoreSize = LastStore.second;

    CharUnits SplitBeforeOffset = LastStoreOffset;
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
  if (!NullConstantForBase->isNullValue()) {
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
        CGF.CGM.getModule(), NullConstantForBase->getType(),
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
        NullConstantForBase, Twine());

    CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
                               DestPtr.getAlignment());
    NullVariable->setAlignment(Align.getQuantity());

    Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);

    // Get and call the appropriate llvm.memcpy overload.
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
    }
  }
}
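
// Illustrative example (not from the original source): under the Microsoft
// ABI a class with virtual bases stores a vbptr in its non-virtual part:
//
//   struct A { int a; };
//   struct B : virtual A { int b; };   // B subobject layout: [vbptr][b]
//
// Zero-initializing a B base subobject must not overwrite the vbptr slot,
// which is why the single store is split around each vbptr offset above.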

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddress(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
                                      CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ArrayType *arrayType
        = getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E);
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      LLVM_FALLTHROUGH;

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating,
                           Dest.getAddress(), E);
  }
}
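
// Illustrative example (not from the original source): with constructor
// elision enabled (the default), the temporary below is constructed
// directly into 'dest' and no copy constructor call is emitted:
//
//   struct T { T(); T(const T &); };
//   T make();
//   T dest = make();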

void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
                                                 const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}
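
// Illustrative example (not from the original source): for
//
//   struct S { ~S(); };
//   S *p = new S[n];
//
// the Itanium ABI stores 'n' in a cookie in front of the array so that
// 'delete[] p' knows how many destructors to run; the padding reserved for
// that cookie is what GetArrayCookieSize reports.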

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.CGM.EmitConstantExpr(e->getArraySize(),
                                         CGF.getContext().getSizeType(), &CGF);
  if (!numElements)
    numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}
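
// Illustrative example (not from the original source): for
//
//   int *p = new int[n];   // n is only known at run time
//
// the code above computes n * sizeof(int) with llvm.umul.with.overflow and
// swaps in an all-ones size (SIZE_MAX) on overflow, so operator new fails
// cleanly instead of returning a too-small allocation.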

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  EHScopeStack::stable_iterator Cleanup;
  llvm::Instruction *CleanupDominator = nullptr;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };

  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if (ILE->isStringLiteralInit()) {
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased);
      EmitAggExpr(ILE->getInit(0), Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
      CurPtr =
          Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                            Builder.getSize(InitListElements),
                                            "string.init.end"),
                  CurPtr.getAlignment().alignmentAtOffset(InitListElements *
                                                          ElementSize));

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }

    InitListElements = ILE->getNumInits();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (needsEHCleanup(DtorKind)) {
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
      pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
                                       ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      Cleanup = EHStack.stable_begin();
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        auto *FinishedPtr =
            Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
        Builder.CreateStore(FinishedPtr, EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
                              ILE->getInit(i)->getType(), CurPtr);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       StartAlign.alignmentAtOffset((i + 1) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
  }

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    // If there was a Cleanup, deactivate it.
    if (CleanupDominator)
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If new expression did not specify value-initialization, then there
      // is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               CCE->requiresZeroInitialization());
    return;
  }

  // If this is value-initialization, we can usually use memset.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RType->getDecl()->fields())
          if (!Field->isUnnamedBitfield())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }

  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr =
    Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
    Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);

  CurPtr = Address(CurPtrPhi, ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!CleanupDominator && needsEHCleanup(DtorKind)) {
    pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
                                   ElementType, ElementAlign,
                                   getDestroyer(DtorKind));
    Cleanup = EHStack.stable_begin();
    CleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr);

  // Leave the Cleanup if we entered one.
  if (CleanupDominator) {
    DeactivateCleanupBlock(Cleanup, CleanupDominator);
    CleanupDominator->eraseFromParent();
  }

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr =
    Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
                                       "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}
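
// Illustrative example (not from the original source): the paths above
// cooperate for a braced array-new such as
//
//   int *p = new int[n]{1, 2};
//
// the two explicit initializers are stored one element at a time, and the
// remaining n - 2 elements are zeroed with a single memset (or, for
// non-trivial element types, initialized in the loop emitted above).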

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType, llvm::Type *ElementTy,
                               Address NewPtr, llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  ApplyDebugLocation DL(CGF, E);
  if (E->isArray())
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
                                AllocSizeWithoutCookie);
  else if (const Expr *Init = E->getInitializer())
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}

/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *CalleeDecl,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::Instruction *CallOrInvoke;
  llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
  CGCallee Callee = CGCallee::forDirect(CalleePtr, CalleeDecl);
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
                       Args, CalleeType, /*chainCall=*/false),
                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    // FIXME: Add addAttribute to CallSite.
    if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
      CI->addAttribute(llvm::AttributeList::FunctionIndex,
                       llvm::Attribute::Builtin);
    else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
      II->addAttribute(llvm::AttributeList::FunctionIndex,
                       llvm::Attribute::Builtin);
    else
      llvm_unreachable("unexpected kind of call instruction");
  }

  return RV;
}

RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                                 const Expr *Arg,
                                                 bool IsDelete) {
  CallArgList Args;
  const Stmt *ArgS = Arg;
  EmitCallArgs(Args, *Type->param_type_begin(), llvm::makeArrayRef(ArgS));
  // Find the allocation or deallocation function that we're calling.
  ASTContext &Ctx = getContext();
  DeclarationName Name = Ctx.DeclarationNames
      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
        return EmitNewDeleteCall(*this, cast<FunctionDecl>(Decl), Type, Args);
  llvm_unreachable("predeclared global operator new/delete is missing");
}

static std::pair<bool, bool>
shouldPassSizeAndAlignToUsualDelete(const FunctionProtoType *FPT) {
  auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();

  // The first argument is always a void*.
  ++AI;

  // Figure out what other parameters we should be implicitly passing.
  bool PassSize = false;
  bool PassAlignment = false;

  if (AI != AE && (*AI)->isIntegerType()) {
    PassSize = true;
    ++AI;
  }

  if (AI != AE && (*AI)->isAlignValT()) {
    PassAlignment = true;
    ++AI;
  }

  assert(AI == AE && "unexpected usual deallocation function parameter");
  return {PassSize, PassAlignment};
}
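
// Illustrative example (not from the original source): the usual
// deallocation signatures this recognizes include
//
//   void operator delete(void *) noexcept;
//   void operator delete(void *, std::size_t) noexcept;         // PassSize
//   void operator delete(void *, std::align_val_t) noexcept;    // PassAlignment
//   void operator delete(void *, std::size_t, std::align_val_t) noexcept;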

namespace {
  /// A cleanup to call the given 'operator delete' function upon abnormal
  /// exit from a new expression. Templated on a traits type that deals with
  /// ensuring that the arguments dominate the cleanup if necessary.
  template<typename Traits>
  class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
    /// Type used to hold llvm::Value*s.
    typedef typename Traits::ValueTy ValueTy;
    /// Type used to hold RValues.
    typedef typename Traits::RValueTy RValueTy;
    struct PlacementArg {
      RValueTy ArgValue;
      QualType ArgType;
    };

    unsigned NumPlacementArgs : 31;
    unsigned PassAlignmentToPlacementDelete : 1;
    const FunctionDecl *OperatorDelete;
    ValueTy Ptr;
    ValueTy AllocSize;
    CharUnits AllocAlign;

    PlacementArg *getPlacementArgs() {
      return reinterpret_cast<PlacementArg *>(this + 1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(PlacementArg);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete, ValueTy Ptr,
                        ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
                        CharUnits AllocAlign)
      : NumPlacementArgs(NumPlacementArgs),
        PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
        OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
        AllocAlign(AllocAlign) {}

    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = {Arg, Type};
    }

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const FunctionProtoType *FPT =
          OperatorDelete->getType()->getAs<FunctionProtoType>();
      CallArgList DeleteArgs;

      // The first argument is always a void*.
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));

      // Figure out what other parameters we should be implicitly passing.
      bool PassSize = false;
      bool PassAlignment = false;
      if (NumPlacementArgs) {
        // A placement deallocation function is implicitly passed an alignment
        // if the placement allocation function was, but is never passed a size.
        PassAlignment = PassAlignmentToPlacementDelete;
      } else {
        // For a non-placement new-expression, 'operator delete' can take a
        // size and/or an alignment if it has the right parameters.
        std::tie(PassSize, PassAlignment) =
            shouldPassSizeAndAlignToUsualDelete(FPT);
      }

      // The second argument can be a std::size_t (for non-placement delete).
      if (PassSize)
        DeleteArgs.add(Traits::get(CGF, AllocSize),
                       CGF.getContext().getSizeType());

      // The next (second or third) argument can be a std::align_val_t, which
      // is an enum whose underlying type is std::size_t.
      // FIXME: Use the right type as the parameter type. Note that in a call
      // to operator delete(size_t, ...), we may not have it available.
      if (PassAlignment)
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
                           CGF.SizeTy, AllocAlign.getQuantity())),
                       CGF.getContext().getSizeType());

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        auto Arg = getPlacementArgs()[I];
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
      }

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  Address NewPtr,
                                  llvm::Value *AllocSize,
                                  CharUnits AllocAlign,
                                  const CallArgList &NewArgs) {
  unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;

  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    struct DirectCleanupTraits {
      typedef llvm::Value *ValueTy;
      typedef RValue RValueTy;
      static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
      static RValue get(CodeGenFunction &, RValueTy V) { return V; }
    };

    typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;

    DirectCleanup *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
                                           E->getNumPlacementArgs(),
                                           E->getOperatorDelete(),
                                           NewPtr.getPointer(),
                                           AllocSize,
                                           E->passAlignment(),
                                           AllocAlign);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
      auto &Arg = NewArgs[I + NumNonPlacementArgs];
      Cleanup->setPlacementArg(I, Arg.RV, Arg.Ty);
    }

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  struct ConditionalCleanupTraits {
    typedef DominatingValue<RValue>::saved_type ValueTy;
    typedef DominatingValue<RValue>::saved_type RValueTy;
    static RValue get(CodeGenFunction &CGF, ValueTy V) {
      return V.restore(CGF);
    }
  };
  typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;

  ConditionalCleanup *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
                                              E->getNumPlacementArgs(),
                                              E->getOperatorDelete(),
                                              SavedNewPtr,
                                              SavedAllocSize,
                                              E->passAlignment(),
                                              AllocAlign);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
    auto &Arg = NewArgs[I + NumNonPlacementArgs];
    Cleanup->setPlacementArg(I, DominatingValue<RValue>::save(CGF, Arg.RV),
                             Arg.Ty);
  }

  CGF.initFullExprCleanup();
}
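
// Illustrative example (not from the original source): for a placement
// new-expression with a matching placement delete,
//
//   void *operator new(std::size_t, Arena &);
//   void operator delete(void *, Arena &);
//   T *t = new (arena) T();
//
// the cleanup entered above calls operator delete(t, arena) if T's
// constructor throws, as the standard requires.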

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
    if (ILE && ILE->isStringLiteralInit())
      minElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
    else if (ILE)
      minElements = ILE->getNumInits();
  }

  llvm::Value *numElements = nullptr;
  llvm::Value *allocSizeWithoutCookie = nullptr;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);
  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  Address allocation = Address::invalid();
  CallArgList allocatorArgs;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(E->getNumPlacementArgs() == 1);
    const Expr *arg = *E->placement_arguments().begin();

    LValueBaseInfo BaseInfo;
    allocation = EmitPointerWithAlignment(arg, &BaseInfo);

    // The pointer expression will, in many cases, be an opaque void*.
    // In these cases, discard the computed alignment and use the
    // formal alignment of the allocated type.
    if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
      allocation = Address(allocation.getPointer(), allocAlign);

    // Set up allocatorArgs for the call to operator delete if it's not
    // the reserved global operator.
    if (E->getOperatorDelete() &&
        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
      allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
      allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
    }
  } else {
    const FunctionProtoType *allocatorType =
      allocator->getType()->castAs<FunctionProtoType>();
    unsigned ParamsToSkip = 0;

    // The allocation size is the first argument.
    QualType sizeType = getContext().getSizeType();
    allocatorArgs.add(RValue::get(allocSize), sizeType);
    ++ParamsToSkip;

    if (allocSize != allocSizeWithoutCookie) {
      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
      allocAlign = std::max(allocAlign, cookieAlign);
    }

    // The allocation alignment may be passed as the second argument.
    if (E->passAlignment()) {
      QualType AlignValT = sizeType;
      if (allocatorType->getNumParams() > 1) {
        AlignValT = allocatorType->getParamType(1);
        assert(getContext().hasSameUnqualifiedType(
                   AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
                   sizeType) &&
               "wrong type for alignment parameter");
        ++ParamsToSkip;
      } else {
        // Corner case, passing alignment to 'operator new(size_t, ...)'.
        assert(allocator->isVariadic() && "can't pass alignment to allocator");
      }
      allocatorArgs.add(
          RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
          AlignValT);
    }

    // FIXME: Why do we not pass a CalleeDecl here?
    EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
                 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
1591 EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
1593 // If this was a call to a global replaceable allocation function that does
1594 // not take an alignment argument, the allocator is known to produce
1595 // storage that's suitably aligned for any object that fits, up to a known
1596 // threshold. Otherwise assume it's suitably aligned for the allocated type.
1597 CharUnits allocationAlign = allocAlign;
1598 if (!E->passAlignment() &&
1599 allocator->isReplaceableGlobalAllocationFunction()) {
1600 unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
1601 Target.getNewAlign(), getContext().getTypeSize(allocType)));
1602 allocationAlign = std::max(
1603 allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
1604 }
1606 allocation = Address(RV.getScalarVal(), allocationAlign);
1607 }
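// Illustrative note (added; not in the original source): on a typical
// x86-64 target getNewAlign() is 128 bits (16 bytes), so for, say,
//   struct S { double a, b; };   // 16 bytes, 8-byte alignment
//   S *p = new S;
// the global operator new may be assumed to return 16-byte-aligned
// storage here, even though allocAlign for S is only 8 bytes.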
1609 // Emit a null check on the allocation result if the allocation
1610 // function is allowed to return null (because it has a non-throwing
1611 // exception spec or is the reserved placement new) and we have an
1612 // interesting initializer.
1613 bool nullCheck = E->shouldNullCheckAllocation(getContext()) &&
1614 (!allocType.isPODType(getContext()) || E->hasInitializer());
1616 llvm::BasicBlock *nullCheckBB = nullptr;
1617 llvm::BasicBlock *contBB = nullptr;
1619 // The null-check means that the initializer is conditionally
1620 // evaluated.
1621 ConditionalEvaluation conditional(*this);
1623 if (nullCheck) {
1624 conditional.begin(*this);
1626 nullCheckBB = Builder.GetInsertBlock();
1627 llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1628 contBB = createBasicBlock("new.cont");
1630 llvm::Value *isNull =
1631 Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
1632 Builder.CreateCondBr(isNull, contBB, notNullBB);
1633 EmitBlock(notNullBB);
1634 }
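// Illustrative note (added; not in the original source): the control flow
// emitted for the null check is roughly
//   %new.isnull = icmp eq i8* %call, null
//   br i1 %new.isnull, label %new.cont, label %new.notnull
// so a null result from, e.g., new (std::nothrow) skips initialization.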
1636 // If there's an operator delete, enter a cleanup to call it if an
1637 // exception is thrown.
1638 EHScopeStack::stable_iterator operatorDeleteCleanup;
1639 llvm::Instruction *cleanupDominator = nullptr;
1640 if (E->getOperatorDelete() &&
1641 !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1642 EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
1643 allocatorArgs);
1644 operatorDeleteCleanup = EHStack.stable_begin();
1645 cleanupDominator = Builder.CreateUnreachable();
1646 }
1648 assert((allocSize == allocSizeWithoutCookie) ==
1649 CalculateCookiePadding(*this, E).isZero());
1650 if (allocSize != allocSizeWithoutCookie) {
1651 assert(E->isArray());
1652 allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1653 numElements,
1654 E, allocType);
1655 }
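// Illustrative note (added; not in the original source): under the Itanium
// ABI, for
//   struct T { ~T(); };
//   T *p = new T[n];
// the cookie is the element count stored just before the array, so the
// pointer handed back to the program is the allocation plus the cookie.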
1657 llvm::Type *elementTy = ConvertTypeForMem(allocType);
1658 Address result = Builder.CreateElementBitCast(allocation, elementTy);
1660 // Pass the pointer through an invariant.group.barrier to avoid propagating
1661 // vptr information that may be encoded in the previous type.
1662 // To avoid breaking LTO between different optimization levels, we do this
1663 // regardless of optimization level.
1664 if (CGM.getCodeGenOpts().StrictVTablePointers &&
1665 allocator->isReservedGlobalPlacementOperator())
1666 result = Address(Builder.CreateInvariantGroupBarrier(result.getPointer()),
1667 result.getAlignment());
1669 EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
1670 allocSizeWithoutCookie);
1671 if (E->isArray()) {
1672 // NewPtr is a pointer to the base element type. If we're
1673 // allocating an array of arrays, we'll need to cast back to the
1674 // array pointer type.
1675 llvm::Type *resultType = ConvertTypeForMem(E->getType());
1676 if (result.getType() != resultType)
1677 result = Builder.CreateBitCast(result, resultType);
1678 }
1680 // Deactivate the 'operator delete' cleanup if we finished
1681 // initialization.
1682 if (operatorDeleteCleanup.isValid()) {
1683 DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1684 cleanupDominator->eraseFromParent();
1685 }
1687 llvm::Value *resultPtr = result.getPointer();
1688 if (nullCheck) {
1689 conditional.end(*this);
1691 llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1692 EmitBlock(contBB);
1694 llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
1695 PHI->addIncoming(resultPtr, notNullBB);
1696 PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
1697 nullCheckBB);
1699 resultPtr = PHI;
1700 }
1702 return resultPtr;
1703 }
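// Illustrative note (added; not in the original source): when the null
// check is emitted, "new.cont" above merges the two paths with a phi,
// roughly
//   %result = phi %T* [ %initialized, %new.notnull ], [ null, %entry ]
// which is how `new (std::nothrow) T` can evaluate to null without ever
// running T's constructor.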
1705 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1706 llvm::Value *Ptr, QualType DeleteTy,
1707 llvm::Value *NumElements,
1708 CharUnits CookieSize) {
1709 assert((!NumElements && CookieSize.isZero()) ||
1710 DeleteFD->getOverloadedOperator() == OO_Array_Delete);
1712 const FunctionProtoType *DeleteFTy =
1713 DeleteFD->getType()->getAs<FunctionProtoType>();
1715 CallArgList DeleteArgs;
1717 std::pair<bool, bool> PassSizeAndAlign =
1718 shouldPassSizeAndAlignToUsualDelete(DeleteFTy);
1720 auto ParamTypeIt = DeleteFTy->param_type_begin();
1722 // Pass the pointer itself.
1723 QualType ArgTy = *ParamTypeIt++;
1724 llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1725 DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1727 // Pass the size if the delete function has a size_t parameter.
1728 if (PassSizeAndAlign.first) {
1729 QualType SizeType = *ParamTypeIt++;
1730 CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1731 llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
1732 DeleteTypeSize.getQuantity());
1734 // For array new, multiply by the number of elements.
1735 if (NumElements)
1736 Size = Builder.CreateMul(Size, NumElements);
1738 // If there is a cookie, add the cookie size.
1739 if (!CookieSize.isZero())
1740 Size = Builder.CreateAdd(
1741 Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));
1743 DeleteArgs.add(RValue::get(Size), SizeType);
1744 }
1746 // Pass the alignment if the delete function has an align_val_t parameter.
1747 if (PassSizeAndAlign.second) {
1748 QualType AlignValType = *ParamTypeIt++;
1749 CharUnits DeleteTypeAlign = getContext().toCharUnitsFromBits(
1750 getContext().getTypeAlignIfKnown(DeleteTy));
1751 llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
1752 DeleteTypeAlign.getQuantity());
1753 DeleteArgs.add(RValue::get(Align), AlignValType);
1754 }
1756 assert(ParamTypeIt == DeleteFTy->param_type_end() &&
1757 "unknown parameter to usual delete function");
1759 // Emit the call to delete.
1760 EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
1761 }
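// Illustrative note (added; not in the original source): for a sized usual
// deallocation function such as
//   void operator delete[](void *p, std::size_t sz) noexcept;
// the Size argument built above is NumElements * sizeof(ElementType) plus
// CookieSize, i.e. the same number of bytes the matching array new
// requested.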
1763 namespace {
1764 /// Calls the given 'operator delete' on a single object.
1765 struct CallObjectDelete final : EHScopeStack::Cleanup {
1766 llvm::Value *Ptr;
1767 const FunctionDecl *OperatorDelete;
1768 QualType ElementType;
1770 CallObjectDelete(llvm::Value *Ptr,
1771 const FunctionDecl *OperatorDelete,
1772 QualType ElementType)
1773 : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1775 void Emit(CodeGenFunction &CGF, Flags flags) override {
1776 CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1777 }
1778 };
1779 }
1781 void
1782 CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1783 llvm::Value *CompletePtr,
1784 QualType ElementType) {
1785 EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
1786 OperatorDelete, ElementType);
1787 }
1789 /// Emit the code for deleting a single object.
1790 static void EmitObjectDelete(CodeGenFunction &CGF,
1791 const CXXDeleteExpr *DE,
1792 Address Ptr,
1793 QualType ElementType) {
1794 // C++11 [expr.delete]p3:
1795 // If the static type of the object to be deleted is different from its
1796 // dynamic type, the static type shall be a base class of the dynamic type
1797 // of the object to be deleted and the static type shall have a virtual
1798 // destructor or the behavior is undefined.
1799 CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
1800 DE->getExprLoc(), Ptr.getPointer(),
1801 ElementType);
1803 // Find the destructor for the type, if applicable. If the
1804 // destructor is virtual, we'll just emit the vcall and return.
1805 const CXXDestructorDecl *Dtor = nullptr;
1806 if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1807 CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1808 if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1809 Dtor = RD->getDestructor();
1811 if (Dtor->isVirtual()) {
1812 CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1813 Dtor);
1814 return;
1815 }
1816 }
1817 }
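// Illustrative note (added; not in the original source): for
//   struct B { virtual ~B(); };
//   delete b;   // b has static type B*
// the virtual path above emits a call through the vtable to the deleting
// destructor (D0 under Itanium), which both destroys the most-derived
// object and frees its storage, so no separate operator delete call is
// emitted here.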
1819 // Make sure that we call delete even if the dtor throws.
1820 // This doesn't have to be a conditional cleanup because we're going
1821 // to pop it off in a second.
1822 const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
1823 CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1824 Ptr.getPointer(),
1825 OperatorDelete, ElementType);
1827 if (Dtor)
1828 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1829 /*ForVirtualBase=*/false,
1830 /*Delegating=*/false,
1831 Ptr.getPointer());
1832 else if (auto Lifetime = ElementType.getObjCLifetime()) {
1833 switch (Lifetime) {
1834 case Qualifiers::OCL_None:
1835 case Qualifiers::OCL_ExplicitNone:
1836 case Qualifiers::OCL_Autoreleasing:
1837 break;
1839 case Qualifiers::OCL_Strong:
1840 CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
1841 break;
1843 case Qualifiers::OCL_Weak:
1844 CGF.EmitARCDestroyWeak(Ptr);
1845 break;
1846 }
1847 }
1849 CGF.PopCleanupBlock();
1850 }
1852 namespace {
1853 /// Calls the given 'operator delete' on an array of objects.
1854 struct CallArrayDelete final : EHScopeStack::Cleanup {
1855 llvm::Value *Ptr;
1856 const FunctionDecl *OperatorDelete;
1857 llvm::Value *NumElements;
1858 QualType ElementType;
1859 CharUnits CookieSize;
1861 CallArrayDelete(llvm::Value *Ptr,
1862 const FunctionDecl *OperatorDelete,
1863 llvm::Value *NumElements,
1864 QualType ElementType,
1865 CharUnits CookieSize)
1866 : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1867 ElementType(ElementType), CookieSize(CookieSize) {}
1869 void Emit(CodeGenFunction &CGF, Flags flags) override {
1870 CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
1871 CookieSize);
1872 }
1873 };
1874 }
1876 /// Emit the code for deleting an array of objects.
1877 static void EmitArrayDelete(CodeGenFunction &CGF,
1878 const CXXDeleteExpr *E,
1879 Address deletedPtr,
1880 QualType elementType) {
1881 llvm::Value *numElements = nullptr;
1882 llvm::Value *allocatedPtr = nullptr;
1883 CharUnits cookieSize;
1884 CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
1885 numElements, allocatedPtr, cookieSize);
1887 assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
1889 // Make sure that we call delete even if one of the dtors throws.
1890 const FunctionDecl *operatorDelete = E->getOperatorDelete();
1891 CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1892 allocatedPtr, operatorDelete,
1893 numElements, elementType,
1894 cookieSize);
1896 // Destroy the elements.
1897 if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
1898 assert(numElements && "no element count for a type with a destructor!");
1900 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1901 CharUnits elementAlign =
1902 deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
1904 llvm::Value *arrayBegin = deletedPtr.getPointer();
1905 llvm::Value *arrayEnd =
1906 CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");
1908 // Note that it is legal to allocate a zero-length array, and we
1909 // can never fold the check away because the length should always
1910 // come from a cookie.
1911 CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
1912 CGF.getDestroyer(dtorKind),
1913 /*checkZeroLength*/ true,
1914 CGF.needsEHCleanup(dtorKind));
1915 }
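// Illustrative note (added; not in the original source): because the
// CallArrayDelete cleanup was pushed before the element destructors run,
// operator delete[] is still invoked if a destructor throws, and
// emitArrayDestroy tears the elements down in reverse construction order.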
1917 // Pop the cleanup block.
1918 CGF.PopCleanupBlock();
1919 }
1921 void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
1922 const Expr *Arg = E->getArgument();
1923 Address Ptr = EmitPointerWithAlignment(Arg);
1925 // Null check the pointer.
1926 llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
1927 llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
1929 llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");
1931 Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
1932 EmitBlock(DeleteNotNull);
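// Illustrative note (added; not in the original source): deleting a null
// pointer is a no-op, so the branch above jumps straight to "delete.end"
// when IsNull is true; e.g. `delete (T *)nullptr;` emits no destructor or
// deallocation call.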
1934 // We might be deleting a pointer to array. If so, GEP down to the
1935 // first non-array element.
1936 // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
1937 QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
1938 if (DeleteTy->isConstantArrayType()) {
1939 llvm::Value *Zero = Builder.getInt32(0);
1940 SmallVector<llvm::Value*,8> GEP;
1942 GEP.push_back(Zero); // point at the outermost array
1944 // For each layer of array type we're pointing at:
1945 while (const ConstantArrayType *Arr
1946 = getContext().getAsConstantArrayType(DeleteTy)) {
1947 // 1. Unpeel the array type.
1948 DeleteTy = Arr->getElementType();
1950 // 2. GEP to the first element of the array.
1951 GEP.push_back(Zero);
1952 }
1954 Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
1955 Ptr.getAlignment());
1956 }
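// Illustrative note (added; not in the original source): given
//   A (*p)[3][7] = new A[n][3][7];
//   delete [] p;
// DeleteTy unpeels to A and the GEP built above is roughly
//   getelementptr inbounds [3 x [7 x %A]], [3 x [7 x %A]]* %p,
//                 i32 0, i32 0, i32 0
// yielding a pointer to the first A element.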
1958 assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
1960 if (E->isArrayForm()) {
1961 EmitArrayDelete(*this, E, Ptr, DeleteTy);
1962 } else {
1963 EmitObjectDelete(*this, E, Ptr, DeleteTy);
1964 }
1966 EmitBlock(DeleteEnd);
1967 }
1969 static bool isGLValueFromPointerDeref(const Expr *E) {
1970 E = E->IgnoreParens();
1972 if (const auto *CE = dyn_cast<CastExpr>(E)) {
1973 if (!CE->getSubExpr()->isGLValue())
1974 return false;
1975 return isGLValueFromPointerDeref(CE->getSubExpr());
1976 }
1978 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
1979 return isGLValueFromPointerDeref(OVE->getSourceExpr());
1981 if (const auto *BO = dyn_cast<BinaryOperator>(E))
1982 if (BO->getOpcode() == BO_Comma)
1983 return isGLValueFromPointerDeref(BO->getRHS());
1985 if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
1986 return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
1987 isGLValueFromPointerDeref(ACO->getFalseExpr());
1989 // C++11 [expr.sub]p1:
1990 // The expression E1[E2] is identical (by definition) to *((E1)+(E2))
1991 if (isa<ArraySubscriptExpr>(E))
1992 return true;
1994 if (const auto *UO = dyn_cast<UnaryOperator>(E))
1995 if (UO->getOpcode() == UO_Deref)
1996 return true;
1998 return false;
1999 }
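// Illustrative note (added; not in the original source): under this
// interpretation,
//   typeid(*p), typeid(p[0]), typeid((0, *p)), typeid(b ? *p : *q)
// are all treated as glvalues formed by dereferencing a pointer, and so
// receive the bad_typeid null check in EmitTypeidFromVTable below.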
2001 static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
2002 llvm::Type *StdTypeInfoPtrTy) {
2003 // Get the vtable pointer.
2004 Address ThisPtr = CGF.EmitLValue(E).getAddress();
2006 // C++ [expr.typeid]p2:
2007 // If the glvalue expression is obtained by applying the unary * operator to
2008 // a pointer and the pointer is a null pointer value, the typeid expression
2009 // throws the std::bad_typeid exception.
2010 //
2011 // However, this paragraph's intent is not clear. We choose a very generous
2012 // interpretation which implores us to consider comma operators, conditional
2013 // operators, parentheses and other such constructs.
2014 QualType SrcRecordTy = E->getType();
2015 if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
2016 isGLValueFromPointerDeref(E), SrcRecordTy)) {
2017 llvm::BasicBlock *BadTypeidBlock =
2018 CGF.createBasicBlock("typeid.bad_typeid");
2019 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
2021 llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
2022 CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
2024 CGF.EmitBlock(BadTypeidBlock);
2025 CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
2026 CGF.EmitBlock(EndBlock);
2027 }
2029 return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
2030 StdTypeInfoPtrTy);
2031 }
2033 llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
2034 llvm::Type *StdTypeInfoPtrTy =
2035 ConvertType(E->getType())->getPointerTo();
2037 if (E->isTypeOperand()) {
2038 llvm::Constant *TypeInfo =
2039 CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
2040 return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
2041 }
2043 // C++ [expr.typeid]p2:
2044 // When typeid is applied to a glvalue expression whose type is a
2045 // polymorphic class type, the result refers to a std::type_info object
2046 // representing the type of the most derived object (that is, the dynamic
2047 // type) to which the glvalue refers.
2048 if (E->isPotentiallyEvaluated())
2049 return EmitTypeidFromVTable(*this, E->getExprOperand(),
2050 StdTypeInfoPtrTy);
2052 QualType OperandTy = E->getExprOperand()->getType();
2053 return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
2054 StdTypeInfoPtrTy);
2055 }
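// Illustrative note (added; not in the original source): typeid(int) and
// typeid applied to a non-polymorphic glvalue resolve statically to an
// RTTI descriptor as above, while typeid(*polymorphicPtr) is potentially
// evaluated and loads the descriptor from the object's vtable.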
2057 static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
2058 QualType DestTy) {
2059 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
2060 if (DestTy->isPointerType())
2061 return llvm::Constant::getNullValue(DestLTy);
2063 /// C++ [expr.dynamic.cast]p9:
2064 /// A failed cast to reference type throws std::bad_cast
2065 if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
2066 return nullptr;
2068 CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
2069 return llvm::UndefValue::get(DestLTy);
2070 }
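// Illustrative note (added; not in the original source): this handles
// casts the frontend already knows are null; e.g.
//   Derived *d = dynamic_cast<Derived *>(static_cast<Base *>(nullptr));
// folds to a null pointer, while the reference form of a known-null cast
// emits the bad_cast path instead of producing a usable value.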
2072 llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
2073 const CXXDynamicCastExpr *DCE) {
2074 CGM.EmitExplicitCastExprType(DCE, this);
2075 QualType DestTy = DCE->getTypeAsWritten();
2077 if (DCE->isAlwaysNull())
2078 if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
2079 return T;
2081 QualType SrcTy = DCE->getSubExpr()->getType();
2083 // C++ [expr.dynamic.cast]p7:
2084 // If T is "pointer to cv void," then the result is a pointer to the most
2085 // derived object pointed to by v.
2086 const PointerType *DestPTy = DestTy->getAs<PointerType>();
2088 bool isDynamicCastToVoid;
2089 QualType SrcRecordTy;
2090 QualType DestRecordTy;
2091 if (DestPTy) {
2092 isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
2093 SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
2094 DestRecordTy = DestPTy->getPointeeType();
2095 } else {
2096 isDynamicCastToVoid = false;
2097 SrcRecordTy = SrcTy;
2098 DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
2099 }
2101 assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
2103 // C++ [expr.dynamic.cast]p4:
2104 // If the value of v is a null pointer value in the pointer case, the result
2105 // is the null pointer value of type T.
2106 bool ShouldNullCheckSrcValue =
2107 CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
2108 SrcRecordTy);
2110 llvm::BasicBlock *CastNull = nullptr;
2111 llvm::BasicBlock *CastNotNull = nullptr;
2112 llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
2114 if (ShouldNullCheckSrcValue) {
2115 CastNull = createBasicBlock("dynamic_cast.null");
2116 CastNotNull = createBasicBlock("dynamic_cast.notnull");
2118 llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
2119 Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
2120 EmitBlock(CastNotNull);
2121 }
2123 llvm::Value *Value;
2124 if (isDynamicCastToVoid) {
2125 Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
2126 DestTy);
2127 } else {
2128 assert(DestRecordTy->isRecordType() &&
2129 "destination type must be a record type!");
2130 Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
2131 DestTy, DestRecordTy, CastEnd);
2132 CastNotNull = Builder.GetInsertBlock();
2133 }
2135 if (ShouldNullCheckSrcValue) {
2136 EmitBranch(CastEnd);
2138 EmitBlock(CastNull);
2139 EmitBranch(CastEnd);
2140 }
2142 EmitBlock(CastEnd);
2144 if (ShouldNullCheckSrcValue) {
2145 llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
2146 PHI->addIncoming(Value, CastNotNull);
2147 PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
2149 Value = PHI;
2150 }
2152 return Value;
2153 }
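// Illustrative note (added; not in the original source): for
//   Derived *d = dynamic_cast<Derived *>(b);
// with the null check enabled, both "dynamic_cast.notnull" and
// "dynamic_cast.null" branch to "dynamic_cast.end", where the phi above
// selects either the ABI's cast result or a null pointer.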
2155 void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
2156 RunCleanupsScope Scope(*this);
2157 LValue SlotLV = MakeAddrLValue(Slot.getAddress(), E->getType());
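// Illustrative note (added; not in the original source): for a lambda like
//   int n = ...; auto f = [n, &v](int x) { return n + v[x]; };
// the loop below initializes each field of the closure class from the
// corresponding capture, in order; a capture of variable-length array type
// stores the array's size value rather than re-evaluating the size
// expression.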
2159 CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
2160 for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
2161 e = E->capture_init_end();
2162 i != e; ++i, ++CurField) {
2163 // Emit initialization
2164 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2165 if (CurField->hasCapturedVLAType()) {
2166 auto VAT = CurField->getCapturedVLAType();
2167 EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2168 } else {
2169 EmitInitializerForField(*CurField, LV, *i);
2170 }
2171 }
2172 }