//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//
14 #include "CodeGenFunction.h"
15 #include "CGCUDARuntime.h"
17 #include "CGDebugInfo.h"
18 #include "CGObjCRuntime.h"
19 #include "clang/CodeGen/CGFunctionInfo.h"
20 #include "clang/Frontend/CodeGenOptions.h"
21 #include "llvm/IR/CallSite.h"
22 #include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
namespace {
struct MemberCallInfo {
  RequiredArgs ReqArgs;
  // Number of prefix arguments for the call. Ignores the `this` pointer.
  unsigned PrefixSize;
};
}

static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args, CallArgList *RtlArgs) {
  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isInstance() &&
         "Trying to emit a member or operator call expr on a static method!");
  ASTContext &C = CGF.getContext();

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
  Args.add(RValue::get(This),
           RD ? C.getPointerType(C.getTypeDeclType(RD)) : C.VoidPtrTy);
  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size(), MD);
  unsigned PrefixSize = Args.size() - 1;
  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
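    // e.g. for `a = b` with a user-declared operator=, `b` was already
    // evaluated into RtlArgs before the `this` argument for `a`.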
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }

  return {required, PrefixSize};
}
RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, const CGCallee &Callee,
    ReturnValueSlot ReturnValue,
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
    const CallExpr *CE, CallArgList *RtlArgs) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
  return EmitCall(FnInfo, Callee, ReturnValue, Args);
}
RValue CodeGenFunction::EmitCXXDestructorCall(
    const CXXDestructorDecl *DD, const CGCallee &Callee, llvm::Value *This,
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE,
    StructorType Type) {
  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, DD, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args, nullptr);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(DD, Type),
                  Callee, ReturnValueSlot(), Args);
}
RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
    const CXXPseudoDestructorExpr *E) {
  QualType DestroyedType = E->getDestroyedType();
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
    // Automatic Reference Counting:
    //   If the pseudo-expression names a retainable object with weak or
    //   strong lifetime, the object shall be released.
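    // e.g. `p->~StrongPtr()`, where StrongPtr is a typedef for a __strong
    // object pointer type, releases the pointee instead of doing nothing.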
    Expr *BaseExpr = E->getBase();
    Address BaseValue = Address::invalid();
    Qualifiers BaseQuals;

    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
    if (E->isArrow()) {
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress();
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }
    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
      break;
    }
  } else {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
}
static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}
// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
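// e.g. (MSVC) `obj.Foo::Foo(42);` explicitly invokes the constructor on the
// existing object `obj`.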
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD), MD);
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
                    ReturnValue);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}
RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall && CanDevirtualizeMemberFunctionCall(Base, MD)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return type of MD
      // and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
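      // e.g. `struct D : B { D *clone() override; };` where B::clone()
      // returns B*: the covariant result may require a pointer adjustment.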
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }
  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
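  // e.g. in `lhs() = rhs()` with a user-declared operator=, C++17 requires
  // rhs() to be evaluated before lhs().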
  CallArgList RtlArgStorage;
  CallArgList *RtlArgs = nullptr;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      RtlArgs = &RtlArgStorage;
      EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                   drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                   /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
    }
  }
  Address This = Address::invalid();
  if (IsArrow)
    This = EmitPointerWithAlignment(Base);
  else
    This = EmitLValue(Base).getAddress();
  if (MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion())) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(nullptr);

    if (!MD->getParent()->mayInsertExtraPadding()) {
      if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
        // We don't like to generate the trivial copy/move assignment operator
        // when it isn't necessary; just produce the proper effect here.
        LValue RHS = isa<CXXOperatorCallExpr>(CE)
                         ? MakeNaturalAlignAddrLValue(
                               (*RtlArgs)[0].RV.getScalarVal(),
                               (*(CE->arg_begin() + 1))->getType())
                         : EmitLValue(*CE->arg_begin());
        EmitAggregateAssign(This, RHS.getAddress(), CE->getType());
        return RValue::get(This.getPointer());
      }

      if (isa<CXXConstructorDecl>(MD) &&
          cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
        // Trivial move and copy ctor are the same.
        assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
        Address RHS = EmitLValue(*CE->arg_begin()).getAddress();
        EmitAggregateCopy(This, RHS, (*CE->arg_begin())->getType());
        return RValue::get(This.getPointer());
      }
      llvm_unreachable("unknown trivial member function");
    }
  }
  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        Dtor, StructorType::Complete);
  else if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        Ctor, StructorType::Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
  SourceLocation CallLoc;
  ASTContext &C = getContext();
  if (CE)
    CallLoc = CE->getExprLoc();

  SanitizerSet SkippedChecks;
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }
  EmitTypeCheck(
      isa<CXXConstructorDecl>(CalleeDecl) ? CodeGenFunction::TCK_ConstructorCall
                                          : CodeGenFunction::TCK_MemberCall,
      CallLoc, This.getPointer(), C.getRecordType(CalleeDecl->getParent()),
      /*Alignment=*/CharUnits::Zero(), SkippedChecks);
  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
  // 'CalleeDecl' instead.

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
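  // e.g. `d.Base::f()` calls Base::f directly even if f is virtual, and
  // `obj.f()` where `obj` has record type needs no vtable dispatch.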
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(
          *this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE));
    } else {
      CGCallee Callee;
      if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee = CGCallee::forDirect(
            CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty),
            Dtor);
      else {
        const CXXDestructorDecl *DDtor =
            cast<CXXDestructorDecl>(DevirtualizedMethod);
        Callee = CGCallee::forDirect(
            CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty),
            DDtor);
      }
      EmitCXXMemberOrOperatorCall(
          CalleeDecl, Callee, ReturnValue, This.getPointer(),
          /*ImplicitParam=*/nullptr, QualType(), CE, nullptr);
    }
    return RValue::get(nullptr);
  }
  CGCallee Callee;
  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGCallee::forDirect(
        CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty),
        Ctor);
  } else if (UseVirtualCall) {
    Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty,
                                                       CE->getLocStart());
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable = GetVTablePtr(This, Int8PtrTy, MD->getParent());
      EmitVTablePtrCheckForCall(MD->getParent(), VTable, CFITCK_NVCall,
                                CE->getLocStart());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), MD);
    else {
      Callee = CGCallee::forDirect(
          CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
          DevirtualizedMethod);
    }
  }
  if (MD->isVirtual()) {
    This = CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
        *this, CalleeDecl, This, UseVirtualCall);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();
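  // e.g. for `(p->*mfp)(x)`: BaseExpr is `p`, MemFnExpr is `mfp`, and the
  // callee must be loaded from the member function pointer at runtime.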
  const MemberPointerType *MPT =
      MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
      MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Emit the 'this' pointer.
  Address This = Address::invalid();
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitPointerWithAlignment(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
                QualType(MPT->getClass(), 0));

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  CGCallee Callee =
      CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                                      ThisPtrForCall, MemFnPtr,
                                                      MPT);

  CallArgList Args;

  QualType ThisType =
      getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required =
      RequiredArgs::forPrototypePlus(FPT, 1, /*FD=*/nullptr);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
                                                      /*PrefixSize=*/0),
                  Callee, ReturnValue, Args);
}
RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0));
}
RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}
static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            Address DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits NVSize = Layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present; they are initialized by the most derived class before calling the
  // constructor.
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);
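  // e.g. under the Microsoft ABI, a class with virtual bases holds a vbptr
  // inside its non-virtual subobject; the stores below are split around it
  // so the vbptr written by the most-derived constructor is not clobbered.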
  // Each store is split by the existence of a vbptr.
  CharUnits VBPtrWidth = CGF.getPointerSize();
  std::vector<CharUnits> VBPtrOffsets =
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
    // Stop before we hit any virtual base pointers located in virtual bases.
    if (VBPtrOffset >= NVSize)
      break;
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
    CharUnits LastStoreOffset = LastStore.first;
    CharUnits LastStoreSize = LastStore.second;

    CharUnits SplitBeforeOffset = LastStoreOffset;
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
  if (!NullConstantForBase->isNullValue()) {
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
        CGF.CGM.getModule(), NullConstantForBase->getType(),
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
        NullConstantForBase, Twine());

    CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
                               DestPtr.getAlignment());
    NullVariable->setAlignment(Align.getQuantity());

    Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);

    // Get and call the appropriate llvm.memcpy overload.
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
    }
  }
}
void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddress(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
                                      CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ArrayType *arrayType
        = getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E);
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      LLVM_FALLTHROUGH;

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
      break;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating,
                           Dest.getAddress(), E);
  }
}
void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
                                                 const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }
  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.CGM.EmitConstantExpr(e->getArraySize(),
                                         CGF.getContext().getSizeType(), &CGF);
  if (!numElements)
    numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));
  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
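  // e.g. for `new int[n]` with a signed 32-bit `n`, a negative n must fail
  // even if `(size_t)n * sizeof(int) + cookie` would not wrap around.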
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;
  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
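    // e.g. `new int[n]{1, 2, 3}` must fail if n < 3.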
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }
  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.
    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);
    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;
    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }
    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}
void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  EHScopeStack::stable_iterator Cleanup;
  llvm::Instruction *CleanupDominator = nullptr;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };
  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if (ILE->isStringLiteralInit()) {
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
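      // e.g. for `new char[n]{"abc"}`, the string literal initializes the
      // leading elements; any remaining elements are zeroed below.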
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased);
      EmitAggExpr(ILE->getInit(0), Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
      CurPtr =
          Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                            Builder.getSize(InitListElements),
                                            "string.init.end"),
                  CurPtr.getAlignment().alignmentAtOffset(InitListElements *
                                                          ElementSize));

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }
    InitListElements = ILE->getNumInits();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (needsEHCleanup(DtorKind)) {
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
      pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
                                       ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      Cleanup = EHStack.stable_begin();
    }
    CharUnits StartAlign = CurPtr.getAlignment();
    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        auto FinishedPtr =
            Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
        Builder.CreateStore(FinishedPtr, EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
                              ILE->getInit(i)->getType(), CurPtr);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       StartAlign.alignmentAtOffset((i + 1) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
  }
  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    // If there was a Cleanup, deactivate it.
    if (CleanupDominator)
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");
  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If new expression did not specify value-initialization, then there
      // is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               CCE->requiresZeroInitialization());
    return;
  }
  // If this is value-initialization, we can usually use memset.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");
  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RType->getDecl()->fields())
          if (!Field->isUnnamedBitfield())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }
  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr =
    Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);
  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
    Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);

  CurPtr = Address(CurPtrPhi, ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
  // Enter a partial-destruction Cleanup if necessary.
  if (!CleanupDominator && needsEHCleanup(DtorKind)) {
    pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
                                   ElementType, ElementAlign,
                                   getDestroyer(DtorKind));
    Cleanup = EHStack.stable_begin();
    CleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr);

  // Leave the Cleanup if we entered one.
  if (CleanupDominator) {
    DeactivateCleanupBlock(Cleanup, CleanupDominator);
    CleanupDominator->eraseFromParent();
  }
  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr =
      Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
                                         "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType, llvm::Type *ElementTy,
                               Address NewPtr, llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  ApplyDebugLocation DL(CGF, E);
  if (E->isArray())
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
                                AllocSizeWithoutCookie);
  else if (const Expr *Init = E->getInitializer())
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}
/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *CalleeDecl,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::Instruction *CallOrInvoke;
  llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
  CGCallee Callee = CGCallee::forDirect(CalleePtr, CalleeDecl);
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
                       Args, CalleeType, /*chainCall=*/false),
                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
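  /// e.g. a `new`/`delete` pair whose result is never used may then be
  /// removed entirely by the optimizer.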
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    // FIXME: Add addAttribute to CallSite.
    if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
      CI->addAttribute(llvm::AttributeList::FunctionIndex,
                       llvm::Attribute::Builtin);
    else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
      II->addAttribute(llvm::AttributeList::FunctionIndex,
                       llvm::Attribute::Builtin);
    else
      llvm_unreachable("unexpected kind of call instruction");
  }

  return RV;
}
RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                                 const Expr *Arg,
                                                 bool IsDelete) {
  CallArgList Args;
  const Stmt *ArgS = Arg;
  EmitCallArgs(Args, *Type->param_type_begin(), llvm::makeArrayRef(ArgS));
  // Find the allocation or deallocation function that we're calling.
  ASTContext &Ctx = getContext();
  DeclarationName Name = Ctx.DeclarationNames
      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
        return EmitNewDeleteCall(*this, FD, Type, Args);
  llvm_unreachable("predeclared global operator new/delete is missing");
}
static std::pair<bool, bool>
shouldPassSizeAndAlignToUsualDelete(const FunctionProtoType *FPT) {
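  // Usual deallocation functions take the pointer plus optionally a size
  // and/or an alignment, e.g.:
  //   void operator delete(void *);
  //   void operator delete(void *, std::size_t);
  //   void operator delete(void *, std::align_val_t);
  //   void operator delete(void *, std::size_t, std::align_val_t);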
  auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();

  // The first argument is always a void*.
  ++AI;

  // Figure out what other parameters we should be implicitly passing.
  bool PassSize = false;
  bool PassAlignment = false;

  if (AI != AE && (*AI)->isIntegerType()) {
    PassSize = true;
    ++AI;
  }

  if (AI != AE && (*AI)->isAlignValT()) {
    PassAlignment = true;
    ++AI;
  }

  assert(AI == AE && "unexpected usual deallocation function parameter");
  return {PassSize, PassAlignment};
}
namespace {
  /// A cleanup to call the given 'operator delete' function upon abnormal
  /// exit from a new expression. Templated on a traits type that deals with
  /// ensuring that the arguments dominate the cleanup if necessary.
  template<typename Traits>
  class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
    /// Type used to hold llvm::Value*s.
    typedef typename Traits::ValueTy ValueTy;
    /// Type used to hold RValues.
    typedef typename Traits::RValueTy RValueTy;
    struct PlacementArg {
      RValueTy ArgValue;
      QualType ArgType;
    };

    unsigned NumPlacementArgs : 31;
    unsigned PassAlignmentToPlacementDelete : 1;
    const FunctionDecl *OperatorDelete;
    ValueTy Ptr;
    ValueTy AllocSize;
    CharUnits AllocAlign;

    PlacementArg *getPlacementArgs() {
      return reinterpret_cast<PlacementArg *>(this + 1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(PlacementArg);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete, ValueTy Ptr,
                        ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
                        CharUnits AllocAlign)
      : NumPlacementArgs(NumPlacementArgs),
        PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
        OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
        AllocAlign(AllocAlign) {}

    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = {Arg, Type};
    }

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const FunctionProtoType *FPT =
          OperatorDelete->getType()->getAs<FunctionProtoType>();
      CallArgList DeleteArgs;

      // The first argument is always a void*.
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));

      // Figure out what other parameters we should be implicitly passing.
      bool PassSize = false;
      bool PassAlignment = false;
      if (NumPlacementArgs) {
        // A placement deallocation function is implicitly passed an alignment
        // if the placement allocation function was, but is never passed a size.
        PassAlignment = PassAlignmentToPlacementDelete;
      } else {
        // For a non-placement new-expression, 'operator delete' can take a
        // size and/or an alignment if it has the right parameters.
        std::tie(PassSize, PassAlignment) =
            shouldPassSizeAndAlignToUsualDelete(FPT);
      }

      // The second argument can be a std::size_t (for non-placement delete).
      if (PassSize)
        DeleteArgs.add(Traits::get(CGF, AllocSize),
                       CGF.getContext().getSizeType());

      // The next (second or third) argument can be a std::align_val_t, which
      // is an enum whose underlying type is std::size_t.
      // FIXME: Use the right type as the parameter type. Note that in a call
      // to operator delete(size_t, ...), we may not have it available.
      if (PassAlignment)
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
                           CGF.SizeTy, AllocAlign.getQuantity())),
                       CGF.getContext().getSizeType());

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        auto Arg = getPlacementArgs()[I];
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
      }

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };
}
/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  Address NewPtr,
                                  llvm::Value *AllocSize,
                                  CharUnits AllocAlign,
                                  const CallArgList &NewArgs) {
  unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;

  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    struct DirectCleanupTraits {
      typedef llvm::Value *ValueTy;
      typedef RValue RValueTy;
      static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
      static RValue get(CodeGenFunction &, RValueTy V) { return V; }
    };

    typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;

    DirectCleanup *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
                                           E->getNumPlacementArgs(),
                                           E->getOperatorDelete(),
                                           NewPtr.getPointer(),
                                           AllocSize,
                                           E->passAlignment(),
                                           AllocAlign);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
      auto &Arg = NewArgs[I + NumNonPlacementArgs];
      Cleanup->setPlacementArg(I, Arg.RV, Arg.Ty);
    }

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  struct ConditionalCleanupTraits {
    typedef DominatingValue<RValue>::saved_type ValueTy;
    typedef DominatingValue<RValue>::saved_type RValueTy;
    static RValue get(CodeGenFunction &CGF, ValueTy V) {
      return V.restore(CGF);
    }
  };
  typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;

  ConditionalCleanup *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
                                              E->getNumPlacementArgs(),
                                              E->getOperatorDelete(),
                                              SavedNewPtr,
                                              SavedAllocSize,
                                              E->passAlignment(),
                                              AllocAlign);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
    auto &Arg = NewArgs[I + NumNonPlacementArgs];
    Cleanup->setPlacementArg(I, DominatingValue<RValue>::save(CGF, Arg.RV),
                             Arg.Ty);
  }

  CGF.initFullExprCleanup();
}
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
    if (ILE && ILE->isStringLiteralInit())
      minElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
    else if (ILE)
      minElements = ILE->getNumInits();
  }
  llvm::Value *numElements = nullptr;
  llvm::Value *allocSizeWithoutCookie = nullptr;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);
  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  Address allocation = Address::invalid();
  CallArgList allocatorArgs;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(E->getNumPlacementArgs() == 1);
    const Expr *arg = *E->placement_arguments().begin();

    AlignmentSource alignSource;
    allocation = EmitPointerWithAlignment(arg, &alignSource);

    // The pointer expression will, in many cases, be an opaque void*.
    // In these cases, discard the computed alignment and use the
    // formal alignment of the allocated type.
    if (alignSource != AlignmentSource::Decl)
      allocation = Address(allocation.getPointer(), allocAlign);

    // Set up allocatorArgs for the call to operator delete if it's not
    // the reserved global operator.
    if (E->getOperatorDelete() &&
        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
      allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
      allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
    }

  } else {
1552 const FunctionProtoType *allocatorType =
1553 allocator->getType()->castAs<FunctionProtoType>();
1554 unsigned ParamsToSkip = 0;
1556 // The allocation size is the first argument.
1557 QualType sizeType = getContext().getSizeType();
1558 allocatorArgs.add(RValue::get(allocSize), sizeType);
1561 if (allocSize != allocSizeWithoutCookie) {
1562 CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
1563 allocAlign = std::max(allocAlign, cookieAlign);
1566 // The allocation alignment may be passed as the second argument.
1567 if (E->passAlignment()) {
1568 QualType AlignValT = sizeType;
1569 if (allocatorType->getNumParams() > 1) {
1570 AlignValT = allocatorType->getParamType(1);
1571 assert(getContext().hasSameUnqualifiedType(
1572 AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
1574 "wrong type for alignment parameter");
1577 // Corner case, passing alignment to 'operator new(size_t, ...)'.
1578 assert(allocator->isVariadic() && "can't pass alignment to allocator");
1581 RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
1585 // FIXME: Why do we not pass a CalleeDecl here?
1586 EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
1587 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
1590 EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
    // If this was a call to a global replaceable allocation function that does
    // not take an alignment argument, the allocator is known to produce
    // storage that's suitably aligned for any object that fits, up to a known
    // threshold. Otherwise assume it's suitably aligned for the allocated type.
    CharUnits allocationAlign = allocAlign;
    if (!E->passAlignment() &&
        allocator->isReplaceableGlobalAllocationFunction()) {
      unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
          Target.getNewAlign(), getContext().getTypeSize(allocType)));
      allocationAlign = std::max(
          allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
    }

    allocation = Address(RV.getScalarVal(), allocationAlign);
  }

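  // Example of the alignment rule above (illustrative): on a target where
  // Target.getNewAlign() is 128 bits, a plain 'new double' from the
  // replaceable global allocator may be assumed aligned to
  // min(128, sizeof(double) * 8) bits, i.e. 8 bytes, regardless of the
  // formal alignment of double on that ABI.
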
  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec or is the reserved placement new) and we have an
  // interesting initializer.
  bool nullCheck = E->shouldNullCheckAllocation(getContext()) &&
      (!allocType.isPODType(getContext()) || E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = nullptr;
  llvm::BasicBlock *contBB = nullptr;

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull =
      Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }
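  // For example (illustrative): for
  //
  //   T *p = new (std::nothrow) T();
  //
  // the allocator may return null, so the branch above skips the constructor
  // call when allocation failed, and the PHI at the end of this function
  // yields the null pointer directly.
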
  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
                          allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements, E,
                                                       allocType);
  }
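  // Illustrative layout (ABI-dependent; the Itanium scheme is sketched here):
  // for 'new S[n]' where S has a non-trivial destructor, the element count is
  // stored in a cookie before the array so 'delete[]' can recover it:
  //
  //   +----------+----------------------+
  //   | size_t n | S[0] S[1] ... S[n-1] |
  //   +----------+----------------------+
  //   ^ allocation              ^ adjusted result points at S[0]
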
  llvm::Type *elementTy = ConvertTypeForMem(allocType);
  Address result = Builder.CreateElementBitCast(allocation, elementTy);

  // Pass the pointer through an invariant.group.barrier to avoid propagating
  // vptr information that may be associated with the pointer's previous type.
  if (CGM.getCodeGenOpts().StrictVTablePointers &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      allocator->isReservedGlobalPlacementOperator())
    result = Address(Builder.CreateInvariantGroupBarrier(result.getPointer()),
                     result.getAlignment());

  EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result.getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  llvm::Value *resultPtr = result.getPointer();
  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
    PHI->addIncoming(resultPtr, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
                     nullCheckBB);

    resultPtr = PHI;
  }

  return resultPtr;
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr, QualType DeleteTy,
                                     llvm::Value *NumElements,
                                     CharUnits CookieSize) {
  assert((!NumElements && CookieSize.isZero()) ||
         DeleteFD->getOverloadedOperator() == OO_Array_Delete);

  const FunctionProtoType *DeleteFTy =
      DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  std::pair<bool, bool> PassSizeAndAlign =
      shouldPassSizeAndAlignToUsualDelete(DeleteFTy);

  auto ParamTypeIt = DeleteFTy->param_type_begin();

  // Pass the pointer itself.
  QualType ArgTy = *ParamTypeIt++;
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  // Pass the size if the delete function has a size_t parameter.
  if (PassSizeAndAlign.first) {
    QualType SizeType = *ParamTypeIt++;
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
                                               DeleteTypeSize.getQuantity());

    // For array delete, multiply the size by the number of elements.
    if (NumElements)
      Size = Builder.CreateMul(Size, NumElements);

    // If there is a cookie, add the cookie size.
    if (!CookieSize.isZero())
      Size = Builder.CreateAdd(
          Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));

    DeleteArgs.add(RValue::get(Size), SizeType);
  }

  // Pass the alignment if the delete function has an align_val_t parameter.
  if (PassSizeAndAlign.second) {
    QualType AlignValType = *ParamTypeIt++;
    CharUnits DeleteTypeAlign = getContext().toCharUnitsFromBits(
        getContext().getTypeAlignIfKnown(DeleteTy));
    llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
                                                DeleteTypeAlign.getQuantity());
    DeleteArgs.add(RValue::get(Align), AlignValType);
  }

  assert(ParamTypeIt == DeleteFTy->param_type_end() &&
         "unknown parameter to usual delete function");

  // Emit the call to delete.
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
}
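// Illustrative note (not from the original source): for a class declaring the
// sized usual deallocation function
//
//   void operator delete(void *p, std::size_t sz);
//
// PassSizeAndAlign.first is true and the call above becomes roughly
// 'operator delete(ptr, sizeof(T))', scaled by the element count plus the
// cookie size for the array form; an align_val_t parameter is filled in
// analogously from the type's alignment.
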
namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete final : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

void
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
                                             llvm::Value *CompletePtr,
                                             QualType ElementType) {
  EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
                                        OperatorDelete, ElementType);
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const CXXDeleteExpr *DE,
                             Address Ptr,
                             QualType ElementType) {
  // C++11 [expr.delete]p3:
  //   If the static type of the object to be deleted is different from its
  //   dynamic type, the static type shall be a base class of the dynamic type
  //   of the object to be deleted and the static type shall have a virtual
  //   destructor or the behavior is undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
                    DE->getExprLoc(), Ptr.getPointer(),
                    ElementType);

  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = nullptr;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                    Dtor);
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr.getPointer(),
                                            OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr.getPointer());
  else if (auto Lifetime = ElementType.getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}
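// For example (illustrative): given 'Base *b = ...; delete b;' where Base has
// a virtual destructor, the early-return path above emits a single virtual
// call to the deleting destructor (D0 in the Itanium ABI), which both runs
// the most-derived destructor and invokes the correct operator delete.
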
namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete final : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
                         CookieSize);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            Address deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = nullptr;
  llvm::Value *allocatedPtr = nullptr;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
    CharUnits elementAlign =
        deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);

    llvm::Value *arrayBegin = deletedPtr.getPointer();
    llvm::Value *arrayEnd =
        CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}
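// Illustrative sequence (assumed typical output) for 'delete[] p' where the
// element type has a destructor: read the element count n from the cookie,
// destroy p[0..n) via emitArrayDestroy, then let the popped cleanup call
// operator delete[] on the original cookie-adjusted allocation pointer.
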
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  Address Ptr = EmitPointerWithAlignment(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
                  Ptr.getAlignment());
  }
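  // For example (illustrative): deleting through an 'A (*)[3][7]' peels the
  // two constant array layers, producing GEP indices {0, 0, 0} so that Ptr
  // ends up pointing at the first A element, which is what the delete
  // machinery below expects.
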
  assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E, Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}

static bool isGLValueFromPointerDeref(const Expr *E) {
  E = E->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(E)) {
    if (!CE->getSubExpr()->isGLValue())
      return false;
    return isGLValueFromPointerDeref(CE->getSubExpr());
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return isGLValueFromPointerDeref(OVE->getSourceExpr());

  if (const auto *BO = dyn_cast<BinaryOperator>(E))
    if (BO->getOpcode() == BO_Comma)
      return isGLValueFromPointerDeref(BO->getRHS());

  if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
    return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
           isGLValueFromPointerDeref(ACO->getFalseExpr());

  // C++11 [expr.sub]p1:
  //   The expression E1[E2] is identical (by definition) to *((E1)+(E2))
  if (isa<ArraySubscriptExpr>(E))
    return true;

  if (const auto *UO = dyn_cast<UnaryOperator>(E))
    if (UO->getOpcode() == UO_Deref)
      return true;

  return false;
}
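// Examples (illustrative): isGLValueFromPointerDeref returns true for
// 'typeid(*p)', 'typeid(p[0])', and 'typeid(cond ? *p : *q)', but false for
// 'typeid(obj)' where obj is a local variable, so only the former forms get
// a null check in EmitTypeidFromVTable below.
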
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  Address ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  //
  // However, this paragraph's intent is not clear.  We choose a very generous
  // interpretation which implores us to consider comma operators, conditional
  // operators, parentheses and other such constructs.
  QualType SrcRecordTy = E->getType();
  if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
          isGLValueFromPointerDeref(E), SrcRecordTy)) {
    llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
    llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
    CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

    CGF.EmitBlock(BadTypeidBlock);
    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
    CGF.EmitBlock(EndBlock);
  }

  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
                                        StdTypeInfoPtrTy);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
      ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
        CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}
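// For example (illustrative): 'typeid(int)' and 'typeid(e)' for a
// non-polymorphic expression e resolve statically to a bitcast of the RTTI
// descriptor, while 'typeid(*basePtr)' on a polymorphic class type is
// potentially evaluated and loads the type_info pointer out of the vtable
// via EmitTypeidFromVTable.
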
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
    return nullptr;

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}
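// Illustrative behavior (assumed): when the AST marks the cast as always
// failing (DCE->isAlwaysNull() below), a pointer dynamic_cast folds to a
// null pointer constant, whereas the reference form emits an unconditional
// bad-cast call (e.g. __cxa_bad_cast on the Itanium ABI) and continues in a
// fresh block with an undef value.
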
llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
                                              const CXXDynamicCastExpr *DCE) {
  CGM.EmitExplicitCastExprType(DCE, this);
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
      return T;

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p7:
  //   If T is "pointer to cv void," then the result is a pointer to the most
  //   derived object pointed to by v.
  const PointerType *DestPTy = DestTy->getAs<PointerType>();

  bool isDynamicCastToVoid;
  QualType SrcRecordTy;
  QualType DestRecordTy;
  if (DestPTy) {
    isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    isDynamicCastToVoid = false;
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue =
      CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
                                                         SrcRecordTy);

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  llvm::Value *Value;
  if (isDynamicCastToVoid) {
    Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
                                                  DestTy);
  } else {
    assert(DestRecordTy->isRecordType() &&
           "destination type must be a record type!");
    Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
                                                DestTy, DestRecordTy, CastEnd);
    CastNotNull = Builder.GetInsertBlock();
  }

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}
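// Illustrative CFG (assumed) for a null-checked pointer dynamic_cast: the
// entry block branches on 'v == null' to dynamic_cast.null / .notnull; the
// ABI cast call happens in .notnull, both paths fall through to
// dynamic_cast.end, and the PHI above merges the cast result with a null
// constant.
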
void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);
  LValue SlotLV = MakeAddrLValue(Slot.getAddress(), E->getType());

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
                                               e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      auto VAT = CurField->getCapturedVLAType();
      EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
    } else {
      EmitInitializerForField(*CurField, LV, *i);
    }
  }
}
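// For example (illustrative): for 'auto f = [x, &r]() { ... };' the closure
// class has one field per capture; the loop above emits a copy of x into the
// first field and stores the address of r into the second, while a captured
// VLA stores the saved size value from VLASizeMap instead.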