1 //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This contains code dealing with code generation of C++ expressions
12 //===----------------------------------------------------------------------===//
14 #include "clang/Frontend/CodeGenOptions.h"
15 #include "CodeGenFunction.h"
16 #include "CGCUDARuntime.h"
18 #include "CGObjCRuntime.h"
19 #include "CGDebugInfo.h"
20 #include "llvm/Intrinsics.h"
21 #include "llvm/Support/CallSite.h"
23 using namespace clang;
24 using namespace CodeGen;
// Emit a call to the instance method MD: packages the implicit 'this'
// pointer (and a VTT pointer, when one is supplied) ahead of the source-level
// arguments [ArgBeg, ArgEnd), then dispatches through EmitCall.
// NOTE(review): the extraction of this chunk is missing interior lines
// (e.g. the declarations of Args/This/VTT); code left byte-identical.
26 RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
28 ReturnValueSlot ReturnValue,
31 CallExpr::const_arg_iterator ArgBeg,
32 CallExpr::const_arg_iterator ArgEnd) {
33 assert(MD->isInstance() &&
34 "Trying to emit a member call expr on a static method!");
36 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
// Push 'this' as the first (implicit) argument, typed as MD's 'this' type.
41 Args.add(RValue::get(This), MD->getThisType(getContext()));
43 // If there is a VTT parameter, emit it.
45 QualType T = getContext().getPointerType(getContext().VoidPtrTy);
46 Args.add(RValue::get(VTT), T);
49 // And the rest of the call args
50 EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
// The call's result type comes from the method's prototype.
52 QualType ResultType = FPT->getResultType();
53 return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
55 Callee, ReturnValue, Args, MD);
// Strip parens and derived-to-base / no-op casts off Base to recover the
// most derived class the expression statically refers to. Used below when
// deciding whether a virtual call can be devirtualized.
// NOTE(review): the loop that walks through successive casts is only
// partially visible in this extraction; code left byte-identical.
58 static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
62 E = E->IgnoreParens();
63 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
64 if (CE->getCastKind() == CK_DerivedToBase ||
65 CE->getCastKind() == CK_UncheckedDerivedToBase ||
66 CE->getCastKind() == CK_NoOp) {
// If E is a pointer, the class is the pointee's record type.
75 QualType DerivedType = E->getType();
76 if (const PointerType *PTy = DerivedType->getAs<PointerType>())
77 DerivedType = PTy->getPointeeType();
79 return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
// Peel parentheses, no-op casts, and __extension__ unary operators off E,
// returning the underlying expression.
82 // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
83 // quite what we want.
84 static const Expr *skipNoOpCastsAndParens(const Expr *E) {
86 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
91 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
92 if (CE->getCastKind() == CK_NoOp) {
97 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
98 if (UO->getOpcode() == UO_Extension) {
107 /// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
108 /// expr can be devirtualized.
///
/// Returns true when the static type information of Base (or attributes on
/// MD / its class) is enough to know which override would be called, so the
/// vtable dispatch can be skipped.
109 static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
111 const CXXMethodDecl *MD) {
113 // When building with -fapple-kext, all calls must go through the vtable since
114 // the kernel linker can do runtime patching of vtables.
115 if (Context.getLangOptions().AppleKext)
118 // If the most derived class is marked final, we know that no subclass can
119 // override this member function and so we can devirtualize it. For example:
121 // struct A { virtual void f(); }
122 // struct B final : A { };
128 const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
129 if (MostDerivedClassDecl->hasAttr<FinalAttr>())
132 // If the member function is marked 'final', we know that it can't be
133 // overridden and can therefore devirtualize it.
134 if (MD->hasAttr<FinalAttr>())
137 // Similarly, if the class itself is marked 'final' it can't be overridden
138 // and we can therefore devirtualize the member function call.
139 if (MD->getParent()->hasAttr<FinalAttr>())
// A variable of concrete record type has a known dynamic type.
142 Base = skipNoOpCastsAndParens(Base);
143 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
144 if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
145 // This is a record decl. We know the type and can devirtualize it.
146 return VD->getType()->isRecordType();
152 // We can always devirtualize calls on temporary object expressions.
153 if (isa<CXXConstructExpr>(Base))
156 // And calls on bound temporaries.
157 if (isa<CXXBindTemporaryExpr>(Base))
160 // Check if this is a call expr that returns a record type.
161 if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
162 return CE->getCallReturnType()->isRecordType();
164 // We can't devirtualize the call.
168 // Note: This function also emit constructor calls to support a MSVC
169 // extensions allowing explicit constructor function call.
//
// Emits a C++ member call expression: dispatches member-pointer calls,
// static methods, trivial special members (handled inline, no call emitted),
// and finally regular/virtual member calls via EmitCXXMemberCall.
170 RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
171 ReturnValueSlot ReturnValue) {
172 const Expr *callee = CE->getCallee()->IgnoreParens();
// A BinaryOperator callee means a ->* / .* member-pointer call.
174 if (isa<BinaryOperator>(callee))
175 return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
177 const MemberExpr *ME = cast<MemberExpr>(callee);
178 const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
// With limited debug info, force emission of the record type of the base
// so the debugger can see it (skipped when the base is itself a call).
180 CGDebugInfo *DI = getDebugInfo();
181 if (DI && CGM.getCodeGenOpts().LimitDebugInfo
182 && !isa<CallExpr>(ME->getBase())) {
183 QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
184 if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
185 DI->getOrCreateRecordType(PTy->getPointeeType(),
186 MD->getParent()->getLocation());
190 if (MD->isStatic()) {
191 // The method is static, emit it as we would a regular call.
192 llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
193 return EmitCall(getContext().getPointerType(MD->getType()), Callee,
194 ReturnValue, CE->arg_begin(), CE->arg_end());
197 // Compute the object pointer.
200 This = EmitScalarExpr(ME->getBase());
202 This = EmitLValue(ME->getBase()).getAddress();
// Trivial special members need no real call: trivial dtors and default
// ctors are no-ops; trivial copy/move assignment and construction reduce
// to an aggregate copy.
204 if (MD->isTrivial()) {
205 if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
206 if (isa<CXXConstructorDecl>(MD) &&
207 cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
208 return RValue::get(0);
210 if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
211 // We don't like to generate the trivial copy/move assignment operator
212 // when it isn't necessary; just produce the proper effect here.
213 llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
214 EmitAggregateCopy(This, RHS, CE->getType());
215 return RValue::get(This);
218 if (isa<CXXConstructorDecl>(MD) &&
219 cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
220 // Trivial move and copy ctor are the same.
221 llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
222 EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
223 CE->arg_begin(), CE->arg_end());
224 return RValue::get(This);
226 llvm_unreachable("unknown trivial member function");
229 // Compute the function type we're calling.
230 const CGFunctionInfo *FInfo = 0;
231 if (isa<CXXDestructorDecl>(MD))
232 FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
234 else if (isa<CXXConstructorDecl>(MD))
235 FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
238 FInfo = &CGM.getTypes().getFunctionInfo(MD);
240 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
242 = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
244 // C++ [class.virtual]p12:
245 // Explicit qualification with the scope operator (5.1) suppresses the
246 // virtual call mechanism.
248 // We also don't emit a virtual call if the base expression has a record type
249 // because then we know what the type is.
251 UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
252 && !canDevirtualizeMemberFunctionCalls(getContext(),
// Pick the callee: virtual dispatch, Apple-kext vtable dispatch, or a
// direct function address, for dtors / ctors / ordinary methods.
255 if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
256 if (UseVirtualCall) {
257 Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
259 if (getContext().getLangOptions().AppleKext &&
262 Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
264 Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
266 } else if (const CXXConstructorDecl *Ctor =
267 dyn_cast<CXXConstructorDecl>(MD)) {
268 Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
269 } else if (UseVirtualCall) {
270 Callee = BuildVirtualCall(MD, This, Ty);
272 if (getContext().getLangOptions().AppleKext &&
275 Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
277 Callee = CGM.GetAddrOfFunction(MD, Ty);
280 return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
281 CE->arg_begin(), CE->arg_end());
// Emit a call through a pointer-to-member-function (the ->* / .* operators).
// The ABI adjusts 'This' and produces the actual callee from the member
// function pointer value.
285 CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
286 ReturnValueSlot ReturnValue) {
287 const BinaryOperator *BO =
288 cast<BinaryOperator>(E->getCallee()->IgnoreParens());
289 const Expr *BaseExpr = BO->getLHS();
290 const Expr *MemFnExpr = BO->getRHS();
292 const MemberPointerType *MPT =
293 MemFnExpr->getType()->castAs<MemberPointerType>();
295 const FunctionProtoType *FPT =
296 MPT->getPointeeType()->castAs<FunctionProtoType>();
297 const CXXRecordDecl *RD =
298 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
300 // Get the member function pointer.
301 llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
303 // Emit the 'this' pointer.
// BO_PtrMemI is '->*' (base is already a pointer); otherwise '.*'.
306 if (BO->getOpcode() == BO_PtrMemI)
307 This = EmitScalarExpr(BaseExpr);
309 This = EmitLValue(BaseExpr).getAddress();
311 // Ask the ABI to load the callee. Note that This is modified.
312 llvm::Value *Callee =
313 CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
318 getContext().getPointerType(getContext().getTagDeclType(RD));
320 // Push the this ptr.
321 Args.add(RValue::get(This), ThisType);
323 // And the rest of the call args
324 EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
325 return EmitCall(CGM.getTypes().getFunctionInfo(Args, FPT), Callee,
// Emit an overloaded-operator call whose callee is a member function.
// Arg 0 is the object; trivial copy/move assignment is lowered directly to
// an aggregate copy instead of a call.
330 CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
331 const CXXMethodDecl *MD,
332 ReturnValueSlot ReturnValue) {
333 assert(MD->isInstance() &&
334 "Trying to emit a member call expr on a static method!");
335 LValue LV = EmitLValue(E->getArg(0));
336 llvm::Value *This = LV.getAddress();
338 if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
340 llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
341 QualType Ty = E->getType();
342 EmitAggregateCopy(This, Src, Ty);
343 return RValue::get(This);
// Remaining args (past the object argument) are forwarded to the method.
346 llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
347 return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
348 E->arg_begin() + 1, E->arg_end());
// CUDA kernel launches are delegated entirely to the CUDA runtime helper.
351 RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
352 ReturnValueSlot ReturnValue) {
353 return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
// Zero-initialize the non-virtual portion of a base-class subobject at
// DestPtr. Uses memset when the type is zero-initializable; otherwise copies
// from a private global holding the type's null constant (needed for member
// pointers, whose null value is not all-zero bits).
356 static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
357 llvm::Value *DestPtr,
358 const CXXRecordDecl *Base) {
362 DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
364 const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
365 CharUnits Size = Layout.getNonVirtualSize();
366 CharUnits Align = Layout.getNonVirtualAlign();
368 llvm::Value *SizeVal = CGF.CGM.getSize(Size);
370 // If the type contains a pointer to data member we can't memset it to zero.
371 // Instead, create a null constant and copy it to the destination.
372 // TODO: there are other patterns besides zero that we can usefully memset,
373 // like -1, which happens to be the pattern used by member-pointers.
374 // TODO: isZeroInitializable can be over-conservative in the case where a
375 // virtual base contains a member pointer.
376 if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
377 llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);
379 llvm::GlobalVariable *NullVariable =
380 new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
382 llvm::GlobalVariable::PrivateLinkage,
383 NullConstant, Twine());
384 NullVariable->setAlignment(Align.getQuantity());
385 llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
387 // Get and call the appropriate llvm.memcpy overload.
388 CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
392 // Otherwise, just memset the whole thing to zero. This is legal
393 // because in LLVM, all default initializers (other than the ones we just
394 // handled above) are guaranteed to have a bit pattern of all zeros.
395 CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
396 Align.getQuantity());
// Emit a CXXConstructExpr into the aggregate slot Dest: performs any
// required zero-initialization first, elides the ctor for trivial default
// construction or temporaries (when ElideConstructors is on), handles
// constant-array construction, and otherwise emits the ctor call with the
// construction kind (complete / base / delegating) mapped appropriately.
400 CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
402 assert(!Dest.isIgnored() && "Must have a destination!");
403 const CXXConstructorDecl *CD = E->getConstructor();
405 // If we require zero initialization before (or instead of) calling the
406 // constructor, as can be the case with a non-user-provided default
407 // constructor, emit the zero initialization now, unless destination is
409 if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
410 switch (E->getConstructionKind()) {
411 case CXXConstructExpr::CK_Delegating:
412 assert(0 && "Delegating constructor should not need zeroing");
413 case CXXConstructExpr::CK_Complete:
414 EmitNullInitialization(Dest.getAddr(), E->getType());
416 case CXXConstructExpr::CK_VirtualBase:
417 case CXXConstructExpr::CK_NonVirtualBase:
418 EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
423 // If this is a call to a trivial default constructor, do nothing.
424 if (CD->isTrivial() && CD->isDefaultConstructor())
427 // Elide the constructor if we're constructing from a temporary.
428 // The temporary check is required because Sema sets this on NRVO
430 if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
431 assert(getContext().hasSameUnqualifiedType(E->getType(),
432 E->getArg(0)->getType()));
433 if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
434 EmitAggExpr(E->getArg(0), Dest);
// Arrays of objects get the dedicated array-construction loop.
439 if (const ConstantArrayType *arrayType
440 = getContext().getAsConstantArrayType(E->getType())) {
441 EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
442 E->arg_begin(), E->arg_end());
444 CXXCtorType Type = Ctor_Complete;
445 bool ForVirtualBase = false;
447 switch (E->getConstructionKind()) {
448 case CXXConstructExpr::CK_Delegating:
449 // We should be emitting a constructor; GlobalDecl will assert this
450 Type = CurGD.getCtorType();
453 case CXXConstructExpr::CK_Complete:
454 Type = Ctor_Complete;
457 case CXXConstructExpr::CK_VirtualBase:
458 ForVirtualBase = true;
461 case CXXConstructExpr::CK_NonVirtualBase:
465 // Call the constructor.
466 EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
467 E->arg_begin(), E->arg_end());
// Emit a compiler-synthesized copy-construction of Src into Dest. Exp must
// be (possibly wrapped in ExprWithCleanups) a CXXConstructExpr; arrays are
// not expected here (asserted below).
472 CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
475 if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
476 Exp = E->getSubExpr();
477 assert(isa<CXXConstructExpr>(Exp) &&
478 "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
479 const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
480 const CXXConstructorDecl *CD = E->getConstructor();
// Scope cleanups emitted while evaluating the construction.
481 RunCleanupsScope Scope(*this);
483 // If we require zero initialization before (or instead of) calling the
484 // constructor, as can be the case with a non-user-provided default
485 // constructor, emit the zero initialization now.
486 // FIXME. Do I still need this for a copy ctor synthesis?
487 if (E->requiresZeroInitialization())
488 EmitNullInitialization(Dest, E->getType());
490 assert(!getContext().getAsConstantArrayType(E->getType())
491 && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
492 EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
493 E->arg_begin(), E->arg_end());
// Compute the array-cookie padding required for this new-expression, or
// zero when no cookie is needed (e.g. reserved placement operator new[]).
// The actual size is ABI-specific.
496 static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
497 const CXXNewExpr *E) {
499 return CharUnits::Zero();
501 // No cookie is required if the operator new[] being used is the
502 // reserved placement operator new[].
503 if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
504 return CharUnits::Zero();
506 return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
// Compute the byte size to pass to operator new for expression 'e', with
// overflow checking for array allocations. Outputs:
//   numElements       - total element count (scaled by nested array dims)
//   sizeWithoutCookie - allocation size excluding the array cookie
// Returns the full size (including cookie). Constant element counts are
// folded entirely at compile time; dynamic counts use the
// umul/uadd-with-overflow intrinsics, and any overflow forces the size to
// all-ones so operator new fails.
// NOTE(review): this extraction is missing interior lines (locals such as
// 'size', 'overflow', some braces/else-arms); code left byte-identical.
509 static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
511 llvm::Value *&numElements,
512 llvm::Value *&sizeWithoutCookie) {
513 QualType type = e->getAllocatedType();
516 CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
518 = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
519 return sizeWithoutCookie;
522 // The width of size_t.
523 unsigned sizeWidth = CGF.SizeTy->getBitWidth();
525 // Figure out the cookie size.
526 llvm::APInt cookieSize(sizeWidth,
527 CalculateCookiePadding(CGF, e).getQuantity());
529 // Emit the array size expression.
530 // We multiply the size of all dimensions for NumElements.
531 // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
532 numElements = CGF.EmitScalarExpr(e->getArraySize());
533 assert(isa<llvm::IntegerType>(numElements->getType()));
535 // The number of elements can be have an arbitrary integer type;
536 // essentially, we need to multiply it by a constant factor, add a
537 // cookie size, and verify that the result is representable as a
538 // size_t. That's just a gloss, though, and it's wrong in one
539 // important way: if the count is negative, it's an error even if
540 // the cookie size would bring the total size >= 0.
542 = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
543 llvm::IntegerType *numElementsType
544 = cast<llvm::IntegerType>(numElements->getType());
545 unsigned numElementsWidth = numElementsType->getBitWidth();
547 // Compute the constant factor.
548 llvm::APInt arraySizeMultiplier(sizeWidth, 1);
549 while (const ConstantArrayType *CAT
550 = CGF.getContext().getAsConstantArrayType(type)) {
551 type = CAT->getElementType();
552 arraySizeMultiplier *= CAT->getSize();
555 CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
556 llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
557 typeSizeMultiplier *= arraySizeMultiplier;
559 // This will be a size_t.
562 // If someone is doing 'new int[42]' there is no need to do a dynamic check.
563 // Don't bloat the -O0 code.
564 if (llvm::ConstantInt *numElementsC =
565 dyn_cast<llvm::ConstantInt>(numElements)) {
566 const llvm::APInt &count = numElementsC->getValue();
568 bool hasAnyOverflow = false;
570 // If 'count' was a negative number, it's an overflow.
571 if (isSigned && count.isNegative())
572 hasAnyOverflow = true;
574 // We want to do all this arithmetic in size_t. If numElements is
575 // wider than that, check whether it's already too big, and if so,
577 else if (numElementsWidth > sizeWidth &&
578 numElementsWidth - sizeWidth > count.countLeadingZeros())
579 hasAnyOverflow = true;
581 // Okay, compute a count at the right width.
582 llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
584 // Scale numElements by that. This might overflow, but we don't
585 // care because it only overflows if allocationSize does, too, and
586 // if that overflows then we shouldn't use this.
587 numElements = llvm::ConstantInt::get(CGF.SizeTy,
588 adjustedCount * arraySizeMultiplier);
590 // Compute the size before cookie, and track whether it overflowed.
592 llvm::APInt allocationSize
593 = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
594 hasAnyOverflow |= overflow;
596 // Add in the cookie, and check whether it's overflowed.
597 if (cookieSize != 0) {
598 // Save the current size without a cookie. This shouldn't be
599 // used if there was overflow.
600 sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
602 allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
603 hasAnyOverflow |= overflow;
606 // On overflow, produce a -1 so operator new will fail.
607 if (hasAnyOverflow) {
608 size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
610 size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
613 // Otherwise, we might need to use the overflow intrinsics.
615 // There are up to four conditions we need to test for:
616 // 1) if isSigned, we need to check whether numElements is negative;
617 // 2) if numElementsWidth > sizeWidth, we need to check whether
618 // numElements is larger than something representable in size_t;
619 // 3) we need to compute
620 // sizeWithoutCookie := numElements * typeSizeMultiplier
621 // and check whether it overflows; and
622 // 4) if we need a cookie, we need to compute
623 // size := sizeWithoutCookie + cookieSize
624 // and check whether it overflows.
626 llvm::Value *hasOverflow = 0;
628 // If numElementsWidth > sizeWidth, then one way or another, we're
629 // going to have to do a comparison for (2), and this happens to
630 // take care of (1), too.
631 if (numElementsWidth > sizeWidth) {
632 llvm::APInt threshold(numElementsWidth, 1);
633 threshold <<= sizeWidth;
635 llvm::Value *thresholdV
636 = llvm::ConstantInt::get(numElementsType, threshold);
638 hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
639 numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
641 // Otherwise, if we're signed, we want to sext up to size_t.
642 } else if (isSigned) {
643 if (numElementsWidth < sizeWidth)
644 numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
646 // If there's a non-1 type size multiplier, then we can do the
647 // signedness check at the same time as we do the multiply
648 // because a negative number times anything will cause an
649 // unsigned overflow. Otherwise, we have to do it here.
650 if (typeSizeMultiplier == 1)
651 hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
652 llvm::ConstantInt::get(CGF.SizeTy, 0));
654 // Otherwise, zext up to size_t if necessary.
655 } else if (numElementsWidth < sizeWidth) {
656 numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
659 assert(numElements->getType() == CGF.SizeTy);
663 // Multiply by the type size if necessary. This multiplier
664 // includes all the factors for nested arrays.
666 // This step also causes numElements to be scaled up by the
667 // nested-array factor if necessary. Overflow on this computation
668 // can be ignored because the result shouldn't be used if
670 if (typeSizeMultiplier != 1) {
671 llvm::Value *umul_with_overflow
672 = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
675 llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
676 llvm::Value *result =
677 CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);
679 llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
681 hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
683 hasOverflow = overflowed;
685 size = CGF.Builder.CreateExtractValue(result, 0);
687 // Also scale up numElements by the array size multiplier.
688 if (arraySizeMultiplier != 1) {
689 // If the base element type size is 1, then we can re-use the
690 // multiply we just did.
691 if (typeSize.isOne()) {
692 assert(arraySizeMultiplier == typeSizeMultiplier);
695 // Otherwise we need a separate multiply.
698 llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
699 numElements = CGF.Builder.CreateMul(numElements, asmV);
703 // numElements doesn't need to be scaled.
704 assert(arraySizeMultiplier == 1);
707 // Add in the cookie size if necessary.
708 if (cookieSize != 0) {
709 sizeWithoutCookie = size;
711 llvm::Value *uadd_with_overflow
712 = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
714 llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
715 llvm::Value *result =
716 CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);
718 llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
720 hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
722 hasOverflow = overflowed;
724 size = CGF.Builder.CreateExtractValue(result, 0);
727 // If we had any possibility of dynamic overflow, make a select to
728 // overwrite 'size' with an all-ones value, which should cause
729 // operator new to throw.
731 size = CGF.Builder.CreateSelect(hasOverflow,
732 llvm::Constant::getAllOnesValue(CGF.SizeTy),
737 sizeWithoutCookie = size;
739 assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
// Initialize a single newly-allocated object at NewPtr from the (single)
// constructor argument of the new-expression: scalar init, complex init, or
// aggregate init depending on the allocated type's evaluation kind.
744 static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
745 llvm::Value *NewPtr) {
747 assert(E->getNumConstructorArgs() == 1 &&
748 "Can only have one argument to initializer of POD type.");
750 const Expr *Init = E->getConstructorArg(0);
751 QualType AllocType = E->getAllocatedType();
754 CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
755 if (!CGF.hasAggregateLLVMType(AllocType))
756 CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType, Alignment),
758 else if (AllocType->isAnyComplexType())
759 CGF.EmitComplexExprIntoAddr(Init, NewPtr,
760 AllocType.isVolatileQualified());
// Aggregate case: build a value slot over the new storage and emit into it.
763 = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
764 AggValueSlot::IsDestructed,
765 AggValueSlot::DoesNotNeedGCBarriers,
766 AggValueSlot::IsNotAliased);
767 CGF.EmitAggExpr(Init, Slot);
// Emit the element-by-element initializer loop for an array new-expression:
// builds a [begin, end) loop over numElements elements, initializing each
// with StoreAnyExprIntoOneUnit, with an EH cleanup that destroys
// already-constructed elements if an initializer throws. Skips the zero
// check when the count is a non-zero constant, and skips everything when
// there is no initializer or the constant count is zero.
772 CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
773 QualType elementType,
774 llvm::Value *beginPtr,
775 llvm::Value *numElements) {
776 // We have a POD type.
777 if (E->getNumConstructorArgs() == 0)
780 // Check if the number of elements is constant.
781 bool checkZero = true;
782 if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
783 // If it's constant zero, skip the whole loop.
784 if (constNum->isZero()) return;
789 // Find the end of the array, hoisted out of the loop.
790 llvm::Value *endPtr =
791 Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");
793 // Create the continuation block.
794 llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");
796 // If we need to check for zero, do so now.
798 llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
799 llvm::Value *isEmpty = Builder.CreateICmpEQ(beginPtr, endPtr,
801 Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
802 EmitBlock(nonEmptyBB);
806 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
807 llvm::BasicBlock *loopBB = createBasicBlock("new.loop");
811 // Set up the current-element phi.
812 llvm::PHINode *curPtr =
813 Builder.CreatePHI(beginPtr->getType(), 2, "array.cur");
814 curPtr->addIncoming(beginPtr, entryBB);
816 // Enter a partial-destruction cleanup if necessary.
817 QualType::DestructionKind dtorKind = elementType.isDestructedType();
818 EHScopeStack::stable_iterator cleanup;
819 if (needsEHCleanup(dtorKind)) {
820 pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
821 getDestroyer(dtorKind));
822 cleanup = EHStack.stable_begin();
825 // Emit the initializer into this element.
826 StoreAnyExprIntoOneUnit(*this, E, curPtr);
828 // Leave the cleanup if we entered one.
829 if (cleanup != EHStack.stable_end())
830 DeactivateCleanupBlock(cleanup);
832 // Advance to the next element.
833 llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");
835 // Check whether we've gotten to the end of the array and, if so,
837 llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
838 Builder.CreateCondBr(isEnd, contBB, loopBB);
// The phi's back-edge value comes from whatever block the branch ends in.
839 curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());
// memset Size bytes at NewPtr to zero, using T's natural alignment.
844 static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
845 llvm::Value *NewPtr, llvm::Value *Size) {
846 CGF.EmitCastToVoidPtr(NewPtr);
847 CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
848 CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
849 Alignment.getQuantity(), false);
// Emit the initializer for a new-expression's storage. Array news with a
// trivial-default-ctor class may collapse value-initialization to a single
// memset; otherwise the array constructor loop or the element-wise array
// initializer runs. Scalar news either call the constructor (after any
// required zero-init) or store the single POD initializer.
// NOTE(review): the array/non-array split of this function is not fully
// visible in this extraction; code left byte-identical.
852 static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
853 QualType ElementType,
855 llvm::Value *NumElements,
856 llvm::Value *AllocSizeWithoutCookie) {
858 if (CXXConstructorDecl *Ctor = E->getConstructor()) {
859 bool RequiresZeroInitialization = false;
860 if (Ctor->getParent()->hasTrivialDefaultConstructor()) {
861 // If new expression did not specify value-initialization, then there
862 // is no initialization.
863 if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
866 if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
867 // Optimization: since zero initialization will just set the memory
868 // to all zeroes, generate a single memset to do it in one shot.
869 EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
873 RequiresZeroInitialization = true;
876 CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
877 E->constructor_arg_begin(),
878 E->constructor_arg_end(),
879 RequiresZeroInitialization);
881 } else if (E->getNumConstructorArgs() == 1 &&
882 isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
883 // Optimization: since zero initialization will just set the memory
884 // to all zeroes, generate a single memset to do it in one shot.
885 EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
888 CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
// Non-array case: ctor call with possible pre-zeroing per [dcl.init]p5.
893 if (CXXConstructorDecl *Ctor = E->getConstructor()) {
894 // Per C++ [expr.new]p15, if we have an initializer, then we're performing
895 // direct initialization. C++ [dcl.init]p5 requires that we
896 // zero-initialize storage if there are no user-declared constructors.
897 if (E->hasInitializer() &&
898 !Ctor->getParent()->hasUserDeclaredConstructor() &&
899 !Ctor->getParent()->isEmpty())
900 CGF.EmitNullInitialization(NewPtr, ElementType);
902 CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
903 NewPtr, E->constructor_arg_begin(),
904 E->constructor_arg_end());
908 // We have a POD type.
909 if (E->getNumConstructorArgs() == 0)
912 StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
916 /// A cleanup to call the given 'operator delete' function upon
917 /// abnormal exit from a new expression.
///
/// The placement arguments are stored in trailing storage directly after
/// the object (see getPlacementArgs / getExtraSize), so they can be
/// replayed to operator delete exactly as they were passed to operator new.
918 class CallDeleteDuringNew : public EHScopeStack::Cleanup {
919 size_t NumPlacementArgs;
920 const FunctionDecl *OperatorDelete;
922 llvm::Value *AllocSize;
// Trailing-storage accessor: the RValues live immediately after 'this'.
924 RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
927 static size_t getExtraSize(size_t NumPlacementArgs) {
928 return NumPlacementArgs * sizeof(RValue);
931 CallDeleteDuringNew(size_t NumPlacementArgs,
932 const FunctionDecl *OperatorDelete,
934 llvm::Value *AllocSize)
935 : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
936 Ptr(Ptr), AllocSize(AllocSize) {}
938 void setPlacementArg(unsigned I, RValue Arg) {
939 assert(I < NumPlacementArgs && "index out of range");
940 getPlacementArgs()[I] = Arg;
// Emit the actual call to 'operator delete' when the cleanup fires.
943 void Emit(CodeGenFunction &CGF, Flags flags) {
944 const FunctionProtoType *FPT
945 = OperatorDelete->getType()->getAs<FunctionProtoType>();
946 assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
947 (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
949 CallArgList DeleteArgs;
951 // The first argument is always a void*.
952 FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
953 DeleteArgs.add(RValue::get(Ptr), *AI++);
955 // A member 'operator delete' can take an extra 'size_t' argument.
956 if (FPT->getNumArgs() == NumPlacementArgs + 2)
957 DeleteArgs.add(RValue::get(AllocSize), *AI++);
959 // Pass the rest of the arguments, which must match exactly.
960 for (unsigned I = 0; I != NumPlacementArgs; ++I)
961 DeleteArgs.add(getPlacementArgs()[I], *AI++);
963 // Call 'operator delete'.
964 CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
965 CGF.CGM.GetAddrOfFunction(OperatorDelete),
966 ReturnValueSlot(), DeleteArgs, OperatorDelete);
970 /// A cleanup to call the given 'operator delete' function upon
971 /// abnormal exit from a new expression when the new expression is
///
/// Conditional variant of CallDeleteDuringNew: because the cleanup may fire
/// in a block that does not dominate the values, the pointer, size, and
/// placement arguments are stored as DominatingValue saved_types and
/// restored at emission time.
973 class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
974 size_t NumPlacementArgs;
975 const FunctionDecl *OperatorDelete;
976 DominatingValue<RValue>::saved_type Ptr;
977 DominatingValue<RValue>::saved_type AllocSize;
// Trailing-storage accessor, parallel to CallDeleteDuringNew.
979 DominatingValue<RValue>::saved_type *getPlacementArgs() {
980 return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
984 static size_t getExtraSize(size_t NumPlacementArgs) {
985 return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
988 CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
989 const FunctionDecl *OperatorDelete,
990 DominatingValue<RValue>::saved_type Ptr,
991 DominatingValue<RValue>::saved_type AllocSize)
992 : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
993 Ptr(Ptr), AllocSize(AllocSize) {}
995 void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
996 assert(I < NumPlacementArgs && "index out of range");
997 getPlacementArgs()[I] = Arg;
// Restore every saved value, then call 'operator delete' as above.
1000 void Emit(CodeGenFunction &CGF, Flags flags) {
1001 const FunctionProtoType *FPT
1002 = OperatorDelete->getType()->getAs<FunctionProtoType>();
1003 assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
1004 (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
1006 CallArgList DeleteArgs;
1008 // The first argument is always a void*.
1009 FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
1010 DeleteArgs.add(Ptr.restore(CGF), *AI++);
1012 // A member 'operator delete' can take an extra 'size_t' argument.
1013 if (FPT->getNumArgs() == NumPlacementArgs + 2) {
1014 RValue RV = AllocSize.restore(CGF);
1015 DeleteArgs.add(RV, *AI++);
1018 // Pass the rest of the arguments, which must match exactly.
1019 for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1020 RValue RV = getPlacementArgs()[I].restore(CGF);
1021 DeleteArgs.add(RV, *AI++);
1024 // Call 'operator delete'.
1025 CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
1026 CGF.CGM.GetAddrOfFunction(OperatorDelete),
1027 ReturnValueSlot(), DeleteArgs, OperatorDelete);
/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
///
/// \param NewPtr    the pointer returned by the allocation function
/// \param AllocSize the emitted allocation size
/// \param NewArgs   the full argument list passed to 'operator new';
///                  index 0 is the size, placement args follow.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
    // Placement args start at index 1 of NewArgs (index 0 is the size).
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  // Push the cleanup inactive, then explicitly activate it below.
  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
                                                  E->getNumPlacementArgs(),
                                                  E->getOperatorDelete(),
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                             DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
/// Emit a new-expression: compute the allocation size, call the
/// allocation function (or "inline" reserved global placement new),
/// optionally null-check the result, write the array cookie, run the
/// initializer with an 'operator delete' cleanup active, and return
/// the result pointer cast to the expression's type.
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, numElements, allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly: the result is simply the
  // pointer argument, with no call emitted.
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size

  RV = EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
                CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
                allocatorArgs, allocator);

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    !(allocType.isPODType(getContext()) && !E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  cast<llvm::PointerType>(allocation->getType())->getAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated, so track it as a conditional context.
  ConditionalEvaluation conditional(*this);

  conditional.begin(*this);

  // Branch around the initialization when the allocation came back null.
  nullCheckBB = Builder.GetInsertBlock();
  llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
  contBB = createBasicBlock("new.cont");

  llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
  Builder.CreateCondBr(isNull, contBB, notNullBB);
  EmitBlock(notNullBB);

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();

  // Array cookie: let the ABI record the element count before the
  // elements; it returns the adjusted (post-cookie) pointer.
  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,

  llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  EmitNewInitializer(*this, E, allocType, result, numElements,
                     allocSizeWithoutCookie);

  // NewPtr is a pointer to the base element type.  If we're
  // allocating an array of arrays, we'll need to cast back to the
  // array pointer type.
  llvm::Type *resultType = ConvertTypeForMem(E->getType());
  if (result->getType() != resultType)
    result = Builder.CreateBitCast(result, resultType);

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization without an exception.
  if (operatorDeleteCleanup.isValid())
    DeactivateCleanupBlock(operatorDeleteCleanup);

  conditional.end(*this);

  // Merge the null and not-null paths with a phi.
  llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
  llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
  PHI->addIncoming(result, notNullBB);
  PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
/// Emit a call to the given 'operator delete' function, passing the
/// pointer (cast to the parameter type) and, when the delete function
/// takes two arguments, the static size of DeleteTy.
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
  llvm::Value *Size = 0;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());

  // Pass the pointer (cast to the declared parameter type) first.
  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  // Then the size, for the two-argument form.
  DeleteArgs.add(RValue::get(Size), SizeTy);

  // Emit the call to delete.
  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
           DeleteArgs, DeleteFD);
/// Calls the given 'operator delete' on a single object.
/// Pushed as an EH-stack cleanup so the delete runs even if the
/// destructor throws.
struct CallObjectDelete : EHScopeStack::Cleanup {
  const FunctionDecl *OperatorDelete;  // delete function to call
  QualType ElementType;                // static type of the deleted object

  CallObjectDelete(llvm::Value *Ptr,
                   const FunctionDecl *OperatorDelete,
                   QualType ElementType)
    : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

  // Cleanup entry point: forward to EmitDeleteCall.
  void Emit(CodeGenFunction &CGF, Flags flags) {
    CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
/// Emit the code for deleting a single object: run the destructor
/// (via a virtual call to the deleting destructor when it is virtual),
/// or the ARC release/destroy for Objective-C lifetime types, then call
/// 'operator delete' — even if the destructor throws.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             QualType ElementType,
                             bool UseGlobalDelete) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        if (UseGlobalDelete) {
          // If we're supposed to call the global delete, make sure we do so
          // even if the destructor throws.
          CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                                    Ptr, OperatorDelete,

        CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                 /*isVariadic=*/false);

        // Virtual dispatch: the Dtor_Deleting variant also runs
        // 'operator delete' itself, so it is used only when we are not
        // calling the global delete explicitly.
          = CGF.BuildVirtualCall(Dtor,
                                 UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,

        if (UseGlobalDelete) {
          CGF.PopCleanupBlock();

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                            /*ForVirtualBase=*/false, Ptr);
  else if (CGF.getLangOptions().ObjCAutoRefCount &&
           ElementType->isObjCLifetimeType()) {
    // Under ARC, destroy the pointee according to its lifetime
    // qualifier instead of running a C++ destructor.
    switch (ElementType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:

    case Qualifiers::OCL_Strong: {
      // Load the pointer value.
      llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
                                             ElementType.isVolatileQualified());

      CGF.EmitARCRelease(PtrValue, /*precise*/ true);

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);

  // Run the 'operator delete' cleanup pushed above.
  CGF.PopCleanupBlock();
/// Calls the given 'operator delete' on an array of objects.
/// Pushed as an EH-stack cleanup so the delete runs even if an
/// element destructor throws.
struct CallArrayDelete : EHScopeStack::Cleanup {
  const FunctionDecl *OperatorDelete;  // array delete function
  llvm::Value *NumElements;            // runtime element count
  QualType ElementType;                // element type of the array
  CharUnits CookieSize;                // array-cookie size, if any

  CallArrayDelete(llvm::Value *Ptr,
                  const FunctionDecl *OperatorDelete,
                  llvm::Value *NumElements,
                  QualType ElementType,
                  CharUnits CookieSize)
    : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
      ElementType(ElementType), CookieSize(CookieSize) {}

  // Cleanup entry point: build the argument list (pointer, plus the
  // reconstructed allocation size for two-argument deletes) and call
  // the 'operator delete'.
  void Emit(CodeGenFunction &CGF, Flags flags) {
    const FunctionProtoType *DeleteFTy =
      OperatorDelete->getType()->getAs<FunctionProtoType>();
    assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

    // Pass the pointer as the first argument.
    QualType VoidPtrTy = DeleteFTy->getArgType(0);
    llvm::Value *DeletePtr
      = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
    Args.add(RValue::get(DeletePtr), VoidPtrTy);

    // Pass the original requested size as the second argument.
    if (DeleteFTy->getNumArgs() == 2) {
      QualType size_t = DeleteFTy->getArgType(1);
      llvm::IntegerType *SizeTy
        = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

      CharUnits ElementTypeSize =
        CGF.CGM.getContext().getTypeSizeInChars(ElementType);

      // The size of an element, multiplied by the number of elements.
        = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
      Size = CGF.Builder.CreateMul(Size, NumElements);

      // Plus the size of the cookie if applicable.
      if (!CookieSize.isZero()) {
        llvm::Value *CookieSizeV
          = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
        Size = CGF.Builder.CreateAdd(Size, CookieSizeV);

      Args.add(RValue::get(Size), size_t);

    // Emit the call to delete.
    CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
                 CGF.CGM.GetAddrOfFunction(OperatorDelete),
                 ReturnValueSlot(), Args, OperatorDelete);
/// Emit the code for deleting an array of objects: decode the array
/// cookie via the ABI, push a cleanup to call 'operator delete[]' on
/// the original allocation, destroy the elements, then run the cleanup.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *deletedPtr,
                            QualType elementType) {
  // Ask the ABI for the element count, the pointer actually returned by
  // the allocation function, and the cookie size.
  llvm::Value *numElements = 0;
  llvm::Value *allocatedPtr = 0;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
/// Emit a delete-expression: evaluate and null-check the operand,
/// adjust array-of-array pointers to the base element, then dispatch
/// to the array or single-object delete path.
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer: deleting a null pointer is a no-op.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  // Dispatch on the (rewritten) form of the delete expression.
  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
                     E->isGlobalDelete());

  EmitBlock(DeleteEnd);
1515 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1516 // void __cxa_bad_typeid();
1518 llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
1519 llvm::FunctionType *FTy =
1520 llvm::FunctionType::get(VoidTy, false);
1522 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1525 static void EmitBadTypeidCall(CodeGenFunction &CGF) {
1526 llvm::Value *Fn = getBadTypeidFn(CGF);
1527 CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
1528 CGF.Builder.CreateUnreachable();
/// Emit the std::type_info pointer for a polymorphic glvalue by loading
/// it through the object's vtable (the slot just before the first
/// virtual function pointer in the Itanium layout), with the required
/// null-dereference check.
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  // If the glvalue expression is obtained by applying the unary * operator to
  // a pointer and the pointer is a null pointer value, the typeid expression
  // throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      // Null operand: hand off to the runtime, which throws.
      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
/// Emit a typeid expression, yielding a pointer to the std::type_info
/// object (bitcast to the expression's converted result type).
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  // typeid(T) with a type operand: emit the RTTI descriptor directly.
  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);

  // C++ [expr.typeid]p2:
  // When typeid is applied to a glvalue expression whose type is a
  // polymorphic class type, the result refers to a std::type_info object
  // representing the type of the most derived object (that is, the dynamic
  // type) to which the glvalue refers.
  if (E->getExprOperand()->isGLValue()) {
    if (const RecordType *RT =
          E->getExprOperand()->getType()->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->isPolymorphic())
        return EmitTypeidFromVTable(*this, E->getExprOperand(),

  // Otherwise the result is the static type of the operand.
  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
1595 static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
1596 // void *__dynamic_cast(const void *sub,
1597 // const abi::__class_type_info *src,
1598 // const abi::__class_type_info *dst,
1599 // std::ptrdiff_t src2dst_offset);
1601 llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
1602 llvm::Type *PtrDiffTy =
1603 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1605 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1607 llvm::FunctionType *FTy =
1608 llvm::FunctionType::get(Int8PtrTy, Args, false);
1610 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
1613 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1614 // void __cxa_bad_cast();
1616 llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
1617 llvm::FunctionType *FTy =
1618 llvm::FunctionType::get(VoidTy, false);
1620 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1623 static void EmitBadCastCall(CodeGenFunction &CGF) {
1624 llvm::Value *Fn = getBadCastFn(CGF);
1625 CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
1626 CGF.Builder.CreateUnreachable();
/// Emit the runtime portion of a dynamic_cast on a non-null operand:
/// either the offset-to-top adjustment for a cast to (cv) void*, or a
/// call to __dynamic_cast, with a bad_cast check when the destination
/// is a reference type.
static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable (slot -2 relative to the
      // address point in the Itanium vtable layout).
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);

  // Compute the record types for the runtime call: pointee types for a
  // pointer cast, the operand/pointee types for a reference cast.
  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    // __dynamic_cast returns null on failure; for references that must
    // become a throw instead of a null result.
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
/// Emit a dynamic_cast that is statically known to fail: a null result
/// for pointer destinations, a std::bad_cast throw for references.
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  EmitBadCastCall(CGF);

  // The call above never returns; resume emission in a fresh block and
  // hand back an undef of the right type to satisfy the caller.
  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
/// Emit a dynamic_cast of Value to the destination type as written,
/// including the null short-circuit required for pointer operands.
llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  // Casts known at compile time to fail.
  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);

  // Emit the actual cast on the non-null path.
  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);
    EmitBlock(CastNull);
    EmitBranch(CastEnd);

  // Merge the null and non-null paths with a phi.
  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);