//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
using namespace clang;
using namespace CodeGen;

typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
                                   QualType ExpT,
                                   RValue Result);

/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(Address addr) {
  llvm::Type *type = addr.getElementType();
  return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}

/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
  llvm::Constant *C =
      CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
  // FIXME: This bitcast should just be made an invariant on the Runtime.
  return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}
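
// For example, the literal @"hello" compiles to a direct reference to a
// constant string object materialized at compile time; no message send is
// emitted:
//
//   NSString *s = @"hello";   // reference to a global NSConstantString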

/// EmitObjCBoxedExpr - This routine generates code to call
/// the appropriate expression boxing method. This will either be
/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
/// or [NSValue valueWithBytes:objCType:].
///
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  // Generate the correct selector for this literal's concrete type.
  // Get the method.
  const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
  const Expr *SubExpr = E->getSubExpr();

  if (E->isExpressibleAsConstantInitializer()) {
    ConstantEmitter ConstEmitter(CGM);
    return ConstEmitter.tryEmitAbstract(E, E->getType());
  }

  assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
  Selector Sel = BoxingMethod->getSelector();

  // Generate a reference to the class pointer, which will be the receiver.
  // Assumes that the method was introduced in the class that should be
  // messaged (avoids pulling it out of the result type).
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
  llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);

  CallArgList Args;
  const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
  QualType ArgQT = ArgDecl->getType().getUnqualifiedType();

  // ObjCBoxedExpr supports boxing of structs and unions
  // via [NSValue valueWithBytes:objCType:].
  const QualType ValueType(SubExpr->getType().getCanonicalType());
  if (ValueType->isObjCBoxableRecordType()) {
    // Emit CodeGen for the first parameter
    // and cast the value to the correct type.
    Address Temporary = CreateMemTemp(SubExpr->getType());
    EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
    Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
    Args.add(RValue::get(BitCast.getPointer()), ArgQT);

    // Create a char array to store the type encoding.
    std::string Str;
    getContext().getObjCEncodingForType(ValueType, Str);
    llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();

    // Cast the type encoding to the correct type.
    const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
    QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
    llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));

    Args.add(RValue::get(Cast), EncodingQT);
  } else {
    Args.add(EmitAnyExpr(SubExpr), ArgQT);
  }

  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
      Args, ClassDecl, BoxingMethod);
  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}
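
// For example:
//
//   @(intVal)     lowers to  [NSNumber numberWithInt:intVal]
//   @("text")     lowers to  [NSString stringWithUTF8String:"text"]
//   @(someStruct) lowers to  [NSValue valueWithBytes:&tmp objCType:enc]
//
// where the record case passes the address of a temporary plus the
// Objective-C type encoding of the boxed type, and literals expressible as
// constant initializers are emitted as compile-time constants instead.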

llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
                                    const ObjCMethodDecl *MethodWithObjects) {
  ASTContext &Context = CGM.getContext();
  const ObjCDictionaryLiteral *DLE = nullptr;
  const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
  if (!ALE)
    DLE = cast<ObjCDictionaryLiteral>(E);

  // Optimize empty collections by referencing constants, when available.
  uint64_t NumElements =
    ALE ? ALE->getNumElements() : DLE->getNumElements();
  if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
    StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Constant *Constant =
        CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
    LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
    llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc());
    cast<llvm::LoadInst>(Ptr)->setMetadata(
        CGM.getModule().getMDKindID("invariant.load"),
        llvm::MDNode::get(getLLVMContext(), None));
    return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
  }

  // Compute the type of the array we're initializing.
  llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
                            NumElements);
  QualType ElementType = Context.getObjCIdType().withConst();
  QualType ElementArrayType
    = Context.getConstantArrayType(ElementType, APNumElements,
                                   ArrayType::Normal, /*IndexTypeQuals=*/0);

  // Allocate the temporary array(s).
  Address Objects = CreateMemTemp(ElementArrayType, "objects");
  Address Keys = Address::invalid();
  if (DLE)
    Keys = CreateMemTemp(ElementArrayType, "keys");

  // In ARC, we may need to do extra work to keep all the keys and
  // values alive until after the call.
  SmallVector<llvm::Value *, 16> NeededObjects;
  bool TrackNeededObjects =
    (getLangOpts().ObjCAutoRefCount &&
     CGM.getCodeGenOpts().OptimizationLevel != 0);

  // Perform the actual initialization of the array(s).
  for (uint64_t i = 0; i < NumElements; i++) {
    if (ALE) {
      // Emit the element and store it to the appropriate array slot.
      const Expr *Rhs = ALE->getElement(i);
      LValue LV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
                                 ElementType, AlignmentSource::Decl);

      llvm::Value *value = EmitScalarExpr(Rhs);
      EmitStoreThroughLValue(RValue::get(value), LV, true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(value);
      }
    } else {
      // Emit the key and store it to the appropriate array slot.
      const Expr *Key = DLE->getKeyValueElement(i).Key;
      LValue KeyLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Keys, i),
                                    ElementType, AlignmentSource::Decl);
      llvm::Value *keyValue = EmitScalarExpr(Key);
      EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);

      // Emit the value and store it to the appropriate array slot.
      const Expr *Value = DLE->getKeyValueElement(i).Value;
      LValue ValueLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
                                      ElementType, AlignmentSource::Decl);
      llvm::Value *valueValue = EmitScalarExpr(Value);
      EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(keyValue);
        NeededObjects.push_back(valueValue);
      }
    }
  }

  // Generate the argument list.
  CallArgList Args;
  ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
  const ParmVarDecl *argDecl = *PI++;
  QualType ArgQT = argDecl->getType().getUnqualifiedType();
  Args.add(RValue::get(Objects.getPointer()), ArgQT);
  if (DLE) {
    argDecl = *PI++;
    ArgQT = argDecl->getType().getUnqualifiedType();
    Args.add(RValue::get(Keys.getPointer()), ArgQT);
  }
  argDecl = *PI;
  ArgQT = argDecl->getType().getUnqualifiedType();
  llvm::Value *Count =
    llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
  Args.add(RValue::get(Count), ArgQT);

  // Generate a reference to the class pointer, which will be the receiver.
  Selector Sel = MethodWithObjects->getSelector();
  QualType ResultType = E->getType();
  const ObjCObjectPointerType *InterfacePointerType
    = ResultType->getAsObjCInterfacePointerType();
  ObjCInterfaceDecl *Class
    = InterfacePointerType->getObjectType()->getInterface();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.GetClass(*this, Class);

  // Generate the message send.
  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
      Receiver, Args, Class, MethodWithObjects);

  // The above message send needs these objects, but in ARC they are
  // passed in a buffer that is essentially __unsafe_unretained.
  // Therefore we must prevent the optimizer from releasing them until
  // after the call.
  if (TrackNeededObjects) {
    EmitARCIntrinsicUse(NeededObjects);
  }

  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}
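
// For example:
//
//   @[a, b]   lowers to  [NSArray arrayWithObjects:objs count:2]
//   @{k : v}  lowers to  [NSDictionary dictionaryWithObjects:objs
//                                                    forKeys:keys count:1]
//
// where 'objs' and 'keys' are temporary stack arrays of id, and an empty
// literal may instead reference the runtime's __NSArray0__ /
// __NSDictionary0__ constants when the runtime provides them.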

llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
}

llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
                                            const ObjCDictionaryLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
}

/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
  // Untyped selector.
  // Note that this implementation allows for non-constant strings to be passed
  // as arguments to @selector(). Currently, the only thing preventing this
  // behaviour is the type checking in the front end.
  return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
}

llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
  // FIXME: This should pass the Decl not the name.
  return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
}

/// Adjust the type of an Objective-C object that doesn't match up due
/// to type erasure at various points, e.g., related result types or the use
/// of parameterized classes.
static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
                                   RValue Result) {
  if (!ExpT->isObjCRetainableType())
    return Result;

  // If the converted types are the same, we're done.
  llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
  if (ExpLLVMTy == Result.getScalarVal()->getType())
    return Result;

  // We have applied a substitution. Cast the rvalue appropriately.
  return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
                                               ExpLLVMTy));
}

/// Decide whether to extend the lifetime of the receiver of a
/// returns-inner-pointer message.
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
  switch (message->getReceiverKind()) {

  // For a normal instance message, we should extend unless the
  // receiver is loaded from a variable with precise lifetime.
  case ObjCMessageExpr::Instance: {
    const Expr *receiver = message->getInstanceReceiver();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
    if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
    receiver = ice->getSubExpr()->IgnoreParens();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    // Only __strong variables.
    if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
      return true;

    // All ivars and fields have precise lifetime.
    if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
      return false;

    // Otherwise, check for variables.
    const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
    if (!declRef) return true;
    const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
    if (!var) return true;

    // All variables have precise lifetime except local variables with
    // automatic storage duration that aren't specially marked.
    return (var->hasLocalStorage() &&
            !var->hasAttr<ObjCPreciseLifetimeAttr>());
  }

  case ObjCMessageExpr::Class:
  case ObjCMessageExpr::SuperClass:
    // It's never necessary for class objects.
    return false;

  case ObjCMessageExpr::SuperInstance:
    // We generally assume that 'self' lives throughout a method call.
    return false;
  }

  llvm_unreachable("invalid receiver kind");
}
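
// For example, assuming -bytes is annotated NS_RETURNS_INNER_POINTER:
//
//   NSData *data = ...;             // ordinary __strong local
//   const void *p = [data bytes];   // receiver gets retain+autorelease
//
// whereas a receiver with precise lifetime (an ivar, a field, or a variable
// marked objc_precise_lifetime) is messaged without the extension.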

/// Given an expression of ObjC pointer type, check whether it was
/// immediately loaded from an ARC __weak l-value.
static const Expr *findWeakLValue(const Expr *E) {
  assert(E->getType()->isObjCRetainableType());
  E = E->IgnoreParens();
  if (auto CE = dyn_cast<CastExpr>(E)) {
    if (CE->getCastKind() == CK_LValueToRValue) {
      if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
        return CE->getSubExpr();
    }
  }

  return nullptr;
}

/// The ObjC runtime may provide entrypoints that are likely to be faster
/// than an ordinary message send of the appropriate selector.
///
/// The entrypoints are guaranteed to be equivalent to just sending the
/// corresponding message. If the entrypoint is implemented naively as just a
/// message send, using it is a trade-off: it sacrifices a few cycles of
/// overhead to save a small amount of code. However, it's possible for
/// runtimes to detect and special-case classes that use "standard"
/// behavior; if that's dynamically a large proportion of all objects, using
/// the entrypoint will also be faster than using a message send.
///
/// If the runtime does support a required entrypoint, then this method will
/// generate a call and return the resulting value. Otherwise it will return
/// None and the caller can generate a msgSend instead.
static Optional<llvm::Value *>
tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
                                  llvm::Value *Receiver,
                                  const CallArgList& Args, Selector Sel,
                                  const ObjCMethodDecl *method,
                                  bool isClassMessage) {
  auto &CGM = CGF.CGM;
  if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls)
    return None;

  auto &Runtime = CGM.getLangOpts().ObjCRuntime;
  switch (Sel.getMethodFamily()) {
  case OMF_alloc:
    if (isClassMessage &&
        Runtime.shouldUseRuntimeFunctionsForAlloc() &&
        ResultType->isObjCObjectPointerType()) {
      // [Foo alloc] -> objc_alloc(Foo) or
      // [self alloc] -> objc_alloc(self)
      if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc")
        return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType));
      // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo) or
      // [self allocWithZone:nil] -> objc_allocWithZone(self)
      if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
          Args.size() == 1 && Args.front().getType()->isPointerType() &&
          Sel.getNameForSlot(0) == "allocWithZone") {
        const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal();
        if (isa<llvm::ConstantPointerNull>(arg))
          return CGF.EmitObjCAllocWithZone(Receiver,
                                           CGF.ConvertType(ResultType));
        break;
      }
    }
    break;

  case OMF_autorelease:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCAutorelease(Receiver, CGF.ConvertType(ResultType));
    break;

  case OMF_retain:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCRetainNonBlock(Receiver, CGF.ConvertType(ResultType));
    break;

  case OMF_release:
    if (ResultType->isVoidType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease()) {
      CGF.EmitObjCRelease(Receiver, ARCPreciseLifetime);
      return None;
    }
    break;

  default:
    break;
  }
  return None;
}
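
// For example, under a runtime that supports these entrypoints the sends
// above lower to roughly:
//
//   [Foo alloc]        =>  call i8* @objc_alloc(i8* %Foo)
//   [obj autorelease]  =>  call i8* @objc_autorelease(i8* %obj)
//   [obj retain]       =>  call i8* @objc_retain(i8* %obj)
//   [obj release]      =>  call void @objc_release(i8* %obj)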

/// Instead of '[[MyClass alloc] init]', try to generate
/// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the
/// caller side, as well as the optimized objc_alloc.
static Optional<llvm::Value *>
tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) {
  auto &Runtime = CGF.getLangOpts().ObjCRuntime;
  if (!Runtime.shouldUseRuntimeFunctionForCombinedAllocInit())
    return None;

  // Match the exact pattern '[[MyClass alloc] init]'.
  Selector Sel = OME->getSelector();
  if (OME->getReceiverKind() != ObjCMessageExpr::Instance ||
      !OME->getType()->isObjCObjectPointerType() || !Sel.isUnarySelector() ||
      Sel.getNameForSlot(0) != "init")
    return None;

  // Okay, this is '[receiver init]', check if 'receiver' is '[cls alloc]' or
  // we are in an ObjC class method and 'receiver' is '[self alloc]'.
  auto *SubOME =
      dyn_cast<ObjCMessageExpr>(OME->getInstanceReceiver()->IgnoreParenCasts());
  if (!SubOME)
    return None;
  Selector SubSel = SubOME->getSelector();

  // Check if we are in an ObjC class method and the receiver expression is
  // 'self'.
  const Expr *SelfInClassMethod = nullptr;
  if (const auto *CurMD = dyn_cast_or_null<ObjCMethodDecl>(CGF.CurFuncDecl))
    if (CurMD->isClassMethod())
      if ((SelfInClassMethod = SubOME->getInstanceReceiver()))
        if (!SelfInClassMethod->isObjCSelfExpr())
          SelfInClassMethod = nullptr;

  if ((SubOME->getReceiverKind() != ObjCMessageExpr::Class &&
       !SelfInClassMethod) || !SubOME->getType()->isObjCObjectPointerType() ||
      !SubSel.isUnarySelector() || SubSel.getNameForSlot(0) != "alloc")
    return None;

  llvm::Value *Receiver;
  if (SelfInClassMethod) {
    Receiver = CGF.EmitScalarExpr(SelfInClassMethod);
  } else {
    QualType ReceiverType = SubOME->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
    const ObjCInterfaceDecl *ID = ObjTy->getInterface();
    assert(ID && "null interface should be impossible here");
    Receiver = CGF.CGM.getObjCRuntime().GetClass(CGF, ID);
  }
  return CGF.EmitObjCAllocInit(Receiver, CGF.ConvertType(OME->getType()));
}

RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
                                            ReturnValueSlot Return) {
  // Only the lookup mechanism and first two arguments of the method
  // implementation vary between runtimes. We can get the receiver and
  // arguments in generic code.

  bool isDelegateInit = E->isDelegateInitCall();

  const ObjCMethodDecl *method = E->getMethodDecl();

  // If the method is -retain, and the receiver's being loaded from
  // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
  if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
      method->getMethodFamily() == OMF_retain) {
    if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
      LValue lvalue = EmitLValue(lvalueExpr);
      llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
      return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
    }
  }

  if (Optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(*this, E))
    return AdjustObjCObjectType(*this, E->getType(), RValue::get(*Val));

  // We don't retain the receiver in delegate init calls, and this is
  // safe because the receiver value is always loaded from 'self',
  // which we zero out. We don't want to Block_copy block receivers,
  // though.
  bool retainSelf =
    (!isDelegateInit &&
     CGM.getLangOpts().ObjCAutoRefCount &&
     method &&
     method->hasAttr<NSConsumesSelfAttr>());

  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  bool isSuperMessage = false;
  bool isClassMessage = false;
  ObjCInterfaceDecl *OID = nullptr;
  // Find the receiver.
  QualType ReceiverType;
  llvm::Value *Receiver = nullptr;
  switch (E->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    ReceiverType = E->getInstanceReceiver()->getType();
    if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(CurFuncDecl))
      if (OMD->isClassMethod())
        if (E->getInstanceReceiver()->isObjCSelfExpr())
          isClassMessage = true;
    if (retainSelf) {
      TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
                                                   E->getInstanceReceiver());
      Receiver = ter.getPointer();
      if (ter.getInt()) retainSelf = false;
    } else
      Receiver = EmitScalarExpr(E->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    ReceiverType = E->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
    assert(ObjTy && "Invalid Objective-C class message send");
    OID = ObjTy->getInterface();
    assert(OID && "Invalid Objective-C class message send");
    Receiver = Runtime.GetClass(*this, OID);
    isClassMessage = true;
    break;
  }

  case ObjCMessageExpr::SuperInstance:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    break;

  case ObjCMessageExpr::SuperClass:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    isClassMessage = true;
    break;
  }

  if (retainSelf)
    Receiver = EmitARCRetainNonBlock(Receiver);

  // In ARC, we sometimes want to "extend the lifetime"
  // (i.e. retain+autorelease) of receivers of returns-inner-pointer
  // messages.
  if (getLangOpts().ObjCAutoRefCount && method &&
      method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
      shouldExtendReceiverForInnerPointerMessage(E))
    Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);

  QualType ResultType = method ? method->getReturnType() : E->getType();

  CallArgList Args;
  EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method));

  // For delegate init calls in ARC, do an unsafe store of null into
  // self. This represents the call taking direct ownership of that
  // value. We have to do this after emitting the other call
  // arguments because they might also reference self, but we don't
  // have to worry about any of them modifying self because that would
  // be an undefined read and write of an object in unordered
  // expressions.
  if (isDelegateInit) {
    assert(getLangOpts().ObjCAutoRefCount &&
           "delegate init calls should only be marked in ARC");

    // Do an unsafe store of null into self.
    Address selfAddr =
      GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
  }

  RValue result;
  if (isSuperMessage) {
    // super is only valid in an Objective-C method
    const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
    bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
    result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
                                              E->getSelector(),
                                              OMD->getClassInterface(),
                                              isCategoryImpl,
                                              Receiver,
                                              isClassMessage,
                                              Args,
                                              method);
  } else {
    // Call runtime methods directly if we can.
    if (Optional<llvm::Value *> SpecializedResult =
            tryGenerateSpecializedMessageSend(*this, ResultType, Receiver, Args,
                                              E->getSelector(), method,
                                              isClassMessage)) {
      result = RValue::get(SpecializedResult.getValue());
    } else {
      result = Runtime.GenerateMessageSend(*this, Return, ResultType,
                                           E->getSelector(), Receiver, Args,
                                           OID, method);
    }
  }

  // For delegate init calls in ARC, implicitly store the result of
  // the call back into self. This takes ownership of the value.
  if (isDelegateInit) {
    Address selfAddr =
      GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    llvm::Value *newSelf = result.getScalarVal();

    // The delegate return type isn't necessarily a matching type; in
    // fact, it's quite likely to be 'id'.
    llvm::Type *selfTy = selfAddr.getElementType();
    newSelf = Builder.CreateBitCast(newSelf, selfTy);

    Builder.CreateStore(newSelf, selfAddr);
  }

  return AdjustObjCObjectType(*this, E->getType(), result);
}
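
// For example, in an ARC init method:
//
//   self = [super init];
//
// is a delegate init call: null is stored into 'self' before the send (the
// callee takes ownership of the old value), and the result is stored back
// into 'self' afterwards.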

namespace {
struct FinishARCDealloc final : EHScopeStack::Cleanup {
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);

    const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
    const ObjCInterfaceDecl *iface = impl->getClassInterface();
    if (!iface->getSuperClass()) return;

    bool isCategory = isa<ObjCCategoryImplDecl>(impl);

    // Call [super dealloc] if we have a superclass.
    llvm::Value *self = CGF.LoadObjCSelf();

    CallArgList args;
    CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
                                                      CGF.getContext().VoidTy,
                                                      method->getSelector(),
                                                      iface,
                                                      isCategory,
                                                      self,
                                                      /*is class msg*/ false,
                                                      args,
                                                      method);
  }
};
}

/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
                                      const ObjCContainerDecl *CD) {
  SourceLocation StartLoc = OMD->getBeginLoc();
  FunctionArgList args;
  // Check if we should generate debug info for this method.
  if (OMD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);

  const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
  CGM.SetInternalFunctionAttributes(OMD, Fn, FI);

  args.push_back(OMD->getSelfDecl());
  args.push_back(OMD->getCmdDecl());

  args.append(OMD->param_begin(), OMD->param_end());

  CurGD = OMD;
  CurEHLocation = OMD->getEndLoc();

  StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
                OMD->getLocation(), StartLoc);

  // In ARC, certain methods get an extra cleanup.
  if (CGM.getLangOpts().ObjCAutoRefCount &&
      OMD->isInstanceMethod() &&
      OMD->getSelector().isUnarySelector()) {
    const IdentifierInfo *ident =
      OMD->getSelector().getIdentifierInfoForSlot(0);
    if (ident->isStr("dealloc"))
      EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
  }
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue, QualType type);

/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
  StartObjCMethod(OMD, OMD->getClassInterface());
  PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
  assert(isa<CompoundStmt>(OMD->getBody()));
  incrementProfileCounter(OMD->getBody());
  EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
  FinishFunction(OMD->getBodyRBrace());
}

/// emitStructGetterCall - Call the runtime function to load a property
/// into the return value slot.
static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
                                 bool isAtomic, bool hasStrong) {
  ASTContext &Context = CGF.getContext();

  Address src =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
         .getAddress();

  // objc_copyStruct (ReturnValue, &structIvar,
  //                  sizeof (Type of Ivar), isAtomic, false);
  CallArgList args;

  Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
  args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);

  src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
  args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);

  CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
  args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
  args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
  args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);

  llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
               callee, ReturnValueSlot(), args);
}

/// Determine whether the given architecture supports unaligned atomic
/// accesses. They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
  // FIXME: Allow unaligned atomic load/store on x86. (It is not
  // currently supported by the backend.)
  return false;
}

/// Return the maximum size that permits atomic accesses for the given
/// architecture.
static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
                                        llvm::Triple::ArchType arch) {
  // ARM has 8-byte atomic accesses, but it's not clear whether we
  // want to rely on them here.

  // In the default case, just assume that any size up to a pointer is
  // fine given adequate alignment.
  return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
}

namespace {
  class PropertyImplStrategy {
  public:
    enum StrategyKind {
      /// The 'native' strategy is to use the architecture's provided
      /// reads and writes.
      Native,

      /// Use objc_setProperty and objc_getProperty.
      GetSetProperty,

      /// Use objc_setProperty for the setter, but use expression
      /// evaluation for the getter.
      SetPropertyAndExpressionGet,

      /// Use objc_copyStruct.
      CopyStruct,

      /// The 'expression' strategy is to emit normal assignment or
      /// lvalue-to-rvalue expressions.
      Expression
    };

    StrategyKind getKind() const { return StrategyKind(Kind); }

    bool hasStrongMember() const { return HasStrong; }
    bool isAtomic() const { return IsAtomic; }
    bool isCopy() const { return IsCopy; }

    CharUnits getIvarSize() const { return IvarSize; }
    CharUnits getIvarAlignment() const { return IvarAlignment; }

    PropertyImplStrategy(CodeGenModule &CGM,
                         const ObjCPropertyImplDecl *propImpl);

  private:
    unsigned Kind : 8;
    unsigned IsAtomic : 1;
    unsigned IsCopy : 1;
    unsigned HasStrong : 1;

    CharUnits IvarSize;
    CharUnits IvarAlignment;
  };
}

/// Pick an implementation strategy for the given property synthesis.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
                                     const ObjCPropertyImplDecl *propImpl) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();

  IsCopy = (setterKind == ObjCPropertyDecl::Copy);
  IsAtomic = prop->isAtomic();
  HasStrong = false; // doesn't matter here.

  // Evaluate the ivar's size and alignment.
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  QualType ivarType = ivar->getType();
  std::tie(IvarSize, IvarAlignment) =
      CGM.getContext().getTypeInfoInChars(ivarType);

  // If we have a copy property, we always have to use getProperty/setProperty.
  // TODO: we could actually use setProperty and an expression for non-atomics.
  if (IsCopy) {
    Kind = GetSetProperty;
    return;
  }

  // Handle retain.
  if (setterKind == ObjCPropertyDecl::Retain) {
    // In GC-only, there's nothing special that needs to be done.
    if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
      // fallthrough

    // In ARC, if the property is non-atomic, use expression emission,
    // which translates to objc_storeStrong. This isn't required, but
    // it's slightly nicer.
    } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
      // Using standard expression emission for the setter is only
      // acceptable if the ivar is __strong, which won't be true if
      // the property is annotated with __attribute__((NSObject)).
      // TODO: falling all the way back to objc_setProperty here is
      // just laziness, though; we could still use objc_storeStrong
      // if we hacked it right.
      if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
        Kind = Expression;
      else
        Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we need to at least use setProperty. However, if
    // the property isn't atomic, we can use normal expression
    // emission for the getter.
    } else if (!IsAtomic) {
      Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we have to use both setProperty and getProperty.
    } else {
      Kind = GetSetProperty;
      return;
    }
  }

  // If we're not atomic, just use expression accesses.
  if (!IsAtomic) {
    Kind = Expression;
    return;
  }

  // Properties on bitfield ivars need to be emitted using expression
  // accesses even if they're nominally atomic.
  if (ivar->isBitField()) {
    Kind = Expression;
    return;
  }

  // GC-qualified or ARC-qualified ivars need to be emitted as
  // expressions. This actually works out to being atomic anyway,
  // except for ARC __strong, but that should trigger the above code.
  if (ivarType.hasNonTrivialObjCLifetime() ||
      (CGM.getLangOpts().getGC() &&
       CGM.getContext().getObjCGCAttrKind(ivarType))) {
    Kind = Expression;
    return;
  }

  // Compute whether the ivar has strong members.
  if (CGM.getLangOpts().getGC())
    if (const RecordType *recordType = ivarType->getAs<RecordType>())
      HasStrong = recordType->getDecl()->hasObjectMember();

  // We can never access structs with object members with a native
  // access, because we need to use write barriers. This is what
  // objc_copyStruct is for.
  if (HasStrong) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, this is target-dependent and based on the size and
  // alignment of the ivar.

  // If the size of the ivar is not a power of two, give up. We don't
  // want to get into the business of doing compare-and-swaps.
  if (!IvarSize.isPowerOfTwo()) {
    Kind = CopyStruct;
    return;
  }

  llvm::Triple::ArchType arch =
    CGM.getTarget().getTriple().getArch();

  // Most architectures require memory to fit within a single cache
  // line, so the alignment has to be at least the size of the access.
  // Otherwise we have to grab a lock.
  if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
    Kind = CopyStruct;
    return;
  }

  // If the ivar's size exceeds the architecture's maximum atomic
  // access size, we have to use CopyStruct.
  if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, we can use native loads and stores.
  Kind = Native;
}
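
// Illustrative strategy choices under ARC on a typical 64-bit target:
//
//   @property (nonatomic, strong) id x;  // Expression (objc_storeStrong)
//   @property (copy) NSString *s;        // GetSetProperty
//   @property int n;                     // Native (atomic load/store)
//   @property SomeBigStruct b;           // CopyStruct, once the struct
//                                        // exceeds the max atomic size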

/// Generate an Objective-C property getter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
  assert(OMD && "Invalid call to generate getter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);

  FinishFunction(OMD->getBodyRBrace());
}

static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
  const Expr *getter = propImpl->getGetterCXXConstructor();
  if (!getter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // If the property has a reference type, we might just be binding a
  // reference, in which case the result will be a gl-value. We should
  // treat this as a non-trivial operation.
  if (getter->isGLValue())
    return false;

  // If we selected a trivial copy-constructor, we're okay.
  if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
    return (construct->getConstructor()->isTrivial());

  // The constructor might require cleanups (in which case it's never
  // trivial).
  assert(isa<ExprWithCleanups>(getter));
  return false;
}

/// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
                                          llvm::Value *returnAddr,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
  //                           AtomicHelperFn);
  CallArgList args;

  // The 1st argument is the return Slot.
  args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);

  // The 2nd argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                            CGF.LoadObjCSelf(), ivar, 0).getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::FunctionCallee copyCppAtomicObjectFn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
  CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        const ObjCMethodDecl *GetterMethodDecl,
                                        llvm::Constant *AtomicHelperFn) {
  // If there's a non-trivial 'get' expression, we just have to emit that.
  if (!hasTrivialGetExpr(propImpl)) {
    if (!AtomicHelperFn) {
      auto *ret = ReturnStmt::Create(getContext(), SourceLocation(),
                                     propImpl->getGetterCXXConstructor(),
                                     /* NRVOCandidate=*/nullptr);
      EmitReturnStmt(*ret);
    }
    else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  QualType propType = prop->getType();
  ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();

  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();

  // Pick an implementation strategy.
  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
    llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
    bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay

    // Perform an atomic load. This does not impose ordering constraints.
    Address ivarAddr = LV.getAddress();
    ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
    llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
    load->setAtomic(llvm::AtomicOrdering::Unordered);

    // Store that value into the return address. Doing this with a
    // bitcast is likely to produce some pretty ugly IR, but it's not
    // the *most* terrible thing in the world.
    llvm::Type *retTy = ConvertType(getterMethod->getReturnType());
    uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
    llvm::Value *ivarVal = load;
    if (ivarSize > retTySize) {
      llvm::Type *newTy = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
      ivarVal = Builder.CreateTrunc(load, newTy);
      bitcastType = newTy->getPointerTo();
    }
    Builder.CreateStore(ivarVal,
                        Builder.CreateBitCast(ReturnValue, bitcastType));

    // Make sure we don't do an autorelease.
    AutoreleaseResult = false;
    return;
  }

  case PropertyImplStrategy::GetSetProperty: {
    llvm::FunctionCallee getPropertyFn =
        CGM.getObjCRuntime().GetPropertyGetFunction();
    if (!getPropertyFn) {
      CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
      return;
    }
    CGCallee callee = CGCallee::forDirect(getPropertyFn);

    // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
    // FIXME: Can't this be simpler? This might even be worse than the
    // corresponding gcc code.
    llvm::Value *cmd =
        Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
    llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
        EmitIvarOffset(classImpl->getClassInterface(), ivar);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
    args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
             getContext().BoolTy);

    // FIXME: We shouldn't need to get the function info here, the
    // runtime already should have computed it to build the function.
    llvm::CallBase *CallInstruction;
    RValue RV = EmitCall(getTypes().arrangeBuiltinFunctionCall(
                             getContext().getObjCIdType(), args),
                         callee, ReturnValueSlot(), args, &CallInstruction);
    if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
      call->setTailCall();

    // We need to fix the type here. Ivars with copy & retain are
    // always objects so we don't need to worry about complex or
    // aggregates.
    RV = RValue::get(Builder.CreateBitCast(
        RV.getScalarVal(),
        getTypes().ConvertType(getterMethod->getReturnType())));

    EmitReturnOfRValue(RV, propType);

    // objc_getProperty does an autorelease, so we should suppress ours.
    AutoreleaseResult = false;

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructGetterCall(*this, ivar, strategy.isAtomic(),
                         strategy.hasStrongMember());
    return;

  case PropertyImplStrategy::Expression:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {
    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    QualType ivarType = ivar->getType();
    switch (getEvaluationKind(ivarType)) {
    case TEK_Complex: {
      ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
      EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
                         /*init*/ true);
      return;
    }
    case TEK_Aggregate: {
      // The return value slot is guaranteed to not be aliased, but
      // that's not necessarily the same as "on the stack", so
      // we still potentially need objc_memmove_collectable.
      EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType),
                        /* Src= */ LV, ivarType, getOverlapForReturnValue());
      return;
    }
    case TEK_Scalar: {
      llvm::Value *value;
      if (propType->isReferenceType()) {
        value = LV.getAddress().getPointer();
      } else {
        // We want to load and autoreleaseReturnValue ARC __weak ivars.
        if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
          if (getLangOpts().ObjCAutoRefCount) {
            value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
          } else {
            value = EmitARCLoadWeak(LV.getAddress());
          }

        // Otherwise we want to do a simple load, suppressing the
        // final autorelease.
        } else {
          value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
          AutoreleaseResult = false;
        }

        value = Builder.CreateBitCast(
            value, ConvertType(GetterMethodDecl->getReturnType()));
      }

      EmitReturnOfRValue(RValue::get(value), propType);
      return;
    }
    }
    llvm_unreachable("bad evaluation kind");
  }

  }
  llvm_unreachable("bad @property implementation strategy!");
}
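
// For the Native strategy above, the getter for an atomic scalar ivar is
// emitted as roughly:
//
//   %val = load atomic iN, iN* %ivar unordered, align A  ; N = size in bits
//   store iN %val, iN* %retval
//
// an unordered atomic load written into the return slot, with the final
// autorelease suppressed.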

/// emitStructSetterCall - Call the runtime function to store the value
/// from the first formal parameter into the given ivar.
static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
                                 ObjCIvarDecl *ivar) {
  // objc_copyStruct (&structIvar, &Arg,
  //                  sizeof (struct something), true, false);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                                                CGF.LoadObjCSelf(), ivar, 0)
                              .getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(CGF.getContext(), argVar, false,
                     argVar->getType().getNonReferenceType(), VK_LValue,
                     SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // The third argument is the sizeof the type.
  llvm::Value *size =
      CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
  args.add(RValue::get(size), CGF.getContext().getSizeType());

  // The fourth argument is the 'isAtomic' flag.
  args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);

  // The fifth argument is the 'hasStrong' flag.
  // FIXME: should this really always be false?
  args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);

  llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
/// the value from the first formal parameter into the given ivar, using
/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
                                          ObjCMethodDecl *OMD,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
  //                           AtomicHelperFn);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                            CGF.LoadObjCSelf(), ivar, 0).getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(CGF.getContext(), argVar, false,
                     argVar->getType().getNonReferenceType(), VK_LValue,
                     SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::FunctionCallee fn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
  Expr *setter = PID->getSetterCXXAssignment();
  if (!setter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // An operator call is trivial if the function it calls is trivial.
  // This also implies that there's nothing non-trivial going on with
  // the arguments, because operator= can only be trivial if it's a
  // synthesized assignment operator and therefore both parameters are
  // references.
  if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
    if (const FunctionDecl *callee
          = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
      if (callee->isTrivial())
        return true;
    return false;
  }

  assert(isa<ExprWithCleanups>(setter));
  return false;
}

static bool UseOptimizedSetter(CodeGenModule &CGM) {
  if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
    return false;
  return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
}

void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        llvm::Constant *AtomicHelperFn) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();

  // Just use the setter expression if Sema gave us one and it's
  // non-trivial.
  if (!hasTrivialSetExpr(propImpl)) {
    if (!AtomicHelperFn)
      // If non-atomic, assignment is called directly.
      EmitStmt(propImpl->getSetterCXXAssignment());
    else
      // If atomic, assignment is called via a locking api.
      emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
                                    AtomicHelperFn);
    return;
  }

  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());

    LValue ivarLValue =
        EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
    Address ivarAddr = ivarLValue.getAddress();

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    llvm::Type *bitcastType =
        llvm::Type::getIntNTy(getLLVMContext(),
                              getContext().toBits(strategy.getIvarSize()));

    // Cast both arguments to the chosen operation type.
    argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
    ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);

    // This bitcast load is likely to cause some nasty IR.
    llvm::Value *load = Builder.CreateLoad(argAddr);

    // Perform an atomic store. There are no memory ordering requirements.
    llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
    store->setAtomic(llvm::AtomicOrdering::Unordered);
    return;
  }

  case PropertyImplStrategy::GetSetProperty:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {

    llvm::FunctionCallee setOptimizedPropertyFn = nullptr;
    llvm::FunctionCallee setPropertyFn = nullptr;
    if (UseOptimizedSetter(CGM)) {
      // 10.8 and iOS 6.0 code and GC is off
      setOptimizedPropertyFn =
          CGM.getObjCRuntime().GetOptimizedPropertySetFunction(
              strategy.isAtomic(), strategy.isCopy());
      if (!setOptimizedPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
        return;
      }
    }
    else {
      setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
      if (!setPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
        return;
      }
    }

    // Emit objc_setProperty((id) self, _cmd, offset, arg,
    //                       <is-atomic>, <is-copy>).
    llvm::Value *cmd =
        Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
    llvm::Value *self =
        Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
        EmitIvarOffset(classImpl->getClassInterface(), ivar);
    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
    llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
    arg = Builder.CreateBitCast(arg, VoidPtrTy);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    if (setOptimizedPropertyFn) {
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn);
      EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
               callee, ReturnValueSlot(), args);
    } else {
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
               getContext().BoolTy);
      args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
               getContext().BoolTy);
      // FIXME: We shouldn't need to get the function info here, the runtime
      // already should have computed it to build the function.
      CGCallee callee = CGCallee::forDirect(setPropertyFn);
      EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
               callee, ReturnValueSlot(), args);
    }

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructSetterCall(*this, setterMethod, ivar);
    return;

  case PropertyImplStrategy::Expression:
    break;
  }

  // Otherwise, fake up some ASTs and emit a normal assignment.
  ValueDecl *selfDecl = setterMethod->getSelfDecl();
  DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(),
                   VK_LValue, SourceLocation());
  ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
                            selfDecl->getType(), CK_LValueToRValue, &self,
                            VK_RValue);
  ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
                          SourceLocation(), SourceLocation(),
                          &selfLoad, true, true);

  ParmVarDecl *argDecl = *setterMethod->param_begin();
  QualType argType = argDecl->getType().getNonReferenceType();
  DeclRefExpr arg(getContext(), argDecl, false, argType, VK_LValue,
                  SourceLocation());
  ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
                           argType.getUnqualifiedType(), CK_LValueToRValue,
                           &arg, VK_RValue);

  // The property type can differ from the ivar type in some situations with
  // Objective-C pointer types; we can always bit cast the RHS in these cases.
  // The following absurdity is just to ensure well-formed IR.
  CastKind argCK = CK_NoOp;
  if (ivarRef.getType()->isObjCObjectPointerType()) {
    if (argLoad.getType()->isObjCObjectPointerType())
      argCK = CK_BitCast;
    else if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BlockPointerToObjCPointerCast;
    else
      argCK = CK_CPointerToObjCPointerCast;
  } else if (ivarRef.getType()->isBlockPointerType()) {
    if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BitCast;
    else
      argCK = CK_AnyPointerToBlockPointerCast;
  } else if (ivarRef.getType()->isPointerType()) {
    argCK = CK_BitCast;
  }
  ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
                           ivarRef.getType(), argCK, &argLoad,
                           VK_RValue);
  Expr *finalArg = &argLoad;
  if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
                                           argLoad.getType()))
    finalArg = &argCast;

  BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
                        ivarRef.getType(), VK_RValue, OK_Ordinary,
                        SourceLocation(), FPOptions());
  EmitStmt(&assign);
}

/// Generate an Objective-C property setter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
  assert(OMD && "Invalid call to generate setter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCSetterBody(IMP, PID, AtomicHelperFn);

  FinishFunction(OMD->getBodyRBrace());
}

namespace {
  struct DestroyIvar final : EHScopeStack::Cleanup {
  private:
    llvm::Value *addr;
    const ObjCIvarDecl *ivar;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;
  public:
    DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
                CodeGenFunction::Destroyer *destroyer,
                bool useEHCleanupForArray)
      : addr(addr), ivar(ivar), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      LValue lvalue
        = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
      CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
static void destroyARCStrongWithStore(CodeGenFunction &CGF,
                                      Address addr,
                                      QualType type) {
  llvm::Value *null = getNullForVariable(addr);
  CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
}

static void emitCXXDestructMethod(CodeGenFunction &CGF,
                                  ObjCImplementationDecl *impl) {
  CodeGenFunction::RunCleanupsScope scope(CGF);

  llvm::Value *self = CGF.LoadObjCSelf();

  const ObjCInterfaceDecl *iface = impl->getClassInterface();
  for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
       ivar; ivar = ivar->getNextIvar()) {
    QualType type = ivar->getType();

    // Check whether the ivar is a destructible type.
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    CodeGenFunction::Destroyer *destroyer = nullptr;

    // Use a call to objc_storeStrong to destroy strong ivars, for the
    // general benefit of the tools.
    if (dtorKind == QualType::DK_objc_strong_lifetime) {
      destroyer = destroyARCStrongWithStore;

    // Otherwise use the default for the destruction kind.
    } else {
      destroyer = CGF.getDestroyer(dtorKind);
    }

    CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);

    CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
                                         cleanupKind & EHCleanup);
  }

  assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
}

void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
                                                 ObjCMethodDecl *MD,
                                                 bool ctor) {
  MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
  StartObjCMethod(MD, IMP->getClassInterface());

  // Emit .cxx_construct.
  if (ctor) {
    // Suppress the final autorelease in ARC.
    AutoreleaseResult = false;

    for (const auto *IvarInit : IMP->inits()) {
      FieldDecl *Field = IvarInit->getAnyMember();
      ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
      LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
                                    LoadObjCSelf(), Ivar, 0);
      EmitAggExpr(IvarInit->getInit(),
                  AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                          AggValueSlot::IsNotAliased,
                                          AggValueSlot::DoesNotOverlap));
    }
    // constructor returns 'self'.
    CodeGenTypes &Types = CGM.getTypes();
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Value *SelfAsId =
      Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
    EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);

  // Emit .cxx_destruct.
  } else {
    emitCXXDestructMethod(*this, IMP);
  }
  FinishFunction();
}
llvm::Value *CodeGenFunction::LoadObjCSelf() {
  VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
  DeclRefExpr DRE(getContext(), Self,
                  /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
                  Self->getType(), VK_LValue, SourceLocation());
  return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation());
}

QualType CodeGenFunction::TypeOfSelfObject() {
  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
  ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
  const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
    getContext().getCanonicalType(selfDecl->getType()));
  return PTy->getPointeeType();
}
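
// Fast enumeration lowers "for (id x in collection) { ... }" into roughly:
//
//   NSFastEnumerationState state = {0};
//   id buffer[16];
//   NSUInteger count = [collection countByEnumeratingWithState:&state
//                                                      objects:buffer
//                                                        count:16];
//   while (count != 0) {
//     for (NSUInteger i = 0; i != count; ++i) {
//       /* check state.mutationsPtr, then: */
//       id x = state.itemsPtr[i];
//       ...body...
//     }
//     count = [collection countByEnumeratingWithState:&state
//                                              objects:buffer
//                                                count:16];
//   }
//
// The basic blocks below (forcoll.loopinit, forcoll.loopbody, forcoll.refetch,
// forcoll.empty) implement this structure.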
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
  llvm::FunctionCallee EnumerationMutationFnPtr =
      CGM.getObjCRuntime().EnumerationMutationFunction();
  if (!EnumerationMutationFnPtr) {
    CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
    return;
  }
  CGCallee EnumerationMutationFn =
    CGCallee::forDirect(EnumerationMutationFnPtr);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  RunCleanupsScope ForScope(*this);

  // The local variable comes into scope immediately.
  AutoVarEmission variable = AutoVarEmission::invalid();
  if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
    variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));

  JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");

  // Fast enumeration state.
  QualType StateTy = CGM.getObjCFastEnumerationStateType();
  Address StatePtr = CreateMemTemp(StateTy, "state.ptr");
  EmitNullInitialization(StatePtr, StateTy);

  // Number of elements in the items array.
  static const unsigned NumItems = 16;

  // Fetch the countByEnumeratingWithState:objects:count: selector.
  IdentifierInfo *II[] = {
    &CGM.getContext().Idents.get("countByEnumeratingWithState"),
    &CGM.getContext().Idents.get("objects"),
    &CGM.getContext().Idents.get("count")
  };
  Selector FastEnumSel =
    CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);

  QualType ItemsTy =
    getContext().getConstantArrayType(getContext().getObjCIdType(),
                                      llvm::APInt(32, NumItems),
                                      ArrayType::Normal, 0);
  Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");

  // Emit the collection pointer.  In ARC, we do a retain.
  llvm::Value *Collection;
  if (getLangOpts().ObjCAutoRefCount) {
    Collection = EmitARCRetainScalarExpr(S.getCollection());

    // Enter a cleanup to do the release.
    EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
  } else {
    Collection = EmitScalarExpr(S.getCollection());
  }

  // The 'continue' label needs to appear within the cleanup for the
  // collection object.
  JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");

  // Send it our message:
  CallArgList Args;

  // The first argument is a temporary of the enumeration-state type.
  Args.add(RValue::get(StatePtr.getPointer()),
           getContext().getPointerType(StateTy));

  // The second argument is a temporary array with space for NumItems
  // pointers.  We'll actually be loading elements from the array
  // pointer written into the control state; this buffer is so that
  // collections that *aren't* backed by arrays can still queue up
  // batches of elements.
  Args.add(RValue::get(ItemsPtr.getPointer()),
           getContext().getPointerType(ItemsTy));

  // The third argument is the capacity of that temporary array.
  llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType());
  llvm::Constant *Count = llvm::ConstantInt::get(NSUIntegerTy, NumItems);
  Args.add(RValue::get(Count), getContext().getNSUIntegerType());

  // Start the enumeration.
  RValue CountRV =
      CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
                                               getContext().getNSUIntegerType(),
                                               FastEnumSel, Collection, Args);

  // The initial number of objects that were returned in the buffer.
  llvm::Value *initialBufferLimit = CountRV.getScalarVal();

  llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
  llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");

  llvm::Value *zero = llvm::Constant::getNullValue(NSUIntegerTy);

  // If the limit pointer was zero to begin with, the collection is
  // empty; skip all this.  Set the branch weight assuming this has the same
  // probability of exiting the loop as any other loop exit.
  uint64_t EntryCount = getCurrentProfileCount();
  Builder.CreateCondBr(
      Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB,
      LoopInitBB,
      createProfileWeights(EntryCount, getProfileCount(S.getBody())));

  // Otherwise, initialize the loop.
  EmitBlock(LoopInitBB);

  // Save the initial mutations value.  This is the value at an
  // address that was written into the state object by
  // countByEnumeratingWithState:objects:count:.
  Address StateMutationsPtrPtr =
      Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
  llvm::Value *StateMutationsPtr
    = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");

  llvm::Value *initialMutations =
      Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
                                "forcoll.initial-mutations");

  // Start looping.  This is the point we return to whenever we have a
  // fresh, non-empty batch of objects.
  llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
  EmitBlock(LoopBodyBB);

  // The current index into the buffer.
  llvm::PHINode *index = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.index");
  index->addIncoming(zero, LoopInitBB);

  // The current buffer size.
  llvm::PHINode *count = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.count");
  count->addIncoming(initialBufferLimit, LoopInitBB);

  incrementProfileCounter(&S);

  // Check whether the mutations value has changed from where it was
  // at start.  StateMutationsPtr should actually be invariant between
  // refreshes.
  StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
  llvm::Value *currentMutations
    = Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
                                "statemutations");

  llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
  llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");

  Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
                       WasNotMutatedBB, WasMutatedBB);

  // If so, call the enumeration-mutation function.
  EmitBlock(WasMutatedBB);
  llvm::Value *V =
    Builder.CreateBitCast(Collection,
                          ConvertType(getContext().getObjCIdType()));
  CallArgList Args2;
  Args2.add(RValue::get(V), getContext().getObjCIdType());
  // FIXME: We shouldn't need to get the function info here, the runtime already
  // should have computed it to build the function.
  EmitCall(
      CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2),
      EnumerationMutationFn, ReturnValueSlot(), Args2);

  // Otherwise, or if the mutation function returns, just continue.
  EmitBlock(WasNotMutatedBB);

  // Initialize the element variable.
  RunCleanupsScope elementVariableScope(*this);
  bool elementIsVariable;
  LValue elementLValue;
  QualType elementType;
  if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
    // Initialize the variable, in case it's a __block variable or something.
    EmitAutoVarInit(variable);

    const VarDecl *D = cast<VarDecl>(SD->getSingleDecl());
    DeclRefExpr tempDRE(getContext(), const_cast<VarDecl *>(D), false,
                        D->getType(), VK_LValue, SourceLocation());
    elementLValue = EmitLValue(&tempDRE);
    elementType = D->getType();
    elementIsVariable = true;

    if (D->isARCPseudoStrong())
      elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
  } else {
    elementLValue = LValue(); // suppress warning
    elementType = cast<Expr>(S.getElement())->getType();
    elementIsVariable = false;
  }
  llvm::Type *convertedElementType = ConvertType(elementType);

  // Fetch the buffer out of the enumeration state.
  // TODO: this pointer should actually be invariant between
  // refreshes, which would help us do certain loop optimizations.
  Address StateItemsPtr =
      Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
  llvm::Value *EnumStateItems =
    Builder.CreateLoad(StateItemsPtr, "stateitems");

  // Fetch the value at the current index from the buffer.
  llvm::Value *CurrentItemPtr =
    Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
  llvm::Value *CurrentItem =
    Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign());

  // Cast that value to the right type.
  CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
                                      "currentitem");

  // Make sure we have an l-value.  Yes, this gets evaluated every
  // time through the loop.
  if (!elementIsVariable) {
    elementLValue = EmitLValue(cast<Expr>(S.getElement()));
    EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
  } else {
    EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue,
                           /*isInit*/ true);
  }

  // If we do have an element variable, this assignment is the end of
  // its initialization.
  if (elementIsVariable)
    EmitAutoVarCleanups(variable);

  // Perform the loop body, setting up break and continue labels.
  BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
  {
    RunCleanupsScope Scope(*this);
    EmitStmt(S.getBody());
  }
  BreakContinueStack.pop_back();

  // Destroy the element variable now.
  elementVariableScope.ForceCleanup();

  // Check whether there are more elements.
  EmitBlock(AfterBody.getBlock());

  llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");

  // First we check in the local buffer.
  llvm::Value *indexPlusOne =
      Builder.CreateAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1));

  // If we haven't overrun the buffer yet, we can continue.
  // Set the branch weights based on the simplifying assumption that this is
  // like a while-loop, i.e., ignoring that the false branch fetches more
  // elements and then returns to the loop.
  Builder.CreateCondBr(
      Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB,
      createProfileWeights(getProfileCount(S.getBody()), EntryCount));

  index->addIncoming(indexPlusOne, AfterBody.getBlock());
  count->addIncoming(count, AfterBody.getBlock());

  // Otherwise, we have to fetch more elements.
  EmitBlock(FetchMoreBB);

  CountRV =
      CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
                                               getContext().getNSUIntegerType(),
                                               FastEnumSel, Collection, Args);

  // If we got a zero count, we're done.
  llvm::Value *refetchCount = CountRV.getScalarVal();

  // (note that the message send might split FetchMoreBB)
  index->addIncoming(zero, Builder.GetInsertBlock());
  count->addIncoming(refetchCount, Builder.GetInsertBlock());

  Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
                       EmptyBB, LoopBodyBB);

  // No more elements.
  EmitBlock(EmptyBB);

  if (!elementIsVariable) {
    // If the element was not a declaration, set it to be null.

    llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
    elementLValue = EmitLValue(cast<Expr>(S.getElement()));
    EmitStoreThroughLValue(RValue::get(null), elementLValue);
  }

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());

  ForScope.ForceCleanup();
  EmitBlock(LoopEnd.getBlock());
}
void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
  CGM.getObjCRuntime().EmitTryStmt(*this, S);
}

void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
  CGM.getObjCRuntime().EmitThrowStmt(*this, S);
}

void CodeGenFunction::EmitObjCAtSynchronizedStmt(
                                              const ObjCAtSynchronizedStmt &S) {
  CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
}

namespace {
  struct CallObjCRelease final : EHScopeStack::Cleanup {
    CallObjCRelease(llvm::Value *object) : object(object) {}
    llvm::Value *object;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Releases at the end of the full-expression are imprecise.
      CGF.EmitARCRelease(object, ARCImpreciseLifetime);
    }
  };
}
/// Produce the code for a CK_ARCConsumeObject.  Does a primitive
/// release at the end of the full-expression.
llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
                                                    llvm::Value *object) {
  // If we're in a conditional branch, we need to make the cleanup
  // conditional.
  pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
  return object;
}

llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
                                                           llvm::Value *value) {
  return EmitARCRetainAutorelease(type, value);
}

/// Given a number of pointers, inform the optimizer that they're
/// being intrinsically used up until this point in the program.
void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
  llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_use;
  if (!fn)
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_use);

  // This isn't really a "runtime" function, but as an intrinsic it
  // doesn't really matter as long as we align things up.
  EmitNounwindRuntimeCall(fn, values);
}

static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) {
  if (auto *F = dyn_cast<llvm::Function>(RTF)) {
    // If the target runtime doesn't naturally support ARC, emit weak
    // references to the runtime support library.  We don't really
    // permit this to fail, but we need a particular relocation style.
    if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() &&
        !CGM.getTriple().isOSBinFormatCOFF()) {
      F->setLinkage(llvm::Function::ExternalWeakLinkage);
    }
  }
}

static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
                                         llvm::FunctionCallee RTF) {
  setARCRuntimeFunctionLinkage(CGM, RTF.getCallee());
}
/// Perform an operation having the signature
///   i8* (i8*)
/// where a null input causes a no-op and returns null.
static llvm::Value *emitARCValueOperation(
    CodeGenFunction &CGF, llvm::Value *value, llvm::Type *returnType,
    llvm::Function *&fn, llvm::Intrinsic::ID IntID,
    llvm::CallInst::TailCallKind tailKind = llvm::CallInst::TCK_None) {
  if (isa<llvm::ConstantPointerNull>(value))
    return value;

  if (!fn) {
    fn = CGF.CGM.getIntrinsic(IntID);
    setARCRuntimeFunctionLinkage(CGF.CGM, fn);
  }

  // Cast the argument to 'id'.
  llvm::Type *origType = returnType ? returnType : value->getType();
  value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);

  // Call the function.
  llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
  call->setTailCallKind(tailKind);

  // Cast the result back to the original type.
  return CGF.Builder.CreateBitCast(call, origType);
}

/// Perform an operation having the following signature:
///   i8* (i8**)
static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr,
                                         llvm::Function *&fn,
                                         llvm::Intrinsic::ID IntID) {
  if (!fn) {
    fn = CGF.CGM.getIntrinsic(IntID);
    setARCRuntimeFunctionLinkage(CGF.CGM, fn);
  }

  // Cast the argument to 'id*'.
  llvm::Type *origType = addr.getElementType();
  addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);

  // Call the function.
  llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());

  // Cast the result back to a dereference of the original type.
  if (origType != CGF.Int8PtrTy)
    result = CGF.Builder.CreateBitCast(result, origType);

  return result;
}

/// Perform an operation having the following signature:
///   i8* (i8**, i8*)
static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr,
                                          llvm::Value *value,
                                          llvm::Function *&fn,
                                          llvm::Intrinsic::ID IntID,
                                          bool ignored) {
  assert(addr.getElementType() == value->getType());

  if (!fn) {
    fn = CGF.CGM.getIntrinsic(IntID);
    setARCRuntimeFunctionLinkage(CGF.CGM, fn);
  }

  llvm::Type *origType = value->getType();

  llvm::Value *args[] = {
    CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy),
    CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
  };
  llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);

  if (ignored) return nullptr;

  return CGF.Builder.CreateBitCast(result, origType);
}

/// Perform an operation having the following signature:
///   void (i8**, i8**)
static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src,
                                 llvm::Function *&fn,
                                 llvm::Intrinsic::ID IntID) {
  assert(dst.getType() == src.getType());

  if (!fn) {
    fn = CGF.CGM.getIntrinsic(IntID);
    setARCRuntimeFunctionLinkage(CGF.CGM, fn);
  }

  llvm::Value *args[] = {
    CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
    CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy)
  };
  CGF.EmitNounwindRuntimeCall(fn, args);
}
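
// For example, emitARCValueOperation on a value of type Foo* with
// llvm::Intrinsic::objc_retain produces roughly:
//   %0 = bitcast %struct.Foo* %v to i8*
//   %1 = call i8* @llvm.objc.retain(i8* %0)
//   %2 = bitcast i8* %1 to %struct.Foo*
// Later passes rewrite the intrinsic into a call to the corresponding
// runtime function, objc_retain.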
/// Perform an operation having the signature
///   i8* (i8*)
/// where a null input causes a no-op and returns null.
static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
                                           llvm::Value *value,
                                           llvm::Type *returnType,
                                           llvm::FunctionCallee &fn,
                                           StringRef fnName) {
  if (isa<llvm::ConstantPointerNull>(value))
    return value;

  if (!fn) {
    llvm::FunctionType *fnType =
      llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false);
    fn = CGF.CGM.CreateRuntimeFunction(fnType, fnName);

    // We have Native ARC, so set nonlazybind attribute for performance
    if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee()))
      if (fnName == "objc_retain")
        f->addFnAttr(llvm::Attribute::NonLazyBind);
  }

  // Cast the argument to 'id'.
  llvm::Type *origType = returnType ? returnType : value->getType();
  value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);

  // Call the function.
  llvm::CallBase *Inst = CGF.EmitCallOrInvoke(fn, value);

  // Cast the result back to the original type.
  return CGF.Builder.CreateBitCast(Inst, origType);
}
/// Produce the code to do a retain.  Based on the type, calls one of:
///   call i8* \@objc_retain(i8* %value)
///   call i8* \@objc_retainBlock(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
  if (type->isBlockPointerType())
    return EmitARCRetainBlock(value, /*mandatory*/ false);
  else
    return EmitARCRetainNonBlock(value);
}

/// Retain the given object, with normal retain semantics.
///   call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_retain,
                               llvm::Intrinsic::objc_retain);
}

/// Retain the given block, with _Block_copy semantics.
///   call i8* \@objc_retainBlock(i8* %value)
///
/// \param mandatory - If false, emit the call with metadata
/// indicating that it's okay for the optimizer to eliminate this call
/// if it can prove that the block never escapes except down the stack.
llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
                                                 bool mandatory) {
  llvm::Value *result
    = emitARCValueOperation(*this, value, nullptr,
                            CGM.getObjCEntrypoints().objc_retainBlock,
                            llvm::Intrinsic::objc_retainBlock);

  // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
  // tell the optimizer that it doesn't need to do this copy if the
  // block doesn't escape, where being passed as an argument doesn't
  // count as escaping.
  if (!mandatory && isa<llvm::Instruction>(result)) {
    llvm::CallInst *call
      = cast<llvm::CallInst>(result->stripPointerCasts());
    assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock);

    call->setMetadata("clang.arc.copy_on_escape",
                      llvm::MDNode::get(Builder.getContext(), None));
  }

  return result;
}
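
// E.g. a non-mandatory block retain comes out roughly as
//   %1 = call i8* @llvm.objc.retainBlock(i8* %0), !clang.arc.copy_on_escape !0
// and the ARC optimizer may delete the call entirely if it can prove the
// block never escapes the stack.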
static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
  // Fetch the void(void) inline asm which marks that we're going to
  // do something with the autoreleased return value.
  llvm::InlineAsm *&marker
    = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker;
  if (!marker) {
    llvm::StringRef assembly
      = CGF.CGM.getTargetCodeGenInfo()
           .getARCRetainAutoreleasedReturnValueMarker();

    // If we have an empty assembly string, there's nothing to do.
    if (assembly.empty()) {

    // Otherwise, at -O0, build an inline asm that we're going to call
    // immediately.
    } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
      llvm::FunctionType *type =
        llvm::FunctionType::get(CGF.VoidTy, /*variadic*/false);

      marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);

    // If we're at -O1 and above, we don't want to litter the code
    // with this marker yet, so leave a breadcrumb for the ARC
    // optimizer to pick up.
    } else {
      const char *markerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
      if (!CGF.CGM.getModule().getModuleFlag(markerKey)) {
        auto *str = llvm::MDString::get(CGF.getLLVMContext(), assembly);
        CGF.CGM.getModule().addModuleFlag(llvm::Module::Error, markerKey, str);
      }
    }
  }

  // Call the marker asm if we made one, which we do only at -O0.
  if (marker)
    CGF.Builder.CreateCall(marker, None, CGF.getBundlesForFunclet(marker));
}
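
// The marker is a target-specific no-op instruction that sits between the
// call and the objc_retainAutoreleasedReturnValue that follows it; the
// runtime looks for it when deciding whether it can elide the
// autorelease/retain handshake.  On AArch64, for instance, the marker is
// "mov fp, fp".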
/// Retain the given object which is the result of a function call.
///   call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
///
/// Yes, this function name is one character away from a different
/// call with completely different semantics.
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
  emitAutoreleasedReturnValueMarker(*this);
  llvm::CallInst::TailCallKind tailKind =
      CGM.getTargetCodeGenInfo()
              .shouldSuppressTailCallsOfRetainAutoreleasedReturnValue()
          ? llvm::CallInst::TCK_NoTail
          : llvm::CallInst::TCK_None;
  return emitARCValueOperation(
      *this, value, nullptr,
      CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
      llvm::Intrinsic::objc_retainAutoreleasedReturnValue, tailKind);
}

/// Claim a possibly-autoreleased return value at +0.  This is only
/// valid to do in contexts which do not rely on the retain to keep
/// the object valid for all of its uses; for example, when
/// the value is ignored, or when it is being assigned to an
/// __unsafe_unretained variable.
///
///   call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) {
  emitAutoreleasedReturnValueMarker(*this);
  return emitARCValueOperation(*this, value, nullptr,
            CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue,
            llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue);
}
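
// Together these implement the return-value handshake: for
//   __strong id x = [obj foo];
// the caller emits the message send, the marker, and
// objc_retainAutoreleasedReturnValue; for
//   __unsafe_unretained id y = [obj foo];
// it can use objc_unsafeClaimAutoreleasedReturnValue instead and avoid
// taking ownership of the result at all.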
/// Release the given object.
///   call void \@objc_release(i8* %value)
void CodeGenFunction::EmitARCRelease(llvm::Value *value,
                                     ARCPreciseLifetime_t precise) {
  if (isa<llvm::ConstantPointerNull>(value)) return;

  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_release;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_release);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  // Cast the argument to 'id'.
  value = Builder.CreateBitCast(value, Int8PtrTy);

  // Call objc_release.
  llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);

  if (precise == ARCImpreciseLifetime) {
    call->setMetadata("clang.imprecise_release",
                      llvm::MDNode::get(Builder.getContext(), None));
  }
}
/// Destroy a __strong variable.
///
/// At -O0, emit a call to store 'null' into the address;
/// instrumenting tools prefer this because the address is exposed,
/// but it's relatively cumbersome to optimize.
///
/// At -O1 and above, just load and call objc_release.
///
///   call void \@objc_storeStrong(i8** %addr, i8* null)
void CodeGenFunction::EmitARCDestroyStrong(Address addr,
                                           ARCPreciseLifetime_t precise) {
  if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
    llvm::Value *null = getNullForVariable(addr);
    EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
    return;
  }

  llvm::Value *value = Builder.CreateLoad(addr);
  EmitARCRelease(value, precise);
}

/// Store into a strong object.  Always calls this:
///   call void \@objc_storeStrong(i8** %addr, i8* %value)
llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
                                                     llvm::Value *value,
                                                     bool ignored) {
  assert(addr.getElementType() == value->getType());

  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_storeStrong);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  llvm::Value *args[] = {
    Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
    Builder.CreateBitCast(value, Int8PtrTy)
  };
  EmitNounwindRuntimeCall(fn, args);

  if (ignored) return nullptr;
  return value;
}

/// Store into a strong object.  Sometimes calls this:
///   call void \@objc_storeStrong(i8** %addr, i8* %value)
/// Other times, breaks it down into components.
llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
                                                 llvm::Value *newValue,
                                                 bool ignored) {
  QualType type = dst.getType();
  bool isBlock = type->isBlockPointerType();

  // Use a store barrier at -O0 unless this is a block type or the
  // lvalue is inadequately aligned.
  if (shouldUseFusedARCCalls() &&
      !isBlock &&
      (dst.getAlignment().isZero() ||
       dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
    return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
  }

  // Otherwise, split it out.

  // Retain the new value.
  newValue = EmitARCRetain(type, newValue);

  // Read the old value.
  llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation());

  // Store.  We do this before the release so that any deallocs won't
  // see the old value.
  EmitStoreOfScalar(newValue, dst);

  // Finally, release the old value.
  EmitARCRelease(oldValue, dst.isARCPreciseLifetime());

  return newValue;
}
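
// The fused call and the split form above are roughly equivalent:
//   objc_storeStrong(&x, v)  ==  { id tmp = objc_retain(v);
//                                  id old = x; x = tmp;
//                                  objc_release(old); }
// except that the fused form keeps the address visible, which is what
// instrumenting tools prefer.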
/// Autorelease the given object.
///   call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_autorelease,
                               llvm::Intrinsic::objc_autorelease);
}

/// Autorelease the given object.
///   call i8* \@objc_autoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                          CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
                               llvm::Intrinsic::objc_autoreleaseReturnValue,
                               llvm::CallInst::TCK_Tail);
}

/// Do a fused retain/autorelease of the given object.
///   call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                     CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
                             llvm::Intrinsic::objc_retainAutoreleaseReturnValue,
                               llvm::CallInst::TCK_Tail);
}

/// Do a fused retain/autorelease of the given object.
///   call i8* \@objc_retainAutorelease(i8* %value)
/// or
///   %retain = call i8* \@objc_retainBlock(i8* %value)
///   call i8* \@objc_autorelease(i8* %retain)
llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
                                                       llvm::Value *value) {
  if (!type->isBlockPointerType())
    return EmitARCRetainAutoreleaseNonBlock(value);

  if (isa<llvm::ConstantPointerNull>(value)) return value;

  llvm::Type *origType = value->getType();
  value = Builder.CreateBitCast(value, Int8PtrTy);
  value = EmitARCRetainBlock(value, /*mandatory*/ true);
  value = EmitARCAutorelease(value);
  return Builder.CreateBitCast(value, origType);
}

/// Do a fused retain/autorelease of the given object.
///   call i8* \@objc_retainAutorelease(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_retainAutorelease,
                               llvm::Intrinsic::objc_retainAutorelease);
}
/// i8* \@objc_loadWeak(i8** %addr)
/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
  return emitARCLoadOperation(*this, addr,
                              CGM.getObjCEntrypoints().objc_loadWeak,
                              llvm::Intrinsic::objc_loadWeak);
}

/// i8* \@objc_loadWeakRetained(i8** %addr)
llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
  return emitARCLoadOperation(*this, addr,
                              CGM.getObjCEntrypoints().objc_loadWeakRetained,
                              llvm::Intrinsic::objc_loadWeakRetained);
}

/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
/// Returns %value.
llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
                                               llvm::Value *value,
                                               bool ignored) {
  return emitARCStoreOperation(*this, addr, value,
                               CGM.getObjCEntrypoints().objc_storeWeak,
                               llvm::Intrinsic::objc_storeWeak, ignored);
}

/// i8* \@objc_initWeak(i8** %addr, i8* %value)
/// Returns %value.  %addr is known to not have a current weak entry.
/// Essentially equivalent to:
///   *addr = nil; objc_storeWeak(addr, value);
void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
  // If we're initializing to null, just write null to memory; no need
  // to get the runtime involved.  But don't do this if optimization
  // is enabled, because accounting for this would make the optimizer
  // much more complicated.
  if (isa<llvm::ConstantPointerNull>(value) &&
      CGM.getCodeGenOpts().OptimizationLevel == 0) {
    Builder.CreateStore(value, addr);
    return;
  }

  emitARCStoreOperation(*this, addr, value,
                        CGM.getObjCEntrypoints().objc_initWeak,
                        llvm::Intrinsic::objc_initWeak, /*ignored*/ true);
}

/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_destroyWeak);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  // Cast the argument to 'id*'.
  addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);

  EmitNounwindRuntimeCall(fn, addr.getPointer());
}

/// void \@objc_moveWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest.  Leaves %src pointing to nothing.
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
  emitARCCopyOperation(*this, dst, src,
                       CGM.getObjCEntrypoints().objc_moveWeak,
                       llvm::Intrinsic::objc_moveWeak);
}

/// void \@objc_copyWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest.  Essentially
///   objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
  emitARCCopyOperation(*this, dst, src,
                       CGM.getObjCEntrypoints().objc_copyWeak,
                       llvm::Intrinsic::objc_copyWeak);
}

void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr,
                                            Address SrcAddr) {
  llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
  Object = EmitObjCConsumeObject(Ty, Object);
  EmitARCStoreWeak(DstAddr, Object, false);
}

void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
                                            Address SrcAddr) {
  llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
  Object = EmitObjCConsumeObject(Ty, Object);
  EmitARCStoreWeak(DstAddr, Object, false);
  EmitARCDestroyWeak(SrcAddr);
}
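
// Taken together, a __weak local roughly follows this pattern:
//   __weak id w = obj;   // objc_initWeak(&w, obj)
//   use(w);              // use(objc_loadWeakRetained(&w)), released later
//   /* scope exit */     // objc_destroyWeak(&w)
// which is what lets the runtime zero out &w when obj is deallocated.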
/// Produce the code to do a objc_autoreleasepool_push.
///   call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  return EmitNounwindRuntimeCall(fn);
}

/// Produce the code to do a primitive release.
///   call void \@objc_autoreleasePoolPop(i8* %ptr)
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
  assert(value->getType() == Int8PtrTy);

  if (getInvokeDest()) {
    // Call the runtime method not the intrinsic if we are handling exceptions
    llvm::FunctionCallee &fn =
        CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke;
    if (!fn) {
      llvm::FunctionType *fnType =
        llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
      fn = CGM.CreateRuntimeFunction(fnType, "objc_autoreleasePoolPop");
      setARCRuntimeFunctionLinkage(CGM, fn);
    }

    // objc_autoreleasePoolPop can throw.
    EmitRuntimeCallOrInvoke(fn, value);
  } else {
    llvm::FunctionCallee &fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
    if (!fn) {
      fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop);
      setARCRuntimeFunctionLinkage(CGM, fn);
    }

    EmitRuntimeCall(fn, value);
  }
}
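
// An \@autoreleasepool block therefore lowers (under ARC) to roughly:
//   i8 *token = objc_autoreleasePoolPush();
//   ...body...
//   objc_autoreleasePoolPop(token);
// with the pop registered as a cleanup (see EmitObjCAutoreleasePoolCleanup
// below) so it runs on normal scope exit.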
/// Produce the code to do an MRR version objc_autoreleasepool_push.
/// Which is: [[NSAutoreleasePool alloc] init];
/// Where alloc is declared as: + (id) alloc; in NSAutoreleasePool class.
/// init is declared as: - (id) init; in its NSObject super class.
///
llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
  // [NSAutoreleasePool alloc]
  IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
  Selector AllocSel = getContext().Selectors.getSelector(0, &II);
  CallArgList Args;
  RValue AllocRV =
    Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                getContext().getObjCIdType(),
                                AllocSel, Receiver, Args);

  // [Receiver init]
  Receiver = AllocRV.getScalarVal();
  II = &CGM.getContext().Idents.get("init");
  Selector InitSel = getContext().Selectors.getSelector(0, &II);
  RValue InitRV =
    Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                getContext().getObjCIdType(),
                                InitSel, Receiver, Args);
  return InitRV.getScalarVal();
}

/// Allocate the given objc object.
///   call i8* \@objc_alloc(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value,
                                            llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_alloc,
                                "objc_alloc");
}

/// Allocate the given objc object.
///   call i8* \@objc_allocWithZone(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value,
                                                    llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_allocWithZone,
                                "objc_allocWithZone");
}

llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value,
                                                llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_alloc_init,
                                "objc_alloc_init");
}

/// Produce the code to do a primitive release.
/// [tmp drain];
void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
  IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
  Selector DrainSel = getContext().Selectors.getSelector(0, &II);
  CallArgList Args;
  CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
                              getContext().VoidTy, DrainSel, Arg, Args);
}
void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
                                              Address addr,
                                              QualType type) {
  CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime);
}

void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
                                                Address addr,
                                                QualType type) {
  CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime);
}

void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
                                     Address addr,
                                     QualType type) {
  CGF.EmitARCDestroyWeak(addr);
}

void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr,
                                          QualType type) {
  llvm::Value *value = CGF.Builder.CreateLoad(addr);
  CGF.EmitARCIntrinsicUse(value);
}

/// Autorelease the given object.
///   call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value,
                                                  llvm::Type *returnType) {
  return emitObjCValueOperation(
      *this, value, returnType,
      CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction,
      "objc_autorelease");
}

/// Retain the given object, with normal retain semantics.
///   call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value,
                                                     llvm::Type *returnType) {
  return emitObjCValueOperation(
      *this, value, returnType,
      CGM.getObjCEntrypoints().objc_retainRuntimeFunction, "objc_retain");
}
/// Release the given object.
///   call void \@objc_release(i8* %value)
void CodeGenFunction::EmitObjCRelease(llvm::Value *value,
                                      ARCPreciseLifetime_t precise) {
  if (isa<llvm::ConstantPointerNull>(value)) return;

  llvm::FunctionCallee &fn =
      CGM.getObjCEntrypoints().objc_releaseRuntimeFunction;
  if (!fn) {
    llvm::FunctionType *fnType =
        llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
    fn = CGM.CreateRuntimeFunction(fnType, "objc_release");
    setARCRuntimeFunctionLinkage(CGM, fn);
    // We have Native ARC, so set nonlazybind attribute for performance
    if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee()))
      f->addFnAttr(llvm::Attribute::NonLazyBind);
  }

  // Cast the argument to 'id'.
  value = Builder.CreateBitCast(value, Int8PtrTy);

  // Call objc_release.
  llvm::CallBase *call = EmitCallOrInvoke(fn, value);

  if (precise == ARCImpreciseLifetime) {
    call->setMetadata("clang.imprecise_release",
                      llvm::MDNode::get(Builder.getContext(), None));
  }
}

namespace {
  struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup {
    llvm::Value *Token;

    CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitObjCAutoreleasePoolPop(Token);
    }
  };
  struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup {
    llvm::Value *Token;

    CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitObjCMRRAutoreleasePoolPop(Token);
    }
  };
}

void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
  if (CGM.getLangOpts().ObjCAutoRefCount)
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
  else
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
}
static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    return true;

  case Qualifiers::OCL_Weak:
    return false;
  }

  llvm_unreachable("impossible lifetime!");
}

static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                                  LValue lvalue,
                                                  QualType type) {
  llvm::Value *result;
  bool shouldRetain = shouldRetainObjCLifetime(type.getObjCLifetime());
  if (shouldRetain) {
    result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal();
  } else {
    assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
    result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress());
  }
  return TryEmitResult(result, !shouldRetain);
}

static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                                  const Expr *e) {
  e = e->IgnoreParens();
  QualType type = e->getType();

  // If we're loading retained from a __strong xvalue, we can avoid
  // an extra retain/release pair by zeroing out the source of this
  // "move" operation.
  if (e->isXValue() &&
      !type.isConstQualified() &&
      type.getObjCLifetime() == Qualifiers::OCL_Strong) {
    // Emit the lvalue.
    LValue lv = CGF.EmitLValue(e);

    // Load the object pointer.
    llvm::Value *result = CGF.EmitLoadOfLValue(lv,
                                               SourceLocation()).getScalarVal();

    // Set the source pointer to NULL.
    CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);

    return TryEmitResult(result, true);
  }

  // As a very special optimization, in ARC++, if the l-value is the
  // result of a non-volatile assignment, do a simple retain of the
  // result of the call to objc_storeWeak instead of reloading.
  if (CGF.getLangOpts().CPlusPlus &&
      !type.isVolatileQualified() &&
      type.getObjCLifetime() == Qualifiers::OCL_Weak &&
      isa<BinaryOperator>(e) &&
      cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
    return TryEmitResult(CGF.EmitScalarExpr(e), false);

  // Try to emit code for scalar constant instead of emitting LValue and
  // loading it because we are not guaranteed to have an l-value. One of such
  // cases is DeclRefExpr referencing non-odr-used constant-evaluated variable.
  if (const auto *decl_expr = dyn_cast<DeclRefExpr>(e)) {
    auto *DRE = const_cast<DeclRefExpr *>(decl_expr);
    if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(DRE))
      return TryEmitResult(CGF.emitScalarConstant(constant, DRE),
                           !shouldRetainObjCLifetime(type.getObjCLifetime()));
  }

  return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
}
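
// The xvalue case above is what makes
//   __strong id a = std::move(b);
// cheap in ARC++: instead of retaining b's object and later releasing b, we
// simply load the pointer and null out b, transferring ownership with no
// runtime calls at all.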
typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                         llvm::Value *value)>
    ValueTransform;

/// Insert code immediately after a call.
static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
                                              llvm::Value *value,
                                              ValueTransform doAfterCall,
                                              ValueTransform doFallback) {
  if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
    CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();

    // Place the retain immediately following the call.
    CGF.Builder.SetInsertPoint(call->getParent(),
                               ++llvm::BasicBlock::iterator(call));
    value = doAfterCall(CGF, value);

    CGF.Builder.restoreIP(ip);
    return value;
  } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
    CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();

    // Place the retain at the beginning of the normal destination block.
    llvm::BasicBlock *BB = invoke->getNormalDest();
    CGF.Builder.SetInsertPoint(BB, BB->begin());
    value = doAfterCall(CGF, value);

    CGF.Builder.restoreIP(ip);
    return value;

  // Bitcasts can arise because of related-result returns.  Rewrite
  // the operand.
  } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
    llvm::Value *operand = bitcast->getOperand(0);
    operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback);
    bitcast->setOperand(0, operand);
    return bitcast;

  // Generic fall-back case.
  } else {
    // Retain using the non-block variant: we never need to do a copy
    // of a block that's been returned to us.
    return doFallback(CGF, value);
  }
}

/// Given that the given expression is some sort of call (which does
/// not return retained), emit a retain following it.
static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF,
                                            const Expr *e) {
  llvm::Value *value = CGF.EmitScalarExpr(e);
  return emitARCOperationAfterCall(CGF, value,
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCRetainAutoreleasedReturnValue(value);
           },
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCRetainNonBlock(value);
           });
}

/// Given that the given expression is some sort of call (which does
/// not return retained), perform an unsafeClaim following it.
static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF,
                                                 const Expr *e) {
  llvm::Value *value = CGF.EmitScalarExpr(e);
  return emitARCOperationAfterCall(CGF, value,
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value);
           },
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return value;
           });
}

llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E,
                                                      bool allowUnsafeClaim) {
  if (allowUnsafeClaim &&
      CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) {
    return emitARCUnsafeClaimCallResult(*this, E);
  } else {
    llvm::Value *value = emitARCRetainCallResult(*this, E);
    return EmitObjCConsumeObject(E->getType(), value);
  }
}
/// Determine whether it might be important to emit a separate
/// objc_retain_block on the result of the given expression, or
/// whether it's okay to just emit it in a +1 context.
static bool shouldEmitSeparateBlockRetain(const Expr *e) {
  assert(e->getType()->isBlockPointerType());
  e = e->IgnoreParens();

  // For future goodness, emit block expressions directly in +1
  // contexts if we can.
  if (isa<BlockExpr>(e))
    return false;

  if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
    switch (cast->getCastKind()) {
    // Emitting these operations in +1 contexts is goodness.
    case CK_LValueToRValue:
    case CK_ARCReclaimReturnedObject:
    case CK_ARCConsumeObject:
    case CK_ARCProduceObject:
      return false;

    // These operations preserve a block type.
    case CK_NoOp:
    case CK_BitCast:
      return shouldEmitSeparateBlockRetain(cast->getSubExpr());

    // These operations are known to be bad (or haven't been considered).
    case CK_AnyPointerToBlockPointerCast:
    default:
      return true;
    }
  }

  return true;
}
namespace {
/// A CRTP base class for emitting expressions of retainable object
/// pointer type in ARC.
template <typename Impl, typename Result> class ARCExprEmitter {
protected:
  CodeGenFunction &CGF;
  Impl &asImpl() { return *static_cast<Impl*>(this); }

  ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {}

public:
  Result visit(const Expr *e);
  Result visitCastExpr(const CastExpr *e);
  Result visitPseudoObjectExpr(const PseudoObjectExpr *e);
  Result visitBlockExpr(const BlockExpr *e);
  Result visitBinaryOperator(const BinaryOperator *e);
  Result visitBinAssign(const BinaryOperator *e);
  Result visitBinAssignUnsafeUnretained(const BinaryOperator *e);
  Result visitBinAssignAutoreleasing(const BinaryOperator *e);
  Result visitBinAssignWeak(const BinaryOperator *e);
  Result visitBinAssignStrong(const BinaryOperator *e);

  // Minimal implementation:
  //   Result visitLValueToRValue(const Expr *e)
  //   Result visitConsumeObject(const Expr *e)
  //   Result visitExtendBlockObject(const Expr *e)
  //   Result visitReclaimReturnedObject(const Expr *e)
  //   Result visitCall(const Expr *e)
  //   Result visitExpr(const Expr *e)
  //
  //   Result emitBitCast(Result result, llvm::Type *resultType)
  //   llvm::Value *getValueOfResult(Result result)
};
}

/// Try to emit a PseudoObjectExpr under special ARC rules.
///
/// This massively duplicates emitPseudoObjectRValue.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression.
  const Expr *resultExpr = E->getResultExpr();
  assert(resultExpr);
  Result result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;

      // If this semantic is the result of the pseudo-object
      // expression, try to evaluate the source as +1.
      if (ov == resultExpr) {
        assert(!OVMA::shouldBindAsLValue(ov));
        result = asImpl().visit(ov->getSourceExpr());
        opaqueData = OVMA::bind(CGF, ov,
                            RValue::get(asImpl().getValueOfResult(result)));

      // Otherwise, just bind it.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
      }
      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      result = asImpl().visit(semantic);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl, Result>::visitBlockExpr(const BlockExpr *e) {
  // The default implementation just forwards the expression to visitExpr.
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) {
  switch (e->getCastKind()) {

  // No-op casts don't change the type, so we just ignore them.
  case CK_NoOp:
    return asImpl().visit(e->getSubExpr());

  // These casts can change the type.
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    llvm::Type *resultType = CGF.ConvertType(e->getType());
    assert(e->getSubExpr()->getType()->hasPointerRepresentation());
    Result result = asImpl().visit(e->getSubExpr());
    return asImpl().emitBitCast(result, resultType);
  }

  // Handle some casts specially.
  case CK_LValueToRValue:
    return asImpl().visitLValueToRValue(e->getSubExpr());
  case CK_ARCConsumeObject:
    return asImpl().visitConsumeObject(e->getSubExpr());
  case CK_ARCExtendBlockObject:
    return asImpl().visitExtendBlockObject(e->getSubExpr());
  case CK_ARCReclaimReturnedObject:
    return asImpl().visitReclaimReturnedObject(e->getSubExpr());

  // Otherwise, use the default logic.
  default:
    return asImpl().visitExpr(e);
  }
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) {
  switch (e->getOpcode()) {
  case BO_Comma:
    CGF.EmitIgnoredExpr(e->getLHS());
    CGF.EnsureInsertPoint();
    return asImpl().visit(e->getRHS());

  case BO_Assign:
    return asImpl().visitBinAssign(e);

  default:
    return asImpl().visitExpr(e);
  }
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) {
  switch (e->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_ExplicitNone:
    return asImpl().visitBinAssignUnsafeUnretained(e);

  case Qualifiers::OCL_Weak:
    return asImpl().visitBinAssignWeak(e);

  case Qualifiers::OCL_Autoreleasing:
    return asImpl().visitBinAssignAutoreleasing(e);

  case Qualifiers::OCL_Strong:
    return asImpl().visitBinAssignStrong(e);

  case Qualifiers::OCL_None:
    return asImpl().visitExpr(e);
  }
  llvm_unreachable("bad ObjC ownership qualifier");
}
/// The default rule for __unsafe_unretained emits the RHS recursively,
/// stores into the unsafe variable, and propagates the result outward.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::
          visitBinAssignUnsafeUnretained(const BinaryOperator *e) {
  // Recursively emit the RHS.
  // For __block safety, do this before emitting the LHS.
  Result result = asImpl().visit(e->getRHS());

  // Perform the store.
  LValue lvalue =
    CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store);
  CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)),
                             lvalue);

  return result;
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

/// The general expression-emission logic.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) {
  // We should *never* see a nested full-expression here, because if
  // we fail to emit at +1, our caller must not retain after we close
  // out the full-expression.  This isn't as important in the unsafe
  // emitter.
  assert(!isa<ExprWithCleanups>(e));

  // Look through parens, __extension__, generic selection, etc.
  e = e->IgnoreParens();

  // Handle certain kinds of casts.
  if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
    return asImpl().visitCastExpr(ce);

  // Handle the comma operator.
  } else if (auto op = dyn_cast<BinaryOperator>(e)) {
    return asImpl().visitBinaryOperator(op);

  // TODO: handle conditional operators here

  // For calls and message sends, use the retained-call logic.
  // Delegate inits are a special case in that they're the only
  // returns-retained expression that *isn't* surrounded by
  // a consume.
  } else if (isa<CallExpr>(e) ||
             (isa<ObjCMessageExpr>(e) &&
              !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
    return asImpl().visitCall(e);

  // Look through pseudo-object expressions.
  } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
    return asImpl().visitPseudoObjectExpr(pseudo);
  } else if (auto *be = dyn_cast<BlockExpr>(e))
    return asImpl().visitBlockExpr(be);

  return asImpl().visitExpr(e);
}
namespace {

/// An emitter for +1 results.
struct ARCRetainExprEmitter :
  public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> {

  ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(TryEmitResult result) {
    return result.getPointer();
  }

  TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) {
    llvm::Value *value = result.getPointer();
    value = CGF.Builder.CreateBitCast(value, resultType);
    result.setPointer(value);
    return result;
  }

  TryEmitResult visitLValueToRValue(const Expr *e) {
    return tryEmitARCRetainLoadOfScalar(CGF, e);
  }

  /// For consumptions, just emit the subexpression and thus elide
  /// the retain/release pair.
  TryEmitResult visitConsumeObject(const Expr *e) {
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, true);
  }

  TryEmitResult visitBlockExpr(const BlockExpr *e) {
    TryEmitResult result = visitExpr(e);
    // Avoid the block-retain if this is a block literal that doesn't need to
    // be copied to the heap.
    if (e->getBlockDecl()->canAvoidCopyToHeap())
      result.setInt(true);
    return result;
  }

  /// Block extends are net +0.  Naively, we could just recurse on
  /// the subexpression, but actually we need to ensure that the
  /// value is copied as a block, so there's a little filter here.
  TryEmitResult visitExtendBlockObject(const Expr *e) {
    llvm::Value *result; // will be a +0 value

    // If we can't safely assume the sub-expression will produce a
    // block-copied value, emit the sub-expression at +0.
    if (shouldEmitSeparateBlockRetain(e)) {
      result = CGF.EmitScalarExpr(e);

    // Otherwise, try to emit the sub-expression at +1 recursively.
    } else {
      TryEmitResult subresult = asImpl().visit(e);

      // If that produced a retained value, just use that.
      if (subresult.getInt()) {
        return subresult;
      }

      // Otherwise it's +0.
      result = subresult.getPointer();
    }

    // Retain the object as a block.
    result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
    return TryEmitResult(result, true);
  }

  /// For reclaims, emit the subexpression as a retained call and
  /// skip the consumption.
  TryEmitResult visitReclaimReturnedObject(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  /// When we have an undecorated call, retroactively do a claim.
  TryEmitResult visitCall(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  // TODO: maybe special-case visitBinAssignWeak?

  TryEmitResult visitExpr(const Expr *e) {
    // We didn't find an obvious production, so emit what we've got and
    // tell the caller that we didn't manage to retain.
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, false);
  }
};
}
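
// TryEmitResult pairs the emitted pointer with a bit saying whether it is
// already at +1.  E.g. for "__strong id x = [obj foo];" the visitCall case
// above emits the message send plus an objc_retainAutoreleasedReturnValue,
// marks the result retained, and EmitARCRetainScalarExpr below can then skip
// the extra objc_retain it would otherwise have to emit.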
3221 static TryEmitResult
3222 tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
3223 return ARCRetainExprEmitter(CGF).visit(e);
3226 static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
3229 TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
3230 llvm::Value *value = result.getPointer();
3231 if (!result.getInt())
3232 value = CGF.EmitARCRetain(type, value);
3236 /// EmitARCRetainScalarExpr - Semantically equivalent to
3237 /// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
3238 /// best-effort attempt to peephole expressions that naturally produce
3239 /// retained objects.
3240 llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
3241 // The retain needs to happen within the full-expression.
3242 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
3243 enterFullExpression(cleanups);
3244 RunCleanupsScope scope(*this);
3245 return EmitARCRetainScalarExpr(cleanups->getSubExpr());
3248 TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
3249 llvm::Value *value = result.getPointer();
3250 if (!result.getInt())
3251 value = EmitARCRetain(e->getType(), value);
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
  // The retain needs to happen within the full-expression.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    enterFullExpression(cleanups);
    RunCleanupsScope scope(*this);
    return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
  }

  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
  llvm::Value *value = result.getPointer();
  if (result.getInt())
    value = EmitARCAutorelease(value);
  else
    value = EmitARCRetainAutorelease(e->getType(), value);
  return value;
}
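// Note on the two branches above: if the subexpression was already emitted
// at +1, only the autorelease needs to be added; otherwise the retain and
// autorelease are emitted together, which for non-block values ends up as
// the fused objc_retainAutorelease runtime entry point.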
llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
  llvm::Value *result;
  bool doRetain;

  if (shouldEmitSeparateBlockRetain(e)) {
    result = EmitScalarExpr(e);
    doRetain = true;
  } else {
    TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
    result = subresult.getPointer();
    doRetain = !subresult.getInt();
  }

  if (doRetain)
    result = EmitARCRetainBlock(result, /*mandatory*/ true);
  return EmitObjCConsumeObject(e->getType(), result);
}
llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
  // In ARC, retain and autorelease the expression.
  if (getLangOpts().ObjCAutoRefCount) {
    // Do so before running any cleanups for the full-expression.
    // EmitARCRetainAutoreleaseScalarExpr does this for us.
    return EmitARCRetainAutoreleaseScalarExpr(expr);
  }

  // Otherwise, use the normal scalar-expression emission.  The
  // exception machinery doesn't do anything special with the
  // exception like retaining it, so there's no safety associated with
  // only running cleanups after the throw has started, and when it
  // matters it tends to be substantially inferior code.
  return EmitScalarExpr(expr);
}

namespace {
/// An emitter for assigning into an __unsafe_unretained context.
struct ARCUnsafeUnretainedExprEmitter :
    public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> {

  ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(llvm::Value *value) {
    return value;
  }

  llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) {
    return CGF.Builder.CreateBitCast(value, resultType);
  }

  llvm::Value *visitLValueToRValue(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// For consumptions, just emit the subexpression and perform the
  /// consumption like normal.
  llvm::Value *visitConsumeObject(const Expr *e) {
    llvm::Value *value = CGF.EmitScalarExpr(e);
    return CGF.EmitObjCConsumeObject(e->getType(), value);
  }

  /// No special logic for block extensions.  (This probably can't
  /// actually happen in this emitter, though.)
  llvm::Value *visitExtendBlockObject(const Expr *e) {
    return CGF.EmitARCExtendBlockObject(e);
  }

  /// For reclaims, perform an unsafeClaim if that's enabled.
  llvm::Value *visitReclaimReturnedObject(const Expr *e) {
    return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true);
  }

  /// When we have an undecorated call, just emit it without adding
  /// the unsafeClaim.
  llvm::Value *visitCall(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// Just do normal scalar emission in the default case.
  llvm::Value *visitExpr(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }
};
} // end anonymous namespace
static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
                                                      const Expr *e) {
  return ARCUnsafeUnretainedExprEmitter(CGF).visit(e);
}
/// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to
/// immediately releasing the result of EmitARCRetainScalarExpr, but
/// avoiding any spurious retains, including by performing reclaims
/// with objc_unsafeClaimAutoreleasedReturnValue.
llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
  // Look through full-expressions.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    enterFullExpression(cleanups);
    RunCleanupsScope scope(*this);
    return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
  }

  return emitARCUnsafeUnretainedScalarExpr(*this, e);
}
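// Illustrative sketch (assumed example; IR abbreviated): for
//   __unsafe_unretained id u = [obj description];
// the reclaim becomes
//   %call = call i8* @objc_msgSend(...)
//   %u    = call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %call)
// balancing the callee's autorelease without a retain/release pair.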
std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e,
                                              bool ignored) {
  // Evaluate the RHS first.  If we're ignoring the result, assume
  // that we can emit at an unsafe +0.
  llvm::Value *value;
  if (ignored) {
    value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS());
  } else {
    value = EmitScalarExpr(e->getRHS());
  }

  // Emit the LHS and perform the store.
  LValue lvalue = EmitLValue(e->getLHS());
  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(std::move(lvalue), value);
}
std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
                                    bool ignored) {
  // Evaluate the RHS first.
  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
  llvm::Value *value = result.getPointer();

  bool hasImmediateRetain = result.getInt();

  // If we didn't emit a retained object, and the l-value is of block
  // type, then we need to emit the block-retain immediately in case
  // it invalidates the l-value.
  if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
    value = EmitARCRetainBlock(value, /*mandatory*/ false);
    hasImmediateRetain = true;
  }

  LValue lvalue = EmitLValue(e->getLHS());

  // If the RHS was emitted retained, expand this.
  if (hasImmediateRetain) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
    EmitStoreOfScalar(value, lvalue);
    EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
  } else {
    value = EmitARCStoreStrong(lvalue, value, ignored);
  }

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}
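// Illustrative sketch of the expanded form (IR abbreviated): for a strong
// store "x = y" where the RHS was emitted retained, we get roughly
//   %old = load i8*, i8** %x
//   store i8* %new, i8** %x
//   call void @objc_release(i8* %old)
// instead of a call to the objc_storeStrong runtime entry point.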
std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
  llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
  LValue lvalue = EmitLValue(e->getLHS());

  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}
void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
                                          const ObjCAutoreleasePoolStmt &ARPS) {
  const Stmt *subStmt = ARPS.getSubStmt();
  const CompoundStmt &S = cast<CompoundStmt>(*subStmt);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());

  // Keep track of the current cleanup stack depth.
  RunCleanupsScope Scope(*this);
  if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
    llvm::Value *token = EmitObjCAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
  } else {
    llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
  }

  for (const auto *I : S.body())
    EmitStmt(I);

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
}
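// Illustrative sketch (assumed example; IR abbreviated): with a native-ARC
// runtime,
//   @autoreleasepool { work(); }
// becomes roughly
//   %token = call i8* @objc_autoreleasePoolPush()
//   call void @work()
//   call void @objc_autoreleasePoolPop(i8* %token)
// with the pop also registered as an EH cleanup; pre-ARC runtimes message an
// NSAutoreleasePool object instead.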
/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
  // We just use an inline asm.
  llvm::FunctionType *extenderType
    = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
  llvm::InlineAsm *extender = llvm::InlineAsm::get(extenderType,
                                                   /* assembly */ "",
                                                   /* constraints */ "r",
                                                   /* side effects */ true);

  object = Builder.CreateBitCast(object, VoidPtrTy);
  EmitNounwindRuntimeCall(extender, object);
}
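// The empty asm with an "r" input is an opaque use of the pointer, morally
//   asm volatile("" : : "r"(object));
// which the optimizer must assume reads the value, so the object is kept
// alive until this point under GC.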
/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
/// a non-trivial copy assignment function, produce the following helper
/// function:
///   static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
    const ObjCPropertyImplDecl *PID) {
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  QualType Ty = PID->getPropertyIvarDecl()->getType();
  if (!Ty->isRecordType())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialSetExpr(PID))
    return nullptr;
  assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
  if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");

  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false);

  FunctionArgList args;
  ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
                            ImplicitParamDecl::Other);
  args.push_back(&DstDecl);
  ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
                            ImplicitParamDecl::Other);
  args.push_back(&SrcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn =
      llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                             "__assign_helper_atomic_property_",
                             &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  DeclRefExpr DstExpr(getContext(), &DstDecl, false, DestTy, VK_RValue,
                      SourceLocation());
  UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
                      SourceLocation());
  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  Expr *Args[2] = { &DST, &SRC };
  CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
  CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
      C, OO_Equal, CalleeExp->getCallee(), Args, DestTy->getPointeeType(),
      VK_LValue, SourceLocation(), FPOptions());

  EmitStmt(TheCall);

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}
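// Illustrative sketch (assumed details): for
//   @property (atomic) SomeCxxType prop; // non-trivial copy assignment
// the synthesized atomic setter hands this helper to the runtime, roughly
//   objc_copyCppObjectAtomic(&ivar, &newValue,
//                            __assign_helper_atomic_property_);
// so the copy runs under the runtime's atomicity guarantees.  The getter
// helper below plays the same role for copy-constructing the returned value.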
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
    const ObjCPropertyImplDecl *PID) {
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  QualType Ty = PD->getType();
  if (!Ty->isRecordType())
    return nullptr;
  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialGetExpr(PID))
    return nullptr;
  assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
  if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II =
      &CGM.getContext().Idents.get("__copy_helper_atomic_property_");

  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false);

  FunctionArgList args;
  ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
                            ImplicitParamDecl::Other);
  args.push_back(&DstDecl);
  ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
                            ImplicitParamDecl::Other);
  args.push_back(&SrcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn = llvm::Function::Create(
      LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_atomic_property_",
      &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
                      SourceLocation());

  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  CXXConstructExpr *CXXConstExpr =
      cast<CXXConstructExpr>(PID->getGetterCXXConstructor());

  SmallVector<Expr*, 4> ConstructorArgs;
  ConstructorArgs.push_back(&SRC);
  ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()),
                         CXXConstExpr->arg_end());

  CXXConstructExpr *TheCXXConstructExpr =
      CXXConstructExpr::Create(C, Ty, SourceLocation(),
                               CXXConstExpr->getConstructor(),
                               CXXConstExpr->isElidable(),
                               ConstructorArgs,
                               CXXConstExpr->hadMultipleCandidates(),
                               CXXConstExpr->isListInitialization(),
                               CXXConstExpr->isStdInitListInitialization(),
                               CXXConstExpr->requiresZeroInitialization(),
                               CXXConstExpr->getConstructionKind(),
                               SourceRange());

  DeclRefExpr DstExpr(getContext(), &DstDecl, false, DestTy, VK_RValue,
                      SourceLocation());

  RValue DV = EmitAnyExpr(&DstExpr);
  CharUnits Alignment
    = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
  EmitAggExpr(TheCXXConstructExpr,
              AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment),
                                    Qualifiers(),
                                    AggValueSlot::IsDestructed,
                                    AggValueSlot::DoesNotNeedGCBarriers,
                                    AggValueSlot::IsNotAliased,
                                    AggValueSlot::DoesNotOverlap));

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}
llvm::Value *
CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
  // Get selectors for copy/autorelease.
  IdentifierInfo *CopyID = &getContext().Idents.get("copy");
  Selector CopySelector =
      getContext().Selectors.getNullarySelector(CopyID);
  IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
  Selector AutoreleaseSelector =
      getContext().Selectors.getNullarySelector(AutoreleaseID);

  // Emit calls to copy/autorelease.
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Val = Block;
  RValue Result;
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, CopySelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, AutoreleaseSelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  return Val;
}
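// This is the moral equivalent of the classic MRR idiom
//   return [[block copy] autorelease];
// spelled as two explicit message sends through the runtime interface.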
llvm::Value *
CodeGenFunction::EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args) {
  assert(Args.size() == 3 && "Expected 3 arguments here!");

  if (!CGM.IsOSVersionAtLeastFn) {
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false);
    CGM.IsOSVersionAtLeastFn =
        CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast");
  }

  llvm::Value *CallRes =
      EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);

  return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty));
}
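// Illustrative sketch (assumed versions; IR abbreviated): a check like
//   if (@available(macOS 10.14, *)) ...
// lowers to
//   %v  = call i32 @__isOSVersionAtLeast(i32 10, i32 14, i32 0)
//   %ok = icmp ne i32 %v, 0
// with the arguments ordered major, minor, subminor.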
void CodeGenModule::emitAtAvailableLinkGuard() {
  if (!IsOSVersionAtLeastFn)
    return;
  // @available requires CoreFoundation only on Darwin.
  if (!Target.getTriple().isOSDarwin())
    return;
  // Add -framework CoreFoundation to the linker commands. We still want to
  // emit the core foundation reference down below because otherwise if
  // CoreFoundation is not used in the code, the linker won't link the
  // framework.
  auto &Context = getLLVMContext();
  llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
                             llvm::MDString::get(Context, "CoreFoundation")};
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args));
  // Emit a reference to a symbol from CoreFoundation to ensure that
  // CoreFoundation is linked into the final binary.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false);
  llvm::FunctionCallee CFFunc =
      CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber");

  llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false);
  llvm::FunctionCallee CFLinkCheckFuncRef = CreateRuntimeFunction(
      CheckFTy, "__clang_at_available_requires_core_foundation_framework",
      llvm::AttributeList(), /*Local=*/true);
  llvm::Function *CFLinkCheckFunc =
      cast<llvm::Function>(CFLinkCheckFuncRef.getCallee()->stripPointerCasts());
  if (CFLinkCheckFunc->empty()) {
    CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
    CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
    CodeGenFunction CGF(*this);
    CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
    CGF.EmitNounwindRuntimeCall(CFFunc,
                                llvm::Constant::getNullValue(VoidPtrTy));
    CGF.Builder.CreateUnreachable();
    addCompilerUsedGlobal(CFLinkCheckFunc);
  }
}
CGObjCRuntime::~CGObjCRuntime() {}