//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//
14 #include "CGDebugInfo.h"
15 #include "CGObjCRuntime.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "TargetInfo.h"
19 #include "clang/AST/ASTContext.h"
20 #include "clang/AST/DeclObjC.h"
21 #include "clang/AST/StmtObjC.h"
22 #include "clang/Basic/Diagnostic.h"
23 #include "clang/CodeGen/CGFunctionInfo.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/IR/CallSite.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/IR/InlineAsm.h"
28 using namespace clang;
29 using namespace CodeGen;
typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
                                   QualType ExpT,
                                   RValue Result);
38 /// Given the address of a variable of pointer type, find the correct
39 /// null to store into it.
40 static llvm::Constant *getNullForVariable(Address addr) {
41 llvm::Type *type = addr.getElementType();
42 return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
45 /// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
  llvm::Constant *C =
      CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
  // FIXME: This bitcast should just be made an invariant on the Runtime.
  return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}
54 /// EmitObjCBoxedExpr - This routine generates code to call
55 /// the appropriate expression boxing method. This will either be
56 /// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
57 /// or [NSValue valueWithBytes:objCType:].
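///
/// For illustration only (a rough sketch, not the literal emitted code):
///   @(42)         lowers to roughly [NSNumber numberWithInt:42]
///   @("foo")      lowers to roughly [NSString stringWithUTF8String:"foo"]
///   @(someStruct) copies the struct to a temporary and sends roughly
///                 [NSValue valueWithBytes:&tmp objCType:@encode(...)]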
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
61 // Generate the correct selector for this literal's concrete type.
63 const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
64 const Expr *SubExpr = E->getSubExpr();
65 assert(BoxingMethod && "BoxingMethod is null");
66 assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
67 Selector Sel = BoxingMethod->getSelector();
69 // Generate a reference to the class pointer, which will be the receiver.
70 // Assumes that the method was introduced in the class that should be
71 // messaged (avoids pulling it out of the result type).
72 CGObjCRuntime &Runtime = CGM.getObjCRuntime();
73 const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
74 llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);
CallArgList Args;
const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
QualType ArgQT = ArgDecl->getType().getUnqualifiedType();
80 // ObjCBoxedExpr supports boxing of structs and unions
81 // via [NSValue valueWithBytes:objCType:]
82 const QualType ValueType(SubExpr->getType().getCanonicalType());
83 if (ValueType->isObjCBoxableRecordType()) {
84 // Emit CodeGen for first parameter
85 // and cast value to correct type
86 Address Temporary = CreateMemTemp(SubExpr->getType());
87 EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
88 Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
89 Args.add(RValue::get(BitCast.getPointer()), ArgQT);
// Create char array to store type encoding
std::string Str;
getContext().getObjCEncodingForType(ValueType, Str);
llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();
96 // Cast type encoding to correct type
97 const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
98 QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
99 llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));
  Args.add(RValue::get(Cast), EncodingQT);
} else {
  Args.add(EmitAnyExpr(SubExpr), ArgQT);
}
106 RValue result = Runtime.GenerateMessageSend(
107 *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
108 Args, ClassDecl, BoxingMethod);
109 return Builder.CreateBitCast(result.getScalarVal(),
110 ConvertType(E->getType()));
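// For illustration (a rough sketch, not the literal emitted sequence), the
// collection-literal emission below turns
//   @[ a, b ]
// into roughly
//   const id objects[2] = { a, b };
//   [NSArray arrayWithObjects:objects count:2];
// and @{ k : v } into parallel keys[]/objects[] buffers passed to
//   +dictionaryWithObjects:forKeys:count:.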
113 llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
114 const ObjCMethodDecl *MethodWithObjects) {
115 ASTContext &Context = CGM.getContext();
116 const ObjCDictionaryLiteral *DLE = nullptr;
const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
if (!ALE)
  DLE = cast<ObjCDictionaryLiteral>(E);
121 // Compute the type of the array we're initializing.
122 uint64_t NumElements =
123 ALE ? ALE->getNumElements() : DLE->getNumElements();
llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
                          NumElements);
126 QualType ElementType = Context.getObjCIdType().withConst();
127 QualType ElementArrayType
128 = Context.getConstantArrayType(ElementType, APNumElements,
129 ArrayType::Normal, /*IndexTypeQuals=*/0);
131 // Allocate the temporary array(s).
132 Address Objects = CreateMemTemp(ElementArrayType, "objects");
133 Address Keys = Address::invalid();
if (DLE)
  Keys = CreateMemTemp(ElementArrayType, "keys");
137 // In ARC, we may need to do extra work to keep all the keys and
138 // values alive until after the call.
139 SmallVector<llvm::Value *, 16> NeededObjects;
140 bool TrackNeededObjects =
141 (getLangOpts().ObjCAutoRefCount &&
142 CGM.getCodeGenOpts().OptimizationLevel != 0);
// Perform the actual initialization of the array(s).
for (uint64_t i = 0; i < NumElements; i++) {
  if (ALE) {
    // Emit the element and store it to the appropriate array slot.
    const Expr *Rhs = ALE->getElement(i);
149 LValue LV = MakeAddrLValue(
150 Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
151 ElementType, AlignmentSource::Decl);
153 llvm::Value *value = EmitScalarExpr(Rhs);
154 EmitStoreThroughLValue(RValue::get(value), LV, true);
155 if (TrackNeededObjects) {
      NeededObjects.push_back(value);
    }
  } else {
    // Emit the key and store it to the appropriate array slot.
160 const Expr *Key = DLE->getKeyValueElement(i).Key;
161 LValue KeyLV = MakeAddrLValue(
162 Builder.CreateConstArrayGEP(Keys, i, getPointerSize()),
163 ElementType, AlignmentSource::Decl);
164 llvm::Value *keyValue = EmitScalarExpr(Key);
165 EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);
167 // Emit the value and store it to the appropriate array slot.
168 const Expr *Value = DLE->getKeyValueElement(i).Value;
169 LValue ValueLV = MakeAddrLValue(
170 Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
171 ElementType, AlignmentSource::Decl);
172 llvm::Value *valueValue = EmitScalarExpr(Value);
173 EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
174 if (TrackNeededObjects) {
175 NeededObjects.push_back(keyValue);
      NeededObjects.push_back(valueValue);
    }
  }
}

// Generate the argument list.
CallArgList Args;
ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
184 const ParmVarDecl *argDecl = *PI++;
185 QualType ArgQT = argDecl->getType().getUnqualifiedType();
Args.add(RValue::get(Objects.getPointer()), ArgQT);
if (DLE) {
  argDecl = *PI++;
  ArgQT = argDecl->getType().getUnqualifiedType();
  Args.add(RValue::get(Keys.getPointer()), ArgQT);
}
argDecl = *PI;
ArgQT = argDecl->getType().getUnqualifiedType();
llvm::Value *Count =
    llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
Args.add(RValue::get(Count), ArgQT);
198 // Generate a reference to the class pointer, which will be the receiver.
199 Selector Sel = MethodWithObjects->getSelector();
200 QualType ResultType = E->getType();
201 const ObjCObjectPointerType *InterfacePointerType
202 = ResultType->getAsObjCInterfacePointerType();
203 ObjCInterfaceDecl *Class
204 = InterfacePointerType->getObjectType()->getInterface();
205 CGObjCRuntime &Runtime = CGM.getObjCRuntime();
206 llvm::Value *Receiver = Runtime.GetClass(*this, Class);
208 // Generate the message send.
209 RValue result = Runtime.GenerateMessageSend(
210 *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
211 Receiver, Args, Class, MethodWithObjects);
213 // The above message send needs these objects, but in ARC they are
214 // passed in a buffer that is essentially __unsafe_unretained.
215 // Therefore we must prevent the optimizer from releasing them until
217 if (TrackNeededObjects) {
218 EmitARCIntrinsicUse(NeededObjects);
221 return Builder.CreateBitCast(result.getScalarVal(),
222 ConvertType(E->getType()));
225 llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
226 return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
229 llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
230 const ObjCDictionaryLiteral *E) {
231 return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
235 llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
237 // Note that this implementation allows for non-constant strings to be passed
238 // as arguments to @selector(). Currently, the only thing preventing this
239 // behaviour is the type checking in the front end.
240 return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
243 llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
244 // FIXME: This should pass the Decl not the name.
245 return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
248 /// \brief Adjust the type of an Objective-C object that doesn't match up due
249 /// to type erasure at various points, e.g., related result types or the use
250 /// of parameterized classes.
251 static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
253 if (!ExpT->isObjCRetainableType())
256 // If the converted types are the same, we're done.
257 llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
258 if (ExpLLVMTy == Result.getScalarVal()->getType())
261 // We have applied a substitution. Cast the rvalue appropriately.
262 return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
266 /// Decide whether to extend the lifetime of the receiver of a
267 /// returns-inner-pointer message.
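///
/// For example (illustrative), with something like
///   const char *p = [[self data] bytes];
/// where -bytes is marked objc_returns_inner_pointer, the receiver may be
/// retained and autoreleased so the buffer stays valid for the enclosing
/// full-expression.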
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
270 switch (message->getReceiverKind()) {
272 // For a normal instance message, we should extend unless the
273 // receiver is loaded from a variable with precise lifetime.
274 case ObjCMessageExpr::Instance: {
275 const Expr *receiver = message->getInstanceReceiver();
277 // Look through OVEs.
278 if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
279 if (opaque->getSourceExpr())
280 receiver = opaque->getSourceExpr()->IgnoreParens();
283 const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
284 if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
285 receiver = ice->getSubExpr()->IgnoreParens();
287 // Look through OVEs.
288 if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
289 if (opaque->getSourceExpr())
290 receiver = opaque->getSourceExpr()->IgnoreParens();
293 // Only __strong variables.
294 if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
297 // All ivars and fields have precise lifetime.
298 if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
301 // Otherwise, check for variables.
302 const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
303 if (!declRef) return true;
304 const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
305 if (!var) return true;
307 // All variables have precise lifetime except local variables with
308 // automatic storage duration that aren't specially marked.
309 return (var->hasLocalStorage() &&
310 !var->hasAttr<ObjCPreciseLifetimeAttr>());
313 case ObjCMessageExpr::Class:
314 case ObjCMessageExpr::SuperClass:
315 // It's never necessary for class objects.
318 case ObjCMessageExpr::SuperInstance:
319 // We generally assume that 'self' lives throughout a method call.
323 llvm_unreachable("invalid receiver kind");
326 /// Given an expression of ObjC pointer type, check whether it was
327 /// immediately loaded from an ARC __weak l-value.
328 static const Expr *findWeakLValue(const Expr *E) {
329 assert(E->getType()->isObjCRetainableType());
330 E = E->IgnoreParens();
331 if (auto CE = dyn_cast<CastExpr>(E)) {
332 if (CE->getCastKind() == CK_LValueToRValue) {
333 if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
334 return CE->getSubExpr();
341 RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
342 ReturnValueSlot Return) {
343 // Only the lookup mechanism and first two arguments of the method
344 // implementation vary between runtimes. We can get the receiver and
345 // arguments in generic code.
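  // For example (illustrative), on the Apple runtimes an instance message
  // such as [receiver foo:arg] ultimately lowers to something like
  // objc_msgSend(receiver, @selector(foo:), arg); the GNU runtimes instead
  // use a two-step IMP lookup followed by an indirect call.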
347 bool isDelegateInit = E->isDelegateInitCall();
349 const ObjCMethodDecl *method = E->getMethodDecl();
351 // If the method is -retain, and the receiver's being loaded from
352 // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
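  // For example (illustrative): a load of a __weak variable followed
  // immediately by a retain of the result collapses into a single
  // objc_loadWeakRetained(&var) call.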
353 if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
354 method->getMethodFamily() == OMF_retain) {
355 if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
356 LValue lvalue = EmitLValue(lvalueExpr);
357 llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
358 return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
362 // We don't retain the receiver in delegate init calls, and this is
363 // safe because the receiver value is always loaded from 'self',
// which we zero out.  We don't want to Block_copy block receivers,
// either.
bool retainSelf =
  (!isDelegateInit &&
   CGM.getLangOpts().ObjCAutoRefCount &&
   method &&
   method->hasAttr<NSConsumesSelfAttr>());
372 CGObjCRuntime &Runtime = CGM.getObjCRuntime();
373 bool isSuperMessage = false;
374 bool isClassMessage = false;
375 ObjCInterfaceDecl *OID = nullptr;
377 QualType ReceiverType;
378 llvm::Value *Receiver = nullptr;
379 switch (E->getReceiverKind()) {
380 case ObjCMessageExpr::Instance:
381 ReceiverType = E->getInstanceReceiver()->getType();
if (retainSelf) {
  TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
                                                 E->getInstanceReceiver());
  Receiver = ter.getPointer();
  if (ter.getInt()) retainSelf = false;
} else
  Receiver = EmitScalarExpr(E->getInstanceReceiver());
break;
391 case ObjCMessageExpr::Class: {
392 ReceiverType = E->getClassReceiver();
393 const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
394 assert(ObjTy && "Invalid Objective-C class message send");
395 OID = ObjTy->getInterface();
396 assert(OID && "Invalid Objective-C class message send");
397 Receiver = Runtime.GetClass(*this, OID);
398 isClassMessage = true;
402 case ObjCMessageExpr::SuperInstance:
403 ReceiverType = E->getSuperType();
404 Receiver = LoadObjCSelf();
405 isSuperMessage = true;
408 case ObjCMessageExpr::SuperClass:
409 ReceiverType = E->getSuperType();
410 Receiver = LoadObjCSelf();
411 isSuperMessage = true;
412 isClassMessage = true;
  break;
}

if (retainSelf)
  Receiver = EmitARCRetainNonBlock(Receiver);
419 // In ARC, we sometimes want to "extend the lifetime"
420 // (i.e. retain+autorelease) of receivers of returns-inner-pointer
422 if (getLangOpts().ObjCAutoRefCount && method &&
423 method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
424 shouldExtendReceiverForInnerPointerMessage(E))
425 Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);
427 QualType ResultType = method ? method->getReturnType() : E->getType();
430 EmitCallArgs(Args, method, E->arguments());
432 // For delegate init calls in ARC, do an unsafe store of null into
433 // self. This represents the call taking direct ownership of that
434 // value. We have to do this after emitting the other call
435 // arguments because they might also reference self, but we don't
436 // have to worry about any of them modifying self because that would
// be an undefined read and write of an object in unordered
// expressions.
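// For illustration, the canonical shape this handles is e.g.
//   self = [super initWithFrame:frame];
// inside an init method, where the called initializer consumes the old
// 'self' and the result is stored back into 'self'.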
439 if (isDelegateInit) {
440 assert(getLangOpts().ObjCAutoRefCount &&
441 "delegate init calls should only be marked in ARC");
443 // Do an unsafe store of null into self.
445 GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
446 Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
450 if (isSuperMessage) {
451 // super is only valid in an Objective-C method
452 const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
453 bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
454 result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
456 OMD->getClassInterface(),
463 result = Runtime.GenerateMessageSend(*this, Return, ResultType,
469 // For delegate init calls in ARC, implicitly store the result of
470 // the call back into self. This takes ownership of the value.
471 if (isDelegateInit) {
473 GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
474 llvm::Value *newSelf = result.getScalarVal();
476 // The delegate return type isn't necessarily a matching type; in
477 // fact, it's quite likely to be 'id'.
478 llvm::Type *selfTy = selfAddr.getElementType();
479 newSelf = Builder.CreateBitCast(newSelf, selfTy);
481 Builder.CreateStore(newSelf, selfAddr);
484 return AdjustObjCObjectType(*this, E->getType(), result);
488 struct FinishARCDealloc final : EHScopeStack::Cleanup {
489 void Emit(CodeGenFunction &CGF, Flags flags) override {
490 const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
492 const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
493 const ObjCInterfaceDecl *iface = impl->getClassInterface();
494 if (!iface->getSuperClass()) return;
496 bool isCategory = isa<ObjCCategoryImplDecl>(impl);
498 // Call [super dealloc] if we have a superclass.
499 llvm::Value *self = CGF.LoadObjCSelf();
502 CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
503 CGF.getContext().VoidTy,
504 method->getSelector(),
508 /*is class msg*/ false,
515 /// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGen.
518 void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
519 const ObjCContainerDecl *CD) {
520 SourceLocation StartLoc = OMD->getLocStart();
521 FunctionArgList args;
522 // Check if we should generate debug info for this method.
523 if (OMD->hasAttr<NoDebugAttr>())
524 DebugInfo = nullptr; // disable debug info indefinitely for this function
526 llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
528 const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
529 CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
531 args.push_back(OMD->getSelfDecl());
532 args.push_back(OMD->getCmdDecl());
534 args.append(OMD->param_begin(), OMD->param_end());
537 CurEHLocation = OMD->getLocEnd();
539 StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
540 OMD->getLocation(), StartLoc);
542 // In ARC, certain methods get an extra cleanup.
543 if (CGM.getLangOpts().ObjCAutoRefCount &&
544 OMD->isInstanceMethod() &&
545 OMD->getSelector().isUnarySelector()) {
546 const IdentifierInfo *ident =
547 OMD->getSelector().getIdentifierInfoForSlot(0);
548 if (ident->isStr("dealloc"))
549 EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
553 static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
554 LValue lvalue, QualType type);
556 /// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
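///
/// For example (illustrative), "-[MyClass doSomethingWith:]" is emitted as a
/// function whose first two parameters are the implicit 'self' and '_cmd'
/// arguments, followed by the declared parameters.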
558 void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
559 StartObjCMethod(OMD, OMD->getClassInterface());
560 PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
561 assert(isa<CompoundStmt>(OMD->getBody()));
562 incrementProfileCounter(OMD->getBody());
563 EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
564 FinishFunction(OMD->getBodyRBrace());
567 /// emitStructGetterCall - Call the runtime function to load a property
568 /// into the return value slot.
569 static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
570 bool isAtomic, bool hasStrong) {
571 ASTContext &Context = CGF.getContext();
Address src =
  CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
     .getAddress();

// objc_copyStruct (ReturnValue, &structIvar,
//                  sizeof (Type of Ivar), isAtomic, false);
CallArgList args;
581 Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
582 args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);
584 src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
585 args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);
587 CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
588 args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
589 args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
590 args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
592 llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
593 CGCallee callee = CGCallee::forDirect(fn);
594 CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
595 callee, ReturnValueSlot(), args);
598 /// Determine whether the given architecture supports unaligned atomic
599 /// accesses. They don't have to be fast, just faster than a function
600 /// call and a mutex.
601 static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
602 // FIXME: Allow unaligned atomic load/store on x86. (It is not
603 // currently supported by the backend.)
/// Return the maximum size that permits atomic accesses for the given
/// architecture.
609 static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
610 llvm::Triple::ArchType arch) {
611 // ARM has 8-byte atomic accesses, but it's not clear whether we
612 // want to rely on them here.
614 // In the default case, just assume that any size up to a pointer is
615 // fine given adequate alignment.
616 return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
class PropertyImplStrategy {
public:
  enum StrategyKind {
    /// The 'native' strategy is to use the architecture's provided
    /// reads and writes.
    Native,
    /// Use objc_setProperty and objc_getProperty.
    GetSetProperty,
    /// Use objc_setProperty for the setter, but use expression
    /// evaluation for the getter.
    SetPropertyAndExpressionGet,
    /// Use objc_copyStruct.
    CopyStruct,
    /// The 'expression' strategy is to emit normal assignment or
    /// lvalue-to-rvalue expressions.
    Expression
  };

  StrategyKind getKind() const { return StrategyKind(Kind); }
644 bool hasStrongMember() const { return HasStrong; }
645 bool isAtomic() const { return IsAtomic; }
646 bool isCopy() const { return IsCopy; }
648 CharUnits getIvarSize() const { return IvarSize; }
649 CharUnits getIvarAlignment() const { return IvarAlignment; }
651 PropertyImplStrategy(CodeGenModule &CGM,
652 const ObjCPropertyImplDecl *propImpl);
  unsigned Kind : 8;
  unsigned IsAtomic : 1;
  unsigned IsCopy : 1;
  unsigned HasStrong : 1;
  CharUnits IvarSize;
  CharUnits IvarAlignment;
};
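// For intuition (illustrative, not exhaustive): an atomic pointer-sized
// scalar ivar typically gets Native; an atomic 'copy' NSString * property
// gets GetSetProperty; a nonatomic assign property usually gets Expression;
// an atomic struct too large for a single atomic access falls back to
// CopyStruct.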
665 /// Pick an implementation strategy for the given property synthesis.
666 PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
667 const ObjCPropertyImplDecl *propImpl) {
668 const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
669 ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();
671 IsCopy = (setterKind == ObjCPropertyDecl::Copy);
672 IsAtomic = prop->isAtomic();
673 HasStrong = false; // doesn't matter here.
675 // Evaluate the ivar's size and alignment.
676 ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
677 QualType ivarType = ivar->getType();
678 std::tie(IvarSize, IvarAlignment) =
679 CGM.getContext().getTypeInfoInChars(ivarType);
681 // If we have a copy property, we always have to use getProperty/setProperty.
682 // TODO: we could actually use setProperty and an expression for non-atomics.
if (IsCopy) {
  Kind = GetSetProperty;
  return;
}

if (setterKind == ObjCPropertyDecl::Retain) {
690 // In GC-only, there's nothing special that needs to be done.
691 if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
694 // In ARC, if the property is non-atomic, use expression emission,
695 // which translates to objc_storeStrong. This isn't required, but
696 // it's slightly nicer.
697 } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
698 // Using standard expression emission for the setter is only
699 // acceptable if the ivar is __strong, which won't be true if
700 // the property is annotated with __attribute__((NSObject)).
701 // TODO: falling all the way back to objc_setProperty here is
702 // just laziness, though; we could still use objc_storeStrong
703 // if we hacked it right.
704 if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
707 Kind = SetPropertyAndExpressionGet;
710 // Otherwise, we need to at least use setProperty. However, if
711 // the property isn't atomic, we can use normal expression
712 // emission for the getter.
713 } else if (!IsAtomic) {
714 Kind = SetPropertyAndExpressionGet;
717 // Otherwise, we have to use both setProperty and getProperty.
719 Kind = GetSetProperty;
724 // If we're not atomic, just use expression accesses.
730 // Properties on bitfield ivars need to be emitted using expression
731 // accesses even if they're nominally atomic.
732 if (ivar->isBitField()) {
737 // GC-qualified or ARC-qualified ivars need to be emitted as
738 // expressions. This actually works out to being atomic anyway,
739 // except for ARC __strong, but that should trigger the above code.
740 if (ivarType.hasNonTrivialObjCLifetime() ||
741 (CGM.getLangOpts().getGC() &&
742 CGM.getContext().getObjCGCAttrKind(ivarType))) {
747 // Compute whether the ivar has strong members.
748 if (CGM.getLangOpts().getGC())
749 if (const RecordType *recordType = ivarType->getAs<RecordType>())
750 HasStrong = recordType->getDecl()->hasObjectMember();
752 // We can never access structs with object members with a native
753 // access, because we need to use write barriers. This is what
754 // objc_copyStruct is for.
760 // Otherwise, this is target-dependent and based on the size and
761 // alignment of the ivar.
763 // If the size of the ivar is not a power of two, give up. We don't
764 // want to get into the business of doing compare-and-swaps.
765 if (!IvarSize.isPowerOfTwo()) {
770 llvm::Triple::ArchType arch =
771 CGM.getTarget().getTriple().getArch();
773 // Most architectures require memory to fit within a single cache
774 // line, so the alignment has to be at least the size of the access.
775 // Otherwise we have to grab a lock.
776 if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
781 // If the ivar's size exceeds the architecture's maximum atomic
782 // access size, we have to use CopyStruct.
783 if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
788 // Otherwise, we can use native loads and stores.
792 /// \brief Generate an Objective-C property getter function.
794 /// The given Decl must be an ObjCImplementationDecl. \@synthesize
795 /// is illegal within a category.
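///
/// For example (illustrative), the synthesized getter of an atomic, retained
/// object property is emitted roughly as
///   return objc_getProperty(self, _cmd, ivarOffset, /*atomic*/ true);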
796 void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
797 const ObjCPropertyImplDecl *PID) {
798 llvm::Constant *AtomicHelperFn =
799 CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
800 const ObjCPropertyDecl *PD = PID->getPropertyDecl();
801 ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
802 assert(OMD && "Invalid call to generate getter (empty method)");
803 StartObjCMethod(OMD, IMP->getClassInterface());
805 generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);
810 static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
811 const Expr *getter = propImpl->getGetterCXXConstructor();
812 if (!getter) return true;
// Sema only makes one of these when the ivar has a C++ class type,
815 // so the form is pretty constrained.
817 // If the property has a reference type, we might just be binding a
818 // reference, in which case the result will be a gl-value. We should
819 // treat this as a non-trivial operation.
820 if (getter->isGLValue())
823 // If we selected a trivial copy-constructor, we're okay.
824 if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
825 return (construct->getConstructor()->isTrivial());
// The constructor might require cleanups (in which case it's never
// trivial).
assert(isa<ExprWithCleanups>(getter));
return false;
}
833 /// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
835 static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
836 llvm::Value *returnAddr,
838 llvm::Constant *AtomicHelperFn) {
// objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
//                           AtomicHelperFn);
CallArgList args;
843 // The 1st argument is the return Slot.
844 args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);
846 // The 2nd argument is the address of the ivar.
847 llvm::Value *ivarAddr =
848 CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
849 CGF.LoadObjCSelf(), ivar, 0).getPointer();
850 ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
851 args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
853 // Third argument is the helper function.
854 args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
856 llvm::Constant *copyCppAtomicObjectFn =
857 CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
858 CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
860 CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
861 callee, ReturnValueSlot(), args);
void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
866 const ObjCPropertyImplDecl *propImpl,
867 const ObjCMethodDecl *GetterMethodDecl,
868 llvm::Constant *AtomicHelperFn) {
869 // If there's a non-trivial 'get' expression, we just have to emit that.
870 if (!hasTrivialGetExpr(propImpl)) {
871 if (!AtomicHelperFn) {
872 ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
877 ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
878 emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
879 ivar, AtomicHelperFn);
884 const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
885 QualType propType = prop->getType();
886 ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();
888 ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
890 // Pick an implementation strategy.
891 PropertyImplStrategy strategy(CGM, propImpl);
892 switch (strategy.getKind()) {
893 case PropertyImplStrategy::Native: {
894 // We don't need to do anything for a zero-size struct.
895 if (strategy.getIvarSize().isZero())
898 LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
900 // Currently, all atomic accesses have to be through integer
901 // types, so there's no point in trying to pick a prettier type.
902 uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
903 llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
904 bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
906 // Perform an atomic load. This does not impose ordering constraints.
907 Address ivarAddr = LV.getAddress();
908 ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
909 llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
910 load->setAtomic(llvm::AtomicOrdering::Unordered);
912 // Store that value into the return address. Doing this with a
913 // bitcast is likely to produce some pretty ugly IR, but it's not
914 // the *most* terrible thing in the world.
915 llvm::Type *retTy = ConvertType(getterMethod->getReturnType());
916 uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
917 llvm::Value *ivarVal = load;
918 if (ivarSize > retTySize) {
919 llvm::Type *newTy = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
920 ivarVal = Builder.CreateTrunc(load, newTy);
921 bitcastType = newTy->getPointerTo();
923 Builder.CreateStore(ivarVal,
924 Builder.CreateBitCast(ReturnValue, bitcastType));
926 // Make sure we don't do an autorelease.
927 AutoreleaseResult = false;
931 case PropertyImplStrategy::GetSetProperty: {
932 llvm::Constant *getPropertyFn =
933 CGM.getObjCRuntime().GetPropertyGetFunction();
934 if (!getPropertyFn) {
935 CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
938 CGCallee callee = CGCallee::forDirect(getPropertyFn);
940 // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
941 // FIXME: Can't this be simpler? This might even be worse than the
942 // corresponding gcc code.
944 Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
945 llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
946 llvm::Value *ivarOffset =
947 EmitIvarOffset(classImpl->getClassInterface(), ivar);
950 args.add(RValue::get(self), getContext().getObjCIdType());
951 args.add(RValue::get(cmd), getContext().getObjCSelType());
952 args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
953 args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
954 getContext().BoolTy);
956 // FIXME: We shouldn't need to get the function info here, the
957 // runtime already should have computed it to build the function.
958 llvm::Instruction *CallInstruction;
959 RValue RV = EmitCall(
960 getTypes().arrangeBuiltinFunctionCall(propType, args),
961 callee, ReturnValueSlot(), args, &CallInstruction);
if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
  call->setTailCall();

// We need to fix the type here. Ivars with copy & retain are
// always objects so we don't need to worry about complex or
// aggregates.
968 RV = RValue::get(Builder.CreateBitCast(
970 getTypes().ConvertType(getterMethod->getReturnType())));
972 EmitReturnOfRValue(RV, propType);
974 // objc_getProperty does an autorelease, so we should suppress ours.
975 AutoreleaseResult = false;
980 case PropertyImplStrategy::CopyStruct:
981 emitStructGetterCall(*this, ivar, strategy.isAtomic(),
982 strategy.hasStrongMember());
985 case PropertyImplStrategy::Expression:
986 case PropertyImplStrategy::SetPropertyAndExpressionGet: {
987 LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
989 QualType ivarType = ivar->getType();
switch (getEvaluationKind(ivarType)) {
case TEK_Complex: {
  ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
  EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
                     /*init*/ true);
  return;
}
case TEK_Aggregate:
  // The return value slot is guaranteed to not be aliased, but
  // that's not necessarily the same as "on the stack", so
  // we still potentially need objc_memmove_collectable.
  EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType);
  return;
case TEK_Scalar: {
  llvm::Value *value;
  if (propType->isReferenceType()) {
1006 value = LV.getAddress().getPointer();
1008 // We want to load and autoreleaseReturnValue ARC __weak ivars.
1009 if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
1010 if (getLangOpts().ObjCAutoRefCount) {
1011 value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
1013 value = EmitARCLoadWeak(LV.getAddress());
1016 // Otherwise we want to do a simple load, suppressing the
1017 // final autorelease.
1019 value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
1020 AutoreleaseResult = false;
1023 value = Builder.CreateBitCast(
1024 value, ConvertType(GetterMethodDecl->getReturnType()));
1027 EmitReturnOfRValue(RValue::get(value), propType);
1031 llvm_unreachable("bad evaluation kind");
1035 llvm_unreachable("bad @property implementation strategy!");
1038 /// emitStructSetterCall - Call the runtime function to store the value
1039 /// from the first formal parameter into the given ivar.
1040 static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
1041 ObjCIvarDecl *ivar) {
1042 // objc_copyStruct (&structIvar, &Arg,
//                  sizeof (struct something), true, false);
CallArgList args;
1046 // The first argument is the address of the ivar.
1047 llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
1048 CGF.LoadObjCSelf(), ivar, 0)
1050 ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
1051 args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
1053 // The second argument is the address of the parameter variable.
1054 ParmVarDecl *argVar = *OMD->param_begin();
1055 DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
1056 VK_LValue, SourceLocation());
1057 llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
1058 argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
1059 args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
1061 // The third argument is the sizeof the type.
1063 CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
1064 args.add(RValue::get(size), CGF.getContext().getSizeType());
1066 // The fourth argument is the 'isAtomic' flag.
1067 args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);
1069 // The fifth argument is the 'hasStrong' flag.
1070 // FIXME: should this really always be false?
1071 args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
1073 llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
1074 CGCallee callee = CGCallee::forDirect(fn);
1076 CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
1077 callee, ReturnValueSlot(), args);
1080 /// emitCPPObjectAtomicSetterCall - Call the runtime function to store
1081 /// the value from the first formal parameter into the given ivar, using
1082 /// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
1083 static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
1084 ObjCMethodDecl *OMD,
1086 llvm::Constant *AtomicHelperFn) {
// objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
//                           AtomicHelperFn);
CallArgList args;
1091 // The first argument is the address of the ivar.
1092 llvm::Value *ivarAddr =
1093 CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
1094 CGF.LoadObjCSelf(), ivar, 0).getPointer();
1095 ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
1096 args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
1098 // The second argument is the address of the parameter variable.
1099 ParmVarDecl *argVar = *OMD->param_begin();
1100 DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
1101 VK_LValue, SourceLocation());
1102 llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
1103 argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
1104 args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
1106 // Third argument is the helper function.
1107 args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
1109 llvm::Constant *fn =
1110 CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
1111 CGCallee callee = CGCallee::forDirect(fn);
1113 CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
1114 callee, ReturnValueSlot(), args);
1118 static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
1119 Expr *setter = PID->getSetterCXXAssignment();
1120 if (!setter) return true;
// Sema only makes one of these when the ivar has a C++ class type,
1123 // so the form is pretty constrained.
1125 // An operator call is trivial if the function it calls is trivial.
1126 // This also implies that there's nothing non-trivial going on with
1127 // the arguments, because operator= can only be trivial if it's a
// synthesized assignment operator and therefore both parameters are
// references.
1130 if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
1131 if (const FunctionDecl *callee
1132 = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
    if (callee->isTrivial())
      return true;
  return false;
}

assert(isa<ExprWithCleanups>(setter));
return false;
}
1142 static bool UseOptimizedSetter(CodeGenModule &CGM) {
1143 if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
1145 return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1150 const ObjCPropertyImplDecl *propImpl,
1151 llvm::Constant *AtomicHelperFn) {
1152 const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
1153 ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
1154 ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
1156 // Just use the setter expression if Sema gave us one and it's
1158 if (!hasTrivialSetExpr(propImpl)) {
1159 if (!AtomicHelperFn)
1160 // If non-atomic, assignment is called directly.
1161 EmitStmt(propImpl->getSetterCXXAssignment());
  else
    // If atomic, assignment is called via a locking api.
    emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
                                  AtomicHelperFn);
  return;
}
1169 PropertyImplStrategy strategy(CGM, propImpl);
1170 switch (strategy.getKind()) {
1171 case PropertyImplStrategy::Native: {
1172 // We don't need to do anything for a zero-size struct.
1173 if (strategy.getIvarSize().isZero())
1176 Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
1179 EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
1180 Address ivarAddr = ivarLValue.getAddress();
1182 // Currently, all atomic accesses have to be through integer
1183 // types, so there's no point in trying to pick a prettier type.
1184 llvm::Type *bitcastType =
1185 llvm::Type::getIntNTy(getLLVMContext(),
1186 getContext().toBits(strategy.getIvarSize()));
1188 // Cast both arguments to the chosen operation type.
1189 argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
1190 ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);
1192 // This bitcast load is likely to cause some nasty IR.
1193 llvm::Value *load = Builder.CreateLoad(argAddr);
1195 // Perform an atomic store. There are no memory ordering requirements.
1196 llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
1197 store->setAtomic(llvm::AtomicOrdering::Unordered);
1201 case PropertyImplStrategy::GetSetProperty:
1202 case PropertyImplStrategy::SetPropertyAndExpressionGet: {
1204 llvm::Constant *setOptimizedPropertyFn = nullptr;
1205 llvm::Constant *setPropertyFn = nullptr;
1206 if (UseOptimizedSetter(CGM)) {
1207 // 10.8 and iOS 6.0 code and GC is off
1208 setOptimizedPropertyFn =
1209 CGM.getObjCRuntime()
1210 .GetOptimizedPropertySetFunction(strategy.isAtomic(),
1212 if (!setOptimizedPropertyFn) {
1213 CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
1218 setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
1219 if (!setPropertyFn) {
1220 CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
1225 // Emit objc_setProperty((id) self, _cmd, offset, arg,
1226 // <is-atomic>, <is-copy>).
1228 Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
1230 Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
1231 llvm::Value *ivarOffset =
1232 EmitIvarOffset(classImpl->getClassInterface(), ivar);
1233 Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
1234 llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
1235 arg = Builder.CreateBitCast(arg, VoidPtrTy);
1238 args.add(RValue::get(self), getContext().getObjCIdType());
1239 args.add(RValue::get(cmd), getContext().getObjCSelType());
1240 if (setOptimizedPropertyFn) {
1241 args.add(RValue::get(arg), getContext().getObjCIdType());
1242 args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
1243 CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn);
1244 EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
1245 callee, ReturnValueSlot(), args);
1247 args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
1248 args.add(RValue::get(arg), getContext().getObjCIdType());
1249 args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
1250 getContext().BoolTy);
1251 args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
1252 getContext().BoolTy);
1253 // FIXME: We shouldn't need to get the function info here, the runtime
1254 // already should have computed it to build the function.
1255 CGCallee callee = CGCallee::forDirect(setPropertyFn);
1256 EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
1257 callee, ReturnValueSlot(), args);
1263 case PropertyImplStrategy::CopyStruct:
1264 emitStructSetterCall(*this, setterMethod, ivar);
1267 case PropertyImplStrategy::Expression:
1271 // Otherwise, fake up some ASTs and emit a normal assignment.
1272 ValueDecl *selfDecl = setterMethod->getSelfDecl();
1273 DeclRefExpr self(selfDecl, false, selfDecl->getType(),
1274 VK_LValue, SourceLocation());
1275 ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
1276 selfDecl->getType(), CK_LValueToRValue, &self,
1278 ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
1279 SourceLocation(), SourceLocation(),
1280 &selfLoad, true, true);
1282 ParmVarDecl *argDecl = *setterMethod->param_begin();
1283 QualType argType = argDecl->getType().getNonReferenceType();
1284 DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
1285 ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
1286 argType.getUnqualifiedType(), CK_LValueToRValue,
1289 // The property type can differ from the ivar type in some situations with
1290 // Objective-C pointer types, we can always bit cast the RHS in these cases.
1291 // The following absurdity is just to ensure well-formed IR.
1292 CastKind argCK = CK_NoOp;
1293 if (ivarRef.getType()->isObjCObjectPointerType()) {
1294 if (argLoad.getType()->isObjCObjectPointerType())
1296 else if (argLoad.getType()->isBlockPointerType())
1297 argCK = CK_BlockPointerToObjCPointerCast;
1299 argCK = CK_CPointerToObjCPointerCast;
1300 } else if (ivarRef.getType()->isBlockPointerType()) {
1301 if (argLoad.getType()->isBlockPointerType())
1304 argCK = CK_AnyPointerToBlockPointerCast;
1305 } else if (ivarRef.getType()->isPointerType()) {
1308 ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
1309 ivarRef.getType(), argCK, &argLoad,
1311 Expr *finalArg = &argLoad;
1312 if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
1314 finalArg = &argCast;
1317 BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
1318 ivarRef.getType(), VK_RValue, OK_Ordinary,
1319 SourceLocation(), false);
1323 /// \brief Generate an Objective-C property setter function.
1325 /// The given Decl must be an ObjCImplementationDecl. \@synthesize
1326 /// is illegal within a category.
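///
/// For example (illustrative), the synthesized setter of an atomic 'copy'
/// property is emitted roughly as
///   objc_setProperty(self, _cmd, ivarOffset, newValue,
///                    /*atomic*/ true, /*copy*/ true);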
1327 void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
1328 const ObjCPropertyImplDecl *PID) {
1329 llvm::Constant *AtomicHelperFn =
1330 CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
1331 const ObjCPropertyDecl *PD = PID->getPropertyDecl();
1332 ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
1333 assert(OMD && "Invalid call to generate setter (empty method)");
1334 StartObjCMethod(OMD, IMP->getClassInterface());
1336 generateObjCSetterBody(IMP, PID, AtomicHelperFn);
1342 struct DestroyIvar final : EHScopeStack::Cleanup {
1345 const ObjCIvarDecl *ivar;
1346 CodeGenFunction::Destroyer *destroyer;
1347 bool useEHCleanupForArray;
1349 DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
1350 CodeGenFunction::Destroyer *destroyer,
1351 bool useEHCleanupForArray)
1352 : addr(addr), ivar(ivar), destroyer(destroyer),
1353 useEHCleanupForArray(useEHCleanupForArray) {}
1355 void Emit(CodeGenFunction &CGF, Flags flags) override {
1357 = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
1358 CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
1359 flags.isForNormalCleanup() && useEHCleanupForArray);
1364 /// Like CodeGenFunction::destroyARCStrong, but do it with a call.
1365 static void destroyARCStrongWithStore(CodeGenFunction &CGF,
1368 llvm::Value *null = getNullForVariable(addr);
1369 CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
1372 static void emitCXXDestructMethod(CodeGenFunction &CGF,
1373 ObjCImplementationDecl *impl) {
1374 CodeGenFunction::RunCleanupsScope scope(CGF);
1376 llvm::Value *self = CGF.LoadObjCSelf();
1378 const ObjCInterfaceDecl *iface = impl->getClassInterface();
1379 for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
1380 ivar; ivar = ivar->getNextIvar()) {
1381 QualType type = ivar->getType();
1383 // Check whether the ivar is a destructible type.
1384 QualType::DestructionKind dtorKind = type.isDestructedType();
1385 if (!dtorKind) continue;
1387 CodeGenFunction::Destroyer *destroyer = nullptr;
1389 // Use a call to objc_storeStrong to destroy strong ivars, for the
1390 // general benefit of the tools.
1391 if (dtorKind == QualType::DK_objc_strong_lifetime) {
1392 destroyer = destroyARCStrongWithStore;
1394 // Otherwise use the default for the destruction kind.
1396 destroyer = CGF.getDestroyer(dtorKind);
1399 CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);
1401 CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
1402 cleanupKind & EHCleanup);
1405 assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
1408 void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1411 MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
1412 StartObjCMethod(MD, IMP->getClassInterface());
1414 // Emit .cxx_construct.
1416 // Suppress the final autorelease in ARC.
1417 AutoreleaseResult = false;
1419 for (const auto *IvarInit : IMP->inits()) {
1420 FieldDecl *Field = IvarInit->getAnyMember();
1421 ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
1422 LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
1423 LoadObjCSelf(), Ivar, 0);
1424 EmitAggExpr(IvarInit->getInit(),
1425 AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
1426 AggValueSlot::DoesNotNeedGCBarriers,
1427 AggValueSlot::IsNotAliased));
1429 // constructor returns 'self'.
1430 CodeGenTypes &Types = CGM.getTypes();
1431 QualType IdTy(CGM.getContext().getObjCIdType());
1432 llvm::Value *SelfAsId =
1433 Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
1434 EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
1436 // Emit .cxx_destruct.
1438 emitCXXDestructMethod(*this, IMP);
1443 llvm::Value *CodeGenFunction::LoadObjCSelf() {
1444 VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
1445 DeclRefExpr DRE(Self, /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
1446 Self->getType(), VK_LValue, SourceLocation());
1447 return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation());
1450 QualType CodeGenFunction::TypeOfSelfObject() {
1451 const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
1452 ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
1453 const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
1454 getContext().getCanonicalType(selfDecl->getType()));
1455 return PTy->getPointeeType();
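// For orientation, a rough sketch (illustrative, not the literal IR) of what
// EmitObjCForCollectionStmt below emits for "for (id x in collection)":
//
//   NSFastEnumerationState state = { 0 };
//   id buffer[16];
//   unsigned long limit = [collection countByEnumeratingWithState:&state
//                                                          objects:buffer
//                                                            count:16];
//   if (limit == 0) goto empty;
//   unsigned long initialMutations = *state.mutationsPtr;
//   do {
//     for (unsigned long i = 0; i != limit; ++i) {
//       if (*state.mutationsPtr != initialMutations)
//         objc_enumerationMutation(collection);
//       id x = state.itemsPtr[i];
//       ... loop body ...
//     }
//     limit = [collection countByEnumeratingWithState:&state
//                                              objects:buffer
//                                                count:16];
//   } while (limit != 0);
//  empty: ;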
1458 void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
1459 llvm::Constant *EnumerationMutationFnPtr =
1460 CGM.getObjCRuntime().EnumerationMutationFunction();
1461 if (!EnumerationMutationFnPtr) {
1462 CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
1465 CGCallee EnumerationMutationFn =
1466 CGCallee::forDirect(EnumerationMutationFnPtr);
1468 CGDebugInfo *DI = getDebugInfo();
1470 DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
1472 // The local variable comes into scope immediately.
1473 AutoVarEmission variable = AutoVarEmission::invalid();
1474 if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
1475 variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
1477 JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
1479 // Fast enumeration state.
1480 QualType StateTy = CGM.getObjCFastEnumerationStateType();
1481 Address StatePtr = CreateMemTemp(StateTy, "state.ptr");
1482 EmitNullInitialization(StatePtr, StateTy);
1484 // Number of elements in the items array.
1485 static const unsigned NumItems = 16;
1487 // Fetch the countByEnumeratingWithState:objects:count: selector.
1488 IdentifierInfo *II[] = {
1489 &CGM.getContext().Idents.get("countByEnumeratingWithState"),
1490 &CGM.getContext().Idents.get("objects"),
1491 &CGM.getContext().Idents.get("count")
1493 Selector FastEnumSel =
1494 CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
1497 getContext().getConstantArrayType(getContext().getObjCIdType(),
1498 llvm::APInt(32, NumItems),
1499 ArrayType::Normal, 0);
1500 Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
1502 RunCleanupsScope ForScope(*this);
1504 // Emit the collection pointer. In ARC, we do a retain.
1505 llvm::Value *Collection;
1506 if (getLangOpts().ObjCAutoRefCount) {
1507 Collection = EmitARCRetainScalarExpr(S.getCollection());
1509 // Enter a cleanup to do the release.
1510 EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
} else {
  Collection = EmitScalarExpr(S.getCollection());
}
1515 // The 'continue' label needs to appear within the cleanup for the
1516 // collection object.
1517 JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
1519 // Send it our message:
1522 // The first argument is a temporary of the enumeration-state type.
1523 Args.add(RValue::get(StatePtr.getPointer()),
1524 getContext().getPointerType(StateTy));
1526 // The second argument is a temporary array with space for NumItems
1527 // pointers. We'll actually be loading elements from the array
1528 // pointer written into the control state; this buffer is so that
1529 // collections that *aren't* backed by arrays can still queue up
1530 // batches of elements.
1531 Args.add(RValue::get(ItemsPtr.getPointer()),
1532 getContext().getPointerType(ItemsTy));
1534 // The third argument is the capacity of that temporary array.
1535 llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
1536 llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
1537 Args.add(RValue::get(Count), getContext().UnsignedLongTy);
1539 // Start the enumeration.
1541 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
1542 getContext().UnsignedLongTy,
1546 // The initial number of objects that were returned in the buffer.
1547 llvm::Value *initialBufferLimit = CountRV.getScalarVal();
1549 llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
1550 llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");
1552 llvm::Value *zero = llvm::Constant::getNullValue(UnsignedLongLTy);
1554 // If the limit pointer was zero to begin with, the collection is
1555 // empty; skip all this. Set the branch weight assuming this has the same
1556 // probability of exiting the loop as any other loop exit.
1557 uint64_t EntryCount = getCurrentProfileCount();
1558 Builder.CreateCondBr(
1559 Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB,
1561 createProfileWeights(EntryCount, getProfileCount(S.getBody())));
1563 // Otherwise, initialize the loop.
1564 EmitBlock(LoopInitBB);
1566 // Save the initial mutations value. This is the value at an
1567 // address that was written into the state object by
1568 // countByEnumeratingWithState:objects:count:.
1569 Address StateMutationsPtrPtr = Builder.CreateStructGEP(
1570 StatePtr, 2, 2 * getPointerSize(), "mutationsptr.ptr");
1571 llvm::Value *StateMutationsPtr
1572 = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
1574 llvm::Value *initialMutations =
1575 Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
1576 "forcoll.initial-mutations");
1578 // Start looping. This is the point we return to whenever we have a
1579 // fresh, non-empty batch of objects.
1580 llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
1581 EmitBlock(LoopBodyBB);
1583 // The current index into the buffer.
1584 llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.index");
1585 index->addIncoming(zero, LoopInitBB);
1587 // The current buffer size.
1588 llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.count");
1589 count->addIncoming(initialBufferLimit, LoopInitBB);
1591 incrementProfileCounter(&S);
1593 // Check whether the mutations value has changed from where it was
// at start. StateMutationsPtr should actually be invariant between
// refreshes.
1596 StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
1597 llvm::Value *currentMutations
1598 = Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
1601 llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
1602 llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
1604 Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
1605 WasNotMutatedBB, WasMutatedBB);
1607 // If so, call the enumeration-mutation function.
1608 EmitBlock(WasMutatedBB);
1610 Builder.CreateBitCast(Collection,
1611 ConvertType(getContext().getObjCIdType()));
1613 Args2.add(RValue::get(V), getContext().getObjCIdType());
1614 // FIXME: We shouldn't need to get the function info here, the runtime already
1615 // should have computed it to build the function.
1617 CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2),
1618 EnumerationMutationFn, ReturnValueSlot(), Args2);
1620 // Otherwise, or if the mutation function returns, just continue.
1621 EmitBlock(WasNotMutatedBB);
1623 // Initialize the element variable.
1624 RunCleanupsScope elementVariableScope(*this);
1625 bool elementIsVariable;
1626 LValue elementLValue;
1627 QualType elementType;
1628 if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
1629 // Initialize the variable, in case it's a __block variable or something.
1630 EmitAutoVarInit(variable);
1632 const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
1633 DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(),
1634 VK_LValue, SourceLocation());
1635 elementLValue = EmitLValue(&tempDRE);
1636 elementType = D->getType();
1637 elementIsVariable = true;
1639 if (D->isARCPseudoStrong())
1640 elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
1641 } else {
1642 elementLValue = LValue(); // suppress warning
1643 elementType = cast<Expr>(S.getElement())->getType();
1644 elementIsVariable = false;
1645 }
1646 llvm::Type *convertedElementType = ConvertType(elementType);
1648 // Fetch the buffer out of the enumeration state.
1649 // TODO: this pointer should actually be invariant between
1650 // refreshes, which would help us do certain loop optimizations.
1651 Address StateItemsPtr = Builder.CreateStructGEP(
1652 StatePtr, 1, getPointerSize(), "stateitems.ptr");
1653 llvm::Value *EnumStateItems =
1654 Builder.CreateLoad(StateItemsPtr, "stateitems");
1656 // Fetch the value at the current index from the buffer.
1657 llvm::Value *CurrentItemPtr =
1658 Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
1659 llvm::Value *CurrentItem =
1660 Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign());
1662 // Cast that value to the right type.
1663 CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
1664 "currentitem");
1666 // Make sure we have an l-value. Yes, this gets evaluated every
1667 // time through the loop.
1668 if (!elementIsVariable) {
1669 elementLValue = EmitLValue(cast<Expr>(S.getElement()));
1670 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
1671 } else {
1672 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue,
1673 /*isInit*/ true);
1674 }
1676 // If we do have an element variable, this assignment is the end of
1677 // its initialization.
1678 if (elementIsVariable)
1679 EmitAutoVarCleanups(variable);
1681 // Perform the loop body, setting up break and continue labels.
1682 BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
1684 RunCleanupsScope Scope(*this);
1685 EmitStmt(S.getBody());
1687 BreakContinueStack.pop_back();
1689 // Destroy the element variable now.
1690 elementVariableScope.ForceCleanup();
1692 // Check whether there are more elements.
1693 EmitBlock(AfterBody.getBlock());
1695 llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");
1697 // First we check in the local buffer.
1698 llvm::Value *indexPlusOne
1699 = Builder.CreateAdd(index, llvm::ConstantInt::get(UnsignedLongLTy, 1));
1701 // If we haven't overrun the buffer yet, we can continue.
1702 // Set the branch weights based on the simplifying assumption that this is
1703 // like a while-loop, i.e., ignoring that the false branch fetches more
1704 // elements and then returns to the loop.
1705 Builder.CreateCondBr(
1706 Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB,
1707 createProfileWeights(getProfileCount(S.getBody()), EntryCount));
1709 index->addIncoming(indexPlusOne, AfterBody.getBlock());
1710 count->addIncoming(count, AfterBody.getBlock());
1712 // Otherwise, we have to fetch more elements.
1713 EmitBlock(FetchMoreBB);
1715 CountRV =
1716 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
1717 getContext().UnsignedLongTy,
1718 FastEnumSel,
1719 Collection, Args);
1721 // If we got a zero count, we're done.
1722 llvm::Value *refetchCount = CountRV.getScalarVal();
1724 // (note that the message send might split FetchMoreBB)
1725 index->addIncoming(zero, Builder.GetInsertBlock());
1726 count->addIncoming(refetchCount, Builder.GetInsertBlock());
1728 Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
1729 EmptyBB, LoopBodyBB);
1731 // No more elements.
1732 EmitBlock(EmptyBB);
1734 if (!elementIsVariable) {
1735 // If the element was not a declaration, set it to be null.
1737 llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
1738 elementLValue = EmitLValue(cast<Expr>(S.getElement()));
1739 EmitStoreThroughLValue(RValue::get(null), elementLValue);
1740 }
1742 if (DI)
1743 DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
1745 ForScope.ForceCleanup();
1746 EmitBlock(LoopEnd.getBlock());
1747 }
1749 void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
1750 CGM.getObjCRuntime().EmitTryStmt(*this, S);
1753 void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
1754 CGM.getObjCRuntime().EmitThrowStmt(*this, S);
1757 void CodeGenFunction::EmitObjCAtSynchronizedStmt(
1758 const ObjCAtSynchronizedStmt &S) {
1759 CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
1763 struct CallObjCRelease final : EHScopeStack::Cleanup {
1764 CallObjCRelease(llvm::Value *object) : object(object) {}
1765 llvm::Value *object;
1767 void Emit(CodeGenFunction &CGF, Flags flags) override {
1768 // Releases at the end of the full-expression are imprecise.
1769 CGF.EmitARCRelease(object, ARCImpreciseLifetime);
1774 /// Produce the code for a CK_ARCConsumeObject. Does a primitive
1775 /// release at the end of the full-expression.
1776 llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
1777 llvm::Value *object) {
1778 // If we're in a conditional branch, we need to make the cleanup
1779 // conditional.
1780 pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
1782 return object;
1783 }
1784 llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
1785 llvm::Value *value) {
1786 return EmitARCRetainAutorelease(type, value);
1789 /// Given a number of pointers, inform the optimizer that they're
1790 /// being intrinsically used up until this point in the program.
1791 void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
1792 llvm::Constant *&fn = CGM.getObjCEntrypoints().clang_arc_use;
1794 llvm::FunctionType *fnType =
1795 llvm::FunctionType::get(CGM.VoidTy, None, true);
1796 fn = CGM.CreateRuntimeFunction(fnType, "clang.arc.use");
1799 // This isn't really a "runtime" function, but as an intrinsic it
1800 // doesn't really matter as long as we align things up.
1801 EmitNounwindRuntimeCall(fn, values);
1802 }
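// For illustration only (assumed IR shape, not verbatim output): a call such
// as EmitARCIntrinsicUse(X) produces roughly
//   call void (...) @clang.arc.use(i8* %x)
// which the ARC optimizer treats as a use of %x and eventually deletes.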
1805 static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
1806 llvm::FunctionType *type,
1807 StringRef fnName) {
1808 llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
1810 if (llvm::Function *f = dyn_cast<llvm::Function>(fn)) {
1811 // If the target runtime doesn't naturally support ARC, emit weak
1812 // references to the runtime support library. We don't really
1813 // permit this to fail, but we need a particular relocation style.
1814 if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() &&
1815 !CGM.getTriple().isOSBinFormatCOFF()) {
1816 f->setLinkage(llvm::Function::ExternalWeakLinkage);
1817 } else if (fnName == "objc_retain" || fnName == "objc_release") {
1818 // If we have Native ARC, set nonlazybind attribute for these APIs for
1819 // performance.
1820 f->addFnAttr(llvm::Attribute::NonLazyBind);
1821 }
1822 }
1824 return fn;
1825 }
1827 /// Perform an operation having the signature
1828 /// i8* (i8*)
1829 /// where a null input causes a no-op and returns null.
1830 static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
1831 llvm::Value *value,
1832 llvm::Constant *&fn,
1833 StringRef fnName,
1834 bool isTailCall = false) {
1835 if (isa<llvm::ConstantPointerNull>(value)) return value;
1838 llvm::FunctionType *fnType =
1839 llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false);
1840 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
1843 // Cast the argument to 'id'.
1844 llvm::Type *origType = value->getType();
1845 value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
1847 // Call the function.
1848 llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
1849 if (isTailCall)
1850 call->setTailCall();
1852 // Cast the result back to the original type.
1853 return CGF.Builder.CreateBitCast(call, origType);
1856 /// Perform an operation having the following signature:
1857 /// i8* (i8**)
1858 static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
1859 Address addr,
1860 llvm::Constant *&fn,
1861 StringRef fnName) {
1863 llvm::FunctionType *fnType =
1864 llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrPtrTy, false);
1865 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
1868 // Cast the argument to 'id*'.
1869 llvm::Type *origType = addr.getElementType();
1870 addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
1872 // Call the function.
1873 llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
1875 // Cast the result back to a dereference of the original type.
1876 if (origType != CGF.Int8PtrTy)
1877 result = CGF.Builder.CreateBitCast(result, origType);
1879 return result;
1880 }
1882 /// Perform an operation having the following signature:
1883 /// i8* (i8**, i8*)
1884 static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
1885 Address addr,
1886 llvm::Value *value,
1887 llvm::Constant *&fn,
1888 StringRef fnName,
1889 bool ignored) {
1890 assert(addr.getElementType() == value->getType());
1893 llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
1895 llvm::FunctionType *fnType
1896 = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
1897 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
1900 llvm::Type *origType = value->getType();
1902 llvm::Value *args[] = {
1903 CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy),
1904 CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
1906 llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);
1908 if (ignored) return nullptr;
1910 return CGF.Builder.CreateBitCast(result, origType);
1913 /// Perform an operation having the following signature:
1914 /// void (i8**, i8**)
1915 static void emitARCCopyOperation(CodeGenFunction &CGF,
1916 Address dst,
1917 Address src,
1918 llvm::Constant *&fn,
1919 StringRef fnName) {
1920 assert(dst.getType() == src.getType());
1923 llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrPtrTy };
1925 llvm::FunctionType *fnType
1926 = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
1927 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
1930 llvm::Value *args[] = {
1931 CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
1932 CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy)
1934 CGF.EmitNounwindRuntimeCall(fn, args);
1937 /// Produce the code to do a retain. Based on the type, calls one of:
1938 /// call i8* \@objc_retain(i8* %value)
1939 /// call i8* \@objc_retainBlock(i8* %value)
1940 llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
1941 if (type->isBlockPointerType())
1942 return EmitARCRetainBlock(value, /*mandatory*/ false);
1944 return EmitARCRetainNonBlock(value);
1947 /// Retain the given object, with normal retain semantics.
1948 /// call i8* \@objc_retain(i8* %value)
1949 llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
1950 return emitARCValueOperation(*this, value,
1951 CGM.getObjCEntrypoints().objc_retain,
1952 "objc_retain");
1953 }
1955 /// Retain the given block, with _Block_copy semantics.
1956 /// call i8* \@objc_retainBlock(i8* %value)
1958 /// \param mandatory - If false, emit the call with metadata
1959 /// indicating that it's okay for the optimizer to eliminate this call
1960 /// if it can prove that the block never escapes except down the stack.
1961 llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
1962 bool mandatory) {
1963 llvm::Value *result
1964 = emitARCValueOperation(*this, value,
1965 CGM.getObjCEntrypoints().objc_retainBlock,
1966 "objc_retainBlock");
1968 // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
1969 // tell the optimizer that it doesn't need to do this copy if the
1970 // block doesn't escape, where being passed as an argument doesn't
1971 // count as escaping.
1972 if (!mandatory && isa<llvm::Instruction>(result)) {
1973 llvm::CallInst *call
1974 = cast<llvm::CallInst>(result->stripPointerCasts());
1975 assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock);
1977 call->setMetadata("clang.arc.copy_on_escape",
1978 llvm::MDNode::get(Builder.getContext(), None));
1979 }
1981 return result;
1982 }
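// Illustrative sketch (assumed names, not verbatim IR): for a non-mandatory
// copy the call emitted above looks roughly like
//   %1 = call i8* @objc_retainBlock(i8* %0), !clang.arc.copy_on_escape !0
// where !0 is an empty metadata node; the ARC optimizer may remove the call if
// it can prove the block never escapes.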
1984 static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
1985 // Fetch the void(void) inline asm which marks that we're going to
1986 // do something with the autoreleased return value.
1987 llvm::InlineAsm *&marker
1988 = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker;
1989 if (!marker) {
1990 StringRef assembly
1991 = CGF.CGM.getTargetCodeGenInfo()
1992 .getARCRetainAutoreleasedReturnValueMarker();
1994 // If we have an empty assembly string, there's nothing to do.
1995 if (assembly.empty()) {
1997 // Otherwise, at -O0, build an inline asm that we're going to call
1998 // in order to mark the return value.
1999 } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
2000 llvm::FunctionType *type =
2001 llvm::FunctionType::get(CGF.VoidTy, /*variadic*/false);
2003 marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
2005 // If we're at -O1 and above, we don't want to litter the code
2006 // with this marker yet, so leave a breadcrumb for the ARC
2007 // optimizer to pick up.
2008 } else {
2009 llvm::NamedMDNode *metadata =
2010 CGF.CGM.getModule().getOrInsertNamedMetadata(
2011 "clang.arc.retainAutoreleasedReturnValueMarker");
2012 assert(metadata->getNumOperands() <= 1);
2013 if (metadata->getNumOperands() == 0) {
2014 auto &ctx = CGF.getLLVMContext();
2015 metadata->addOperand(llvm::MDNode::get(ctx,
2016 llvm::MDString::get(ctx, assembly)));
2017 }
2018 }
2019 }
2021 // Call the marker asm if we made one, which we do only at -O0.
2022 if (marker)
2023 CGF.Builder.CreateCall(marker);
2024 }
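// Example for illustration only: the marker string is entirely
// target-dependent. On some 32-bit ARM targets it has historically been a
// no-op move such as
//   mov r7, r7
// placed between the call and the following objc_retainAutoreleasedReturnValue
// call so the runtime can recognize the pattern.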
2026 /// Retain the given object which is the result of a function call.
2027 /// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
2029 /// Yes, this function name is one character away from a different
2030 /// call with completely different semantics.
2032 CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
2033 emitAutoreleasedReturnValueMarker(*this);
2034 return emitARCValueOperation(*this, value,
2035 CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
2036 "objc_retainAutoreleasedReturnValue");
2039 /// Claim a possibly-autoreleased return value at +0. This is only
2040 /// valid to do in contexts which do not rely on the retain to keep
2041 /// the object valid for all of its uses; for example, when
2042 /// the value is ignored, or when it is being assigned to an
2043 /// __unsafe_unretained variable.
2045 /// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value)
2047 CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) {
2048 emitAutoreleasedReturnValueMarker(*this);
2049 return emitARCValueOperation(*this, value,
2050 CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue,
2051 "objc_unsafeClaimAutoreleasedReturnValue");
2054 /// Release the given object.
2055 /// call void \@objc_release(i8* %value)
2056 void CodeGenFunction::EmitARCRelease(llvm::Value *value,
2057 ARCPreciseLifetime_t precise) {
2058 if (isa<llvm::ConstantPointerNull>(value)) return;
2060 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release;
2062 llvm::FunctionType *fnType =
2063 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
2064 fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
2067 // Cast the argument to 'id'.
2068 value = Builder.CreateBitCast(value, Int8PtrTy);
2070 // Call objc_release.
2071 llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);
2073 if (precise == ARCImpreciseLifetime) {
2074 call->setMetadata("clang.imprecise_release",
2075 llvm::MDNode::get(Builder.getContext(), None));
2079 /// Destroy a __strong variable.
2081 /// At -O0, emit a call to store 'null' into the address;
2082 /// instrumenting tools prefer this because the address is exposed,
2083 /// but it's relatively cumbersome to optimize.
2085 /// At -O1 and above, just load and call objc_release.
2087 /// call void \@objc_storeStrong(i8** %addr, i8* null)
2088 void CodeGenFunction::EmitARCDestroyStrong(Address addr,
2089 ARCPreciseLifetime_t precise) {
2090 if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
2091 llvm::Value *null = getNullForVariable(addr);
2092 EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
2093 return;
2094 }
2096 llvm::Value *value = Builder.CreateLoad(addr);
2097 EmitARCRelease(value, precise);
2100 /// Store into a strong object. Always calls this:
2101 /// call void \@objc_storeStrong(i8** %addr, i8* %value)
2102 llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
2103 llvm::Value *value,
2104 bool ignored) {
2105 assert(addr.getElementType() == value->getType());
2107 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
2109 llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
2110 llvm::FunctionType *fnType
2111 = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
2112 fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
2115 llvm::Value *args[] = {
2116 Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
2117 Builder.CreateBitCast(value, Int8PtrTy)
2119 EmitNounwindRuntimeCall(fn, args);
2121 if (ignored) return nullptr;
2123 return value;
2124 }
2125 /// Store into a strong object. Sometimes calls this:
2126 /// call void \@objc_storeStrong(i8** %addr, i8* %value)
2127 /// Other times, breaks it down into components.
2128 llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
2129 llvm::Value *newValue,
2130 bool ignored) {
2131 QualType type = dst.getType();
2132 bool isBlock = type->isBlockPointerType();
2134 // Use a store barrier at -O0 unless this is a block type or the
2135 // lvalue is inadequately aligned.
2136 if (shouldUseFusedARCCalls() &&
2137 !isBlock &&
2138 (dst.getAlignment().isZero() ||
2139 dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
2140 return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
2141 }
2143 // Otherwise, split it out.
2145 // Retain the new value.
2146 newValue = EmitARCRetain(type, newValue);
2148 // Read the old value.
2149 llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation());
2151 // Store. We do this before the release so that any deallocs won't
2152 // see the old value.
2153 EmitStoreOfScalar(newValue, dst);
2155 // Finally, release the old value.
2156 EmitARCRelease(oldValue, dst.isARCPreciseLifetime());
2158 return newValue;
2159 }
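// Rough sketch of the two lowerings for a __strong assignment "x = y"
// (assumed value names, not verbatim IR):
//   fused:  call void @objc_storeStrong(i8** %x.addr, i8* %y)
//   split:  %r = call i8* @objc_retain(i8* %y)
//           %old = load i8*, i8** %x.addr
//           store i8* %r, i8** %x.addr
//           call void @objc_release(i8* %old)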
2161 /// Autorelease the given object.
2162 /// call i8* \@objc_autorelease(i8* %value)
2163 llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
2164 return emitARCValueOperation(*this, value,
2165 CGM.getObjCEntrypoints().objc_autorelease,
2166 "objc_autorelease");
2169 /// Autorelease the given object.
2170 /// call i8* \@objc_autoreleaseReturnValue(i8* %value)
2172 CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
2173 return emitARCValueOperation(*this, value,
2174 CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
2175 "objc_autoreleaseReturnValue",
2176 /*isTailCall*/ true);
2179 /// Do a fused retain/autorelease of the given object.
2180 /// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
2182 CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
2183 return emitARCValueOperation(*this, value,
2184 CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
2185 "objc_retainAutoreleaseReturnValue",
2186 /*isTailCall*/ true);
2189 /// Do a fused retain/autorelease of the given object.
2190 /// call i8* \@objc_retainAutorelease(i8* %value)
2191 /// or
2192 /// %retain = call i8* \@objc_retainBlock(i8* %value)
2193 /// call i8* \@objc_autorelease(i8* %retain)
2194 llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
2195 llvm::Value *value) {
2196 if (!type->isBlockPointerType())
2197 return EmitARCRetainAutoreleaseNonBlock(value);
2199 if (isa<llvm::ConstantPointerNull>(value)) return value;
2201 llvm::Type *origType = value->getType();
2202 value = Builder.CreateBitCast(value, Int8PtrTy);
2203 value = EmitARCRetainBlock(value, /*mandatory*/ true);
2204 value = EmitARCAutorelease(value);
2205 return Builder.CreateBitCast(value, origType);
2208 /// Do a fused retain/autorelease of the given object.
2209 /// call i8* \@objc_retainAutorelease(i8* %value)
2211 CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
2212 return emitARCValueOperation(*this, value,
2213 CGM.getObjCEntrypoints().objc_retainAutorelease,
2214 "objc_retainAutorelease");
2217 /// i8* \@objc_loadWeak(i8** %addr)
2218 /// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
2219 llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
2220 return emitARCLoadOperation(*this, addr,
2221 CGM.getObjCEntrypoints().objc_loadWeak,
2222 "objc_loadWeak");
2223 }
2225 /// i8* \@objc_loadWeakRetained(i8** %addr)
2226 llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
2227 return emitARCLoadOperation(*this, addr,
2228 CGM.getObjCEntrypoints().objc_loadWeakRetained,
2229 "objc_loadWeakRetained");
2232 /// i8* \@objc_storeWeak(i8** %addr, i8* %value)
2233 /// Returns %value.
2234 llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
2235 llvm::Value *value,
2236 bool ignored) {
2237 return emitARCStoreOperation(*this, addr, value,
2238 CGM.getObjCEntrypoints().objc_storeWeak,
2239 "objc_storeWeak", ignored);
2242 /// i8* \@objc_initWeak(i8** %addr, i8* %value)
2243 /// Returns %value. %addr is known to not have a current weak entry.
2244 /// Essentially equivalent to:
2245 /// *addr = nil; objc_storeWeak(addr, value);
2246 void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
2247 // If we're initializing to null, just write null to memory; no need
2248 // to get the runtime involved. But don't do this if optimization
2249 // is enabled, because accounting for this would make the optimizer
2250 // much more complicated.
2251 if (isa<llvm::ConstantPointerNull>(value) &&
2252 CGM.getCodeGenOpts().OptimizationLevel == 0) {
2253 Builder.CreateStore(value, addr);
2254 return;
2255 }
2257 emitARCStoreOperation(*this, addr, value,
2258 CGM.getObjCEntrypoints().objc_initWeak,
2259 "objc_initWeak", /*ignored*/ true);
2260 }
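// Illustrative sketch (assumed names): initializing "__weak id w = obj" emits
// roughly
//   call i8* @objc_initWeak(i8** %w, i8* %obj)
// except in the -O0 null-initializer case above, where plain null is stored
// directly and the runtime call is skipped.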
2262 /// void \@objc_destroyWeak(i8** %addr)
2263 /// Essentially objc_storeWeak(addr, nil).
2264 void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
2265 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
2267 llvm::FunctionType *fnType =
2268 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrPtrTy, false);
2269 fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
2272 // Cast the argument to 'id*'.
2273 addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
2275 EmitNounwindRuntimeCall(fn, addr.getPointer());
2278 /// void \@objc_moveWeak(i8** %dest, i8** %src)
2279 /// Disregards the current value in %dest. Leaves %src pointing to nothing.
2280 /// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
2281 void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
2282 emitARCCopyOperation(*this, dst, src,
2283 CGM.getObjCEntrypoints().objc_moveWeak,
2284 "objc_moveWeak");
2285 }
2287 /// void \@objc_copyWeak(i8** %dest, i8** %src)
2288 /// Disregards the current value in %dest. Essentially
2289 /// objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
2290 void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
2291 emitARCCopyOperation(*this, dst, src,
2292 CGM.getObjCEntrypoints().objc_copyWeak,
2293 "objc_copyWeak");
2294 }
2296 /// Produce the code to do a objc_autoreleasepool_push.
2297 /// call i8* \@objc_autoreleasePoolPush(void)
2298 llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
2299 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
2301 llvm::FunctionType *fnType =
2302 llvm::FunctionType::get(Int8PtrTy, false);
2303 fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
2306 return EmitNounwindRuntimeCall(fn);
2309 /// Produce the code to pop an autorelease pool.
2310 /// call void \@objc_autoreleasePoolPop(i8* %ptr)
2311 void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
2312 assert(value->getType() == Int8PtrTy);
2314 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
2316 llvm::FunctionType *fnType =
2317 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
2319 // We don't want to use a weak import here; instead we should not
2320 // fall into this path.
2321 fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
2324 // objc_autoreleasePoolPop can throw.
2325 EmitRuntimeCallOrInvoke(fn, value);
2328 /// Produce the code to do an MRR (manual retain/release) version of
2329 /// objc_autoreleasePoolPush, i.e. [[NSAutoreleasePool alloc] init],
2330 /// where +alloc is declared on the NSAutoreleasePool class and -init on
2331 /// its NSObject superclass.
2333 llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
2334 CGObjCRuntime &Runtime = CGM.getObjCRuntime();
2335 llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
2336 // [NSAutoreleasePool alloc]
2337 IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
2338 Selector AllocSel = getContext().Selectors.getSelector(0, &II);
2339 CallArgList Args;
2340 RValue AllocRV =
2341 Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
2342 getContext().getObjCIdType(),
2343 AllocSel, Receiver, Args);
2346 Receiver = AllocRV.getScalarVal();
2347 II = &CGM.getContext().Idents.get("init");
2348 Selector InitSel = getContext().Selectors.getSelector(0, &II);
2349 RValue InitRV =
2350 Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
2351 getContext().getObjCIdType(),
2352 InitSel, Receiver, Args);
2353 return InitRV.getScalarVal();
2354 }
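// In Objective-C terms the two sends above are roughly (sketch only):
//   NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
// The returned token is later handed to the -drain send emitted below.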
2356 /// Produce the code to pop an MRR autorelease pool by sending -drain to it.
2358 void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
2359 IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
2360 Selector DrainSel = getContext().Selectors.getSelector(0, &II);
2361 CallArgList Args;
2362 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
2363 getContext().VoidTy, DrainSel, Arg, Args);
2366 void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
2367 Address addr,
2368 QualType type) {
2369 CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime);
2372 void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
2373 Address addr,
2374 QualType type) {
2375 CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime);
2378 void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
2379 Address addr,
2380 QualType type) {
2381 CGF.EmitARCDestroyWeak(addr);
2385 struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup {
2386 llvm::Value *Token;
2388 CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
2390 void Emit(CodeGenFunction &CGF, Flags flags) override {
2391 CGF.EmitObjCAutoreleasePoolPop(Token);
2394 struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup {
2395 llvm::Value *Token;
2397 CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
2399 void Emit(CodeGenFunction &CGF, Flags flags) override {
2400 CGF.EmitObjCMRRAutoreleasePoolPop(Token);
2405 void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
2406 if (CGM.getLangOpts().ObjCAutoRefCount)
2407 EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
2408 else
2409 EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
2410 }
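// For illustration: an "@autoreleasepool { ... }" statement ends up pushing one
// of these cleanups, so the pool is popped (ARC) or drained (MRR) on both the
// normal and the exceptional exit paths. Sketch of the source pattern:
//   @autoreleasepool {
//     // body
//   }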
2412 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
2413 LValue lvalue,
2414 QualType type) {
2415 switch (type.getObjCLifetime()) {
2416 case Qualifiers::OCL_None:
2417 case Qualifiers::OCL_ExplicitNone:
2418 case Qualifiers::OCL_Strong:
2419 case Qualifiers::OCL_Autoreleasing:
2420 return TryEmitResult(CGF.EmitLoadOfLValue(lvalue,
2421 SourceLocation()).getScalarVal(),
2422 false);
2424 case Qualifiers::OCL_Weak:
2425 return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
2426 true);
2427 }
2429 llvm_unreachable("impossible lifetime!");
2430 }
2432 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
2433 const Expr *e) {
2434 e = e->IgnoreParens();
2435 QualType type = e->getType();
2437 // If we're loading retained from a __strong xvalue, we can avoid
2438 // an extra retain/release pair by zeroing out the source of this
2439 // "move" operation.
2440 if (e->isXValue() &&
2441 !type.isConstQualified() &&
2442 type.getObjCLifetime() == Qualifiers::OCL_Strong) {
2444 LValue lv = CGF.EmitLValue(e);
2446 // Load the object pointer.
2447 llvm::Value *result = CGF.EmitLoadOfLValue(lv,
2448 SourceLocation()).getScalarVal();
2450 // Set the source pointer to NULL.
2451 CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
2453 return TryEmitResult(result, true);
2456 // As a very special optimization, in ARC++, if the l-value is the
2457 // result of a non-volatile assignment, do a simple retain of the
2458 // result of the call to objc_storeWeak instead of reloading.
2459 if (CGF.getLangOpts().CPlusPlus &&
2460 !type.isVolatileQualified() &&
2461 type.getObjCLifetime() == Qualifiers::OCL_Weak &&
2462 isa<BinaryOperator>(e) &&
2463 cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
2464 return TryEmitResult(CGF.EmitScalarExpr(e), false);
2466 return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
2467 }
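// Illustrative example of the xvalue optimization above (assumed source, ARC++):
//   __strong id x = ...;
//   id y = std::move(x);   // load x, store nil back into x, no retain/release
// The moved-from variable is left nil, which is why const and non-__strong
// sources are excluded from the fast path.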
2469 typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
2470 llvm::Value *value)>
2471 ValueTransform;
2473 /// Insert code immediately after a call.
2474 static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
2475 llvm::Value *value,
2476 ValueTransform doAfterCall,
2477 ValueTransform doFallback) {
2478 if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
2479 CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
2481 // Place the retain immediately following the call.
2482 CGF.Builder.SetInsertPoint(call->getParent(),
2483 ++llvm::BasicBlock::iterator(call));
2484 value = doAfterCall(CGF, value);
2486 CGF.Builder.restoreIP(ip);
2488 } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
2489 CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
2491 // Place the retain at the beginning of the normal destination block.
2492 llvm::BasicBlock *BB = invoke->getNormalDest();
2493 CGF.Builder.SetInsertPoint(BB, BB->begin());
2494 value = doAfterCall(CGF, value);
2496 CGF.Builder.restoreIP(ip);
2499 // Bitcasts can arise because of related-result returns. Rewrite
2500 // the operand.
2501 } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
2502 llvm::Value *operand = bitcast->getOperand(0);
2503 operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback);
2504 bitcast->setOperand(0, operand);
2507 // Generic fall-back case.
2508 } else {
2509 // Retain using the non-block variant: we never need to do a copy
2510 // of a block that's been returned to us.
2511 return doFallback(CGF, value);
2512 }
2513 return value;
2514 }
2515 /// Given that the given expression is some sort of call (which does
2516 /// not return retained), emit a retain following it.
2517 static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF,
2518 const Expr *e) {
2519 llvm::Value *value = CGF.EmitScalarExpr(e);
2520 return emitARCOperationAfterCall(CGF, value,
2521 [](CodeGenFunction &CGF, llvm::Value *value) {
2522 return CGF.EmitARCRetainAutoreleasedReturnValue(value);
2523 },
2524 [](CodeGenFunction &CGF, llvm::Value *value) {
2525 return CGF.EmitARCRetainNonBlock(value);
2526 });
2527 }
2529 /// Given that the given expression is some sort of call (which does
2530 /// not return retained), perform an unsafeClaim following it.
2531 static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF,
2532 const Expr *e) {
2533 llvm::Value *value = CGF.EmitScalarExpr(e);
2534 return emitARCOperationAfterCall(CGF, value,
2535 [](CodeGenFunction &CGF, llvm::Value *value) {
2536 return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value);
2537 },
2538 [](CodeGenFunction &CGF, llvm::Value *value) {
2539 return value;
2540 });
2541 }
2543 llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E,
2544 bool allowUnsafeClaim) {
2545 if (allowUnsafeClaim &&
2546 CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) {
2547 return emitARCUnsafeClaimCallResult(*this, E);
2548 }
2549 llvm::Value *value = emitARCRetainCallResult(*this, E);
2550 return EmitObjCConsumeObject(E->getType(), value);
2551 }
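// Sketch of the two paths for "x = foo()" where foo() returns an autoreleased
// object (assumed names, not verbatim IR):
//   retain path:  %v = call i8* @foo()
//                 %v2 = call i8* @objc_retainAutoreleasedReturnValue(i8* %v)
//   claim path:   %v = call i8* @foo()
//                 %v2 = call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %v)
// The claim path is only used when the result does not need to be kept alive,
// e.g. for __unsafe_unretained destinations, as described above.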
2554 /// Determine whether it might be important to emit a separate
2555 /// objc_retain_block on the result of the given expression, or
2556 /// whether it's okay to just emit it in a +1 context.
2557 static bool shouldEmitSeparateBlockRetain(const Expr *e) {
2558 assert(e->getType()->isBlockPointerType());
2559 e = e->IgnoreParens();
2561 // For future goodness, emit block expressions directly in +1
2562 // contexts if we can.
2563 if (isa<BlockExpr>(e))
2564 return false;
2566 if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
2567 switch (cast->getCastKind()) {
2568 // Emitting these operations in +1 contexts is goodness.
2569 case CK_LValueToRValue:
2570 case CK_ARCReclaimReturnedObject:
2571 case CK_ARCConsumeObject:
2572 case CK_ARCProduceObject:
2573 return false;
2575 // These operations preserve a block type.
2576 case CK_NoOp:
2577 case CK_BitCast:
2578 return shouldEmitSeparateBlockRetain(cast->getSubExpr());
2580 // These operations are known to be bad (or haven't been considered).
2581 case CK_AnyPointerToBlockPointerCast:
2582 default:
2583 return true;
2584 }
2585 }
2587 return true;
2588 }
2591 /// A CRTP base class for emitting expressions of retainable object
2592 /// pointer type in ARC.
2593 template <typename Impl, typename Result> class ARCExprEmitter {
2595 CodeGenFunction &CGF;
2596 Impl &asImpl() { return *static_cast<Impl*>(this); }
2598 ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {}
2601 Result visit(const Expr *e);
2602 Result visitCastExpr(const CastExpr *e);
2603 Result visitPseudoObjectExpr(const PseudoObjectExpr *e);
2604 Result visitBinaryOperator(const BinaryOperator *e);
2605 Result visitBinAssign(const BinaryOperator *e);
2606 Result visitBinAssignUnsafeUnretained(const BinaryOperator *e);
2607 Result visitBinAssignAutoreleasing(const BinaryOperator *e);
2608 Result visitBinAssignWeak(const BinaryOperator *e);
2609 Result visitBinAssignStrong(const BinaryOperator *e);
2611 // Minimal implementation:
2612 // Result visitLValueToRValue(const Expr *e)
2613 // Result visitConsumeObject(const Expr *e)
2614 // Result visitExtendBlockObject(const Expr *e)
2615 // Result visitReclaimReturnedObject(const Expr *e)
2616 // Result visitCall(const Expr *e)
2617 // Result visitExpr(const Expr *e)
2619 // Result emitBitCast(Result result, llvm::Type *resultType)
2620 // llvm::Value *getValueOfResult(Result result)
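// For example, a minimal concrete emitter would look roughly like this
// (sketch with assumed names; see ARCRetainExprEmitter below for a real one):
//   struct MyEmitter : ARCExprEmitter<MyEmitter, llvm::Value*> {
//     MyEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}
//     llvm::Value *getValueOfResult(llvm::Value *v) { return v; }
//     llvm::Value *emitBitCast(llvm::Value *v, llvm::Type *t) {
//       return CGF.Builder.CreateBitCast(v, t);
//     }
//     llvm::Value *visitLValueToRValue(const Expr *e) { return CGF.EmitScalarExpr(e); }
//     // ... remaining visit* hooks ...
//   };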
2624 /// Try to emit a PseudoObjectExpr under special ARC rules.
2626 /// This massively duplicates emitPseudoObjectRValue.
2627 template <typename Impl, typename Result>
2629 ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) {
2630 SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
2632 // Find the result expression.
2633 const Expr *resultExpr = E->getResultExpr();
2634 assert(resultExpr);
2635 Result result;
2637 for (PseudoObjectExpr::const_semantics_iterator
2638 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
2639 const Expr *semantic = *i;
2641 // If this semantic expression is an opaque value, bind it
2642 // to the result of its source expression.
2643 if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
2644 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
2645 OVMA opaqueData;
2647 // If this semantic is the result of the pseudo-object
2648 // expression, try to evaluate the source as +1.
2649 if (ov == resultExpr) {
2650 assert(!OVMA::shouldBindAsLValue(ov));
2651 result = asImpl().visit(ov->getSourceExpr());
2652 opaqueData = OVMA::bind(CGF, ov,
2653 RValue::get(asImpl().getValueOfResult(result)));
2655 // Otherwise, just bind it.
2656 } else {
2657 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
2658 }
2659 opaques.push_back(opaqueData);
2661 // Otherwise, if the expression is the result, evaluate it
2662 // and remember the result.
2663 } else if (semantic == resultExpr) {
2664 result = asImpl().visit(semantic);
2666 // Otherwise, evaluate the expression in an ignored context.
2667 } else {
2668 CGF.EmitIgnoredExpr(semantic);
2669 }
2670 }
2672 // Unbind all the opaques now.
2673 for (unsigned i = 0, e = opaques.size(); i != e; ++i)
2674 opaques[i].unbind(CGF);
2676 return result;
2677 }
2679 template <typename Impl, typename Result>
2680 Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) {
2681 switch (e->getCastKind()) {
2683 // No-op casts don't change the type, so we just ignore them.
2684 case CK_NoOp:
2685 return asImpl().visit(e->getSubExpr());
2687 // These casts can change the type.
2688 case CK_CPointerToObjCPointerCast:
2689 case CK_BlockPointerToObjCPointerCast:
2690 case CK_AnyPointerToBlockPointerCast:
2691 case CK_BitCast: {
2692 llvm::Type *resultType = CGF.ConvertType(e->getType());
2693 assert(e->getSubExpr()->getType()->hasPointerRepresentation());
2694 Result result = asImpl().visit(e->getSubExpr());
2695 return asImpl().emitBitCast(result, resultType);
2696 }
2698 // Handle some casts specially.
2699 case CK_LValueToRValue:
2700 return asImpl().visitLValueToRValue(e->getSubExpr());
2701 case CK_ARCConsumeObject:
2702 return asImpl().visitConsumeObject(e->getSubExpr());
2703 case CK_ARCExtendBlockObject:
2704 return asImpl().visitExtendBlockObject(e->getSubExpr());
2705 case CK_ARCReclaimReturnedObject:
2706 return asImpl().visitReclaimReturnedObject(e->getSubExpr());
2708 // Otherwise, use the default logic.
2709 default:
2710 return asImpl().visitExpr(e);
2711 }
2712 }
2714 template <typename Impl, typename Result>
2716 ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) {
2717 switch (e->getOpcode()) {
2718 case BO_Comma:
2719 CGF.EmitIgnoredExpr(e->getLHS());
2720 CGF.EnsureInsertPoint();
2721 return asImpl().visit(e->getRHS());
2723 case BO_Assign:
2724 return asImpl().visitBinAssign(e);
2726 default:
2727 return asImpl().visitExpr(e);
2728 }
2729 }
2731 template <typename Impl, typename Result>
2732 Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) {
2733 switch (e->getLHS()->getType().getObjCLifetime()) {
2734 case Qualifiers::OCL_ExplicitNone:
2735 return asImpl().visitBinAssignUnsafeUnretained(e);
2737 case Qualifiers::OCL_Weak:
2738 return asImpl().visitBinAssignWeak(e);
2740 case Qualifiers::OCL_Autoreleasing:
2741 return asImpl().visitBinAssignAutoreleasing(e);
2743 case Qualifiers::OCL_Strong:
2744 return asImpl().visitBinAssignStrong(e);
2746 case Qualifiers::OCL_None:
2747 return asImpl().visitExpr(e);
2749 llvm_unreachable("bad ObjC ownership qualifier");
2752 /// The default rule for __unsafe_unretained emits the RHS recursively,
2753 /// stores into the unsafe variable, and propagates the result outward.
2754 template <typename Impl, typename Result>
2755 Result ARCExprEmitter<Impl,Result>::
2756 visitBinAssignUnsafeUnretained(const BinaryOperator *e) {
2757 // Recursively emit the RHS.
2758 // For __block safety, do this before emitting the LHS.
2759 Result result = asImpl().visit(e->getRHS());
2761 // Perform the store.
2762 LValue lvalue =
2763 CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store);
2764 CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)),
2765 lvalue);
2767 return result;
2768 }
2770 template <typename Impl, typename Result>
2772 ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) {
2773 return asImpl().visitExpr(e);
2776 template <typename Impl, typename Result>
2778 ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) {
2779 return asImpl().visitExpr(e);
2782 template <typename Impl, typename Result>
2784 ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) {
2785 return asImpl().visitExpr(e);
2788 /// The general expression-emission logic.
2789 template <typename Impl, typename Result>
2790 Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) {
2791 // We should *never* see a nested full-expression here, because if
2792 // we fail to emit at +1, our caller must not retain after we close
2793 // out the full-expression. This isn't as important in the unsafe
2794 // emitters.
2795 assert(!isa<ExprWithCleanups>(e));
2797 // Look through parens, __extension__, generic selection, etc.
2798 e = e->IgnoreParens();
2800 // Handle certain kinds of casts.
2801 if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
2802 return asImpl().visitCastExpr(ce);
2804 // Handle the comma operator.
2805 } else if (auto op = dyn_cast<BinaryOperator>(e)) {
2806 return asImpl().visitBinaryOperator(op);
2808 // TODO: handle conditional operators here
2810 // For calls and message sends, use the retained-call logic.
2811 // Delegate inits are a special case in that they're the only
2812 // returns-retained expression that *isn't* surrounded by
2813 // a consume.
2814 } else if (isa<CallExpr>(e) ||
2815 (isa<ObjCMessageExpr>(e) &&
2816 !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
2817 return asImpl().visitCall(e);
2819 // Look through pseudo-object expressions.
2820 } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
2821 return asImpl().visitPseudoObjectExpr(pseudo);
2822 }
2824 return asImpl().visitExpr(e);
2825 }
2829 /// An emitter for +1 results.
2830 struct ARCRetainExprEmitter :
2831 public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> {
2833 ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}
2835 llvm::Value *getValueOfResult(TryEmitResult result) {
2836 return result.getPointer();
2839 TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) {
2840 llvm::Value *value = result.getPointer();
2841 value = CGF.Builder.CreateBitCast(value, resultType);
2842 result.setPointer(value);
2843 return result;
2844 }
2846 TryEmitResult visitLValueToRValue(const Expr *e) {
2847 return tryEmitARCRetainLoadOfScalar(CGF, e);
2850 /// For consumptions, just emit the subexpression and thus elide
2851 /// the retain/release pair.
2852 TryEmitResult visitConsumeObject(const Expr *e) {
2853 llvm::Value *result = CGF.EmitScalarExpr(e);
2854 return TryEmitResult(result, true);
2857 /// Block extends are net +0. Naively, we could just recurse on
2858 /// the subexpression, but actually we need to ensure that the
2859 /// value is copied as a block, so there's a little filter here.
2860 TryEmitResult visitExtendBlockObject(const Expr *e) {
2861 llvm::Value *result; // will be a +0 value
2863 // If we can't safely assume the sub-expression will produce a
2864 // block-copied value, emit the sub-expression at +0.
2865 if (shouldEmitSeparateBlockRetain(e)) {
2866 result = CGF.EmitScalarExpr(e);
2868 // Otherwise, try to emit the sub-expression at +1 recursively.
2869 } else {
2870 TryEmitResult subresult = asImpl().visit(e);
2872 // If that produced a retained value, just use that.
2873 if (subresult.getInt()) {
2874 return subresult;
2875 }
2877 // Otherwise it's +0.
2878 result = subresult.getPointer();
2879 }
2881 // Retain the object as a block.
2882 result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
2883 return TryEmitResult(result, true);
2886 /// For reclaims, emit the subexpression as a retained call and
2887 /// skip the consumption.
2888 TryEmitResult visitReclaimReturnedObject(const Expr *e) {
2889 llvm::Value *result = emitARCRetainCallResult(CGF, e);
2890 return TryEmitResult(result, true);
2893 /// When we have an undecorated call, retroactively do a claim.
2894 TryEmitResult visitCall(const Expr *e) {
2895 llvm::Value *result = emitARCRetainCallResult(CGF, e);
2896 return TryEmitResult(result, true);
2899 // TODO: maybe special-case visitBinAssignWeak?
2901 TryEmitResult visitExpr(const Expr *e) {
2902 // We didn't find an obvious production, so emit what we've got and
2903 // tell the caller that we didn't manage to retain.
2904 llvm::Value *result = CGF.EmitScalarExpr(e);
2905 return TryEmitResult(result, false);
2910 static TryEmitResult
2911 tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
2912 return ARCRetainExprEmitter(CGF).visit(e);
2915 static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
2916 LValue lvalue,
2917 QualType type) {
2918 TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
2919 llvm::Value *value = result.getPointer();
2920 if (!result.getInt())
2921 value = CGF.EmitARCRetain(type, value);
2922 return value;
2923 }
2925 /// EmitARCRetainScalarExpr - Semantically equivalent to
2926 /// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
2927 /// best-effort attempt to peephole expressions that naturally produce
2928 /// retained objects.
2929 llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
2930 // The retain needs to happen within the full-expression.
2931 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
2932 enterFullExpression(cleanups);
2933 RunCleanupsScope scope(*this);
2934 return EmitARCRetainScalarExpr(cleanups->getSubExpr());
2937 TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
2938 llvm::Value *value = result.getPointer();
2939 if (!result.getInt())
2940 value = EmitARCRetain(e->getType(), value);
2941 return value;
2942 }
2944 llvm::Value *
2945 CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
2946 // The retain needs to happen within the full-expression.
2947 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
2948 enterFullExpression(cleanups);
2949 RunCleanupsScope scope(*this);
2950 return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
2953 TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
2954 llvm::Value *value = result.getPointer();
2955 if (result.getInt())
2956 value = EmitARCAutorelease(value);
2957 else
2958 value = EmitARCRetainAutorelease(e->getType(), value);
2959 return value;
2960 }
2962 llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
2963 llvm::Value *result;
2964 bool doRetain;
2966 if (shouldEmitSeparateBlockRetain(e)) {
2967 result = EmitScalarExpr(e);
2968 doRetain = true;
2969 } else {
2970 TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
2971 result = subresult.getPointer();
2972 doRetain = !subresult.getInt();
2973 }
2975 if (doRetain)
2976 result = EmitARCRetainBlock(result, /*mandatory*/ true);
2977 return EmitObjCConsumeObject(e->getType(), result);
2980 llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
2981 // In ARC, retain and autorelease the expression.
2982 if (getLangOpts().ObjCAutoRefCount) {
2983 // Do so before running any cleanups for the full-expression.
2984 // EmitARCRetainAutoreleaseScalarExpr does this for us.
2985 return EmitARCRetainAutoreleaseScalarExpr(expr);
2986 }
2988 // Otherwise, use the normal scalar-expression emission. The
2989 // exception machinery doesn't do anything special with the
2990 // exception like retaining it, so there's no safety associated with
2991 // only running cleanups after the throw has started, and when it
2992 // matters it tends to be substantially inferior code.
2993 return EmitScalarExpr(expr);
2998 /// An emitter for assigning into an __unsafe_unretained context.
2999 struct ARCUnsafeUnretainedExprEmitter :
3000 public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> {
3002 ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}
3004 llvm::Value *getValueOfResult(llvm::Value *value) {
3005 return value;
3006 }
3008 llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) {
3009 return CGF.Builder.CreateBitCast(value, resultType);
3012 llvm::Value *visitLValueToRValue(const Expr *e) {
3013 return CGF.EmitScalarExpr(e);
3016 /// For consumptions, just emit the subexpression and perform the
3017 /// consumption like normal.
3018 llvm::Value *visitConsumeObject(const Expr *e) {
3019 llvm::Value *value = CGF.EmitScalarExpr(e);
3020 return CGF.EmitObjCConsumeObject(e->getType(), value);
3023 /// No special logic for block extensions. (This probably can't
3024 /// actually happen in this emitter, though.)
3025 llvm::Value *visitExtendBlockObject(const Expr *e) {
3026 return CGF.EmitARCExtendBlockObject(e);
3029 /// For reclaims, perform an unsafeClaim if that's enabled.
3030 llvm::Value *visitReclaimReturnedObject(const Expr *e) {
3031 return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true);
3034 /// When we have an undecorated call, just emit it without adding
3035 /// the unsafeClaim.
3036 llvm::Value *visitCall(const Expr *e) {
3037 return CGF.EmitScalarExpr(e);
3040 /// Just do normal scalar emission in the default case.
3041 llvm::Value *visitExpr(const Expr *e) {
3042 return CGF.EmitScalarExpr(e);
3047 static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
3048 const Expr *e) {
3049 return ARCUnsafeUnretainedExprEmitter(CGF).visit(e);
3050 }
3052 /// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to
3053 /// immediately releasing the result of EmitARCRetainScalarExpr, but
3054 /// avoiding any spurious retains, including by performing reclaims
3055 /// with objc_unsafeClaimAutoreleasedReturnValue.
3056 llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
3057 // Look through full-expressions.
3058 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
3059 enterFullExpression(cleanups);
3060 RunCleanupsScope scope(*this);
3061 return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
3064 return emitARCUnsafeUnretainedScalarExpr(*this, e);
3067 std::pair<LValue,llvm::Value*>
3068 CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e,
3069 bool ignored) {
3070 // Evaluate the RHS first. If we're ignoring the result, assume
3071 // that we can emit at an unsafe +0.
3072 llvm::Value *value;
3073 if (ignored) {
3074 value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS());
3075 } else {
3076 value = EmitScalarExpr(e->getRHS());
3077 }
3079 // Emit the LHS and perform the store.
3080 LValue lvalue = EmitLValue(e->getLHS());
3081 EmitStoreOfScalar(value, lvalue);
3083 return std::pair<LValue,llvm::Value*>(std::move(lvalue), value);
3086 std::pair<LValue,llvm::Value*>
3087 CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
3088 bool ignored) {
3089 // Evaluate the RHS first.
3090 TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
3091 llvm::Value *value = result.getPointer();
3093 bool hasImmediateRetain = result.getInt();
3095 // If we didn't emit a retained object, and the l-value is of block
3096 // type, then we need to emit the block-retain immediately in case
3097 // it invalidates the l-value.
3098 if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
3099 value = EmitARCRetainBlock(value, /*mandatory*/ false);
3100 hasImmediateRetain = true;
3103 LValue lvalue = EmitLValue(e->getLHS());
3105 // If the RHS was emitted retained, expand this.
3106 if (hasImmediateRetain) {
3107 llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
3108 EmitStoreOfScalar(value, lvalue);
3109 EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
3110 } else {
3111 value = EmitARCStoreStrong(lvalue, value, ignored);
3112 }
3114 return std::pair<LValue,llvm::Value*>(lvalue, value);
3117 std::pair<LValue,llvm::Value*>
3118 CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
3119 llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
3120 LValue lvalue = EmitLValue(e->getLHS());
3122 EmitStoreOfScalar(value, lvalue);
3124 return std::pair<LValue,llvm::Value*>(lvalue, value);
3127 void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
3128 const ObjCAutoreleasePoolStmt &ARPS) {
3129 const Stmt *subStmt = ARPS.getSubStmt();
3130 const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
3132 CGDebugInfo *DI = getDebugInfo();
3133 if (DI)
3134 DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());
3136 // Keep track of the current cleanup stack depth.
3137 RunCleanupsScope Scope(*this);
3138 if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
3139 llvm::Value *token = EmitObjCAutoreleasePoolPush();
3140 EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
3141 } else {
3142 llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
3143 EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
3144 }
3146 for (const auto *I : S.body())
3147 EmitStmt(I);
3149 if (DI)
3150 DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
3151 }
3153 /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
3154 /// make sure it survives garbage collection until this point.
3155 void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
3156 // We just use an inline assembly.
3157 llvm::FunctionType *extenderType
3158 = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
3159 llvm::Value *extender
3160 = llvm::InlineAsm::get(extenderType,
3161 /* assembly */ "",
3162 /* constraints */ "r",
3163 /* side effects */ true);
3165 object = Builder.CreateBitCast(object, VoidPtrTy);
3166 EmitNounwindRuntimeCall(extender, object);
3169 /// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
3170 /// a non-trivial copy assignment operator, produce the following helper:
3171 /// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
3173 llvm::Constant *
3174 CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
3175 const ObjCPropertyImplDecl *PID) {
3176 if (!getLangOpts().CPlusPlus ||
3177 !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
3178 return nullptr;
3179 QualType Ty = PID->getPropertyIvarDecl()->getType();
3180 if (!Ty->isRecordType())
3181 return nullptr;
3182 const ObjCPropertyDecl *PD = PID->getPropertyDecl();
3183 if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
3184 return nullptr;
3185 llvm::Constant *HelperFn = nullptr;
3186 if (hasTrivialSetExpr(PID))
3187 return nullptr;
3188 assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
3189 if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
3190 return HelperFn;
3192 ASTContext &C = getContext();
3193 IdentifierInfo *II
3194 = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
3195 FunctionDecl *FD = FunctionDecl::Create(C,
3196 C.getTranslationUnitDecl(),
3198 SourceLocation(), II, C.VoidTy,
3203 QualType DestTy = C.getPointerType(Ty);
3204 QualType SrcTy = Ty;
3206 SrcTy = C.getPointerType(SrcTy);
3208 FunctionArgList args;
3209 ImplicitParamDecl dstDecl(getContext(), FD, SourceLocation(), nullptr,DestTy);
3210 args.push_back(&dstDecl);
3211 ImplicitParamDecl srcDecl(getContext(), FD, SourceLocation(), nullptr, SrcTy);
3212 args.push_back(&srcDecl);
3214 const CGFunctionInfo &FI =
3215 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);
3217 llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
3219 llvm::Function *Fn =
3220 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
3221 "__assign_helper_atomic_property_",
3224 CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
3226 StartFunction(FD, C.VoidTy, Fn, FI, args);
3228 DeclRefExpr DstExpr(&dstDecl, false, DestTy,
3229 VK_RValue, SourceLocation());
3230 UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
3231 VK_LValue, OK_Ordinary, SourceLocation());
3233 DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
3234 VK_RValue, SourceLocation());
3235 UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
3236 VK_LValue, OK_Ordinary, SourceLocation());
3238 Expr *Args[2] = { &DST, &SRC };
3239 CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
3240 CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
3241 Args, DestTy->getPointeeType(),
3242 VK_LValue, SourceLocation(), false);
3244 EmitStmt(&TheCall);
3246 FinishFunction();
3247 HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
3248 CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
3249 return HelperFn;
3250 }
3252 llvm::Constant *
3253 CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
3254 const ObjCPropertyImplDecl *PID) {
3255 if (!getLangOpts().CPlusPlus ||
3256 !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
3257 return nullptr;
3258 const ObjCPropertyDecl *PD = PID->getPropertyDecl();
3259 QualType Ty = PD->getType();
3260 if (!Ty->isRecordType())
3261 return nullptr;
3262 if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
3263 return nullptr;
3264 llvm::Constant *HelperFn = nullptr;
3266 if (hasTrivialGetExpr(PID))
3267 return nullptr;
3268 assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
3269 if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
3270 return HelperFn;
3273 ASTContext &C = getContext();
3274 IdentifierInfo *II
3275 = &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
3276 FunctionDecl *FD = FunctionDecl::Create(C,
3277 C.getTranslationUnitDecl(),
3279 SourceLocation(), II, C.VoidTy,
3284 QualType DestTy = C.getPointerType(Ty);
3285 QualType SrcTy = Ty;
3287 SrcTy = C.getPointerType(SrcTy);
3289 FunctionArgList args;
3290 ImplicitParamDecl dstDecl(getContext(), FD, SourceLocation(), nullptr,DestTy);
3291 args.push_back(&dstDecl);
3292 ImplicitParamDecl srcDecl(getContext(), FD, SourceLocation(), nullptr, SrcTy);
3293 args.push_back(&srcDecl);
3295 const CGFunctionInfo &FI =
3296 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);
3298 llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
3300 llvm::Function *Fn =
3301 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
3302 "__copy_helper_atomic_property_", &CGM.getModule());
3304 CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
3306 StartFunction(FD, C.VoidTy, Fn, FI, args);
3308 DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
3309 VK_RValue, SourceLocation());
3311 UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
3312 VK_LValue, OK_Ordinary, SourceLocation());
3314 CXXConstructExpr *CXXConstExpr =
3315 cast<CXXConstructExpr>(PID->getGetterCXXConstructor());
3317 SmallVector<Expr*, 4> ConstructorArgs;
3318 ConstructorArgs.push_back(&SRC);
3319 ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()),
3320 CXXConstExpr->arg_end());
3322 CXXConstructExpr *TheCXXConstructExpr =
3323 CXXConstructExpr::Create(C, Ty, SourceLocation(),
3324 CXXConstExpr->getConstructor(),
3325 CXXConstExpr->isElidable(),
3326 ConstructorArgs,
3327 CXXConstExpr->hadMultipleCandidates(),
3328 CXXConstExpr->isListInitialization(),
3329 CXXConstExpr->isStdInitListInitialization(),
3330 CXXConstExpr->requiresZeroInitialization(),
3331 CXXConstExpr->getConstructionKind(),
3334 DeclRefExpr DstExpr(&dstDecl, false, DestTy,
3335 VK_RValue, SourceLocation());
3337 RValue DV = EmitAnyExpr(&DstExpr);
3338 CharUnits Alignment
3339 = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
3340 EmitAggExpr(TheCXXConstructExpr,
3341 AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment),
3342 Qualifiers(),
3343 AggValueSlot::IsDestructed,
3344 AggValueSlot::DoesNotNeedGCBarriers,
3345 AggValueSlot::IsNotAliased));
3347 FinishFunction();
3348 HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
3349 CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
3350 return HelperFn;
3351 }
3353 llvm::Value *
3354 CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
3355 // Get selectors for copy/autorelease.
3356 IdentifierInfo *CopyID = &getContext().Idents.get("copy");
3357 Selector CopySelector =
3358 getContext().Selectors.getNullarySelector(CopyID);
3359 IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
3360 Selector AutoreleaseSelector =
3361 getContext().Selectors.getNullarySelector(AutoreleaseID);
3363 // Emit calls to copy/autorelease.
3364 CGObjCRuntime &Runtime = CGM.getObjCRuntime();
3365 llvm::Value *Val = Block;
3366 RValue Result;
3367 Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
3368 Ty, CopySelector,
3369 Val, CallArgList(), nullptr, nullptr);
3370 Val = Result.getScalarVal();
3371 Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
3372 Ty, AutoreleaseSelector,
3373 Val, CallArgList(), nullptr, nullptr);
3374 Val = Result.getScalarVal();
3375 return Val;
3376 }
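// In Objective-C terms the two sends above are roughly (sketch only):
//   [[block copy] autorelease]
// which is how a block is returned under MRR without leaking it.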
3379 CGObjCRuntime::~CGObjCRuntime() {}