//===--- CGExpr.cpp - Emit LLVM Code from Expressions --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
18 #include "CGDebugInfo.h"
19 #include "CGRecordLayout.h"
20 #include "CGObjCRuntime.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/ASTContext.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Frontend/CodeGenOptions.h"
25 #include "llvm/Intrinsics.h"
26 #include "llvm/Target/TargetData.h"
27 using namespace clang;
28 using namespace CodeGen;
//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//
llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
    cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}
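// Illustrative sketch (not from the original source): for a global in a
// non-default address space, the cast preserves that address space instead
// of producing a plain i8*:
//   @g = addrspace(3) global i32 0
//   EmitCastToVoidPtr(@g)  ==>  bitcast i32 addrspace(3)* @g
//                                 to i8 addrspace(3)*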
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}
void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}
llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}
llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}
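// A minimal example of the difference (assuming C '_Bool' lowering on a
// typical target): CreateIRTemp uses the register type, CreateMemTemp the
// in-memory type, which differ for booleans:
//   CreateIRTemp(BoolTy)   ==>  %tmp = alloca i1
//   CreateMemTemp(BoolTy)  ==>  %tmp = alloca i8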
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy);
}
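// Illustrative IR (a sketch, not taken from a test): evaluating an 'int*'
// condition such as 'if (p)' reduces to a comparison against null:
//   %0 = load i32** %p.addr
//   %tobool = icmp ne i32* %0, null          ; the returned Int1Ty value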
/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}
/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type.  The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
                                    bool IgnoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));

  EmitAggExpr(E, AggSlot, IgnoreResult);
  return AggSlot.asRValue();
}
/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}
/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals, bool IsInit) {
  if (E->getType()->isAnyComplexType())
    EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
  else if (hasAggregateLLVMType(E->getType()))
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
  else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
  }
}
namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
struct SubobjectAdjustment {
  enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;

  union {
    struct {
      const CastExpr *BasePath;
      const CXXRecordDecl *DerivedClass;
    } DerivedToBase;

    FieldDecl *Field;
  };

  SubobjectAdjustment(const CastExpr *BasePath,
                      const CXXRecordDecl *DerivedClass)
    : Kind(DerivedToBaseAdjustment) {
    DerivedToBase.BasePath = BasePath;
    DerivedToBase.DerivedClass = DerivedClass;
  }

  SubobjectAdjustment(FieldDecl *Field)
    : Kind(FieldAdjustment) {
    this->Field = Field;
  }
};
}
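// Illustrative example (assumed C++ source, not from this file): binding
//   struct B { int x; };  struct D : B { };
//   const B &r = D();
// materializes a temporary of type D and records a DerivedToBaseAdjustment
// so the reference ends up addressing the B subobject; a member access such
// as 'const int &i = D().x;' would record a FieldAdjustment instead.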
static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
                         const NamedDecl *InitializedDecl) {
  if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
    if (VD->hasGlobalStorage()) {
      llvm::SmallString<256> Name;
      llvm::raw_svector_ostream Out(Name);
      CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
      Out.flush();

      llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);

      // Create the reference temporary.
      llvm::GlobalValue *RefTemp =
        new llvm::GlobalVariable(CGF.CGM.getModule(),
                                 RefTempTy, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::Constant::getNullValue(RefTempTy),
                                 Name.str());
      return RefTemp;
    }
  }

  return CGF.CreateMemTemp(Type, "ref.tmp");
}
static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  // Look through expressions for materialized temporaries (for now).
  if (const MaterializeTemporaryExpr *M
                                      = dyn_cast<MaterializeTemporaryExpr>(E)) {
    // Objective-C++ ARC:
    //   If we are binding a reference to a temporary that has ownership, we
    //   need to perform retain/release operations on the temporary.
    if (CGF.getContext().getLangOptions().ObjCAutoRefCount &&
        E->getType()->isObjCLifetimeType() &&
        (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
      ObjCARCReferenceLifetimeType = E->getType();

    E = M->GetTemporaryExpr();
  }

  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
    E = DAE->getExpr();

  if (const ExprWithCleanups *TE = dyn_cast<ExprWithCleanups>(E)) {
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  if (const ObjCPropertyRefExpr *PRE =
        dyn_cast<ObjCPropertyRefExpr>(E->IgnoreParenImpCasts()))
    if (PRE->getGetterResultType()->isReferenceType())
      E = PRE;

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);
    if (LV.isPropertyRef()) {
      RV = CGF.EmitLoadOfPropertyRefLValue(LV);
      return RV.getScalarVal();
    }

    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                  ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);

      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    SmallVector<SubobjectAdjustment, 2> Adjustments;
    while (true) {
      E = E->IgnoreParens();

      if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
        if ((CE->getCastKind() == CK_DerivedToBase ||
             CE->getCastKind() == CK_UncheckedDerivedToBase) &&
            E->getType()->isRecordType()) {
          E = CE->getSubExpr();
          CXXRecordDecl *Derived
            = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
          Adjustments.push_back(SubobjectAdjustment(CE, Derived));
          continue;
        }

        if (CE->getCastKind() == CK_NoOp) {
          E = CE->getSubExpr();
          continue;
        }
      } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
        if (!ME->isArrow() && ME->getBase()->isRValue()) {
          assert(ME->getBase()->getType()->isRecordType());
          if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
            E = ME->getBase();
            Adjustments.push_back(SubobjectAdjustment(Field));
            continue;
          }
        }
      }

      if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
        if (opaque->getType()->isRecordType())
          return CGF.EmitOpaqueValueLValue(opaque).getAddress();

      // Nothing changed.
      break;
    }

    // Create a reference temporary if necessary.
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Qualifiers(),
                                      isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if we need to perform derived-to-base casts and/or field accesses,
    // to get from the temporary object we created (and, potentially, for which
    // we extended the lifetime) to the subobject we're binding the reference to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
              CGF.GetAddressOfBaseClass(Object,
                                        Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                        /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV =
            CGF.EmitLValueForField(Object, Adjustment.Field, 0);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }
        }
      }

      return Object;
    }
  }

  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}
RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      EmitCXXGlobalDtorRegistration(DtorFn,
                                    cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
                      "Not a reference temporary that needs to be deallocated");

    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      // This local is a GCC and MSVC compiler workaround.
      Destroyer *destroyer = precise ? &destroyARCStrongPrecise :
                                       &destroyARCStrongImprecise;
      pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
                  *destroyer, cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // This local is a GCC and MSVC compiler workaround.
      Destroyer *destroyer = &destroyARCWeak;
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, *destroyer, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}
/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}
void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
  if (!CatchUndefined)
    return;

  // This needs to be to the standard address space.
  Address = Builder.CreateBitCast(Address, Int8PtrTy);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);

  // In time, people may want to control this and use a 1 here.
  llvm::Value *Arg = Builder.getFalse();
  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
  llvm::BasicBlock *Cont = createBasicBlock();
  llvm::BasicBlock *Check = createBasicBlock();
  llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);

  EmitBlock(Check);
  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
                                        llvm::ConstantInt::get(IntPtrTy, Size)),
                       Cont, getTrapBB());
  EmitBlock(Cont);
}
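// Roughly, for a checked access of Size bytes this emits (sketch only):
//   %objsize = call i64 @llvm.objectsize.i64(i8* %addr, i1 false)
//   %unknown = icmp eq i64 %objsize, -1      ; -1 means "size unknown"
//   br i1 %unknown, label %cont, label %check
// check:
//   %ok = icmp uge i64 %objsize, <Size>
//   br i1 %ok, label %cont, label %trap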
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}
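// For example (illustrative): '++c' on a '_Complex float c' adds 1.0 to the
// real part only and leaves the imaginary part untouched:
//   %inc = fadd float %c.real, 1.000000e+00
//   ; %c.imag is stored back unchanged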
//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have
  // an identifiable address.  Just because the contents of the value are
  // undefined doesn't mean that the address can't be taken and compared.
  if (hasAggregateLLVMType(Ty)) {
    llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}
RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitCheck(LV.getAddress(),
              getContext().getTypeSizeInChars(E->getType()).getQuantity());
  return LV;
}
/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type.  If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    if (!E->getType()->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));

  case Expr::BlockDeclRefExprClass:
    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));

  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::ExprWithCleanupsClass:
    return EmitExprWithCleanupsLValue(cast<ExprWithCleanups>(E));
  case Expr::CXXScalarValueInitExprClass:
    return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::ObjCPropertyRefExprClass:
    return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getAlignment(), lvalue.getType(),
                          lvalue.getTBAAInfo());
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               llvm::MDNode *TBAAInfo) {
  llvm::LoadInst *Load = Builder.CreateLoad(Addr);
  if (Volatile)
    Load->setVolatile(true);
  if (Alignment)
    Load->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Load, TBAAInfo);

  return EmitFromMemory(Load, Ty);
}
static bool isBooleanUnderlyingType(QualType Ty) {
  if (const EnumType *ET = dyn_cast<EnumType>(Ty))
    return ET->getDecl()->getIntegerType()->isBooleanType();
  return false;
}
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
    assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
  }

  return Value;
}
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
    assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}
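// A small round-trip sketch (illustrative): storing then reloading a bool
// value goes through the i8 memory representation:
//   EmitToMemory(i1 %v)    ==>  %frombool = zext i1 %v to i8
//   EmitFromMemory(i8 %m)  ==>  %tobool = trunc i8 %m to i1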
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty,
                                        llvm::MDNode *TBAAInfo) {
  Value = EmitToMemory(Value, Ty);

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Store, TBAAInfo);
}
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment(), lvalue.getType(),
                    lvalue.getTBAAInfo());
}
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
    return RValue::get(EmitARCLoadWeak(LV.getAddress()));

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  if (LV.isVectorElt()) {
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  if (LV.isBitField())
    return EmitLoadOfBitfieldLValue(LV);

  assert(LV.isPropertyRef() && "Unknown LValue type!");
  return EmitLoadOfPropertyRefLValue(LV);
}
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
                                               AI.AccessWidth,
                       CGM.getContext().getTargetAddressSpace(LV.getType()));
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Load->setAlignment(AI.AccessAlignment.getQuantity());

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    if (ExtraBits)
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}
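// Illustrative sketch (not from a test): loading 'f' from
//   struct { unsigned a : 3; unsigned f : 5; };
// with a single i8 access shifts out the 3 low bits and masks to 5 bits:
//   %load = load i8* %ptr
//   %shr  = lshr i8 %load, 3
//   %val  = and i8 %shr, 31
// before being zero-extended to the i32 result type.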
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified());

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}
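// For example (illustrative): reading 'v.yx' from an ext_vector_type(4)
// becomes a shufflevector with the encoded indices:
//   %v = load <4 x float>* %addr
//   %r = shufflevector <4 x float> %v, <4 x float> undef,
//                      <2 x i32> <i32 1, i32 0>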
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified());
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(), Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    if (Dst.isBitField())
      return EmitStoreThroughBitfieldLValue(Src, Dst);

    assert(Dst.isPropertyRef() && "Unknown LValue type");
    return EmitStoreThroughPropertyRefLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing to do
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    } else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst);
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  if (Dst.getType()->isBooleanType())
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()));

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
    unsigned addressSpace =
      cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);

    llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access,
    // limited to the values that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      if (!AI.AccessAlignment.isZero())
        Load->setAlignment(AI.AccessAlignment.getQuantity());

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
                                                 Dst.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Store->setAlignment(AI.AccessAlignment.getQuantity());
  }
}
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified());
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shuffle vector if the src and destination are the same number of
      // elements and restore the vector mask since it is on the side it will
      // be stored.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
      }

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      //        into that?  This could be simpler.
      SmallVector<llvm::Constant*, 4> ExtMask;
      unsigned i;
      for (i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
      for (; i != NumDstElts; ++i)
        ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV);
      // build identity
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));

      // modify the entries that get shuffled in from the source
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned Idx = getAccessedFieldNo(i, Elts);
        Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
      }
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}
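// Illustrative sketch of the same-width case: 'v.yx = w' on two-element
// vectors loads 'v', shuffles the source into the selected lanes, and stores:
//   %v   = load <2 x float>* %v.addr
//   %new = shufflevector <2 x float> %w, <2 x float> undef,
//                        <2 x i32> <i32 1, i32 0>
//   store <2 x float> %new, <2 x float>* %v.addr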
// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating write-barriers.  It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOptions().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If ivar is a structure pointer, assigning to a field of
      // this struct follows gcc's behavior and makes it a non-ivar
      // write-barrier conservatively.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->isThreadSpecified());
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}
static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
                                llvm::Value *V, llvm::Type *IRType,
                                StringRef Name = StringRef()) {
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
         "Var decl must have external storage or be a file var decl!");

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
  if (VD->getType()->isReferenceType())
    V = CGF.Builder.CreateLoad(V);

  V = EmitBitCastOfLValueToProperType(CGF, V,
                                CGF.getTypes().ConvertTypeForMem(E->getType()));

  unsigned Alignment = CGF.getContext().getDeclAlign(VD).getQuantity();
  LValue LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
                                     const Expr *E, const FunctionDecl *FD) {
  llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use.  Correct for this with a
      // cast.
      QualType NoProtoType =
        CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
    }
  }
  unsigned Alignment = CGF.getContext().getDeclAlign(FD).getQuantity();
  return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  unsigned Alignment = getContext().getDeclAlign(ND).getQuantity();

  if (ND->hasAttr<WeakRefAttr>()) {
    const ValueDecl *VD = cast<ValueDecl>(ND);
    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, E->getType(), Alignment);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {

    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool NonGCable = VD->hasLocalStorage() &&
                     !VD->getType()->isReferenceType() &&
                     !VD->hasAttr<BlocksAttr>();

    llvm::Value *V = LocalDeclMap[VD];
    if (!V && VD->isStaticLocal())
      V = CGM.getStaticLocalDeclAddress(VD);
    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    if (VD->hasAttr<BlocksAttr>())
      V = BuildBlockByrefAddress(V, VD);

    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V);

    V = EmitBitCastOfLValueToProperType(*this, V,
                                    getTypes().ConvertTypeForMem(E->getType()));

    LValue LV = MakeAddrLValue(V, E->getType(), Alignment);
    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, fn);

  llvm_unreachable("Unhandled DeclRefExpr");

  // an invalid LValue, but the assert will
  // ensure that this point is never reached.
  return LValue();
}
LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  unsigned Alignment =
    getContext().getDeclAlign(E->getDecl()).getQuantity();
  return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
}
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValue LV = MakeAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getContext().getLangOptions().ObjC1 &&
        getContext().getLangOptions().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");
    llvm::Value *Addr = LV.getAddress();

    // real and imag are valid on scalars.  This is a faster way of
    // testing that.
    if (!cast<llvm::PointerType>(Addr->getType())
           ->getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    assert(E->getSubExpr()->getType()->isAnyComplexType());

    unsigned Idx = E->getOpcode() == UO_Imag;
    return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
                                                  Idx, "idx"),
                          ExprTy);
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}
LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType());
}
LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType());
}
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");

  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction: {
    unsigned Type = E->getIdentType();
    std::string GlobalVarName;

    switch (Type) {
    default: llvm_unreachable("Invalid type");
    case PredefinedExpr::Func:
      GlobalVarName = "__func__.";
      break;
    case PredefinedExpr::Function:
      GlobalVarName = "__FUNCTION__.";
      break;
    case PredefinedExpr::PrettyFunction:
      GlobalVarName = "__PRETTY_FUNCTION__.";
      break;
    }

    StringRef FnName = CurFn->getName();
    if (FnName.startswith("\01"))
      FnName = FnName.substr(1);
    GlobalVarName += FnName;

    const Decl *CurDecl = CurCodeDecl;
    if (CurDecl == 0)
      CurDecl = getContext().getTranslationUnitDecl();

    std::string FunctionName =
        (isa<BlockDecl>(CurDecl)
         ? FnName.str()
         : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl));

    llvm::Constant *C =
      CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
    return MakeAddrLValue(C, E->getType());
  }
  }
}
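// For example (illustrative): inside 'void foo(void)', '__func__' yields an
// internal constant string global named roughly '__func__.foo' containing
// "foo", and the returned lvalue is that global's address.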
llvm::BasicBlock *CodeGenFunction::getTrapBB() {
  const CodeGenOptions &GCO = CGM.getCodeGenOpts();

  // If we are not optimizing, don't collapse all calls to trap in the function
  // to the same call, that way, in the debugger they can see which operation
  // did in fact fail.  If we are optimizing, we collapse all calls to trap down
  // to just one per function to save on codesize.
  if (GCO.OptimizationLevel && TrapBB)
    return TrapBB;

  llvm::BasicBlock *Cont = 0;
  if (HaveInsertPoint()) {
    Cont = createBasicBlock("cont");
    EmitBranch(Cont);
  }
  TrapBB = createBasicBlock("trap");
  EmitBlock(TrapBB);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
  llvm::CallInst *TrapCall = Builder.CreateCall(F);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();

  if (Cont)
    EmitBlock(Cont);
  return TrapBB;
}
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const CastExpr *CE = dyn_cast<CastExpr>(E);
  if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
    return 0;

  // If this is a decay from variable width array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return 0;

  return SubExpr;
}
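// Illustrative example (assumed C source): for 'int A[10]; A[i];' the base
// 'A' decays via CK_ArrayToPointerDecay, so this returns the subexpression
// for 'A', letting the caller emit a single 'gep [10 x i32]* %A, 0, %i'
// instead of a decay gep followed by a second gep.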
1577 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1578 // The index must always be an integer, which is not an aggregate. Emit it.
1579 llvm::Value *Idx = EmitScalarExpr(E->getIdx());
1580 QualType IdxTy = E->getIdx()->getType();
1581 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
1583 // If the base is a vector type, then we are forming a vector element lvalue
1584 // with this subscript.
1585 if (E->getBase()->getType()->isVectorType()) {
1586 // Emit the vector as an lvalue to get its address.
1587 LValue LHS = EmitLValue(E->getBase());
1588 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
1589 Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
1590 return LValue::MakeVectorElt(LHS.getAddress(), Idx,
1591 E->getBase()->getType());
1594 // Extend or truncate the index type to 32 or 64-bits.
1595 if (Idx->getType() != IntPtrTy)
1596 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
1598 // FIXME: As llvm implements the object size checking, this can come out.
1599 if (CatchUndefined) {
1600 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
1601 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
1602 if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
1603 if (const ConstantArrayType *CAT
1604 = getContext().getAsConstantArrayType(DRE->getType())) {
1605 llvm::APInt Size = CAT->getSize();
1606 llvm::BasicBlock *Cont = createBasicBlock("cont");
1607 Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
1608 llvm::ConstantInt::get(Idx->getType(), Size)),
1617 // We know that the pointer points to a type of the correct size, unless the
1618 // size is a VLA or Objective-C interface.
1619 llvm::Value *Address = 0;
1620 unsigned ArrayAlignment = 0;
1621 if (const VariableArrayType *vla =
1622 getContext().getAsVariableArrayType(E->getType())) {
1623 // The base must be a pointer, which is not an aggregate. Emit
1624 // it. It needs to be emitted first in case it's what captures
1626 Address = EmitScalarExpr(E->getBase());
1628 // The element count here is the total number of non-VLA elements.
1629 llvm::Value *numElements = getVLASize(vla).first;
1631 // Effectively, the multiply by the VLA size is part of the GEP.
1632 // GEP indexes are signed, and scaling an index isn't permitted to
1633 // signed-overflow, so we use the same semantics for our explicit
1634 // multiply. We suppress this if overflow is not undefined behavior.
1635 if (getLangOptions().isSignedOverflowDefined()) {
1636 Idx = Builder.CreateMul(Idx, numElements);
1637 Address = Builder.CreateGEP(Address, Idx, "arrayidx");
1639 Idx = Builder.CreateNSWMul(Idx, numElements);
1640 Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
1642 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
1643 // Indexing over an interface, as in "NSString *P; P[4];"
1644 llvm::Value *InterfaceSize =
1645 llvm::ConstantInt::get(Idx->getType(),
1646 getContext().getTypeSizeInChars(OIT).getQuantity());
1648 Idx = Builder.CreateMul(Idx, InterfaceSize);
1650 // The base must be a pointer, which is not an aggregate. Emit it.
1651 llvm::Value *Base = EmitScalarExpr(E->getBase());
1652 Address = EmitCastToVoidPtr(Base);
1653 Address = Builder.CreateGEP(Address, Idx, "arrayidx");
1654 Address = Builder.CreateBitCast(Address, Base->getType());
1655 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
1656 // If this is A[i] where A is an array, the frontend will have decayed the
1657 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
1658 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
1659 // "gep x, i" here. Emit one "gep A, 0, i".
1660 assert(Array->getType()->isArrayType() &&
1661 "Array to pointer decay must have array source type!");
1662 LValue ArrayLV = EmitLValue(Array);
1663 llvm::Value *ArrayPtr = ArrayLV.getAddress();
1664 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
1665 llvm::Value *Args[] = { Zero, Idx };
1667 // Propagate the alignment from the array itself to the result.
1668 ArrayAlignment = ArrayLV.getAlignment();
1670 if (getContext().getLangOptions().isSignedOverflowDefined())
1671 Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
1673 Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
1674 } else {
1675 // The base must be a pointer, which is not an aggregate. Emit it.
1676 llvm::Value *Base = EmitScalarExpr(E->getBase());
1677 if (getContext().getLangOptions().isSignedOverflowDefined())
1678 Address = Builder.CreateGEP(Base, Idx, "arrayidx");
1679 else
1680 Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
1681 }
1683 QualType T = E->getBase()->getType()->getPointeeType();
1684 assert(!T.isNull() &&
1685 "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
1687 // Limit the alignment to that of the result type.
1688 if (ArrayAlignment) {
1689 unsigned Align = getContext().getTypeAlignInChars(T).getQuantity();
1690 ArrayAlignment = std::min(Align, ArrayAlignment);
1691 }
1693 LValue LV = MakeAddrLValue(Address, T, ArrayAlignment);
1694 LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
1696 if (getContext().getLangOptions().ObjC1 &&
1697 getContext().getLangOptions().getGC() != LangOptions::NonGC) {
1698 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1699 setObjCGCLValueClass(getContext(), E, LV);
1700 }
1702 return LV;
1703 }
1704 static
1705 llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
1706 SmallVector<unsigned, 4> &Elts) {
1707 SmallVector<llvm::Constant*, 4> CElts;
1709 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
1710 for (unsigned i = 0, e = Elts.size(); i != e; ++i)
1711 CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
1713 return llvm::ConstantVector::get(CElts);
1714 }
1716 LValue CodeGenFunction::
1717 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
1718 // Emit the base vector as an l-value.
1719 LValue Base;
1721 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
1722 if (E->isArrow()) {
1723 // If it is a pointer to a vector, emit the address and form an lvalue with
1724 // it.
1725 llvm::Value *Ptr = EmitScalarExpr(E->getBase());
1726 const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
1727 Base = MakeAddrLValue(Ptr, PT->getPointeeType());
1728 Base.getQuals().removeObjCGCAttr();
1729 } else if (E->getBase()->isGLValue()) {
1730 // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
1731 // emit the base as an lvalue.
1732 assert(E->getBase()->getType()->isVectorType());
1733 Base = EmitLValue(E->getBase());
1734 } else {
1735 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
1736 assert(E->getBase()->getType()->isVectorType() &&
1737 "Result must be a vector");
1738 llvm::Value *Vec = EmitScalarExpr(E->getBase());
1740 // Store the vector to memory (because LValue wants an address).
1741 llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
1742 Builder.CreateStore(Vec, VecMem);
1743 Base = MakeAddrLValue(VecMem, E->getBase()->getType());
1744 }
1746 QualType type =
1747 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
1749 // Encode the element access list into a vector of unsigned indices.
1750 SmallVector<unsigned, 4> Indices;
1751 E->getEncodedElementAccess(Indices);
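// [Illustrative note added in editing, not part of the original file.]
// Given "typedef float float4 __attribute__((ext_vector_type(4)));",
// the access "v.zy" encodes to {2, 1}, which becomes the constant index
// vector <i32 2, i32 1> built below.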
1753 if (Base.isSimple()) {
1754 llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices);
1755 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type);
1756 }
1757 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
1759 llvm::Constant *BaseElts = Base.getExtVectorElts();
1760 SmallVector<llvm::Constant *, 4> CElts;
1762 for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
1763 if (isa<llvm::ConstantAggregateZero>(BaseElts))
1764 CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
1765 else
1766 CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
1767 }
1768 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
1769 return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type);
1770 }
1772 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
1773 bool isNonGC = false;
1774 Expr *BaseExpr = E->getBase();
1775 llvm::Value *BaseValue = NULL;
1776 Qualifiers BaseQuals;
1778 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
1779 if (E->isArrow()) {
1780 BaseValue = EmitScalarExpr(BaseExpr);
1781 const PointerType *PTy =
1782 BaseExpr->getType()->getAs<PointerType>();
1783 BaseQuals = PTy->getPointeeType().getQualifiers();
1784 } else {
1785 LValue BaseLV = EmitLValue(BaseExpr);
1786 if (BaseLV.isNonGC())
1787 isNonGC = true;
1788 // FIXME: this isn't right for bitfields.
1789 BaseValue = BaseLV.getAddress();
1790 QualType BaseTy = BaseExpr->getType();
1791 BaseQuals = BaseTy.getQualifiers();
1792 }
1794 NamedDecl *ND = E->getMemberDecl();
1795 if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
1796 LValue LV = EmitLValueForField(BaseValue, Field,
1797 BaseQuals.getCVRQualifiers());
1798 LV.setNonGC(isNonGC);
1799 setObjCGCLValueClass(getContext(), E, LV);
1800 return LV;
1801 }
1803 if (VarDecl *VD = dyn_cast<VarDecl>(ND))
1804 return EmitGlobalVarDeclLValue(*this, E, VD);
1806 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
1807 return EmitFunctionDeclLValue(*this, E, FD);
1809 llvm_unreachable("Unhandled member declaration!");
1810 }
1812 LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
1813 const FieldDecl *Field,
1814 unsigned CVRQualifiers) {
1815 const CGRecordLayout &RL =
1816 CGM.getTypes().getCGRecordLayout(Field->getParent());
1817 const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
1818 return LValue::MakeBitfield(BaseValue, Info,
1819 Field->getType().withCVRQualifiers(CVRQualifiers));
1820 }
1822 /// EmitLValueForAnonRecordField - Given that the field is a member of
1823 /// an anonymous struct or union buried inside a record, and given
1824 /// that the base value is a pointer to the enclosing record, derive
1825 /// an lvalue for the ultimate field.
1826 LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
1827 const IndirectFieldDecl *Field,
1828 unsigned CVRQualifiers) {
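// [Illustrative note added in editing, not part of the original file.]
// Given "struct S { struct { int x; }; } s;", the IndirectFieldDecl for
// "s.x" carries the chain {<anonymous member>, x}; the loop below peels
// one enclosing record per iteration.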
1829 IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
1830 IEnd = Field->chain_end();
1831 while (true) {
1832 LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I),
1833 CVRQualifiers);
1834 if (++I == IEnd) return LV;
1836 assert(LV.isSimple());
1837 BaseValue = LV.getAddress();
1838 CVRQualifiers |= LV.getVRQualifiers();
1839 }
1840 }
1842 LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
1843 const FieldDecl *field,
1844 unsigned cvr) {
1845 if (field->isBitField())
1846 return EmitLValueForBitfield(baseAddr, field, cvr);
1848 const RecordDecl *rec = field->getParent();
1849 QualType type = field->getType();
1851 bool mayAlias = rec->hasAttr<MayAliasAttr>();
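// [Illustrative note added in editing, not part of the original file.]
// The attribute is spelled e.g. "struct __attribute__((may_alias)) S
// { int i; };" and makes every field access act like a 'char' access
// for TBAA purposes (see the end of this function).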
1853 llvm::Value *addr = baseAddr;
1854 if (rec->isUnion()) {
1855 // For unions, there is no pointer adjustment.
1856 assert(!type->isReferenceType() && "union has reference member");
1857 } else {
1858 // For structs, we GEP to the field that the record layout suggests.
1859 unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
1860 addr = Builder.CreateStructGEP(addr, idx, field->getName());
1862 // If this is a reference field, load the reference right now.
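// [Illustrative note added in editing, not part of the original file.]
// Given "struct S { int &r; };", an access like "s.r = 0" loads the
// i32* stored in the field and then stores through that pointer.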
1863 if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
1864 llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
1865 if (cvr & Qualifiers::Volatile) load->setVolatile(true);
1867 if (CGM.shouldUseTBAA()) {
1868 llvm::MDNode *tbaa;
1869 if (mayAlias)
1870 tbaa = CGM.getTBAAInfo(getContext().CharTy);
1871 else
1872 tbaa = CGM.getTBAAInfo(type);
1873 CGM.DecorateInstruction(load, tbaa);
1874 }
1876 addr = load;
1877 mayAlias = false;
1878 type = refType->getPointeeType();
1879 cvr = 0; // qualifiers don't recursively apply to referencee
1880 }
1881 }
1883 // Make sure that the address is pointing to the right type. This is critical
1884 // for both unions and structs. A union needs a bitcast, a struct element
1885 // will need a bitcast if the LLVM type laid out doesn't match the desired
1886 // type.
1887 addr = EmitBitCastOfLValueToProperType(*this, addr,
1888 CGM.getTypes().ConvertTypeForMem(type),
1889 field->getName());
1891 if (field->hasAttr<AnnotateAttr>())
1892 addr = EmitFieldAnnotations(field, addr);
1894 unsigned alignment = getContext().getDeclAlign(field).getQuantity();
1895 LValue LV = MakeAddrLValue(addr, type, alignment);
1896 LV.getQuals().addCVRQualifiers(cvr);
1898 // __weak attribute on a field is ignored.
1899 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
1900 LV.getQuals().removeObjCGCAttr();
1902 // Fields of may_alias structs act like 'char' for TBAA purposes.
1903 // FIXME: this should get propagated down through anonymous structs
1904 // and unions.
1905 if (mayAlias && LV.getTBAAInfo())
1906 LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));
1908 return LV;
1909 }
1911 LValue
1912 CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
1913 const FieldDecl *Field,
1914 unsigned CVRQualifiers) {
1915 QualType FieldType = Field->getType();
1917 if (!FieldType->isReferenceType())
1918 return EmitLValueForField(BaseValue, Field, CVRQualifiers);
1920 const CGRecordLayout &RL =
1921 CGM.getTypes().getCGRecordLayout(Field->getParent());
1922 unsigned idx = RL.getLLVMFieldNo(Field);
1923 llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx);
1924 assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
1927 // Make sure that the address is pointing to the right type. This is critical
1928 // for both unions and structs. A union needs a bitcast, a struct element
1929 // will need a bitcast if the LLVM type laid out doesn't match the desired
1930 // type.
1931 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
1932 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
1933 V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS));
1935 unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
1936 return MakeAddrLValue(V, FieldType, Alignment);
1937 }
1939 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
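// [Illustrative note added in editing, not part of the original file.]
// This handles C99 compound literals used as lvalues, e.g.
//   struct Pt { int x, y; };
//   int *p = &(struct Pt){1, 2}.x;
// by materializing the literal into a stack temporary.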
1940 llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
1941 const Expr *InitExpr = E->getInitializer();
1942 LValue Result = MakeAddrLValue(DeclPtr, E->getType());
1944 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
1945 /*Init*/ true);
1947 return Result;
1948 }
1950 LValue CodeGenFunction::
1951 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
1952 if (!expr->isGLValue()) {
1953 // ?: here should be an aggregate.
1954 assert((hasAggregateLLVMType(expr->getType()) &&
1955 !expr->getType()->isAnyComplexType()) &&
1956 "Unexpected conditional operator!");
1957 return EmitAggExprToLValue(expr);
1958 }
1960 const Expr *condExpr = expr->getCond();
1961 bool CondExprBool;
1962 if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
1963 const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
1964 if (!CondExprBool) std::swap(live, dead);
1966 if (!ContainsLabel(dead))
1967 return EmitLValue(live);
1968 }
1970 OpaqueValueMapping binding(*this, expr);
1972 llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
1973 llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
1974 llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
1976 ConditionalEvaluation eval(*this);
1977 EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
1979 // Any temporaries created here are conditional.
1980 EmitBlock(lhsBlock);
1981 eval.begin(*this);
1982 LValue lhs = EmitLValue(expr->getTrueExpr());
1983 eval.end(*this);
1985 if (!lhs.isSimple())
1986 return EmitUnsupportedLValue(expr, "conditional operator");
1988 lhsBlock = Builder.GetInsertBlock();
1989 Builder.CreateBr(contBlock);
1991 // Any temporaries created here are conditional.
1992 EmitBlock(rhsBlock);
1993 eval.begin(*this);
1994 LValue rhs = EmitLValue(expr->getFalseExpr());
1995 eval.end(*this);
1996 if (!rhs.isSimple())
1997 return EmitUnsupportedLValue(expr, "conditional operator");
1998 rhsBlock = Builder.GetInsertBlock();
2000 EmitBlock(contBlock);
2002 llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
2003 "cond-lvalue");
2004 phi->addIncoming(lhs.getAddress(), lhsBlock);
2005 phi->addIncoming(rhs.getAddress(), rhsBlock);
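// [Illustrative note added in editing, not part of the original file.]
// The phi above merges the two addresses of a glvalue conditional such
// as "(cond ? x : y) = 42;", so the store sees a single pointer.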
2006 return MakeAddrLValue(phi, expr->getType());
2007 }
2009 /// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
2010 /// If the cast is a dynamic_cast, we can have the usual lvalue result,
2011 /// otherwise if a cast is needed by the code generator in an lvalue context,
2012 /// then it must mean that we need the address of an aggregate in order to
2013 /// access one of its fields. This can happen for all the reasons that casts
2014 // are permitted with an aggregate result, including noop aggregate casts, and
2015 /// cast from scalar to union.
2016 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
2017 switch (E->getCastKind()) {
2018 case CK_ToVoid:
2019 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
2021 case CK_Dependent:
2022 llvm_unreachable("dependent cast kind in IR gen!");
2024 case CK_GetObjCProperty: {
2025 LValue LV = EmitLValue(E->getSubExpr());
2026 assert(LV.isPropertyRef());
2027 RValue RV = EmitLoadOfPropertyRefLValue(LV);
2029 // Property is an aggregate r-value.
2030 if (RV.isAggregate()) {
2031 return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2032 }
2034 // Implicit property returns an l-value.
2035 assert(RV.isScalar());
2036 return MakeAddrLValue(RV.getScalarVal(), E->getSubExpr()->getType());
2037 }
2039 case CK_NoOp:
2040 case CK_LValueToRValue:
2041 if (!E->getSubExpr()->Classify(getContext()).isPRValue()
2042 || E->getType()->isRecordType())
2043 return EmitLValue(E->getSubExpr());
2044 // Fall through to synthesize a temporary.
2046 case CK_BitCast:
2047 case CK_ArrayToPointerDecay:
2048 case CK_FunctionToPointerDecay:
2049 case CK_NullToMemberPointer:
2050 case CK_NullToPointer:
2051 case CK_IntegralToPointer:
2052 case CK_PointerToIntegral:
2053 case CK_PointerToBoolean:
2054 case CK_VectorSplat:
2055 case CK_IntegralCast:
2056 case CK_IntegralToBoolean:
2057 case CK_IntegralToFloating:
2058 case CK_FloatingToIntegral:
2059 case CK_FloatingToBoolean:
2060 case CK_FloatingCast:
2061 case CK_FloatingRealToComplex:
2062 case CK_FloatingComplexToReal:
2063 case CK_FloatingComplexToBoolean:
2064 case CK_FloatingComplexCast:
2065 case CK_FloatingComplexToIntegralComplex:
2066 case CK_IntegralRealToComplex:
2067 case CK_IntegralComplexToReal:
2068 case CK_IntegralComplexToBoolean:
2069 case CK_IntegralComplexCast:
2070 case CK_IntegralComplexToFloatingComplex:
2071 case CK_DerivedToBaseMemberPointer:
2072 case CK_BaseToDerivedMemberPointer:
2073 case CK_MemberPointerToBoolean:
2074 case CK_AnyPointerToBlockPointerCast:
2075 case CK_ARCProduceObject:
2076 case CK_ARCConsumeObject:
2077 case CK_ARCReclaimReturnedObject:
2078 case CK_ARCExtendBlockObject: {
2079 // These casts only produce lvalues when we're binding a reference to a
2080 // temporary realized from a (converted) pure rvalue. Emit the expression
2081 // as a value, copy it into a temporary, and return an lvalue referring to
2082 // it.
2083 llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
2084 EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
2085 return MakeAddrLValue(V, E->getType());
2086 }
2088 case CK_Dynamic: {
2089 LValue LV = EmitLValue(E->getSubExpr());
2090 llvm::Value *V = LV.getAddress();
2091 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
2092 return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
2093 }
2095 case CK_ConstructorConversion:
2096 case CK_UserDefinedConversion:
2097 case CK_CPointerToObjCPointerCast:
2098 case CK_BlockPointerToObjCPointerCast:
2099 return EmitLValue(E->getSubExpr());
2101 case CK_UncheckedDerivedToBase:
2102 case CK_DerivedToBase: {
2103 const RecordType *DerivedClassTy =
2104 E->getSubExpr()->getType()->getAs<RecordType>();
2105 CXXRecordDecl *DerivedClassDecl =
2106 cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2108 LValue LV = EmitLValue(E->getSubExpr());
2109 llvm::Value *This = LV.getAddress();
2111 // Perform the derived-to-base conversion
2112 llvm::Value *Base =
2113 GetAddressOfBaseClass(This, DerivedClassDecl,
2114 E->path_begin(), E->path_end(),
2115 /*NullCheckValue=*/false);
2117 return MakeAddrLValue(Base, E->getType());
2118 }
2119 case CK_ToUnion:
2120 return EmitAggExprToLValue(E);
2121 case CK_BaseToDerived: {
2122 const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
2123 CXXRecordDecl *DerivedClassDecl =
2124 cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2126 LValue LV = EmitLValue(E->getSubExpr());
2128 // Perform the base-to-derived conversion
2129 llvm::Value *Derived =
2130 GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
2131 E->path_begin(), E->path_end(),
2132 /*NullCheckValue=*/false);
2134 return MakeAddrLValue(Derived, E->getType());
2135 }
2136 case CK_LValueBitCast: {
2137 // This must be a reinterpret_cast (or c-style equivalent).
2138 const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
2140 LValue LV = EmitLValue(E->getSubExpr());
2141 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2142 ConvertType(CE->getTypeAsWritten()));
2143 return MakeAddrLValue(V, E->getType());
2144 }
2145 case CK_ObjCObjectLValueCast: {
2146 LValue LV = EmitLValue(E->getSubExpr());
2147 QualType ToType = getContext().getLValueReferenceType(E->getType());
2148 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2149 ConvertType(ToType));
2150 return MakeAddrLValue(V, E->getType());
2151 }
2152 }
2154 llvm_unreachable("Unhandled lvalue cast kind?");
2155 }
2157 LValue CodeGenFunction::EmitNullInitializationLValue(
2158 const CXXScalarValueInitExpr *E) {
2159 QualType Ty = E->getType();
2160 LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
2161 EmitNullInitialization(LV.getAddress(), Ty);
2162 return LV;
2163 }
2165 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
2166 assert(e->isGLValue() || e->getType()->isRecordType());
2167 return getOpaqueLValueMapping(e);
2168 }
2170 LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
2171 const MaterializeTemporaryExpr *E) {
2172 RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
2173 return MakeAddrLValue(RV.getScalarVal(), E->getType());
2174 }
2177 //===--------------------------------------------------------------------===//
2178 // Expression Emission
2179 //===--------------------------------------------------------------------===//
2181 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
2182 ReturnValueSlot ReturnValue) {
2183 if (CGDebugInfo *DI = getDebugInfo())
2184 DI->EmitLocation(Builder, E->getLocStart());
2186 // Builtins never have block type.
2187 if (E->getCallee()->getType()->isBlockPointerType())
2188 return EmitBlockCallExpr(E, ReturnValue);
2190 if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
2191 return EmitCXXMemberCallExpr(CE, ReturnValue);
2193 if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
2194 return EmitCUDAKernelCallExpr(CE, ReturnValue);
2196 const Decl *TargetDecl = E->getCalleeDecl();
2197 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2198 if (unsigned builtinID = FD->getBuiltinID())
2199 return EmitBuiltinExpr(FD, builtinID, E);
2200 }
2202 if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
2203 if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
2204 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
2206 if (const CXXPseudoDestructorExpr *PseudoDtor
2207 = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
2208 QualType DestroyedType = PseudoDtor->getDestroyedType();
2209 if (getContext().getLangOptions().ObjCAutoRefCount &&
2210 DestroyedType->isObjCLifetimeType() &&
2211 (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
2212 DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
2213 // Automatic Reference Counting:
2214 // If the pseudo-expression names a retainable object with weak or
2215 // strong lifetime, the object shall be released.
2216 Expr *BaseExpr = PseudoDtor->getBase();
2217 llvm::Value *BaseValue = NULL;
2218 Qualifiers BaseQuals;
2220 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
2221 if (PseudoDtor->isArrow()) {
2222 BaseValue = EmitScalarExpr(BaseExpr);
2223 const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
2224 BaseQuals = PTy->getPointeeType().getQualifiers();
2225 } else {
2226 LValue BaseLV = EmitLValue(BaseExpr);
2227 BaseValue = BaseLV.getAddress();
2228 QualType BaseTy = BaseExpr->getType();
2229 BaseQuals = BaseTy.getQualifiers();
2230 }
2232 switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
2233 case Qualifiers::OCL_None:
2234 case Qualifiers::OCL_ExplicitNone:
2235 case Qualifiers::OCL_Autoreleasing:
2236 break;
2238 case Qualifiers::OCL_Strong:
2239 EmitARCRelease(Builder.CreateLoad(BaseValue,
2240 PseudoDtor->getDestroyedType().isVolatileQualified()),
2241 /*precise*/ true);
2242 break;
2244 case Qualifiers::OCL_Weak:
2245 EmitARCDestroyWeak(BaseValue);
2246 break;
2247 }
2248 } else {
2249 // C++ [expr.pseudo]p1:
2250 // The result shall only be used as the operand for the function call
2251 // operator (), and the result of such a call has type void. The only
2252 // effect is the evaluation of the postfix-expression before the dot or
2253 // arrow.
2254 EmitScalarExpr(E->getCallee());
2255 }
2257 return RValue::get(0);
2258 }
2260 llvm::Value *Callee = EmitScalarExpr(E->getCallee());
2261 return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
2262 E->arg_begin(), E->arg_end(), TargetDecl);
2263 }
2265 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
2266 // Comma expressions just emit their LHS then their RHS as an l-value.
2267 if (E->getOpcode() == BO_Comma) {
2268 EmitIgnoredExpr(E->getLHS());
2269 EnsureInsertPoint();
2270 return EmitLValue(E->getRHS());
2271 }
2273 if (E->getOpcode() == BO_PtrMemD ||
2274 E->getOpcode() == BO_PtrMemI)
2275 return EmitPointerToDataMemberBinaryExpr(E);
2277 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
2279 // Note that in all of these cases, __block variables need the RHS
2280 // evaluated first just in case the variable gets moved by the RHS.
2282 if (!hasAggregateLLVMType(E->getType())) {
2283 switch (E->getLHS()->getType().getObjCLifetime()) {
2284 case Qualifiers::OCL_Strong:
2285 return EmitARCStoreStrong(E, /*ignored*/ false).first;
2287 case Qualifiers::OCL_Autoreleasing:
2288 return EmitARCStoreAutoreleasing(E).first;
2290 // No reason to do any of these differently.
2291 case Qualifiers::OCL_None:
2292 case Qualifiers::OCL_ExplicitNone:
2293 case Qualifiers::OCL_Weak:
2294 break;
2295 }
2297 RValue RV = EmitAnyExpr(E->getRHS());
2298 LValue LV = EmitLValue(E->getLHS());
2299 EmitStoreThroughLValue(RV, LV);
2300 return LV;
2301 }
2303 if (E->getType()->isAnyComplexType())
2304 return EmitComplexAssignmentLValue(E);
2306 return EmitAggExprToLValue(E);
2307 }
2309 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
2310 RValue RV = EmitCallExpr(E);
2312 if (!RV.isScalar())
2313 return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2315 assert(E->getCallReturnType()->isReferenceType() &&
2316 "Can't have a scalar return unless the return type is a "
2319 return MakeAddrLValue(RV.getScalarVal(), E->getType());
2320 }
2322 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
2323 // FIXME: This shouldn't require another copy.
2324 return EmitAggExprToLValue(E);
2325 }
2327 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
2328 assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
2329 && "binding l-value to type which needs a temporary");
2330 AggValueSlot Slot = CreateAggTemp(E->getType());
2331 EmitCXXConstructExpr(E, Slot);
2332 return MakeAddrLValue(Slot.getAddr(), E->getType());
2333 }
2335 LValue
2336 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
2337 return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
2338 }
2340 LValue
2341 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
2342 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
2343 Slot.setExternallyDestructed();
2344 EmitAggExpr(E->getSubExpr(), Slot);
2345 EmitCXXTemporary(E->getTemporary(), Slot.getAddr());
2346 return MakeAddrLValue(Slot.getAddr(), E->getType());
2347 }
2349 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
2350 RValue RV = EmitObjCMessageExpr(E);
2352 if (!RV.isScalar())
2353 return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2355 assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
2356 "Can't have a scalar return unless the return type is a "
2359 return MakeAddrLValue(RV.getScalarVal(), E->getType());
2360 }
2362 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
2363 llvm::Value *V =
2364 CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
2365 return MakeAddrLValue(V, E->getType());
2366 }
2368 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2369 const ObjCIvarDecl *Ivar) {
2370 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
2371 }
2373 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
2374 llvm::Value *BaseValue,
2375 const ObjCIvarDecl *Ivar,
2376 unsigned CVRQualifiers) {
2377 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
2378 Ivar, CVRQualifiers);
2379 }
2381 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
2382 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
2383 llvm::Value *BaseValue = 0;
2384 const Expr *BaseExpr = E->getBase();
2385 Qualifiers BaseQuals;
2386 QualType ObjectTy;
2387 if (E->isArrow()) {
2388 BaseValue = EmitScalarExpr(BaseExpr);
2389 ObjectTy = BaseExpr->getType()->getPointeeType();
2390 BaseQuals = ObjectTy.getQualifiers();
2391 } else {
2392 LValue BaseLV = EmitLValue(BaseExpr);
2393 // FIXME: this isn't right for bitfields.
2394 BaseValue = BaseLV.getAddress();
2395 ObjectTy = BaseExpr->getType();
2396 BaseQuals = ObjectTy.getQualifiers();
2397 }
2399 LValue LV =
2400 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
2401 BaseQuals.getCVRQualifiers());
2402 setObjCGCLValueClass(getContext(), E, LV);
2403 return LV;
2404 }
2406 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
2407 // We can only get an l-value for a statement expression that returns an aggregate type.
2408 RValue RV = EmitAnyExprToTemp(E);
2409 return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2410 }
2412 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
2413 ReturnValueSlot ReturnValue,
2414 CallExpr::const_arg_iterator ArgBeg,
2415 CallExpr::const_arg_iterator ArgEnd,
2416 const Decl *TargetDecl) {
2417 // Get the actual function type. The callee type will always be a pointer to
2418 // function type or a block pointer type.
2419 assert(CalleeType->isFunctionPointerType() &&
2420 "Call must have function pointer type!");
2422 CalleeType = getContext().getCanonicalType(CalleeType);
2424 const FunctionType *FnType
2425 = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
2427 CallArgList Args;
2428 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
2430 const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FnType);
2432 // C99 6.5.2.2p6:
2433 // If the expression that denotes the called function has a type
2434 // that does not include a prototype, [the default argument
2435 // promotions are performed]. If the number of arguments does not
2436 // equal the number of parameters, the behavior is undefined. If
2437 // the function is defined with a type that includes a prototype,
2438 // and either the prototype ends with an ellipsis (, ...) or the
2439 // types of the arguments after promotion are not compatible with
2440 // the types of the parameters, the behavior is undefined. If the
2441 // function is defined with a type that does not include a
2442 // prototype, and the types of the arguments after promotion are
2443 // not compatible with those of the parameters after promotion,
2444 // the behavior is undefined [except in some trivial cases].
2445 // That is, in the general case, we should assume that a call
2446 // through an unprototyped function type works like a *non-variadic*
2447 // call. The way we make this work is to cast to the exact type
2448 // of the promoted arguments.
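// [Illustrative note added in editing, not part of the original file.]
// In C, given "void f(); ... f(1.0f);", the float promotes to double
// and (on targets where isNoProtoCallVariadic is false) the callee is
// bitcast to the exact type "void (double)" before the call, rather
// than being emitted as a variadic call.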
2449 if (isa<FunctionNoProtoType>(FnType) &&
2450 !getTargetHooks().isNoProtoCallVariadic(FnType->getCallConv())) {
2451 assert(cast<llvm::FunctionType>(Callee->getType()->getContainedType(0))
2452 ->isVarArg());
2453 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo, false);
2454 CalleeTy = CalleeTy->getPointerTo();
2455 Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
2456 }
2458 return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
2459 }
2461 LValue CodeGenFunction::
2462 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
2463 llvm::Value *BaseV;
2464 if (E->getOpcode() == BO_PtrMemI)
2465 BaseV = EmitScalarExpr(E->getLHS());
2466 else
2467 BaseV = EmitLValue(E->getLHS()).getAddress();
2469 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
2471 const MemberPointerType *MPT
2472 = E->getRHS()->getType()->getAs<MemberPointerType>();
2474 llvm::Value *AddV =
2475 CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
2477 return MakeAddrLValue(AddV, MPT->getPointeeType());
2478 }
2480 static void
2481 EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
2482 llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
2483 uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
2484 if (E->isCmpXChg()) {
2485 // Note that cmpxchg only supports specifying one ordering and
2486 // doesn't support weak cmpxchg, at least at the moment.
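// [Illustrative note added in editing, not part of the original file.]
// A strong compare-and-exchange lowers to roughly:
//   %old = cmpxchg i32* %ptr, i32 %expected, i32 %desired seq_cst
//   %eq  = icmp eq i32 %old, %expected
// with %old stored back to the 'expected' slot so the caller can see
// the actual value on failure.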
2487 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
2488 LoadVal1->setAlignment(Align);
2489 llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
2490 LoadVal2->setAlignment(Align);
2491 llvm::AtomicCmpXchgInst *CXI =
2492 CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
2493 CXI->setVolatile(E->isVolatile());
2494 llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
2495 StoreVal1->setAlignment(Align);
2496 llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
2497 CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
2498 return;
2499 }
2501 if (E->getOp() == AtomicExpr::Load) {
2502 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
2503 Load->setAtomic(Order);
2504 Load->setAlignment(Size);
2505 Load->setVolatile(E->isVolatile());
2506 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
2507 StoreDest->setAlignment(Align);
2508 return;
2509 }
2511 if (E->getOp() == AtomicExpr::Store) {
2512 assert(!Dest && "Store does not return a value");
2513 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
2514 LoadVal1->setAlignment(Align);
2515 llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
2516 Store->setAtomic(Order);
2517 Store->setAlignment(Size);
2518 Store->setVolatile(E->isVolatile());
2519 return;
2520 }
2522 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
2523 switch (E->getOp()) {
2524 case AtomicExpr::CmpXchgWeak:
2525 case AtomicExpr::CmpXchgStrong:
2526 case AtomicExpr::Store:
2527 case AtomicExpr::Load: assert(0 && "Already handled!");
2528 case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break;
2529 case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break;
2530 case AtomicExpr::And: Op = llvm::AtomicRMWInst::And; break;
2531 case AtomicExpr::Or: Op = llvm::AtomicRMWInst::Or; break;
2532 case AtomicExpr::Xor: Op = llvm::AtomicRMWInst::Xor; break;
2533 case AtomicExpr::Xchg: Op = llvm::AtomicRMWInst::Xchg; break;
2534 }
2535 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
2536 LoadVal1->setAlignment(Align);
2537 llvm::AtomicRMWInst *RMWI =
2538 CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
2539 RMWI->setVolatile(E->isVolatile());
2540 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest);
2541 StoreDest->setAlignment(Align);
2542 }
2544 // This function emits any expression (scalar, complex, or aggregate)
2545 // into a temporary alloca.
2546 static llvm::Value *
2547 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
2548 llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
2549 CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
2550 /*Init*/ true);
2551 return DeclPtr;
2552 }
2554 static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
2555 llvm::Value *Dest) {
2556 if (Ty->isAnyComplexType())
2557 return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
2558 if (CGF.hasAggregateLLVMType(Ty))
2559 return RValue::getAggregate(Dest);
2560 return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
2561 }
2563 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
2564 QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
2565 QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType();
2566 CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
2567 uint64_t Size = sizeChars.getQuantity();
2568 CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
2569 unsigned Align = alignChars.getQuantity();
2570 unsigned MaxInlineWidth =
2571 getContext().getTargetInfo().getMaxAtomicInlineWidth();
2572 bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
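// [Illustrative note added in editing, not part of the original file.]
// An oversized or under-aligned _Atomic type (e.g. a large struct)
// fails this test and takes the libcall path below.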
2574 llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
2575 Ptr = EmitScalarExpr(E->getPtr());
2576 Order = EmitScalarExpr(E->getOrder());
2577 if (E->isCmpXChg()) {
2578 Val1 = EmitScalarExpr(E->getVal1());
2579 Val2 = EmitValToTemp(*this, E->getVal2());
2580 OrderFail = EmitScalarExpr(E->getOrderFail());
2581 (void)OrderFail; // OrderFail is unused at the moment
2582 } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) &&
2583 MemTy->isPointerType()) {
2584 // For pointers, we're required to do a bit of math: adding 1 to an int*
2585 // is not the same as adding 1 to a uintptr_t.
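// [Illustrative note added in editing, not part of the original file.]
// e.g. C11 "atomic_fetch_add(&p, 1)" on an _Atomic(int *) must add
// sizeof(int) bytes, not 1, so the operand is pre-multiplied by the
// pointee size here.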
2586 QualType Val1Ty = E->getVal1()->getType();
2587 llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
2588 CharUnits PointeeIncAmt =
2589 getContext().getTypeSizeInChars(MemTy->getPointeeType());
2590 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
2591 Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
2592 EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
2593 } else if (E->getOp() != AtomicExpr::Load) {
2594 Val1 = EmitValToTemp(*this, E->getVal1());
2595 }
2597 if (E->getOp() != AtomicExpr::Store && !Dest)
2598 Dest = CreateMemTemp(E->getType(), ".atomicdst");
2600 if (UseLibcall) {
2601 // FIXME: Finalize what the libcalls are actually supposed to look like.
2602 // See also http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
2603 return EmitUnsupportedRValue(E, "atomic library call");
2604 }
2605 #if 0
2606 if (UseLibcall) {
2607 const char* LibCallName;
2608 switch (E->getOp()) {
2609 case AtomicExpr::CmpXchgWeak:
2610 LibCallName = "__atomic_compare_exchange_generic"; break;
2611 case AtomicExpr::CmpXchgStrong:
2612 LibCallName = "__atomic_compare_exchange_generic"; break;
2613 case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
2614 case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
2615 case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
2616 case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break;
2617 case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
2618 case AtomicExpr::Xchg: LibCallName = "__atomic_exchange_generic"; break;
2619 case AtomicExpr::Store: LibCallName = "__atomic_store_generic"; break;
2620 case AtomicExpr::Load: LibCallName = "__atomic_load_generic"; break;
2621 }
2622 llvm::SmallVector<QualType, 4> Params;
2623 CallArgList Args;
2624 QualType RetTy = getContext().VoidTy;
2625 if (E->getOp() != AtomicExpr::Store && !E->isCmpXChg())
2626 Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
2627 getContext().VoidPtrTy);
2628 Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
2629 getContext().VoidPtrTy);
2630 if (E->getOp() != AtomicExpr::Load)
2631 Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
2632 getContext().VoidPtrTy);
2633 if (E->isCmpXChg()) {
2634 Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
2635 getContext().VoidPtrTy);
2636 RetTy = getContext().IntTy;
2637 }
2638 Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
2639 getContext().getSizeType());
2640 const CGFunctionInfo &FuncInfo =
2641 CGM.getTypes().getFunctionInfo(RetTy, Args, FunctionType::ExtInfo());
2642 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo, false);
2643 llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
2644 RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
2645 if (E->isCmpXChg())
2646 return Res;
2647 if (E->getOp() == AtomicExpr::Store)
2648 return RValue::get(0);
2649 return ConvertTempToRValue(*this, E->getType(), Dest);
2650 }
2651 #endif
2652 llvm::Type *IPtrTy =
2653 llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
2654 llvm::Value *OrigDest = Dest;
2655 Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
2656 if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
2657 if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
2658 if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
2660 if (isa<llvm::ConstantInt>(Order)) {
2661 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
2662 switch (ord) {
2663 case 0: // memory_order_relaxed
2664 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
2665 llvm::Monotonic);
2666 break;
2667 case 1: // memory_order_consume
2668 case 2: // memory_order_acquire
2669 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
2670 llvm::Acquire);
2671 break;
2672 case 3: // memory_order_release
2673 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
2674 llvm::Release);
2675 break;
2676 case 4: // memory_order_acq_rel
2677 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
2678 llvm::AcquireRelease);
2679 break;
2680 case 5: // memory_order_seq_cst
2681 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
2682 llvm::SequentiallyConsistent);
2683 break;
2684 default: // invalid order
2685 // We should not ever get here normally, but it's hard to
2686 // enforce that in general.
2687 break;
2688 }
2689 if (E->getOp() == AtomicExpr::Store)
2690 return RValue::get(0);
2691 return ConvertTempToRValue(*this, E->getType(), OrigDest);
2692 }
2694 // Long case, when Order isn't obviously constant.
2696 // Create all the relevant BB's
2697 llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
2698 *AcqRelBB = 0, *SeqCstBB = 0;
2699 MonotonicBB = createBasicBlock("monotonic", CurFn);
2700 if (E->getOp() != AtomicExpr::Store)
2701 AcquireBB = createBasicBlock("acquire", CurFn);
2702 if (E->getOp() != AtomicExpr::Load)
2703 ReleaseBB = createBasicBlock("release", CurFn);
2704 if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store)
2705 AcqRelBB = createBasicBlock("acqrel", CurFn);
2706 SeqCstBB = createBasicBlock("seqcst", CurFn);
2707 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
2709 // Create the switch for the split
2710 // MonotonicBB is arbitrarily chosen as the default case; in practice, this
2711 // doesn't matter unless someone is crazy enough to use something that
2712 // doesn't fold to a constant for the ordering.
2713 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
2714 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
2716 // Emit all the different atomics
2717 Builder.SetInsertPoint(MonotonicBB);
2718 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
2719 llvm::Monotonic);
2720 Builder.CreateBr(ContBB);
2721 if (E->getOp() != AtomicExpr::Store) {
2722 Builder.SetInsertPoint(AcquireBB);
2723 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
2724 llvm::Acquire);
2725 Builder.CreateBr(ContBB);
2726 SI->addCase(Builder.getInt32(1), AcquireBB);
2727 SI->addCase(Builder.getInt32(2), AcquireBB);
2728 }
2729 if (E->getOp() != AtomicExpr::Load) {
2730 Builder.SetInsertPoint(ReleaseBB);
2731 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
2732 llvm::Release);
2733 Builder.CreateBr(ContBB);
2734 SI->addCase(Builder.getInt32(3), ReleaseBB);
2735 }
2736 if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) {
2737 Builder.SetInsertPoint(AcqRelBB);
2738 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
2739 llvm::AcquireRelease);
2740 Builder.CreateBr(ContBB);
2741 SI->addCase(Builder.getInt32(4), AcqRelBB);
2742 }
2743 Builder.SetInsertPoint(SeqCstBB);
2744 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
2745 llvm::SequentiallyConsistent);
2746 Builder.CreateBr(ContBB);
2747 SI->addCase(Builder.getInt32(5), SeqCstBB);
2749 // Cleanup and return
2750 Builder.SetInsertPoint(ContBB);
2751 if (E->getOp() == AtomicExpr::Store)
2752 return RValue::get(0);
2753 return ConvertTempToRValue(*this, E->getType(), OrigDest);
2754 }