1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This contains code to emit Expr nodes as LLVM code.
12 //===----------------------------------------------------------------------===//
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
18 #include "CGDebugInfo.h"
19 #include "CGRecordLayout.h"
20 #include "CGObjCRuntime.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/ASTContext.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Basic/ConvertUTF.h"
25 #include "clang/Frontend/CodeGenOptions.h"
26 #include "llvm/Intrinsics.h"
27 #include "llvm/LLVMContext.h"
28 #include "llvm/MDBuilder.h"
29 #include "llvm/DataLayout.h"
30 #include "llvm/ADT/Hashing.h"
31 using namespace clang;
32 using namespace CodeGen;
34 //===--------------------------------------------------------------------===//
35 // Miscellaneous Helper Methods
36 //===--------------------------------------------------------------------===//
38 llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
39 unsigned addressSpace =
40 cast<llvm::PointerType>(value->getType())->getAddressSpace();
42 llvm::PointerType *destType = Int8PtrTy;
44 destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
46 if (value->getType() == destType) return value;
47 return Builder.CreateBitCast(value, destType);
50 /// CreateTempAlloca - This creates a alloca and inserts it into the entry
52 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
54 if (!Builder.isNamePreserving())
55 return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
56 return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
59 void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
61 llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
62 llvm::BasicBlock *Block = AllocaInsertPt->getParent();
63 Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
66 llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
68 llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
69 // FIXME: Should we prefer the preferred type alignment here?
70 CharUnits Align = getContext().getTypeAlignInChars(Ty);
71 Alloc->setAlignment(Align.getQuantity());
75 llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
77 llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
78 // FIXME: Should we prefer the preferred type alignment here?
79 CharUnits Align = getContext().getTypeAlignInChars(Ty);
80 Alloc->setAlignment(Align.getQuantity());
84 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
85 /// expression and compare the result against zero, returning an Int1Ty value.
86 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
87 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
88 llvm::Value *MemPtr = EmitScalarExpr(E);
89 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
92 QualType BoolTy = getContext().BoolTy;
93 if (!E->getType()->isAnyComplexType())
94 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
96 return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
99 /// EmitIgnoredExpr - Emit code to compute the specified expression,
100 /// ignoring the result.
101 void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
103 return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
105 // Just emit it as an l-value and drop the result.
109 /// EmitAnyExpr - Emit code to compute the specified expression which
110 /// can have any type. The result is returned as an RValue struct.
111 /// If this is an aggregate expression, AggSlot indicates where the
112 /// result should be returned.
113 RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
114 AggValueSlot aggSlot,
116 if (!hasAggregateLLVMType(E->getType()))
117 return RValue::get(EmitScalarExpr(E, ignoreResult));
118 else if (E->getType()->isAnyComplexType())
119 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
121 if (!ignoreResult && aggSlot.isIgnored())
122 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
123 EmitAggExpr(E, aggSlot);
124 return aggSlot.asRValue();
127 /// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will
128 /// always be accessible even if no aggregate location is provided.
129 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
130 AggValueSlot AggSlot = AggValueSlot::ignored();
132 if (hasAggregateLLVMType(E->getType()) &&
133 !E->getType()->isAnyComplexType())
134 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
135 return EmitAnyExpr(E, AggSlot);
138 /// EmitAnyExprToMem - Evaluate an expression into a given memory
140 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
141 llvm::Value *Location,
144 // FIXME: This function should take an LValue as an argument.
145 if (E->getType()->isAnyComplexType()) {
146 EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
147 } else if (hasAggregateLLVMType(E->getType())) {
148 CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
149 EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
150 AggValueSlot::IsDestructed_t(IsInit),
151 AggValueSlot::DoesNotNeedGCBarriers,
152 AggValueSlot::IsAliased_t(!IsInit)));
154 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
155 LValue LV = MakeAddrLValue(Location, E->getType());
156 EmitStoreThroughLValue(RV, LV);
161 CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
162 const NamedDecl *InitializedDecl) {
163 if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
164 if (VD->hasGlobalStorage()) {
165 SmallString<256> Name;
166 llvm::raw_svector_ostream Out(Name);
167 CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
170 llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
172 // Create the reference temporary.
173 llvm::GlobalValue *RefTemp =
174 new llvm::GlobalVariable(CGF.CGM.getModule(),
175 RefTempTy, /*isConstant=*/false,
176 llvm::GlobalValue::InternalLinkage,
177 llvm::Constant::getNullValue(RefTempTy),
183 return CGF.CreateMemTemp(Type, "ref.tmp");
187 EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
188 llvm::Value *&ReferenceTemporary,
189 const CXXDestructorDecl *&ReferenceTemporaryDtor,
190 QualType &ObjCARCReferenceLifetimeType,
191 const NamedDecl *InitializedDecl) {
192 const MaterializeTemporaryExpr *M = NULL;
193 E = E->findMaterializedTemporary(M);
194 // Objective-C++ ARC:
195 // If we are binding a reference to a temporary that has ownership, we
196 // need to perform retain/release operations on the temporary.
197 if (M && CGF.getLangOpts().ObjCAutoRefCount &&
198 M->getType()->isObjCLifetimeType() &&
199 (M->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
200 M->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
201 M->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
202 ObjCARCReferenceLifetimeType = M->getType();
204 if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
205 CGF.enterFullExpression(EWC);
206 CodeGenFunction::RunCleanupsScope Scope(CGF);
208 return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
210 ReferenceTemporaryDtor,
211 ObjCARCReferenceLifetimeType,
216 if (E->isGLValue()) {
217 // Emit the expression as an lvalue.
218 LValue LV = CGF.EmitLValue(E);
221 return LV.getAddress();
223 // We have to load the lvalue.
224 RV = CGF.EmitLoadOfLValue(LV);
226 if (!ObjCARCReferenceLifetimeType.isNull()) {
227 ReferenceTemporary = CreateReferenceTemporary(CGF,
228 ObjCARCReferenceLifetimeType,
232 LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
233 ObjCARCReferenceLifetimeType);
235 CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
238 bool ExtendsLifeOfTemporary = false;
239 if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
240 if (Var->extendsLifetimeOfTemporary())
241 ExtendsLifeOfTemporary = true;
242 } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
243 ExtendsLifeOfTemporary = true;
246 if (!ExtendsLifeOfTemporary) {
247 // Since the lifetime of this temporary isn't going to be extended,
248 // we need to clean it up ourselves at the end of the full expression.
249 switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
250 case Qualifiers::OCL_None:
251 case Qualifiers::OCL_ExplicitNone:
252 case Qualifiers::OCL_Autoreleasing:
255 case Qualifiers::OCL_Strong: {
256 assert(!ObjCARCReferenceLifetimeType->isArrayType());
257 CleanupKind cleanupKind = CGF.getARCCleanupKind();
258 CGF.pushDestroy(cleanupKind,
260 ObjCARCReferenceLifetimeType,
261 CodeGenFunction::destroyARCStrongImprecise,
262 cleanupKind & EHCleanup);
266 case Qualifiers::OCL_Weak:
267 assert(!ObjCARCReferenceLifetimeType->isArrayType());
268 CGF.pushDestroy(NormalAndEHCleanup,
270 ObjCARCReferenceLifetimeType,
271 CodeGenFunction::destroyARCWeak,
272 /*useEHCleanupForArray*/ true);
276 ObjCARCReferenceLifetimeType = QualType();
279 return ReferenceTemporary;
282 SmallVector<SubobjectAdjustment, 2> Adjustments;
283 E = E->skipRValueSubobjectAdjustments(Adjustments);
284 if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
285 if (opaque->getType()->isRecordType())
286 return CGF.EmitOpaqueValueLValue(opaque).getAddress();
288 // Create a reference temporary if necessary.
289 AggValueSlot AggSlot = AggValueSlot::ignored();
290 if (CGF.hasAggregateLLVMType(E->getType()) &&
291 !E->getType()->isAnyComplexType()) {
292 ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
294 CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
295 AggValueSlot::IsDestructed_t isDestructed
296 = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
297 AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
298 Qualifiers(), isDestructed,
299 AggValueSlot::DoesNotNeedGCBarriers,
300 AggValueSlot::IsNotAliased);
303 if (InitializedDecl) {
304 // Get the destructor for the reference temporary.
305 if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
306 CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
307 if (!ClassDecl->hasTrivialDestructor())
308 ReferenceTemporaryDtor = ClassDecl->getDestructor();
312 RV = CGF.EmitAnyExpr(E, AggSlot);
314 // Check if need to perform derived-to-base casts and/or field accesses, to
315 // get from the temporary object we created (and, potentially, for which we
316 // extended the lifetime) to the subobject we're binding the reference to.
317 if (!Adjustments.empty()) {
318 llvm::Value *Object = RV.getAggregateAddr();
319 for (unsigned I = Adjustments.size(); I != 0; --I) {
320 SubobjectAdjustment &Adjustment = Adjustments[I-1];
321 switch (Adjustment.Kind) {
322 case SubobjectAdjustment::DerivedToBaseAdjustment:
324 CGF.GetAddressOfBaseClass(Object,
325 Adjustment.DerivedToBase.DerivedClass,
326 Adjustment.DerivedToBase.BasePath->path_begin(),
327 Adjustment.DerivedToBase.BasePath->path_end(),
328 /*NullCheckValue=*/false);
331 case SubobjectAdjustment::FieldAdjustment: {
332 LValue LV = CGF.MakeAddrLValue(Object, E->getType());
333 LV = CGF.EmitLValueForField(LV, Adjustment.Field);
335 Object = LV.getAddress();
339 // For non-simple lvalues, we actually have to create a copy of
340 // the object we're binding to.
341 QualType T = Adjustment.Field->getType().getNonReferenceType()
342 .getUnqualifiedType();
343 Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
344 LValue TempLV = CGF.MakeAddrLValue(Object,
345 Adjustment.Field->getType());
346 CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
350 case SubobjectAdjustment::MemberPointerAdjustment: {
351 llvm::Value *Ptr = CGF.EmitScalarExpr(Adjustment.Ptr.RHS);
352 Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
353 CGF, Object, Ptr, Adjustment.Ptr.MPT);
363 if (RV.isAggregate())
364 return RV.getAggregateAddr();
366 // Create a temporary variable that we can bind the reference to.
367 ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
372 CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
374 CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
375 /*Volatile=*/false, Alignment, E->getType());
377 CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
379 return ReferenceTemporary;
383 CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
384 const NamedDecl *InitializedDecl) {
385 llvm::Value *ReferenceTemporary = 0;
386 const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
387 QualType ObjCARCReferenceLifetimeType;
388 llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
389 ReferenceTemporaryDtor,
390 ObjCARCReferenceLifetimeType,
392 if (SanitizePerformTypeCheck && !E->getType()->isFunctionType()) {
393 // C++11 [dcl.ref]p5 (as amended by core issue 453):
394 // If a glvalue to which a reference is directly bound designates neither
395 // an existing object or function of an appropriate type nor a region of
396 // storage of suitable size and alignment to contain an object of the
397 // reference's type, the behavior is undefined.
398 QualType Ty = E->getType();
399 EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
401 if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
402 return RValue::get(Value);
404 // Make sure to call the destructor for the reference temporary.
405 const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
406 if (VD && VD->hasGlobalStorage()) {
407 if (ReferenceTemporaryDtor) {
408 llvm::Constant *DtorFn =
409 CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
410 CGM.getCXXABI().registerGlobalDtor(*this, DtorFn,
411 cast<llvm::Constant>(ReferenceTemporary));
413 assert(!ObjCARCReferenceLifetimeType.isNull());
414 // Note: We intentionally do not register a global "destructor" to
415 // release the object.
418 return RValue::get(Value);
421 if (ReferenceTemporaryDtor)
422 PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
424 switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
425 case Qualifiers::OCL_None:
427 "Not a reference temporary that needs to be deallocated");
428 case Qualifiers::OCL_ExplicitNone:
429 case Qualifiers::OCL_Autoreleasing:
433 case Qualifiers::OCL_Strong: {
434 bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
435 CleanupKind cleanupKind = getARCCleanupKind();
436 pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
437 precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
438 cleanupKind & EHCleanup);
442 case Qualifiers::OCL_Weak: {
443 // __weak objects always get EH cleanups; otherwise, exceptions
444 // could cause really nasty crashes instead of mere leaks.
445 pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
446 ObjCARCReferenceLifetimeType, destroyARCWeak, true);
452 return RValue::get(Value);
456 /// getAccessedFieldNo - Given an encoded value and a result number, return the
457 /// input field number being accessed.
458 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
459 const llvm::Constant *Elts) {
460 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
464 /// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
465 static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
467 llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
468 llvm::Value *K47 = Builder.getInt64(47);
469 llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
470 llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
471 llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
472 llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
473 return Builder.CreateMul(B1, KMul);
476 void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
477 llvm::Value *Address,
478 QualType Ty, CharUnits Alignment) {
479 if (!SanitizePerformTypeCheck)
482 // Don't check pointers outside the default address space. The null check
483 // isn't correct, the object-size check isn't supported by LLVM, and we can't
484 // communicate the addresses to the runtime handler for the vptr check.
485 if (Address->getType()->getPointerAddressSpace())
488 llvm::Value *Cond = 0;
490 if (getLangOpts().SanitizeNull) {
491 // The glvalue must not be an empty glvalue.
492 Cond = Builder.CreateICmpNE(
493 Address, llvm::Constant::getNullValue(Address->getType()));
496 if (getLangOpts().SanitizeObjectSize && !Ty->isIncompleteType()) {
497 uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();
499 // The glvalue must refer to a large enough storage region.
500 // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
502 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
503 llvm::Value *Min = Builder.getFalse();
504 llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
505 llvm::Value *LargeEnough =
506 Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min),
507 llvm::ConstantInt::get(IntPtrTy, Size));
508 Cond = Cond ? Builder.CreateAnd(Cond, LargeEnough) : LargeEnough;
511 uint64_t AlignVal = 0;
513 if (getLangOpts().SanitizeAlignment) {
514 AlignVal = Alignment.getQuantity();
515 if (!Ty->isIncompleteType() && !AlignVal)
516 AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
518 // The glvalue must be suitably aligned.
521 Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
522 llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
523 llvm::Value *Aligned =
524 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
525 Cond = Cond ? Builder.CreateAnd(Cond, Aligned) : Aligned;
530 llvm::Constant *StaticData[] = {
531 EmitCheckSourceLocation(Loc),
532 EmitCheckTypeDescriptor(Ty),
533 llvm::ConstantInt::get(SizeTy, AlignVal),
534 llvm::ConstantInt::get(Int8Ty, TCK)
536 EmitCheck(Cond, "type_mismatch", StaticData, Address);
539 // If possible, check that the vptr indicates that there is a subobject of
540 // type Ty at offset zero within this object.
541 CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
542 if (getLangOpts().SanitizeVptr && TCK != TCK_ConstructorCall &&
543 RD && RD->hasDefinition() && RD->isDynamicClass()) {
544 // Compute a hash of the mangled name of the type.
546 // FIXME: This is not guaranteed to be deterministic! Move to a
547 // fingerprinting mechanism once LLVM provides one. For the time
548 // being the implementation happens to be deterministic.
549 llvm::SmallString<64> MangledName;
550 llvm::raw_svector_ostream Out(MangledName);
551 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
553 llvm::hash_code TypeHash = hash_value(Out.str());
555 // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
556 llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
557 llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
558 llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy);
559 llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
560 llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
562 llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
563 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
565 // Look the hash up in our cache.
566 const int CacheSize = 128;
567 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
568 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
569 "__ubsan_vptr_type_cache");
570 llvm::Value *Slot = Builder.CreateAnd(Hash,
571 llvm::ConstantInt::get(IntPtrTy,
573 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
574 llvm::Value *CacheVal =
575 Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices));
577 // If the hash isn't in the cache, call a runtime handler to perform the
578 // hard work of checking whether the vptr is for an object of the right
579 // type. This will either fill in the cache and return, or produce a
581 llvm::Constant *StaticData[] = {
582 EmitCheckSourceLocation(Loc),
583 EmitCheckTypeDescriptor(Ty),
584 CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
585 llvm::ConstantInt::get(Int8Ty, TCK)
587 llvm::Value *DynamicData[] = { Address, Hash };
588 EmitCheck(Builder.CreateICmpEQ(CacheVal, Hash),
589 "dynamic_type_cache_miss", StaticData, DynamicData, true);
594 CodeGenFunction::ComplexPairTy CodeGenFunction::
595 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
596 bool isInc, bool isPre) {
597 ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
598 LV.isVolatileQualified());
600 llvm::Value *NextVal;
601 if (isa<llvm::IntegerType>(InVal.first->getType())) {
602 uint64_t AmountVal = isInc ? 1 : -1;
603 NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
605 // Add the inc/dec to the real part.
606 NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
608 QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
609 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
612 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
614 // Add the inc/dec to the real part.
615 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
618 ComplexPairTy IncVal(NextVal, InVal.second);
620 // Store the updated result through the lvalue.
621 StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
623 // If this is a postinc, return the value read from memory, otherwise use the
625 return isPre ? IncVal : InVal;
629 //===----------------------------------------------------------------------===//
630 // LValue Expression Emission
631 //===----------------------------------------------------------------------===//
633 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
634 if (Ty->isVoidType())
635 return RValue::get(0);
637 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
638 llvm::Type *EltTy = ConvertType(CTy->getElementType());
639 llvm::Value *U = llvm::UndefValue::get(EltTy);
640 return RValue::getComplex(std::make_pair(U, U));
643 // If this is a use of an undefined aggregate type, the aggregate must have an
644 // identifiable address. Just because the contents of the value are undefined
645 // doesn't mean that the address can't be taken and compared.
646 if (hasAggregateLLVMType(Ty)) {
647 llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
648 return RValue::getAggregate(DestPtr);
651 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
654 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
656 ErrorUnsupported(E, Name);
657 return GetUndefRValue(E->getType());
660 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
662 ErrorUnsupported(E, Name);
663 llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
664 return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
667 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
668 LValue LV = EmitLValue(E);
669 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
670 EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(),
671 E->getType(), LV.getAlignment());
675 /// EmitLValue - Emit code to compute a designator that specifies the location
676 /// of the expression.
678 /// This can return one of two things: a simple address or a bitfield reference.
679 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
680 /// an LLVM pointer type.
682 /// If this returns a bitfield reference, nothing about the pointee type of the
683 /// LLVM value is known: For example, it may not be a pointer to an integer.
685 /// If this returns a normal address, and if the lvalue's C type is fixed size,
686 /// this method guarantees that the returned pointer type will point to an LLVM
687 /// type of the same size of the lvalue's type. If the lvalue has a variable
688 /// length type, this is not possible.
690 LValue CodeGenFunction::EmitLValue(const Expr *E) {
691 switch (E->getStmtClass()) {
692 default: return EmitUnsupportedLValue(E, "l-value expression");
694 case Expr::ObjCPropertyRefExprClass:
695 llvm_unreachable("cannot emit a property reference directly");
697 case Expr::ObjCSelectorExprClass:
698 return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
699 case Expr::ObjCIsaExprClass:
700 return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
701 case Expr::BinaryOperatorClass:
702 return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
703 case Expr::CompoundAssignOperatorClass:
704 if (!E->getType()->isAnyComplexType())
705 return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
706 return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
707 case Expr::CallExprClass:
708 case Expr::CXXMemberCallExprClass:
709 case Expr::CXXOperatorCallExprClass:
710 case Expr::UserDefinedLiteralClass:
711 return EmitCallExprLValue(cast<CallExpr>(E));
712 case Expr::VAArgExprClass:
713 return EmitVAArgExprLValue(cast<VAArgExpr>(E));
714 case Expr::DeclRefExprClass:
715 return EmitDeclRefLValue(cast<DeclRefExpr>(E));
716 case Expr::ParenExprClass:
717 return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
718 case Expr::GenericSelectionExprClass:
719 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
720 case Expr::PredefinedExprClass:
721 return EmitPredefinedLValue(cast<PredefinedExpr>(E));
722 case Expr::StringLiteralClass:
723 return EmitStringLiteralLValue(cast<StringLiteral>(E));
724 case Expr::ObjCEncodeExprClass:
725 return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
726 case Expr::PseudoObjectExprClass:
727 return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
728 case Expr::InitListExprClass:
729 return EmitInitListLValue(cast<InitListExpr>(E));
730 case Expr::CXXTemporaryObjectExprClass:
731 case Expr::CXXConstructExprClass:
732 return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
733 case Expr::CXXBindTemporaryExprClass:
734 return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
735 case Expr::CXXUuidofExprClass:
736 return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
737 case Expr::LambdaExprClass:
738 return EmitLambdaLValue(cast<LambdaExpr>(E));
740 case Expr::ExprWithCleanupsClass: {
741 const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
742 enterFullExpression(cleanups);
743 RunCleanupsScope Scope(*this);
744 return EmitLValue(cleanups->getSubExpr());
747 case Expr::CXXScalarValueInitExprClass:
748 return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
749 case Expr::CXXDefaultArgExprClass:
750 return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
751 case Expr::CXXTypeidExprClass:
752 return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
754 case Expr::ObjCMessageExprClass:
755 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
756 case Expr::ObjCIvarRefExprClass:
757 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
758 case Expr::StmtExprClass:
759 return EmitStmtExprLValue(cast<StmtExpr>(E));
760 case Expr::UnaryOperatorClass:
761 return EmitUnaryOpLValue(cast<UnaryOperator>(E));
762 case Expr::ArraySubscriptExprClass:
763 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
764 case Expr::ExtVectorElementExprClass:
765 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
766 case Expr::MemberExprClass:
767 return EmitMemberExpr(cast<MemberExpr>(E));
768 case Expr::CompoundLiteralExprClass:
769 return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
770 case Expr::ConditionalOperatorClass:
771 return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
772 case Expr::BinaryConditionalOperatorClass:
773 return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
774 case Expr::ChooseExprClass:
775 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
776 case Expr::OpaqueValueExprClass:
777 return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
778 case Expr::SubstNonTypeTemplateParmExprClass:
779 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
780 case Expr::ImplicitCastExprClass:
781 case Expr::CStyleCastExprClass:
782 case Expr::CXXFunctionalCastExprClass:
783 case Expr::CXXStaticCastExprClass:
784 case Expr::CXXDynamicCastExprClass:
785 case Expr::CXXReinterpretCastExprClass:
786 case Expr::CXXConstCastExprClass:
787 case Expr::ObjCBridgedCastExprClass:
788 return EmitCastLValue(cast<CastExpr>(E));
790 case Expr::MaterializeTemporaryExprClass:
791 return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
795 /// Given an object of the given canonical type, can we safely copy a
796 /// value out of it based on its initializer?
797 static bool isConstantEmittableObjectType(QualType type) {
798 assert(type.isCanonical());
799 assert(!type->isReferenceType());
801 // Must be const-qualified but non-volatile.
802 Qualifiers qs = type.getLocalQualifiers();
803 if (!qs.hasConst() || qs.hasVolatile()) return false;
805 // Otherwise, all object types satisfy this except C++ classes with
806 // mutable subobjects or non-trivial copy/destroy behavior.
807 if (const RecordType *RT = dyn_cast<RecordType>(type))
808 if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
809 if (RD->hasMutableFields() || !RD->isTrivial())
815 /// Can we constant-emit a load of a reference to a variable of the
816 /// given type? This is different from predicates like
817 /// Decl::isUsableInConstantExpressions because we do want it to apply
818 /// in situations that don't necessarily satisfy the language's rules
819 /// for this (e.g. C++'s ODR-use rules). For example, we want to able
820 /// to do this with const float variables even if those variables
821 /// aren't marked 'constexpr'.
822 enum ConstantEmissionKind {
825 CEK_AsValueOrReference,
828 static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
829 type = type.getCanonicalType();
830 if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
831 if (isConstantEmittableObjectType(ref->getPointeeType()))
832 return CEK_AsValueOrReference;
833 return CEK_AsReferenceOnly;
835 if (isConstantEmittableObjectType(type))
836 return CEK_AsValueOnly;
840 /// Try to emit a reference to the given value without producing it as
841 /// an l-value. This is actually more than an optimization: we can't
842 /// produce an l-value for variables that we never actually captured
843 /// in a block or lambda, which means const int variables or constexpr
844 /// literals or similar.
845 CodeGenFunction::ConstantEmission
846 CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
847 ValueDecl *value = refExpr->getDecl();
849 // The value needs to be an enum constant or a constant variable.
850 ConstantEmissionKind CEK;
851 if (isa<ParmVarDecl>(value)) {
853 } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
854 CEK = checkVarTypeForConstantEmission(var->getType());
855 } else if (isa<EnumConstantDecl>(value)) {
856 CEK = CEK_AsValueOnly;
860 if (CEK == CEK_None) return ConstantEmission();
862 Expr::EvalResult result;
863 bool resultIsReference;
866 // It's best to evaluate all the way as an r-value if that's permitted.
867 if (CEK != CEK_AsReferenceOnly &&
868 refExpr->EvaluateAsRValue(result, getContext())) {
869 resultIsReference = false;
870 resultType = refExpr->getType();
872 // Otherwise, try to evaluate as an l-value.
873 } else if (CEK != CEK_AsValueOnly &&
874 refExpr->EvaluateAsLValue(result, getContext())) {
875 resultIsReference = true;
876 resultType = value->getType();
880 return ConstantEmission();
883 // In any case, if the initializer has side-effects, abandon ship.
884 if (result.HasSideEffects)
885 return ConstantEmission();
887 // Emit as a constant.
888 llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);
890 // Make sure we emit a debug reference to the global variable.
891 // This should probably fire even for
892 if (isa<VarDecl>(value)) {
893 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
894 EmitDeclRefExprDbgValue(refExpr, C);
896 assert(isa<EnumConstantDecl>(value));
897 EmitDeclRefExprDbgValue(refExpr, C);
900 // If we emitted a reference constant, we need to dereference that.
901 if (resultIsReference)
902 return ConstantEmission::forReference(C);
904 return ConstantEmission::forValue(C);
907 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
908 return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
909 lvalue.getAlignment().getQuantity(),
910 lvalue.getType(), lvalue.getTBAAInfo());
913 static bool hasBooleanRepresentation(QualType Ty) {
914 if (Ty->isBooleanType())
917 if (const EnumType *ET = Ty->getAs<EnumType>())
918 return ET->getDecl()->getIntegerType()->isBooleanType();
920 if (const AtomicType *AT = Ty->getAs<AtomicType>())
921 return hasBooleanRepresentation(AT->getValueType());
926 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
927 const EnumType *ET = Ty->getAs<EnumType>();
928 bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
929 CGM.getCodeGenOpts().StrictEnums &&
930 !ET->getDecl()->isFixed());
931 bool IsBool = hasBooleanRepresentation(Ty);
932 if (!IsBool && !IsRegularCPlusPlusEnum)
938 Min = llvm::APInt(8, 0);
939 End = llvm::APInt(8, 2);
941 const EnumDecl *ED = ET->getDecl();
942 llvm::Type *LTy = ConvertTypeForMem(ED->getIntegerType());
943 unsigned Bitwidth = LTy->getScalarSizeInBits();
944 unsigned NumNegativeBits = ED->getNumNegativeBits();
945 unsigned NumPositiveBits = ED->getNumPositiveBits();
947 if (NumNegativeBits) {
948 unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
949 assert(NumBits <= Bitwidth);
950 End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
953 assert(NumPositiveBits <= Bitwidth);
954 End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
955 Min = llvm::APInt(Bitwidth, 0);
959 llvm::MDBuilder MDHelper(getLLVMContext());
960 return MDHelper.createRange(Min, End);
963 llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
964 unsigned Alignment, QualType Ty,
965 llvm::MDNode *TBAAInfo) {
967 // For better performance, handle vector loads differently.
968 if (Ty->isVectorType()) {
970 const llvm::Type *EltTy =
971 cast<llvm::PointerType>(Addr->getType())->getElementType();
973 const llvm::VectorType *VTy = cast<llvm::VectorType>(EltTy);
975 // Handle vectors of size 3, like size 4 for better performance.
976 if (VTy->getNumElements() == 3) {
978 // Bitcast to vec4 type.
979 llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
981 llvm::PointerType *ptVec4Ty =
982 llvm::PointerType::get(vec4Ty,
983 (cast<llvm::PointerType>(
984 Addr->getType()))->getAddressSpace());
985 llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty,
988 llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");
990 // Shuffle vector to get vec3.
991 llvm::SmallVector<llvm::Constant*, 3> Mask;
992 Mask.push_back(llvm::ConstantInt::get(
993 llvm::Type::getInt32Ty(getLLVMContext()),
995 Mask.push_back(llvm::ConstantInt::get(
996 llvm::Type::getInt32Ty(getLLVMContext()),
998 Mask.push_back(llvm::ConstantInt::get(
999 llvm::Type::getInt32Ty(getLLVMContext()),
1002 llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1003 V = Builder.CreateShuffleVector(LoadVal,
1004 llvm::UndefValue::get(vec4Ty),
1005 MaskV, "extractVec");
1006 return EmitFromMemory(V, Ty);
1010 llvm::LoadInst *Load = Builder.CreateLoad(Addr);
1012 Load->setVolatile(true);
1014 Load->setAlignment(Alignment);
1016 CGM.DecorateInstruction(Load, TBAAInfo);
1017 // If this is an atomic type, all normal reads must be atomic
1018 if (Ty->isAtomicType())
1019 Load->setAtomic(llvm::SequentiallyConsistent);
1021 if (CGM.getCodeGenOpts().OptimizationLevel > 0)
1022 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
1023 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
1025 return EmitFromMemory(Load, Ty);
1028 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
1029 // Bool has a different representation in memory than in registers.
1030 if (hasBooleanRepresentation(Ty)) {
1031 // This should really always be an i1, but sometimes it's already
1032 // an i8, and it's awkward to track those cases down.
1033 if (Value->getType()->isIntegerTy(1))
1034 return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
1035 assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
1041 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
1042 // Bool has a different representation in memory than in registers.
1043 if (hasBooleanRepresentation(Ty)) {
1044 assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
1045 return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
1051 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
1052 bool Volatile, unsigned Alignment,
1054 llvm::MDNode *TBAAInfo,
1057 // Handle vectors differently to get better performance.
1058 if (Ty->isVectorType()) {
1059 llvm::Type *SrcTy = Value->getType();
1060 llvm::VectorType *VecTy = cast<llvm::VectorType>(SrcTy);
1061 // Handle vec3 special.
1062 if (VecTy->getNumElements() == 3) {
1063 llvm::LLVMContext &VMContext = getLLVMContext();
1065 // Our source is a vec3, do a shuffle vector to make it a vec4.
1066 llvm::SmallVector<llvm::Constant*, 4> Mask;
1067 Mask.push_back(llvm::ConstantInt::get(
1068 llvm::Type::getInt32Ty(VMContext),
1070 Mask.push_back(llvm::ConstantInt::get(
1071 llvm::Type::getInt32Ty(VMContext),
1073 Mask.push_back(llvm::ConstantInt::get(
1074 llvm::Type::getInt32Ty(VMContext),
1076 Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));
1078 llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1079 Value = Builder.CreateShuffleVector(Value,
1080 llvm::UndefValue::get(VecTy),
1081 MaskV, "extractVec");
1082 SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
1084 llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
1085 if (DstPtr->getElementType() != SrcTy) {
1087 llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
1088 Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
1092 Value = EmitToMemory(Value, Ty);
1094 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
1096 Store->setAlignment(Alignment);
1098 CGM.DecorateInstruction(Store, TBAAInfo);
1099 if (!isInit && Ty->isAtomicType())
1100 Store->setAtomic(llvm::SequentiallyConsistent);
1103 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
1105 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
1106 lvalue.getAlignment().getQuantity(), lvalue.getType(),
1107 lvalue.getTBAAInfo(), isInit);
1110 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
1111 /// method emits the address of the lvalue, then loads the result as an rvalue,
1112 /// returning the rvalue.
1113 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
1114 if (LV.isObjCWeak()) {
1115 // load of a __weak object.
1116 llvm::Value *AddrWeakObj = LV.getAddress();
1117 return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
1120 if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
1121 return RValue::get(EmitARCLoadWeak(LV.getAddress()));
1123 if (LV.isSimple()) {
1124 assert(!LV.getType()->isFunctionType());
1126 // Everything needs a load.
1127 return RValue::get(EmitLoadOfScalar(LV));
1130 if (LV.isVectorElt()) {
1131 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
1132 LV.isVolatileQualified());
1133 Load->setAlignment(LV.getAlignment().getQuantity());
1134 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
1138 // If this is a reference to a subset of the elements of a vector, either
1139 // shuffle the input or extract/insert them as appropriate.
1140 if (LV.isExtVectorElt())
1141 return EmitLoadOfExtVectorElementLValue(LV);
1143 assert(LV.isBitField() && "Unknown LValue type!");
1144 return EmitLoadOfBitfieldLValue(LV);
1147 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
1148 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
1150 // Get the output type.
1151 llvm::Type *ResLTy = ConvertType(LV.getType());
1152 unsigned ResSizeInBits = CGM.getDataLayout().getTypeSizeInBits(ResLTy);
1154 // Compute the result as an OR of all of the individual component accesses.
1155 llvm::Value *Res = 0;
1156 for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
1157 const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
1158 CharUnits AccessAlignment = AI.AccessAlignment;
1159 if (!LV.getAlignment().isZero())
1160 AccessAlignment = std::min(AccessAlignment, LV.getAlignment());
1162 // Get the field pointer.
1163 llvm::Value *Ptr = LV.getBitFieldBaseAddr();
1165 // Only offset by the field index if used, so that incoming values are not
1166 // required to be structures.
1168 Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
1170 // Offset by the byte offset, if used.
1171 if (!AI.FieldByteOffset.isZero()) {
1172 Ptr = EmitCastToVoidPtr(Ptr);
1173 Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
1177 // Cast to the access type.
1178 llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
1179 CGM.getContext().getTargetAddressSpace(LV.getType()));
1180 Ptr = Builder.CreateBitCast(Ptr, PTy);
1182 // Perform the load.
1183 llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
1184 Load->setAlignment(AccessAlignment.getQuantity());
1186 // Shift out unused low bits and mask out unused high bits.
1187 llvm::Value *Val = Load;
1188 if (AI.FieldBitStart)
1189 Val = Builder.CreateLShr(Load, AI.FieldBitStart);
1190 Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
1194 // Extend or truncate to the target size.
1195 if (AI.AccessWidth < ResSizeInBits)
1196 Val = Builder.CreateZExt(Val, ResLTy);
1197 else if (AI.AccessWidth > ResSizeInBits)
1198 Val = Builder.CreateTrunc(Val, ResLTy);
1200 // Shift into place, and OR into the result.
1201 if (AI.TargetBitOffset)
1202 Val = Builder.CreateShl(Val, AI.TargetBitOffset);
1203 Res = Res ? Builder.CreateOr(Res, Val) : Val;
1206 // If the bit-field is signed, perform the sign-extension.
1208 // FIXME: This can easily be folded into the load of the high bits, which
1209 // could also eliminate the mask of high bits in some situations.
1210 if (Info.isSigned()) {
1211 unsigned ExtraBits = ResSizeInBits - Info.getSize();
1213 Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
1214 ExtraBits, "bf.val.sext");
1217 return RValue::get(Res);
1220 // If this is a reference to a subset of the elements of a vector, create an
1221 // appropriate shufflevector.
1222 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
1223 llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
1224 LV.isVolatileQualified());
1225 Load->setAlignment(LV.getAlignment().getQuantity());
1226 llvm::Value *Vec = Load;
1228 const llvm::Constant *Elts = LV.getExtVectorElts();
1230 // If the result of the expression is a non-vector type, we must be extracting
1231 // a single element. Just codegen as an extractelement.
1232 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
1234 unsigned InIdx = getAccessedFieldNo(0, Elts);
1235 llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1236 return RValue::get(Builder.CreateExtractElement(Vec, Elt));
1239 // Always use shuffle vector to try to retain the original program structure
1240 unsigned NumResultElts = ExprVT->getNumElements();
1242 SmallVector<llvm::Constant*, 4> Mask;
1243 for (unsigned i = 0; i != NumResultElts; ++i)
1244 Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
1246 llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1247 Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
1249 return RValue::get(Vec);
1254 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
1255 /// lvalue, where both are guaranteed to the have the same type, and that type
1257 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
1258 if (!Dst.isSimple()) {
1259 if (Dst.isVectorElt()) {
1260 // Read/modify/write the vector, inserting the new element.
1261 llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
1262 Dst.isVolatileQualified());
1263 Load->setAlignment(Dst.getAlignment().getQuantity());
1264 llvm::Value *Vec = Load;
1265 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
1266 Dst.getVectorIdx(), "vecins");
1267 llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
1268 Dst.isVolatileQualified());
1269 Store->setAlignment(Dst.getAlignment().getQuantity());
1273 // If this is an update of extended vector elements, insert them as
1275 if (Dst.isExtVectorElt())
1276 return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
1278 assert(Dst.isBitField() && "Unknown LValue type");
1279 return EmitStoreThroughBitfieldLValue(Src, Dst);
1282 // There's special magic for assigning into an ARC-qualified l-value.
1283 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
1285 case Qualifiers::OCL_None:
1286 llvm_unreachable("present but none");
1288 case Qualifiers::OCL_ExplicitNone:
1292 case Qualifiers::OCL_Strong:
1293 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
1296 case Qualifiers::OCL_Weak:
1297 EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
1300 case Qualifiers::OCL_Autoreleasing:
1301 Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
1302 Src.getScalarVal()));
1303 // fall into the normal path
1308 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
1309 // load of a __weak object.
1310 llvm::Value *LvalueDst = Dst.getAddress();
1311 llvm::Value *src = Src.getScalarVal();
1312 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
1316 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
1317 // load of a __strong object.
1318 llvm::Value *LvalueDst = Dst.getAddress();
1319 llvm::Value *src = Src.getScalarVal();
1320 if (Dst.isObjCIvar()) {
1321 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
1322 llvm::Type *ResultType = ConvertType(getContext().LongTy);
1323 llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
1324 llvm::Value *dst = RHS;
1325 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
1327 Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
1328 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
1329 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
1331 } else if (Dst.isGlobalObjCRef()) {
1332 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
1333 Dst.isThreadLocalRef());
1336 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
1340 assert(Src.isScalar() && "Can't emit an agg store with this method");
1341 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
1344 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
1345 llvm::Value **Result) {
1346 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
1348 // Get the output type.
1349 llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
1350 unsigned ResSizeInBits = CGM.getDataLayout().getTypeSizeInBits(ResLTy);
1352 // Get the source value, truncated to the width of the bit-field.
1353 llvm::Value *SrcVal = Src.getScalarVal();
1355 if (hasBooleanRepresentation(Dst.getType()))
1356 SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
1358 SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
1362 // Return the new value of the bit-field, if requested.
1364 // Cast back to the proper type for result.
1365 llvm::Type *SrcTy = Src.getScalarVal()->getType();
1366 llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
1369 // Sign extend if necessary.
1370 if (Info.isSigned()) {
1371 unsigned ExtraBits = ResSizeInBits - Info.getSize();
1373 ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
1374 ExtraBits, "bf.reload.sext");
1377 *Result = ReloadVal;
1380 // Iterate over the components, writing each piece to memory.
1381 for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
1382 const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
1383 CharUnits AccessAlignment = AI.AccessAlignment;
1384 if (!Dst.getAlignment().isZero())
1385 AccessAlignment = std::min(AccessAlignment, Dst.getAlignment());
1387 // Get the field pointer.
1388 llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
1389 unsigned addressSpace =
1390 cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
1392 // Only offset by the field index if used, so that incoming values are not
1393 // required to be structures.
1395 Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
1397 // Offset by the byte offset, if used.
1398 if (!AI.FieldByteOffset.isZero()) {
1399 Ptr = EmitCastToVoidPtr(Ptr);
1400 Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
1404 // Cast to the access type.
1405 llvm::Type *AccessLTy =
1406 llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);
1408 llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
1409 Ptr = Builder.CreateBitCast(Ptr, PTy);
1411 // Extract the piece of the bit-field value to write in this access, limited
1412 // to the values that are part of this access.
1413 llvm::Value *Val = SrcVal;
1414 if (AI.TargetBitOffset)
1415 Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
1416 Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
1417 AI.TargetBitWidth));
1419 // Extend or truncate to the access size.
1420 if (ResSizeInBits < AI.AccessWidth)
1421 Val = Builder.CreateZExt(Val, AccessLTy);
1422 else if (ResSizeInBits > AI.AccessWidth)
1423 Val = Builder.CreateTrunc(Val, AccessLTy);
1425 // Shift into the position in memory.
1426 if (AI.FieldBitStart)
1427 Val = Builder.CreateShl(Val, AI.FieldBitStart);
1429 // If necessary, load and OR in bits that are outside of the bit-field.
1430 if (AI.TargetBitWidth != AI.AccessWidth) {
1431 llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
1432 Load->setAlignment(AccessAlignment.getQuantity());
1434 // Compute the mask for zeroing the bits that are part of the bit-field.
1435 llvm::APInt InvMask =
1436 ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
1437 AI.FieldBitStart + AI.TargetBitWidth);
1439 // Apply the mask and OR in to the value to write.
1440 Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
1444 llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
1445 Dst.isVolatileQualified());
1446 Store->setAlignment(AccessAlignment.getQuantity());
1450 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
1452 // This access turns into a read/modify/write of the vector. Load the input
1454 llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
1455 Dst.isVolatileQualified());
1456 Load->setAlignment(Dst.getAlignment().getQuantity());
1457 llvm::Value *Vec = Load;
1458 const llvm::Constant *Elts = Dst.getExtVectorElts();
1460 llvm::Value *SrcVal = Src.getScalarVal();
1462 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
1463 unsigned NumSrcElts = VTy->getNumElements();
1464 unsigned NumDstElts =
1465 cast<llvm::VectorType>(Vec->getType())->getNumElements();
1466 if (NumDstElts == NumSrcElts) {
1467 // Use shuffle vector is the src and destination are the same number of
1468 // elements and restore the vector mask since it is on the side it will be
1470 SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
1471 for (unsigned i = 0; i != NumSrcElts; ++i)
1472 Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
1474 llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1475 Vec = Builder.CreateShuffleVector(SrcVal,
1476 llvm::UndefValue::get(Vec->getType()),
1478 } else if (NumDstElts > NumSrcElts) {
1479 // Extended the source vector to the same length and then shuffle it
1480 // into the destination.
1481 // FIXME: since we're shuffling with undef, can we just use the indices
1482 // into that? This could be simpler.
1483 SmallVector<llvm::Constant*, 4> ExtMask;
1484 for (unsigned i = 0; i != NumSrcElts; ++i)
1485 ExtMask.push_back(Builder.getInt32(i));
1486 ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
1487 llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
1488 llvm::Value *ExtSrcVal =
1489 Builder.CreateShuffleVector(SrcVal,
1490 llvm::UndefValue::get(SrcVal->getType()),
1493 SmallVector<llvm::Constant*, 4> Mask;
1494 for (unsigned i = 0; i != NumDstElts; ++i)
1495 Mask.push_back(Builder.getInt32(i));
1497 // modify when what gets shuffled in
1498 for (unsigned i = 0; i != NumSrcElts; ++i)
1499 Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
1500 llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1501 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
1503 // We should never shorten the vector
1504 llvm_unreachable("unexpected shorten vector length");
1507 // If the Src is a scalar (not a vector) it must be updating one element.
1508 unsigned InIdx = getAccessedFieldNo(0, Elts);
1509 llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1510 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
1513 llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
1514 Dst.isVolatileQualified());
1515 Store->setAlignment(Dst.getAlignment().getQuantity());
1518 // setObjCGCLValueClass - sets class of he lvalue for the purpose of
1519 // generating write-barries API. It is currently a global, ivar,
1521 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
1523 bool IsMemberAccess=false) {
1524 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
1527 if (isa<ObjCIvarRefExpr>(E)) {
1528 QualType ExpTy = E->getType();
1529 if (IsMemberAccess && ExpTy->isPointerType()) {
1530 // If ivar is a structure pointer, assigning to field of
1531 // this struct follows gcc's behavior and makes it a non-ivar
1532 // writer-barrier conservatively.
1533 ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1534 if (ExpTy->isRecordType()) {
1535 LV.setObjCIvar(false);
1539 LV.setObjCIvar(true);
1540 ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
1541 LV.setBaseIvarExp(Exp->getBase());
1542 LV.setObjCArray(E->getType()->isArrayType());
1546 if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
1547 if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
1548 if (VD->hasGlobalStorage()) {
1549 LV.setGlobalObjCRef(true);
1550 LV.setThreadLocalRef(VD->isThreadSpecified());
1553 LV.setObjCArray(E->getType()->isArrayType());
1557 if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
1558 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1562 if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
1563 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1564 if (LV.isObjCIvar()) {
1565 // If cast is to a structure pointer, follow gcc's behavior and make it
1566 // a non-ivar write-barrier.
1567 QualType ExpTy = E->getType();
1568 if (ExpTy->isPointerType())
1569 ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1570 if (ExpTy->isRecordType())
1571 LV.setObjCIvar(false);
1576 if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
1577 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
1581 if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
1582 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1586 if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
1587 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1591 if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
1592 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1596 if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
1597 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
1598 if (LV.isObjCIvar() && !LV.isObjCArray())
1599 // Using array syntax to assigning to what an ivar points to is not
1600 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
1601 LV.setObjCIvar(false);
1602 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
1603 // Using array syntax to assigning to what global points to is not
1604 // same as assigning to the global itself. {id *G;} G[i] = 0;
1605 LV.setGlobalObjCRef(false);
1609 if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
1610 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
1611 // We don't know if member is an 'ivar', but this flag is looked at
1612 // only in the context of LV.isObjCIvar().
1613 LV.setObjCArray(E->getType()->isArrayType());
1618 static llvm::Value *
1619 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
1620 llvm::Value *V, llvm::Type *IRType,
1621 StringRef Name = StringRef()) {
1622 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
1623 return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
1626 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
1627 const Expr *E, const VarDecl *VD) {
1628 assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
1629 "Var decl must have external storage or be a file var decl!");
1631 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
1632 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
1633 V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
1634 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
1635 QualType T = E->getType();
1637 if (VD->getType()->isReferenceType()) {
1638 llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
1639 LI->setAlignment(Alignment.getQuantity());
1641 LV = CGF.MakeNaturalAlignAddrLValue(V, T);
1643 LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
1645 setObjCGCLValueClass(CGF.getContext(), E, LV);
1649 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
1650 const Expr *E, const FunctionDecl *FD) {
1651 llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
1652 if (!FD->hasPrototype()) {
1653 if (const FunctionProtoType *Proto =
1654 FD->getType()->getAs<FunctionProtoType>()) {
1655 // Ugly case: for a K&R-style definition, the type of the definition
1656 // isn't the same as the type of a use. Correct for this with a
1658 QualType NoProtoType =
1659 CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
1660 NoProtoType = CGF.getContext().getPointerType(NoProtoType);
1661 V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
1664 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
1665 return CGF.MakeAddrLValue(V, E->getType(), Alignment);
1668 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
1669 const NamedDecl *ND = E->getDecl();
1670 CharUnits Alignment = getContext().getDeclAlign(ND);
1671 QualType T = E->getType();
1673 // A DeclRefExpr for a reference initialized by a constant expression can
1674 // appear without being odr-used. Directly emit the constant initializer.
1675 if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
1676 const Expr *Init = VD->getAnyInitializer(VD);
1677 if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() &&
1678 VD->isUsableInConstantExpressions(getContext()) &&
1679 VD->checkInitIsICE()) {
1680 llvm::Constant *Val =
1681 CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this);
1682 assert(Val && "failed to emit reference constant expression");
1683 // FIXME: Eventually we will want to emit vector element references.
1684 return MakeAddrLValue(Val, T, Alignment);
1688 // FIXME: We should be able to assert this for FunctionDecls as well!
1689 // FIXME: We should be able to assert this for all DeclRefExprs, not just
1690 // those with a valid source location.
1691 assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
1692 !E->getLocation().isValid()) &&
1693 "Should not use decl without marking it used!");
1695 if (ND->hasAttr<WeakRefAttr>()) {
1696 const ValueDecl *VD = cast<ValueDecl>(ND);
1697 llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
1698 return MakeAddrLValue(Aliasee, T, Alignment);
1701 if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
1702 // Check if this is a global variable.
1703 if (VD->hasExternalStorage() || VD->isFileVarDecl())
1704 return EmitGlobalVarDeclLValue(*this, E, VD);
1706 bool isBlockVariable = VD->hasAttr<BlocksAttr>();
1708 bool NonGCable = VD->hasLocalStorage() &&
1709 !VD->getType()->isReferenceType() &&
1712 llvm::Value *V = LocalDeclMap[VD];
1713 if (!V && VD->isStaticLocal())
1714 V = CGM.getStaticLocalDeclAddress(VD);
1716 // Use special handling for lambdas.
1718 if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
1719 QualType LambdaTagType = getContext().getTagDeclType(FD->getParent());
1720 LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
1722 return EmitLValueForField(LambdaLV, FD);
1725 assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
1726 return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
1730 assert(V && "DeclRefExpr not entered in LocalDeclMap?");
1732 if (isBlockVariable)
1733 V = BuildBlockByrefAddress(V, VD);
1736 if (VD->getType()->isReferenceType()) {
1737 llvm::LoadInst *LI = Builder.CreateLoad(V);
1738 LI->setAlignment(Alignment.getQuantity());
1740 LV = MakeNaturalAlignAddrLValue(V, T);
1742 LV = MakeAddrLValue(V, T, Alignment);
1746 LV.getQuals().removeObjCGCAttr();
1749 setObjCGCLValueClass(getContext(), E, LV);
1753 if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
1754 return EmitFunctionDeclLValue(*this, E, fn);
1756 llvm_unreachable("Unhandled DeclRefExpr");
1759 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
1760 // __extension__ doesn't affect lvalue-ness.
1761 if (E->getOpcode() == UO_Extension)
1762 return EmitLValue(E->getSubExpr());
1764 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
1765 switch (E->getOpcode()) {
1766 default: llvm_unreachable("Unknown unary operator lvalue!");
1768 QualType T = E->getSubExpr()->getType()->getPointeeType();
1769 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
1771 LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
1772 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
1774 // We should not generate __weak write barrier on indirect reference
1775 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
1776 // But, we continue to generate __strong write barrier on indirect write
1777 // into a pointer to object.
1778 if (getLangOpts().ObjC1 &&
1779 getLangOpts().getGC() != LangOptions::NonGC &&
1781 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1786 LValue LV = EmitLValue(E->getSubExpr());
1787 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
1788 llvm::Value *Addr = LV.getAddress();
1790 // __real is valid on scalars. This is a faster way of testing that.
1791 // __imag can only produce an rvalue on scalars.
1792 if (E->getOpcode() == UO_Real &&
1793 !cast<llvm::PointerType>(Addr->getType())
1794 ->getElementType()->isStructTy()) {
1795 assert(E->getSubExpr()->getType()->isArithmeticType());
1799 assert(E->getSubExpr()->getType()->isAnyComplexType());
1801 unsigned Idx = E->getOpcode() == UO_Imag;
1802 return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
1808 LValue LV = EmitLValue(E->getSubExpr());
1809 bool isInc = E->getOpcode() == UO_PreInc;
1811 if (E->getType()->isAnyComplexType())
1812 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
1814 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
1820 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
1821 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
1825 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
1826 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
1830 static llvm::Constant*
1831 GetAddrOfConstantWideString(StringRef Str,
1832 const char *GlobalName,
1833 ASTContext &Context,
1834 QualType Ty, SourceLocation Loc,
1835 CodeGenModule &CGM) {
1837 StringLiteral *SL = StringLiteral::Create(Context,
1839 StringLiteral::Wide,
1842 llvm::Constant *C = CGM.GetConstantArrayFromStringLiteral(SL);
1843 llvm::GlobalVariable *GV =
1844 new llvm::GlobalVariable(CGM.getModule(), C->getType(),
1845 !CGM.getLangOpts().WritableStrings,
1846 llvm::GlobalValue::PrivateLinkage,
1848 const unsigned WideAlignment =
1849 Context.getTypeAlignInChars(Ty).getQuantity();
1850 GV->setAlignment(WideAlignment);
1854 static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
1855 SmallString<32>& Target) {
1856 Target.resize(CharByteWidth * (Source.size() + 1));
1857 char *ResultPtr = &Target[0];
1858 const UTF8 *ErrorPtr;
1859 bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr);
1862 Target.resize(ResultPtr - &Target[0]);
1865 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
1866 switch (E->getIdentType()) {
1868 return EmitUnsupportedLValue(E, "predefined expression");
1870 case PredefinedExpr::Func:
1871 case PredefinedExpr::Function:
1872 case PredefinedExpr::LFunction:
1873 case PredefinedExpr::PrettyFunction: {
1874 unsigned IdentType = E->getIdentType();
1875 std::string GlobalVarName;
1877 switch (IdentType) {
1878 default: llvm_unreachable("Invalid type");
1879 case PredefinedExpr::Func:
1880 GlobalVarName = "__func__.";
1882 case PredefinedExpr::Function:
1883 GlobalVarName = "__FUNCTION__.";
1885 case PredefinedExpr::LFunction:
1886 GlobalVarName = "L__FUNCTION__.";
1888 case PredefinedExpr::PrettyFunction:
1889 GlobalVarName = "__PRETTY_FUNCTION__.";
1893 StringRef FnName = CurFn->getName();
1894 if (FnName.startswith("\01"))
1895 FnName = FnName.substr(1);
1896 GlobalVarName += FnName;
1898 const Decl *CurDecl = CurCodeDecl;
1900 CurDecl = getContext().getTranslationUnitDecl();
1902 std::string FunctionName =
1903 (isa<BlockDecl>(CurDecl)
1905 : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)IdentType,
1908 const Type* ElemType = E->getType()->getArrayElementTypeNoTypeQual();
1910 if (ElemType->isWideCharType()) {
1911 SmallString<32> RawChars;
1912 ConvertUTF8ToWideString(
1913 getContext().getTypeSizeInChars(ElemType).getQuantity(),
1914 FunctionName, RawChars);
1915 C = GetAddrOfConstantWideString(RawChars,
1916 GlobalVarName.c_str(),
1922 C = CGM.GetAddrOfConstantCString(FunctionName,
1923 GlobalVarName.c_str(),
1926 return MakeAddrLValue(C, E->getType());
1931 /// Emit a type description suitable for use by a runtime sanitizer library. The
1932 /// format of a type descriptor is
1935 /// { i16 TypeKind, i16 TypeInfo }
1938 /// followed by an array of i8 containing the type name. TypeKind is 0 for an
1939 /// integer, 1 for a floating point value, and -1 for anything else.
1940 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
1941 // FIXME: Only emit each type's descriptor once.
1942 uint16_t TypeKind = -1;
1943 uint16_t TypeInfo = 0;
1945 if (T->isIntegerType()) {
1947 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
1948 T->isSignedIntegerType();
1949 } else if (T->isFloatingType()) {
1951 TypeInfo = getContext().getTypeSize(T);
1954 // Format the type name as if for a diagnostic, including quotes and
1955 // optionally an 'aka'.
1956 llvm::SmallString<32> Buffer;
1957 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
1958 (intptr_t)T.getAsOpaquePtr(),
1959 0, 0, 0, 0, 0, 0, Buffer,
1960 ArrayRef<intptr_t>());
1962 llvm::Constant *Components[] = {
1963 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
1964 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
1966 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
1968 llvm::GlobalVariable *GV =
1969 new llvm::GlobalVariable(CGM.getModule(), Descriptor->getType(),
1970 /*isConstant=*/true,
1971 llvm::GlobalVariable::PrivateLinkage,
1973 GV->setUnnamedAddr(true);
1977 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
1978 llvm::Type *TargetTy = IntPtrTy;
1980 // Integers which fit in intptr_t are zero-extended and passed directly.
1981 if (V->getType()->isIntegerTy() &&
1982 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
1983 return Builder.CreateZExt(V, TargetTy);
1985 // Pointers are passed directly, everything else is passed by address.
1986 if (!V->getType()->isPointerTy()) {
1987 llvm::Value *Ptr = Builder.CreateAlloca(V->getType());
1988 Builder.CreateStore(V, Ptr);
1991 return Builder.CreatePtrToInt(V, TargetTy);
1994 /// \brief Emit a representation of a SourceLocation for passing to a handler
1995 /// in a sanitizer runtime library. The format for this data is:
1997 /// struct SourceLocation {
1998 /// const char *Filename;
1999 /// int32_t Line, Column;
2002 /// For an invalid SourceLocation, the Filename pointer is null.
2003 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
2004 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
2006 llvm::Constant *Data[] = {
2007 // FIXME: Only emit each file name once.
2008 PLoc.isValid() ? cast<llvm::Constant>(
2009 Builder.CreateGlobalStringPtr(PLoc.getFilename()))
2010 : llvm::Constant::getNullValue(Int8PtrTy),
2011 Builder.getInt32(PLoc.getLine()),
2012 Builder.getInt32(PLoc.getColumn())
2015 return llvm::ConstantStruct::getAnon(Data);
2018 void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName,
2019 llvm::ArrayRef<llvm::Constant *> StaticArgs,
2020 llvm::ArrayRef<llvm::Value *> DynamicArgs,
2022 llvm::BasicBlock *Cont = createBasicBlock("cont");
2024 llvm::BasicBlock *Handler = createBasicBlock("handler." + CheckName);
2025 Builder.CreateCondBr(Checked, Cont, Handler);
2028 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
2029 llvm::GlobalValue *InfoPtr =
2030 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), true,
2031 llvm::GlobalVariable::PrivateLinkage, Info);
2032 InfoPtr->setUnnamedAddr(true);
2034 llvm::SmallVector<llvm::Value *, 4> Args;
2035 llvm::SmallVector<llvm::Type *, 4> ArgTypes;
2036 Args.reserve(DynamicArgs.size() + 1);
2037 ArgTypes.reserve(DynamicArgs.size() + 1);
2039 // Handler functions take an i8* pointing to the (handler-specific) static
2040 // information block, followed by a sequence of intptr_t arguments
2041 // representing operand values.
2042 Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
2043 ArgTypes.push_back(Int8PtrTy);
2044 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
2045 Args.push_back(EmitCheckValue(DynamicArgs[i]));
2046 ArgTypes.push_back(IntPtrTy);
2049 llvm::FunctionType *FnType =
2050 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
2051 llvm::AttrBuilder B;
2053 B.addAttribute(llvm::Attributes::NoReturn)
2054 .addAttribute(llvm::Attributes::NoUnwind);
2056 B.addAttribute(llvm::Attributes::UWTable);
2057 llvm::Value *Fn = CGM.CreateRuntimeFunction(FnType,
2058 ("__ubsan_handle_" + CheckName).str(),
2059 llvm::Attributes::get(getLLVMContext(),
2061 llvm::CallInst *HandlerCall = Builder.CreateCall(Fn, Args);
2063 Builder.CreateBr(Cont);
2065 HandlerCall->setDoesNotReturn();
2066 HandlerCall->setDoesNotThrow();
2067 Builder.CreateUnreachable();
2073 void CodeGenFunction::EmitTrapvCheck(llvm::Value *Checked) {
2074 llvm::BasicBlock *Cont = createBasicBlock("cont");
2076 // If we're optimizing, collapse all calls to trap down to just one per
2077 // function to save on code size.
2078 if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
2079 TrapBB = createBasicBlock("trap");
2080 Builder.CreateCondBr(Checked, Cont, TrapBB);
2082 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
2083 llvm::CallInst *TrapCall = Builder.CreateCall(F);
2084 TrapCall->setDoesNotReturn();
2085 TrapCall->setDoesNotThrow();
2086 Builder.CreateUnreachable();
2088 Builder.CreateCondBr(Checked, Cont, TrapBB);
2094 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
2095 /// array to pointer, return the array subexpression.
2096 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
2097 // If this isn't just an array->pointer decay, bail out.
2098 const CastExpr *CE = dyn_cast<CastExpr>(E);
2099 if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
2102 // If this is a decay from variable width array, bail out.
2103 const Expr *SubExpr = CE->getSubExpr();
2104 if (SubExpr->getType()->isVariableArrayType())
2110 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
2111 // The index must always be an integer, which is not an aggregate. Emit it.
2112 llvm::Value *Idx = EmitScalarExpr(E->getIdx());
2113 QualType IdxTy = E->getIdx()->getType();
2114 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
2116 // If the base is a vector type, then we are forming a vector element lvalue
2117 // with this subscript.
2118 if (E->getBase()->getType()->isVectorType()) {
2119 // Emit the vector as an lvalue to get its address.
2120 LValue LHS = EmitLValue(E->getBase());
2121 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
2122 Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
2123 return LValue::MakeVectorElt(LHS.getAddress(), Idx,
2124 E->getBase()->getType(), LHS.getAlignment());
2127 // Extend or truncate the index type to 32 or 64-bits.
2128 if (Idx->getType() != IntPtrTy)
2129 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
2131 // We know that the pointer points to a type of the correct size, unless the
2132 // size is a VLA or Objective-C interface.
2133 llvm::Value *Address = 0;
2134 CharUnits ArrayAlignment;
2135 if (const VariableArrayType *vla =
2136 getContext().getAsVariableArrayType(E->getType())) {
2137 // The base must be a pointer, which is not an aggregate. Emit
2138 // it. It needs to be emitted first in case it's what captures
2140 Address = EmitScalarExpr(E->getBase());
2142 // The element count here is the total number of non-VLA elements.
2143 llvm::Value *numElements = getVLASize(vla).first;
2145 // Effectively, the multiply by the VLA size is part of the GEP.
2146 // GEP indexes are signed, and scaling an index isn't permitted to
2147 // signed-overflow, so we use the same semantics for our explicit
2148 // multiply. We suppress this if overflow is not undefined behavior.
2149 if (getLangOpts().isSignedOverflowDefined()) {
2150 Idx = Builder.CreateMul(Idx, numElements);
2151 Address = Builder.CreateGEP(Address, Idx, "arrayidx");
2153 Idx = Builder.CreateNSWMul(Idx, numElements);
2154 Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
2156 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
2157 // Indexing over an interface, as in "NSString *P; P[4];"
2158 llvm::Value *InterfaceSize =
2159 llvm::ConstantInt::get(Idx->getType(),
2160 getContext().getTypeSizeInChars(OIT).getQuantity());
2162 Idx = Builder.CreateMul(Idx, InterfaceSize);
2164 // The base must be a pointer, which is not an aggregate. Emit it.
2165 llvm::Value *Base = EmitScalarExpr(E->getBase());
2166 Address = EmitCastToVoidPtr(Base);
2167 Address = Builder.CreateGEP(Address, Idx, "arrayidx");
2168 Address = Builder.CreateBitCast(Address, Base->getType());
2169 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
2170 // If this is A[i] where A is an array, the frontend will have decayed the
2171 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
2172 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
2173 // "gep x, i" here. Emit one "gep A, 0, i".
2174 assert(Array->getType()->isArrayType() &&
2175 "Array to pointer decay must have array source type!");
2176 LValue ArrayLV = EmitLValue(Array);
2177 llvm::Value *ArrayPtr = ArrayLV.getAddress();
2178 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2179 llvm::Value *Args[] = { Zero, Idx };
2181 // Propagate the alignment from the array itself to the result.
2182 ArrayAlignment = ArrayLV.getAlignment();
2184 if (getLangOpts().isSignedOverflowDefined())
2185 Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
2187 Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
2189 // The base must be a pointer, which is not an aggregate. Emit it.
2190 llvm::Value *Base = EmitScalarExpr(E->getBase());
2191 if (getLangOpts().isSignedOverflowDefined())
2192 Address = Builder.CreateGEP(Base, Idx, "arrayidx");
2194 Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
2197 QualType T = E->getBase()->getType()->getPointeeType();
2198 assert(!T.isNull() &&
2199 "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
2202 // Limit the alignment to that of the result type.
2204 if (!ArrayAlignment.isZero()) {
2205 CharUnits Align = getContext().getTypeAlignInChars(T);
2206 ArrayAlignment = std::min(Align, ArrayAlignment);
2207 LV = MakeAddrLValue(Address, T, ArrayAlignment);
2209 LV = MakeNaturalAlignAddrLValue(Address, T);
2212 LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
2214 if (getLangOpts().ObjC1 &&
2215 getLangOpts().getGC() != LangOptions::NonGC) {
2216 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
2217 setObjCGCLValueClass(getContext(), E, LV);
2223 llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
2224 SmallVector<unsigned, 4> &Elts) {
2225 SmallVector<llvm::Constant*, 4> CElts;
2226 for (unsigned i = 0, e = Elts.size(); i != e; ++i)
2227 CElts.push_back(Builder.getInt32(Elts[i]));
2229 return llvm::ConstantVector::get(CElts);
2232 LValue CodeGenFunction::
2233 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
2234 // Emit the base vector as an l-value.
2237 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
2239 // If it is a pointer to a vector, emit the address and form an lvalue with
2241 llvm::Value *Ptr = EmitScalarExpr(E->getBase());
2242 const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
2243 Base = MakeAddrLValue(Ptr, PT->getPointeeType());
2244 Base.getQuals().removeObjCGCAttr();
2245 } else if (E->getBase()->isGLValue()) {
2246 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
2247 // emit the base as an lvalue.
2248 assert(E->getBase()->getType()->isVectorType());
2249 Base = EmitLValue(E->getBase());
2251 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
2252 assert(E->getBase()->getType()->isVectorType() &&
2253 "Result must be a vector");
2254 llvm::Value *Vec = EmitScalarExpr(E->getBase());
2256 // Store the vector to memory (because LValue wants an address).
2257 llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
2258 Builder.CreateStore(Vec, VecMem);
2259 Base = MakeAddrLValue(VecMem, E->getBase()->getType());
2263 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
2265 // Encode the element access list into a vector of unsigned indices.
2266 SmallVector<unsigned, 4> Indices;
2267 E->getEncodedElementAccess(Indices);
2269 if (Base.isSimple()) {
2270 llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
2271 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
2272 Base.getAlignment());
2274 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
2276 llvm::Constant *BaseElts = Base.getExtVectorElts();
2277 SmallVector<llvm::Constant *, 4> CElts;
2279 for (unsigned i = 0, e = Indices.size(); i != e; ++i)
2280 CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
2281 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
2282 return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
2283 Base.getAlignment());
2286 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
2287 Expr *BaseExpr = E->getBase();
2289 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
2292 llvm::Value *Ptr = EmitScalarExpr(BaseExpr);
2293 QualType PtrTy = BaseExpr->getType()->getPointeeType();
2294 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy);
2295 BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy);
2297 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
2299 NamedDecl *ND = E->getMemberDecl();
2300 if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
2301 LValue LV = EmitLValueForField(BaseLV, Field);
2302 setObjCGCLValueClass(getContext(), E, LV);
2306 if (VarDecl *VD = dyn_cast<VarDecl>(ND))
2307 return EmitGlobalVarDeclLValue(*this, E, VD);
2309 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
2310 return EmitFunctionDeclLValue(*this, E, FD);
2312 llvm_unreachable("Unhandled member declaration!");
2315 LValue CodeGenFunction::EmitLValueForField(LValue base,
2316 const FieldDecl *field) {
2317 if (field->isBitField()) {
2318 const CGRecordLayout &RL =
2319 CGM.getTypes().getCGRecordLayout(field->getParent());
2320 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
2321 QualType fieldType =
2322 field->getType().withCVRQualifiers(base.getVRQualifiers());
2323 return LValue::MakeBitfield(base.getAddress(), Info, fieldType,
2324 base.getAlignment());
2327 const RecordDecl *rec = field->getParent();
2328 QualType type = field->getType();
2329 CharUnits alignment = getContext().getDeclAlign(field);
2331 // FIXME: It should be impossible to have an LValue without alignment for a
2333 if (!base.getAlignment().isZero())
2334 alignment = std::min(alignment, base.getAlignment());
2336 bool mayAlias = rec->hasAttr<MayAliasAttr>();
2338 llvm::Value *addr = base.getAddress();
2339 unsigned cvr = base.getVRQualifiers();
2340 if (rec->isUnion()) {
2341 // For unions, there is no pointer adjustment.
2342 assert(!type->isReferenceType() && "union has reference member");
2344 // For structs, we GEP to the field that the record layout suggests.
2345 unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
2346 addr = Builder.CreateStructGEP(addr, idx, field->getName());
2348 // If this is a reference field, load the reference right now.
2349 if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
2350 llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
2351 if (cvr & Qualifiers::Volatile) load->setVolatile(true);
2352 load->setAlignment(alignment.getQuantity());
2354 if (CGM.shouldUseTBAA()) {
2357 tbaa = CGM.getTBAAInfo(getContext().CharTy);
2359 tbaa = CGM.getTBAAInfo(type);
2360 CGM.DecorateInstruction(load, tbaa);
2365 type = refType->getPointeeType();
2366 if (type->isIncompleteType())
2367 alignment = CharUnits();
2369 alignment = getContext().getTypeAlignInChars(type);
2370 cvr = 0; // qualifiers don't recursively apply to referencee
2374 // Make sure that the address is pointing to the right type. This is critical
2375 // for both unions and structs. A union needs a bitcast, a struct element
2376 // will need a bitcast if the LLVM type laid out doesn't match the desired
2378 addr = EmitBitCastOfLValueToProperType(*this, addr,
2379 CGM.getTypes().ConvertTypeForMem(type),
2382 if (field->hasAttr<AnnotateAttr>())
2383 addr = EmitFieldAnnotations(field, addr);
2385 LValue LV = MakeAddrLValue(addr, type, alignment);
2386 LV.getQuals().addCVRQualifiers(cvr);
2388 // __weak attribute on a field is ignored.
2389 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
2390 LV.getQuals().removeObjCGCAttr();
2392 // Fields of may_alias structs act like 'char' for TBAA purposes.
2393 // FIXME: this should get propagated down through anonymous structs
2395 if (mayAlias && LV.getTBAAInfo())
2396 LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));
2402 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
2403 const FieldDecl *Field) {
2404 QualType FieldType = Field->getType();
2406 if (!FieldType->isReferenceType())
2407 return EmitLValueForField(Base, Field);
2409 const CGRecordLayout &RL =
2410 CGM.getTypes().getCGRecordLayout(Field->getParent());
2411 unsigned idx = RL.getLLVMFieldNo(Field);
2412 llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx);
2413 assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
2415 // Make sure that the address is pointing to the right type. This is critical
2416 // for both unions and structs. A union needs a bitcast, a struct element
2417 // will need a bitcast if the LLVM type laid out doesn't match the desired
2419 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
2420 V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName());
2422 CharUnits Alignment = getContext().getDeclAlign(Field);
2424 // FIXME: It should be impossible to have an LValue without alignment for a
2426 if (!Base.getAlignment().isZero())
2427 Alignment = std::min(Alignment, Base.getAlignment());
2429 return MakeAddrLValue(V, FieldType, Alignment);
2432 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
2433 if (E->isFileScope()) {
2434 llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
2435 return MakeAddrLValue(GlobalPtr, E->getType());
2437 if (E->getType()->isVariablyModifiedType())
2438 // make sure to emit the VLA size.
2439 EmitVariablyModifiedType(E->getType());
2441 llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
2442 const Expr *InitExpr = E->getInitializer();
2443 LValue Result = MakeAddrLValue(DeclPtr, E->getType());
2445 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
2451 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
2452 if (!E->isGLValue())
2453 // Initializing an aggregate temporary in C++11: T{...}.
2454 return EmitAggExprToLValue(E);
2456 // An lvalue initializer list must be initializing a reference.
2457 assert(E->getNumInits() == 1 && "reference init with multiple values");
2458 return EmitLValue(E->getInit(0));
2461 LValue CodeGenFunction::
2462 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
2463 if (!expr->isGLValue()) {
2464 // ?: here should be an aggregate.
2465 assert((hasAggregateLLVMType(expr->getType()) &&
2466 !expr->getType()->isAnyComplexType()) &&
2467 "Unexpected conditional operator!");
2468 return EmitAggExprToLValue(expr);
2471 OpaqueValueMapping binding(*this, expr);
2473 const Expr *condExpr = expr->getCond();
2475 if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
2476 const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
2477 if (!CondExprBool) std::swap(live, dead);
2479 if (!ContainsLabel(dead))
2480 return EmitLValue(live);
2483 llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
2484 llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
2485 llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
2487 ConditionalEvaluation eval(*this);
2488 EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
2490 // Any temporaries created here are conditional.
2491 EmitBlock(lhsBlock);
2493 LValue lhs = EmitLValue(expr->getTrueExpr());
2496 if (!lhs.isSimple())
2497 return EmitUnsupportedLValue(expr, "conditional operator");
2499 lhsBlock = Builder.GetInsertBlock();
2500 Builder.CreateBr(contBlock);
2502 // Any temporaries created here are conditional.
2503 EmitBlock(rhsBlock);
2505 LValue rhs = EmitLValue(expr->getFalseExpr());
2507 if (!rhs.isSimple())
2508 return EmitUnsupportedLValue(expr, "conditional operator");
2509 rhsBlock = Builder.GetInsertBlock();
2511 EmitBlock(contBlock);
2513 llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
2515 phi->addIncoming(lhs.getAddress(), lhsBlock);
2516 phi->addIncoming(rhs.getAddress(), rhsBlock);
2517 return MakeAddrLValue(phi, expr->getType());
2520 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
2521 /// type. If the cast is to a reference, we can have the usual lvalue result,
2522 /// otherwise if a cast is needed by the code generator in an lvalue context,
2523 /// then it must mean that we need the address of an aggregate in order to
2524 /// access one of its members. This can happen for all the reasons that casts
2525 /// are permitted with aggregate result, including noop aggregate casts, and
2526 /// cast from scalar to union.
2527 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
2528 switch (E->getCastKind()) {
2530 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
2533 llvm_unreachable("dependent cast kind in IR gen!");
2535 case CK_BuiltinFnToFnPtr:
2536 llvm_unreachable("builtin functions are handled elsewhere");
2538 // These two casts are currently treated as no-ops, although they could
2539 // potentially be real operations depending on the target's ABI.
2540 case CK_NonAtomicToAtomic:
2541 case CK_AtomicToNonAtomic:
2544 case CK_LValueToRValue:
2545 if (!E->getSubExpr()->Classify(getContext()).isPRValue()
2546 || E->getType()->isRecordType())
2547 return EmitLValue(E->getSubExpr());
2548 // Fall through to synthesize a temporary.
2551 case CK_ArrayToPointerDecay:
2552 case CK_FunctionToPointerDecay:
2553 case CK_NullToMemberPointer:
2554 case CK_NullToPointer:
2555 case CK_IntegralToPointer:
2556 case CK_PointerToIntegral:
2557 case CK_PointerToBoolean:
2558 case CK_VectorSplat:
2559 case CK_IntegralCast:
2560 case CK_IntegralToBoolean:
2561 case CK_IntegralToFloating:
2562 case CK_FloatingToIntegral:
2563 case CK_FloatingToBoolean:
2564 case CK_FloatingCast:
2565 case CK_FloatingRealToComplex:
2566 case CK_FloatingComplexToReal:
2567 case CK_FloatingComplexToBoolean:
2568 case CK_FloatingComplexCast:
2569 case CK_FloatingComplexToIntegralComplex:
2570 case CK_IntegralRealToComplex:
2571 case CK_IntegralComplexToReal:
2572 case CK_IntegralComplexToBoolean:
2573 case CK_IntegralComplexCast:
2574 case CK_IntegralComplexToFloatingComplex:
2575 case CK_DerivedToBaseMemberPointer:
2576 case CK_BaseToDerivedMemberPointer:
2577 case CK_MemberPointerToBoolean:
2578 case CK_ReinterpretMemberPointer:
2579 case CK_AnyPointerToBlockPointerCast:
2580 case CK_ARCProduceObject:
2581 case CK_ARCConsumeObject:
2582 case CK_ARCReclaimReturnedObject:
2583 case CK_ARCExtendBlockObject:
2584 case CK_CopyAndAutoreleaseBlockObject: {
2585 // These casts only produce lvalues when we're binding a reference to a
2586 // temporary realized from a (converted) pure rvalue. Emit the expression
2587 // as a value, copy it into a temporary, and return an lvalue referring to
2589 llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
2590 EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
2591 return MakeAddrLValue(V, E->getType());
2595 LValue LV = EmitLValue(E->getSubExpr());
2596 llvm::Value *V = LV.getAddress();
2597 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
2598 return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
2601 case CK_ConstructorConversion:
2602 case CK_UserDefinedConversion:
2603 case CK_CPointerToObjCPointerCast:
2604 case CK_BlockPointerToObjCPointerCast:
2605 return EmitLValue(E->getSubExpr());
2607 case CK_UncheckedDerivedToBase:
2608 case CK_DerivedToBase: {
2609 const RecordType *DerivedClassTy =
2610 E->getSubExpr()->getType()->getAs<RecordType>();
2611 CXXRecordDecl *DerivedClassDecl =
2612 cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2614 LValue LV = EmitLValue(E->getSubExpr());
2615 llvm::Value *This = LV.getAddress();
2617 // Perform the derived-to-base conversion
2619 GetAddressOfBaseClass(This, DerivedClassDecl,
2620 E->path_begin(), E->path_end(),
2621 /*NullCheckValue=*/false);
2623 return MakeAddrLValue(Base, E->getType());
2626 return EmitAggExprToLValue(E);
2627 case CK_BaseToDerived: {
2628 const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
2629 CXXRecordDecl *DerivedClassDecl =
2630 cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2632 LValue LV = EmitLValue(E->getSubExpr());
2634 // Perform the base-to-derived conversion
2635 llvm::Value *Derived =
2636 GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
2637 E->path_begin(), E->path_end(),
2638 /*NullCheckValue=*/false);
2640 return MakeAddrLValue(Derived, E->getType());
2642 case CK_LValueBitCast: {
2643 // This must be a reinterpret_cast (or c-style equivalent).
2644 const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
2646 LValue LV = EmitLValue(E->getSubExpr());
2647 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2648 ConvertType(CE->getTypeAsWritten()));
2649 return MakeAddrLValue(V, E->getType());
2651 case CK_ObjCObjectLValueCast: {
2652 LValue LV = EmitLValue(E->getSubExpr());
2653 QualType ToType = getContext().getLValueReferenceType(E->getType());
2654 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2655 ConvertType(ToType));
2656 return MakeAddrLValue(V, E->getType());
2660 llvm_unreachable("Unhandled lvalue cast kind?");
2663 LValue CodeGenFunction::EmitNullInitializationLValue(
2664 const CXXScalarValueInitExpr *E) {
2665 QualType Ty = E->getType();
2666 LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
2667 EmitNullInitialization(LV.getAddress(), Ty);
2671 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
2672 assert(OpaqueValueMappingData::shouldBindAsLValue(e));
2673 return getOpaqueLValueMapping(e);
2676 LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
2677 const MaterializeTemporaryExpr *E) {
2678 RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
2679 return MakeAddrLValue(RV.getScalarVal(), E->getType());
2682 RValue CodeGenFunction::EmitRValueForField(LValue LV,
2683 const FieldDecl *FD) {
2684 QualType FT = FD->getType();
2685 LValue FieldLV = EmitLValueForField(LV, FD);
2686 if (FT->isAnyComplexType())
2687 return RValue::getComplex(
2688 LoadComplexFromAddr(FieldLV.getAddress(),
2689 FieldLV.isVolatileQualified()));
2690 else if (CodeGenFunction::hasAggregateLLVMType(FT))
2691 return FieldLV.asAggregateRValue();
2693 return EmitLoadOfLValue(FieldLV);
2696 //===--------------------------------------------------------------------===//
2697 // Expression Emission
2698 //===--------------------------------------------------------------------===//
2700 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
2701 ReturnValueSlot ReturnValue) {
2702 if (CGDebugInfo *DI = getDebugInfo())
2703 DI->EmitLocation(Builder, E->getLocStart());
2705 // Builtins never have block type.
2706 if (E->getCallee()->getType()->isBlockPointerType())
2707 return EmitBlockCallExpr(E, ReturnValue);
2709 if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
2710 return EmitCXXMemberCallExpr(CE, ReturnValue);
2712 if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
2713 return EmitCUDAKernelCallExpr(CE, ReturnValue);
2715 const Decl *TargetDecl = E->getCalleeDecl();
2716 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2717 if (unsigned builtinID = FD->getBuiltinID())
2718 return EmitBuiltinExpr(FD, builtinID, E);
2721 if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
2722 if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
2723 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
2725 if (const CXXPseudoDestructorExpr *PseudoDtor
2726 = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
2727 QualType DestroyedType = PseudoDtor->getDestroyedType();
2728 if (getLangOpts().ObjCAutoRefCount &&
2729 DestroyedType->isObjCLifetimeType() &&
2730 (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
2731 DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
2732 // Automatic Reference Counting:
2733 // If the pseudo-expression names a retainable object with weak or
2734 // strong lifetime, the object shall be released.
2735 Expr *BaseExpr = PseudoDtor->getBase();
2736 llvm::Value *BaseValue = NULL;
2737 Qualifiers BaseQuals;
2739 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
2740 if (PseudoDtor->isArrow()) {
2741 BaseValue = EmitScalarExpr(BaseExpr);
2742 const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
2743 BaseQuals = PTy->getPointeeType().getQualifiers();
2745 LValue BaseLV = EmitLValue(BaseExpr);
2746 BaseValue = BaseLV.getAddress();
2747 QualType BaseTy = BaseExpr->getType();
2748 BaseQuals = BaseTy.getQualifiers();
2751 switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
2752 case Qualifiers::OCL_None:
2753 case Qualifiers::OCL_ExplicitNone:
2754 case Qualifiers::OCL_Autoreleasing:
2757 case Qualifiers::OCL_Strong:
2758 EmitARCRelease(Builder.CreateLoad(BaseValue,
2759 PseudoDtor->getDestroyedType().isVolatileQualified()),
2763 case Qualifiers::OCL_Weak:
2764 EmitARCDestroyWeak(BaseValue);
2768 // C++ [expr.pseudo]p1:
2769 // The result shall only be used as the operand for the function call
2770 // operator (), and the result of such a call has type void. The only
2771 // effect is the evaluation of the postfix-expression before the dot or
2773 EmitScalarExpr(E->getCallee());
2776 return RValue::get(0);
2779 llvm::Value *Callee = EmitScalarExpr(E->getCallee());
2780 return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
2781 E->arg_begin(), E->arg_end(), TargetDecl);
2784 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
2785 // Comma expressions just emit their LHS then their RHS as an l-value.
2786 if (E->getOpcode() == BO_Comma) {
2787 EmitIgnoredExpr(E->getLHS());
2788 EnsureInsertPoint();
2789 return EmitLValue(E->getRHS());
2792 if (E->getOpcode() == BO_PtrMemD ||
2793 E->getOpcode() == BO_PtrMemI)
2794 return EmitPointerToDataMemberBinaryExpr(E);
2796 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
2798 // Note that in all of these cases, __block variables need the RHS
2799 // evaluated first just in case the variable gets moved by the RHS.
2801 if (!hasAggregateLLVMType(E->getType())) {
2802 switch (E->getLHS()->getType().getObjCLifetime()) {
2803 case Qualifiers::OCL_Strong:
2804 return EmitARCStoreStrong(E, /*ignored*/ false).first;
2806 case Qualifiers::OCL_Autoreleasing:
2807 return EmitARCStoreAutoreleasing(E).first;
2809 // No reason to do any of these differently.
2810 case Qualifiers::OCL_None:
2811 case Qualifiers::OCL_ExplicitNone:
2812 case Qualifiers::OCL_Weak:
2816 RValue RV = EmitAnyExpr(E->getRHS());
2817 LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
2818 EmitStoreThroughLValue(RV, LV);
2822 if (E->getType()->isAnyComplexType())
2823 return EmitComplexAssignmentLValue(E);
2825 return EmitAggExprToLValue(E);
2828 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
2829 RValue RV = EmitCallExpr(E);
2832 return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2834 assert(E->getCallReturnType()->isReferenceType() &&
2835 "Can't have a scalar return unless the return type is a "
2838 return MakeAddrLValue(RV.getScalarVal(), E->getType());
2841 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
2842 // FIXME: This shouldn't require another copy.
2843 return EmitAggExprToLValue(E);
2846 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
2847 assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
2848 && "binding l-value to type which needs a temporary");
2849 AggValueSlot Slot = CreateAggTemp(E->getType());
2850 EmitCXXConstructExpr(E, Slot);
2851 return MakeAddrLValue(Slot.getAddr(), E->getType());
2855 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
2856 return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
2859 llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
2860 return CGM.GetAddrOfUuidDescriptor(E);
2863 LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
2864 return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType());
2868 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
2869 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
2870 Slot.setExternallyDestructed();
2871 EmitAggExpr(E->getSubExpr(), Slot);
2872 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
2873 return MakeAddrLValue(Slot.getAddr(), E->getType());
2877 CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
2878 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
2879 EmitLambdaExpr(E, Slot);
2880 return MakeAddrLValue(Slot.getAddr(), E->getType());
2883 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
2884 RValue RV = EmitObjCMessageExpr(E);
2887 return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2889 assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
2890 "Can't have a scalar return unless the return type is a "
2893 return MakeAddrLValue(RV.getScalarVal(), E->getType());
2896 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
2898 CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
2899 return MakeAddrLValue(V, E->getType());
2902 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2903 const ObjCIvarDecl *Ivar) {
2904 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
2907 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
2908 llvm::Value *BaseValue,
2909 const ObjCIvarDecl *Ivar,
2910 unsigned CVRQualifiers) {
2911 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
2912 Ivar, CVRQualifiers);
2915 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
2916 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
2917 llvm::Value *BaseValue = 0;
2918 const Expr *BaseExpr = E->getBase();
2919 Qualifiers BaseQuals;
2922 BaseValue = EmitScalarExpr(BaseExpr);
2923 ObjectTy = BaseExpr->getType()->getPointeeType();
2924 BaseQuals = ObjectTy.getQualifiers();
2926 LValue BaseLV = EmitLValue(BaseExpr);
2927 // FIXME: this isn't right for bitfields.
2928 BaseValue = BaseLV.getAddress();
2929 ObjectTy = BaseExpr->getType();
2930 BaseQuals = ObjectTy.getQualifiers();
2934 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
2935 BaseQuals.getCVRQualifiers());
2936 setObjCGCLValueClass(getContext(), E, LV);
2940 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
2941 // Can only get l-value for message expression returning aggregate type
2942 RValue RV = EmitAnyExprToTemp(E);
2943 return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2946 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
2947 ReturnValueSlot ReturnValue,
2948 CallExpr::const_arg_iterator ArgBeg,
2949 CallExpr::const_arg_iterator ArgEnd,
2950 const Decl *TargetDecl) {
2951 // Get the actual function type. The callee type will always be a pointer to
2952 // function type or a block pointer type.
2953 assert(CalleeType->isFunctionPointerType() &&
2954 "Call must have function pointer type!");
2956 CalleeType = getContext().getCanonicalType(CalleeType);
2958 const FunctionType *FnType
2959 = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
2962 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
2964 const CGFunctionInfo &FnInfo =
2965 CGM.getTypes().arrangeFreeFunctionCall(Args, FnType);
2968 // If the expression that denotes the called function has a type
2969 // that does not include a prototype, [the default argument
2970 // promotions are performed]. If the number of arguments does not
2971 // equal the number of parameters, the behavior is undefined. If
2972 // the function is defined with a type that includes a prototype,
2973 // and either the prototype ends with an ellipsis (, ...) or the
2974 // types of the arguments after promotion are not compatible with
2975 // the types of the parameters, the behavior is undefined. If the
2976 // function is defined with a type that does not include a
2977 // prototype, and the types of the arguments after promotion are
2978 // not compatible with those of the parameters after promotion,
2979 // the behavior is undefined [except in some trivial cases].
2980 // That is, in the general case, we should assume that a call
2981 // through an unprototyped function type works like a *non-variadic*
2982 // call. The way we make this work is to cast to the exact type
2983 // of the promoted arguments.
2984 if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
2985 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
2986 CalleeTy = CalleeTy->getPointerTo();
2987 Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
2990 return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
2993 LValue CodeGenFunction::
2994 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
2996 if (E->getOpcode() == BO_PtrMemI)
2997 BaseV = EmitScalarExpr(E->getLHS());
2999 BaseV = EmitLValue(E->getLHS()).getAddress();
3001 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
3003 const MemberPointerType *MPT
3004 = E->getRHS()->getType()->getAs<MemberPointerType>();
3007 CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
3009 return MakeAddrLValue(AddV, MPT->getPointeeType());
3013 EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
3014 llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
3015 uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
3016 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
3017 llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
3019 switch (E->getOp()) {
3020 case AtomicExpr::AO__c11_atomic_init:
3021 llvm_unreachable("Already handled!");
3023 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
3024 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
3025 case AtomicExpr::AO__atomic_compare_exchange:
3026 case AtomicExpr::AO__atomic_compare_exchange_n: {
3027 // Note that cmpxchg only supports specifying one ordering and
3028 // doesn't support weak cmpxchg, at least at the moment.
3029 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
3030 LoadVal1->setAlignment(Align);
3031 llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
3032 LoadVal2->setAlignment(Align);
3033 llvm::AtomicCmpXchgInst *CXI =
3034 CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
3035 CXI->setVolatile(E->isVolatile());
3036 llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
3037 StoreVal1->setAlignment(Align);
3038 llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
3039 CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
3043 case AtomicExpr::AO__c11_atomic_load:
3044 case AtomicExpr::AO__atomic_load_n:
3045 case AtomicExpr::AO__atomic_load: {
3046 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
3047 Load->setAtomic(Order);
3048 Load->setAlignment(Size);
3049 Load->setVolatile(E->isVolatile());
3050 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
3051 StoreDest->setAlignment(Align);
3055 case AtomicExpr::AO__c11_atomic_store:
3056 case AtomicExpr::AO__atomic_store:
3057 case AtomicExpr::AO__atomic_store_n: {
3058 assert(!Dest && "Store does not return a value");
3059 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
3060 LoadVal1->setAlignment(Align);
3061 llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
3062 Store->setAtomic(Order);
3063 Store->setAlignment(Size);
3064 Store->setVolatile(E->isVolatile());
3068 case AtomicExpr::AO__c11_atomic_exchange:
3069 case AtomicExpr::AO__atomic_exchange_n:
3070 case AtomicExpr::AO__atomic_exchange:
3071 Op = llvm::AtomicRMWInst::Xchg;
3074 case AtomicExpr::AO__atomic_add_fetch:
3075 PostOp = llvm::Instruction::Add;
3077 case AtomicExpr::AO__c11_atomic_fetch_add:
3078 case AtomicExpr::AO__atomic_fetch_add:
3079 Op = llvm::AtomicRMWInst::Add;
3082 case AtomicExpr::AO__atomic_sub_fetch:
3083 PostOp = llvm::Instruction::Sub;
3085 case AtomicExpr::AO__c11_atomic_fetch_sub:
3086 case AtomicExpr::AO__atomic_fetch_sub:
3087 Op = llvm::AtomicRMWInst::Sub;
3090 case AtomicExpr::AO__atomic_and_fetch:
3091 PostOp = llvm::Instruction::And;
3093 case AtomicExpr::AO__c11_atomic_fetch_and:
3094 case AtomicExpr::AO__atomic_fetch_and:
3095 Op = llvm::AtomicRMWInst::And;
3098 case AtomicExpr::AO__atomic_or_fetch:
3099 PostOp = llvm::Instruction::Or;
3101 case AtomicExpr::AO__c11_atomic_fetch_or:
3102 case AtomicExpr::AO__atomic_fetch_or:
3103 Op = llvm::AtomicRMWInst::Or;
3106 case AtomicExpr::AO__atomic_xor_fetch:
3107 PostOp = llvm::Instruction::Xor;
3109 case AtomicExpr::AO__c11_atomic_fetch_xor:
3110 case AtomicExpr::AO__atomic_fetch_xor:
3111 Op = llvm::AtomicRMWInst::Xor;
3114 case AtomicExpr::AO__atomic_nand_fetch:
3115 PostOp = llvm::Instruction::And;
3117 case AtomicExpr::AO__atomic_fetch_nand:
3118 Op = llvm::AtomicRMWInst::Nand;
3122 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
3123 LoadVal1->setAlignment(Align);
3124 llvm::AtomicRMWInst *RMWI =
3125 CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
3126 RMWI->setVolatile(E->isVolatile());
3128 // For __atomic_*_fetch operations, perform the operation again to
3129 // determine the value which was written.
3130 llvm::Value *Result = RMWI;
3132 Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
3133 if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
3134 Result = CGF.Builder.CreateNot(Result);
3135 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
3136 StoreDest->setAlignment(Align);
3139 // This function emits any expression (scalar, complex, or aggregate)
3140 // into a temporary alloca.
3141 static llvm::Value *
3142 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
3143 llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
3144 CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
3149 static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
3150 llvm::Value *Dest) {
3151 if (Ty->isAnyComplexType())
3152 return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
3153 if (CGF.hasAggregateLLVMType(Ty))
3154 return RValue::getAggregate(Dest);
3155 return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
3158 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
3159 QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
3160 QualType MemTy = AtomicTy;
3161 if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
3162 MemTy = AT->getValueType();
3163 CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
3164 uint64_t Size = sizeChars.getQuantity();
3165 CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
3166 unsigned Align = alignChars.getQuantity();
3167 unsigned MaxInlineWidthInBits =
3168 getContext().getTargetInfo().getMaxAtomicInlineWidth();
3169 bool UseLibcall = (Size != Align ||
3170 getContext().toBits(sizeChars) > MaxInlineWidthInBits);
3172 llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
3173 Ptr = EmitScalarExpr(E->getPtr());
3175 if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
3176 assert(!Dest && "Init does not return a value");
3177 if (!hasAggregateLLVMType(E->getVal1()->getType())) {
3178 QualType PointeeType
3179 = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
3180 EmitScalarInit(EmitScalarExpr(E->getVal1()),
3181 LValue::MakeAddr(Ptr, PointeeType, alignChars,
3183 } else if (E->getType()->isAnyComplexType()) {
3184 EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
3186 AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
3187 AtomicTy.getQualifiers(),
3188 AggValueSlot::IsNotDestructed,
3189 AggValueSlot::DoesNotNeedGCBarriers,
3190 AggValueSlot::IsNotAliased);
3191 EmitAggExpr(E->getVal1(), Slot);
3193 return RValue::get(0);
3196 Order = EmitScalarExpr(E->getOrder());
3198 switch (E->getOp()) {
3199 case AtomicExpr::AO__c11_atomic_init:
3200 llvm_unreachable("Already handled!");
3202 case AtomicExpr::AO__c11_atomic_load:
3203 case AtomicExpr::AO__atomic_load_n:
3206 case AtomicExpr::AO__atomic_load:
3207 Dest = EmitScalarExpr(E->getVal1());
3210 case AtomicExpr::AO__atomic_store:
3211 Val1 = EmitScalarExpr(E->getVal1());
3214 case AtomicExpr::AO__atomic_exchange:
3215 Val1 = EmitScalarExpr(E->getVal1());
3216 Dest = EmitScalarExpr(E->getVal2());
3219 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
3220 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
3221 case AtomicExpr::AO__atomic_compare_exchange_n:
3222 case AtomicExpr::AO__atomic_compare_exchange:
3223 Val1 = EmitScalarExpr(E->getVal1());
3224 if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
3225 Val2 = EmitScalarExpr(E->getVal2());
3227 Val2 = EmitValToTemp(*this, E->getVal2());
3228 OrderFail = EmitScalarExpr(E->getOrderFail());
3229 // Evaluate and discard the 'weak' argument.
3230 if (E->getNumSubExprs() == 6)
3231 EmitScalarExpr(E->getWeak());
3234 case AtomicExpr::AO__c11_atomic_fetch_add:
3235 case AtomicExpr::AO__c11_atomic_fetch_sub:
3236 if (MemTy->isPointerType()) {
3237 // For pointer arithmetic, we're required to do a bit of math:
3238 // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
3239 // ... but only for the C11 builtins. The GNU builtins expect the
3240 // user to multiply by sizeof(T).
3241 QualType Val1Ty = E->getVal1()->getType();
3242 llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
3243 CharUnits PointeeIncAmt =
3244 getContext().getTypeSizeInChars(MemTy->getPointeeType());
3245 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
3246 Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
3247 EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
3251 case AtomicExpr::AO__atomic_fetch_add:
3252 case AtomicExpr::AO__atomic_fetch_sub:
3253 case AtomicExpr::AO__atomic_add_fetch:
3254 case AtomicExpr::AO__atomic_sub_fetch:
3255 case AtomicExpr::AO__c11_atomic_store:
3256 case AtomicExpr::AO__c11_atomic_exchange:
3257 case AtomicExpr::AO__atomic_store_n:
3258 case AtomicExpr::AO__atomic_exchange_n:
3259 case AtomicExpr::AO__c11_atomic_fetch_and:
3260 case AtomicExpr::AO__c11_atomic_fetch_or:
3261 case AtomicExpr::AO__c11_atomic_fetch_xor:
3262 case AtomicExpr::AO__atomic_fetch_and:
3263 case AtomicExpr::AO__atomic_fetch_or:
3264 case AtomicExpr::AO__atomic_fetch_xor:
3265 case AtomicExpr::AO__atomic_fetch_nand:
3266 case AtomicExpr::AO__atomic_and_fetch:
3267 case AtomicExpr::AO__atomic_or_fetch:
3268 case AtomicExpr::AO__atomic_xor_fetch:
3269 case AtomicExpr::AO__atomic_nand_fetch:
3270 Val1 = EmitValToTemp(*this, E->getVal1());
3274 if (!E->getType()->isVoidType() && !Dest)
3275 Dest = CreateMemTemp(E->getType(), ".atomicdst");
3277 // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
3280 llvm::SmallVector<QualType, 5> Params;
3282 // Size is always the first parameter
3283 Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
3284 getContext().getSizeType());
3285 // Atomic address is always the second parameter
3286 Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
3287 getContext().VoidPtrTy);
3289 const char* LibCallName;
3290 QualType RetTy = getContext().VoidTy;
3291 switch (E->getOp()) {
3292 // There is only one libcall for compare an exchange, because there is no
3293 // optimisation benefit possible from a libcall version of a weak compare
3295 // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
3296 // void *desired, int success, int failure)
3297 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
3298 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
3299 case AtomicExpr::AO__atomic_compare_exchange:
3300 case AtomicExpr::AO__atomic_compare_exchange_n:
3301 LibCallName = "__atomic_compare_exchange";
3302 RetTy = getContext().BoolTy;
3303 Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
3304 getContext().VoidPtrTy);
3305 Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
3306 getContext().VoidPtrTy);
3307 Args.add(RValue::get(Order),
3308 getContext().IntTy);
3311 // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
3313 case AtomicExpr::AO__c11_atomic_exchange:
3314 case AtomicExpr::AO__atomic_exchange_n:
3315 case AtomicExpr::AO__atomic_exchange:
3316 LibCallName = "__atomic_exchange";
3317 Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
3318 getContext().VoidPtrTy);
3319 Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
3320 getContext().VoidPtrTy);
3322 // void __atomic_store(size_t size, void *mem, void *val, int order)
3323 case AtomicExpr::AO__c11_atomic_store:
3324 case AtomicExpr::AO__atomic_store:
3325 case AtomicExpr::AO__atomic_store_n:
3326 LibCallName = "__atomic_store";
3327 Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
3328 getContext().VoidPtrTy);
3330 // void __atomic_load(size_t size, void *mem, void *return, int order)
3331 case AtomicExpr::AO__c11_atomic_load:
3332 case AtomicExpr::AO__atomic_load:
3333 case AtomicExpr::AO__atomic_load_n:
3334 LibCallName = "__atomic_load";
3335 Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
3336 getContext().VoidPtrTy);
3339 // These are only defined for 1-16 byte integers. It is not clear what
3340 // their semantics would be on anything else...
3341 case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
3342 case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
3343 case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
3344 case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break;
3345 case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
3347 default: return EmitUnsupportedRValue(E, "atomic library call");
3349 // order is always the last parameter
3350 Args.add(RValue::get(Order),
3351 getContext().IntTy);
3353 const CGFunctionInfo &FuncInfo =
3354 CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
3355 FunctionType::ExtInfo(), RequiredArgs::All);
3356 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
3357 llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
3358 RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
3361 if (E->getType()->isVoidType())
3362 return RValue::get(0);
3363 return ConvertTempToRValue(*this, E->getType(), Dest);
3366 bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
3367 E->getOp() == AtomicExpr::AO__atomic_store ||
3368 E->getOp() == AtomicExpr::AO__atomic_store_n;
3369 bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
3370 E->getOp() == AtomicExpr::AO__atomic_load ||
3371 E->getOp() == AtomicExpr::AO__atomic_load_n;
3373 llvm::Type *IPtrTy =
3374 llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
3375 llvm::Value *OrigDest = Dest;
3376 Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
3377 if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
3378 if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
3379 if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
3381 if (isa<llvm::ConstantInt>(Order)) {
3382 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3384 case 0: // memory_order_relaxed
3385 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3388 case 1: // memory_order_consume
3389 case 2: // memory_order_acquire
3391 break; // Avoid crashing on code with undefined behavior
3392 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3395 case 3: // memory_order_release
3397 break; // Avoid crashing on code with undefined behavior
3398 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3401 case 4: // memory_order_acq_rel
3402 if (IsLoad || IsStore)
3403 break; // Avoid crashing on code with undefined behavior
3404 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3405 llvm::AcquireRelease);
3407 case 5: // memory_order_seq_cst
3408 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3409 llvm::SequentiallyConsistent);
3411 default: // invalid order
3412 // We should not ever get here normally, but it's hard to
3413 // enforce that in general.
3416 if (E->getType()->isVoidType())
3417 return RValue::get(0);
3418 return ConvertTempToRValue(*this, E->getType(), OrigDest);
3421 // Long case, when Order isn't obviously constant.
3423 // Create all the relevant BB's
3424 llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
3425 *AcqRelBB = 0, *SeqCstBB = 0;
3426 MonotonicBB = createBasicBlock("monotonic", CurFn);
3428 AcquireBB = createBasicBlock("acquire", CurFn);
3430 ReleaseBB = createBasicBlock("release", CurFn);
3431 if (!IsLoad && !IsStore)
3432 AcqRelBB = createBasicBlock("acqrel", CurFn);
3433 SeqCstBB = createBasicBlock("seqcst", CurFn);
3434 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3436 // Create the switch for the split
3437 // MonotonicBB is arbitrarily chosen as the default case; in practice, this
3438 // doesn't matter unless someone is crazy enough to use something that
3439 // doesn't fold to a constant for the ordering.
3440 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3441 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
3443 // Emit all the different atomics
3444 Builder.SetInsertPoint(MonotonicBB);
3445 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3447 Builder.CreateBr(ContBB);
3449 Builder.SetInsertPoint(AcquireBB);
3450 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3452 Builder.CreateBr(ContBB);
3453 SI->addCase(Builder.getInt32(1), AcquireBB);
3454 SI->addCase(Builder.getInt32(2), AcquireBB);
3457 Builder.SetInsertPoint(ReleaseBB);
3458 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3460 Builder.CreateBr(ContBB);
3461 SI->addCase(Builder.getInt32(3), ReleaseBB);
3463 if (!IsLoad && !IsStore) {
3464 Builder.SetInsertPoint(AcqRelBB);
3465 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3466 llvm::AcquireRelease);
3467 Builder.CreateBr(ContBB);
3468 SI->addCase(Builder.getInt32(4), AcqRelBB);
3470 Builder.SetInsertPoint(SeqCstBB);
3471 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3472 llvm::SequentiallyConsistent);
3473 Builder.CreateBr(ContBB);
3474 SI->addCase(Builder.getInt32(5), SeqCstBB);
3476 // Cleanup and return
3477 Builder.SetInsertPoint(ContBB);
3478 if (E->getType()->isVoidType())
3479 return RValue::get(0);
3480 return ConvertTempToRValue(*this, E->getType(), OrigDest);
3483 void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
3484 assert(Val->getType()->isFPOrFPVectorTy());
3485 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
3488 llvm::MDBuilder MDHelper(getLLVMContext());
3489 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
3491 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
3495 struct LValueOrRValue {
3501 static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
3502 const PseudoObjectExpr *E,
3504 AggValueSlot slot) {
3505 llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
3507 // Find the result expression, if any.
3508 const Expr *resultExpr = E->getResultExpr();
3509 LValueOrRValue result;
3511 for (PseudoObjectExpr::const_semantics_iterator
3512 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
3513 const Expr *semantic = *i;
3515 // If this semantic expression is an opaque value, bind it
3516 // to the result of its source expression.
3517 if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
3519 // If this is the result expression, we may need to evaluate
3520 // directly into the slot.
3521 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
3523 if (ov == resultExpr && ov->isRValue() && !forLValue &&
3524 CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
3525 !ov->getType()->isAnyComplexType()) {
3526 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
3528 LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
3529 opaqueData = OVMA::bind(CGF, ov, LV);
3530 result.RV = slot.asRValue();
3532 // Otherwise, emit as normal.
3534 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
3536 // If this is the result, also evaluate the result now.
3537 if (ov == resultExpr) {
3539 result.LV = CGF.EmitLValue(ov);
3541 result.RV = CGF.EmitAnyExpr(ov, slot);
3545 opaques.push_back(opaqueData);
3547 // Otherwise, if the expression is the result, evaluate it
3548 // and remember the result.
3549 } else if (semantic == resultExpr) {
3551 result.LV = CGF.EmitLValue(semantic);
3553 result.RV = CGF.EmitAnyExpr(semantic, slot);
3555 // Otherwise, evaluate the expression in an ignored context.
3557 CGF.EmitIgnoredExpr(semantic);
3561 // Unbind all the opaques now.
3562 for (unsigned i = 0, e = opaques.size(); i != e; ++i)
3563 opaques[i].unbind(CGF);
3568 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
3569 AggValueSlot slot) {
3570 return emitPseudoObjectExpr(*this, E, false, slot).RV;
3573 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
3574 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;