1 //===--- ExprConstant.cpp - Expression Constant Evaluator -----------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements the Expr constant evaluator.
11 // Constant expression evaluation produces four main results:
13 // * A success/failure flag indicating whether constant folding was successful.
14 // This is the 'bool' return value used by most of the code in this file. A
15 // 'false' return value indicates that constant folding has failed, and any
16 // appropriate diagnostic has already been produced.
18 // * An evaluated result, valid only if constant folding has not failed.
20 // * A flag indicating if evaluation encountered (unevaluated) side-effects.
21 // These arise in cases such as (sideEffect(), 0) and (sideEffect() || 1),
22 // where it is possible to determine the evaluated result regardless.
24 // * A set of notes indicating why the evaluation was not a constant expression
25 // (under the C++11 / C++1y rules only, at the moment), or, if folding failed
26 // too, why the expression could not be folded.
28 // If we are checking for a potential constant expression, failure to constant
29 // fold a potential constant sub-expression will be indicated by a 'false'
30 // return value (the expression could not be folded) and no diagnostic (the
31 // expression is not necessarily non-constant).
33 //===----------------------------------------------------------------------===//
35 #include "Interp/Context.h"
36 #include "Interp/Frame.h"
37 #include "Interp/State.h"
38 #include "clang/AST/APValue.h"
39 #include "clang/AST/ASTContext.h"
40 #include "clang/AST/ASTDiagnostic.h"
41 #include "clang/AST/ASTLambda.h"
42 #include "clang/AST/Attr.h"
43 #include "clang/AST/CXXInheritance.h"
44 #include "clang/AST/CharUnits.h"
45 #include "clang/AST/CurrentSourceLocExprScope.h"
46 #include "clang/AST/Expr.h"
47 #include "clang/AST/OSLog.h"
48 #include "clang/AST/OptionalDiagnostic.h"
49 #include "clang/AST/RecordLayout.h"
50 #include "clang/AST/StmtVisitor.h"
51 #include "clang/AST/TypeLoc.h"
52 #include "clang/Basic/Builtins.h"
53 #include "clang/Basic/FixedPoint.h"
54 #include "clang/Basic/TargetInfo.h"
55 #include "llvm/ADT/Optional.h"
56 #include "llvm/ADT/SmallBitVector.h"
57 #include "llvm/Support/SaveAndRestore.h"
58 #include "llvm/Support/raw_ostream.h"
// Debug-output category tag used by LLVM_DEBUG in this translation unit.
62 #define DEBUG_TYPE "exprconstant"
64 using namespace clang;
// Shorthand alias for the RAII guard that tracks the current
// source-location scope for __builtin_source_location / source_location.
75 using SourceLocExprScopeGuard =
76 CurrentSourceLocExprScope::SourceLocExprScopeGuard;
// Compute the type of the object denoted by an LValueBase, dispatching on
// which alternative the base holds: null, a ValueDecl, a TypeInfoLValue,
// a DynamicAllocLValue, or an Expr.
// NOTE(review): this listing has elided lines (embedded numbering jumps,
// e.g. 91 -> 97), so some statements/braces of this function are missing
// here — confirm bodies against the upstream file.
78 static QualType getType(APValue::LValueBase B) {
79 if (!B) return QualType();
80 if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
81 // FIXME: It's unclear where we're supposed to take the type from, and
82 // this actually matters for arrays of unknown bound. Eg:
84 // extern int arr[]; void f() { extern int arr[3]; };
85 // constexpr int *p = &arr[1]; // valid?
87 // For now, we take the array bound from the most recent declaration.
// Walk redeclarations newest-to-oldest looking for one with a complete
// (sized) array type.
88 for (auto *Redecl = cast<ValueDecl>(D->getMostRecentDecl()); Redecl;
89 Redecl = cast_or_null<ValueDecl>(Redecl->getPreviousDecl())) {
90 QualType T = Redecl->getType();
91 if (!T->isIncompleteArrayType())
// (elided in this listing: return of T and loop/branch closers)
// Base is the object backing a typeid expression.
97 if (B.is<TypeInfoLValue>())
98 return B.getTypeInfoType();
// Base is a constexpr dynamic allocation (new-expression).
100 if (B.is<DynamicAllocLValue>())
101 return B.getDynamicAllocType();
103 const Expr *Base = B.get<const Expr*>();
105 // For a materialized temporary, the type of the temporary we materialized
106 // may not be the type of the expression.
107 if (const MaterializeTemporaryExpr *MTE =
108 dyn_cast<MaterializeTemporaryExpr>(Base)) {
109 SmallVector<const Expr *, 2> CommaLHSs;
110 SmallVector<SubobjectAdjustment, 2> Adjustments;
111 const Expr *Temp = MTE->getSubExpr();
112 const Expr *Inner = Temp->skipRValueSubobjectAdjustments(CommaLHSs,
114 // Keep any cv-qualifiers from the reference if we generated a temporary
115 // for it directly. Otherwise use the type after adjustment.
116 if (!Adjustments.empty())
117 return Inner->getType();
120 return Base->getType();
123 /// Get an LValue path entry, which is known to not be an array index, as a
124 /// field declaration.
// Returns null when the entry names a base class rather than a field
// (dyn_cast_or_null yields null on a type mismatch).
125 static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
126 return dyn_cast_or_null<FieldDecl>(E.getAsBaseOrMember().getPointer());
128 /// Get an LValue path entry, which is known to not be an array index, as a
129 /// base class declaration.
// Returns null when the entry names a field rather than a base class.
130 static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
131 return dyn_cast_or_null<CXXRecordDecl>(E.getAsBaseOrMember().getPointer());
133 /// Determine whether this LValue path entry for a base class names a virtual
// The virtual-ness is encoded in the int of the BaseOrMember PointerIntPair.
135 static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
136 return E.getAsBaseOrMember().getInt();
139 /// Given an expression, determine the type used to store the result of
140 /// evaluating that expression.
// NOTE(review): the function signature and the non-glvalue branch (content
// lines 141-143's neighbors) are elided in this listing — presumably glvalues
// are stored as an lvalue-reference to the expression type; confirm upstream.
141 static QualType getStorageType(const ASTContext &Ctx, const Expr *E) {
144 return Ctx.getLValueReferenceType(E->getType());
147 /// Given a CallExpr, try to get the alloc_size attribute. May return null.
// Returns null for indirect calls (no direct callee) or when the callee
// lacks the attribute.
148 static const AllocSizeAttr *getAllocSizeAttr(const CallExpr *CE) {
149 const FunctionDecl *Callee = CE->getDirectCallee();
150 return Callee ? Callee->getAttr<AllocSizeAttr>() : nullptr;
153 /// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr.
154 /// This will look through a single cast.
156 /// Returns null if we couldn't unwrap a function with alloc_size.
// NOTE(review): the early `return nullptr;` lines and closing brace are
// elided in this listing (numbering gaps at 159-160 and 173-175).
157 static const CallExpr *tryUnwrapAllocSizeCall(const Expr *E) {
// Only pointer-typed expressions can carry an allocation result.
158 if (!E->getType()->isPointerType())
161 E = E->IgnoreParens();
162 // If we're doing a variable assignment from e.g. malloc(N), there will
163 // probably be a cast of some kind. In exotic cases, we might also see a
164 // top-level ExprWithCleanups. Ignore them either way.
165 if (const auto *FE = dyn_cast<FullExpr>(E))
166 E = FE->getSubExpr()->IgnoreParens();
// Look through at most one cast, per the doc comment above.
168 if (const auto *Cast = dyn_cast<CastExpr>(E))
169 E = Cast->getSubExpr()->IgnoreParens();
171 if (const auto *CE = dyn_cast<CallExpr>(E))
172 return getAllocSizeAttr(CE) ? CE : nullptr;
176 /// Determines whether or not the given Base contains a call to a function
177 /// with the alloc_size attribute.
// True only when the base is an Expr of pointer type that unwraps to an
// alloc_size call via tryUnwrapAllocSizeCall above.
178 static bool isBaseAnAllocSizeCall(APValue::LValueBase Base) {
179 const auto *E = Base.dyn_cast<const Expr *>();
180 return E && E->getType()->isPointerType() && tryUnwrapAllocSizeCall(E);
183 /// The bound to claim that an array of unknown bound has.
184 /// The value in MostDerivedArraySize is undefined in this case. So, set it
185 /// to an arbitrary value that's likely to loudly break things if it's used.
// Half of uint64_t's range: huge enough to be obviously bogus, but leaves
// headroom so index arithmetic against it doesn't immediately wrap.
186 static const uint64_t AssumedSizeForUnsizedArray =
187 std::numeric_limits<uint64_t>::max() / 2;
189 /// Determines if an LValue with the given LValueBase will have an unsized
190 /// array in its designator.
191 /// Find the path length and type of the most-derived subobject in the given
192 /// path, and find the size of the containing array, if any.
// NOTE(review): the return-type line (presumably `static unsigned`) and
// several short lines/braces are elided in this listing — confirm upstream.
194 findMostDerivedSubobject(ASTContext &Ctx, APValue::LValueBase Base,
195 ArrayRef<APValue::LValuePathEntry> Path,
196 uint64_t &ArraySize, QualType &Type, bool &IsArray,
197 bool &FirstEntryIsUnsizedArray) {
198 // This only accepts LValueBases from APValues, and APValues don't support
199 // arrays that lack size info.
200 assert(!isBaseAnAllocSizeCall(Base) &&
201 "Unsized arrays shouldn't appear here");
202 unsigned MostDerivedLength = 0;
203 Type = getType(Base);
// Walk the designator, peeling one subobject level per path entry and
// recording the deepest point that is an array/complex element or a field.
205 for (unsigned I = 0, N = Path.size(); I != N; ++I) {
206 if (Type->isArrayType()) {
207 const ArrayType *AT = Ctx.getAsArrayType(Type);
208 Type = AT->getElementType();
209 MostDerivedLength = I + 1;
212 if (auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
213 ArraySize = CAT->getSize().getZExtValue();
// Non-constant array: only legal as the first designator entry; report
// the sentinel size defined above.
215 assert(I == 0 && "unexpected unsized array designator");
216 FirstEntryIsUnsizedArray = true;
217 ArraySize = AssumedSizeForUnsizedArray;
219 } else if (Type->isAnyComplexType()) {
// A complex component (real/imag) is treated like a 2-element array.
220 const ComplexType *CT = Type->castAs<ComplexType>();
221 Type = CT->getElementType();
223 MostDerivedLength = I + 1;
225 } else if (const FieldDecl *FD = getAsField(Path[I])) {
226 Type = FD->getType();
228 MostDerivedLength = I + 1;
// Base-class entries do not update MostDerivedLength: stepping to a base
// does not change which complete object we designate.
231 // Path[I] describes a base class.
236 return MostDerivedLength;
239 /// A path from a glvalue to a subobject of that glvalue.
// NOTE(review): this listing elides assorted short lines (early returns,
// closing braces, some declarations) throughout this struct — confirm any
// gap against the upstream file before relying on exact control flow.
240 struct SubobjectDesignator {
241 /// True if the subobject was named in a manner not supported by C++11. Such
242 /// lvalues can still be folded, but they are not core constant expressions
243 /// and we cannot perform lvalue-to-rvalue conversions on them.
244 unsigned Invalid : 1;
246 /// Is this a pointer one past the end of an object?
247 unsigned IsOnePastTheEnd : 1;
249 /// Indicator of whether the first entry is an unsized array.
250 unsigned FirstEntryIsAnUnsizedArray : 1;
252 /// Indicator of whether the most-derived object is an array element.
253 unsigned MostDerivedIsArrayElement : 1;
255 /// The length of the path to the most-derived object of which this is a
257 unsigned MostDerivedPathLength : 28;
259 /// The size of the array of which the most-derived object is an element.
260 /// This will always be 0 if the most-derived object is not an array
261 /// element. 0 is not an indicator of whether or not the most-derived object
262 /// is an array, however, because 0-length arrays are allowed.
264 /// If the current array is an unsized array, the value of this is
266 uint64_t MostDerivedArraySize;
268 /// The type of the most derived object referred to by this address.
269 QualType MostDerivedType;
271 typedef APValue::LValuePathEntry PathEntry;
273 /// The entries on the path from the glvalue to the designated subobject.
274 SmallVector<PathEntry, 8> Entries;
// Default-constructed designators are invalid until explicitly built.
276 SubobjectDesignator() : Invalid(true) {}
278 explicit SubobjectDesignator(QualType T)
279 : Invalid(false), IsOnePastTheEnd(false),
280 FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
281 MostDerivedPathLength(0), MostDerivedArraySize(0),
282 MostDerivedType(T) {}
// Build a designator from an existing APValue lvalue; invalid if the value
// is not an lvalue or carries no lvalue path.
284 SubobjectDesignator(ASTContext &Ctx, const APValue &V)
285 : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
286 FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
287 MostDerivedPathLength(0), MostDerivedArraySize(0) {
288 assert(V.isLValue() && "Non-LValue used to make an LValue designator?");
290 IsOnePastTheEnd = V.isLValueOnePastTheEnd();
291 ArrayRef<PathEntry> VEntries = V.getLValuePath();
292 Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
293 if (V.getLValueBase()) {
294 bool IsArray = false;
295 bool FirstIsUnsizedArray = false;
296 MostDerivedPathLength = findMostDerivedSubobject(
297 Ctx, V.getLValueBase(), V.getLValuePath(), MostDerivedArraySize,
298 MostDerivedType, IsArray, FirstIsUnsizedArray);
299 MostDerivedIsArrayElement = IsArray;
300 FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
// Shorten the path to NewLength entries and recompute the cached
// most-derived-subobject information for the truncated path.
305 void truncate(ASTContext &Ctx, APValue::LValueBase Base,
306 unsigned NewLength) {
310 assert(Base && "cannot truncate path for null pointer");
311 assert(NewLength <= Entries.size() && "not a truncation");
// No-op if nothing is actually removed.
313 if (NewLength == Entries.size())
315 Entries.resize(NewLength);
317 bool IsArray = false;
318 bool FirstIsUnsizedArray = false;
319 MostDerivedPathLength = findMostDerivedSubobject(
320 Ctx, Base, Entries, MostDerivedArraySize, MostDerivedType, IsArray,
321 FirstIsUnsizedArray);
322 MostDerivedIsArrayElement = IsArray;
323 FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
331 /// Determine whether the most derived subobject is an array without a
333 bool isMostDerivedAnUnsizedArray() const {
334 assert(!Invalid && "Calling this makes no sense on invalid designators");
335 return Entries.size() == 1 && FirstEntryIsAnUnsizedArray;
338 /// Determine what the most derived array's size is. Results in an assertion
339 /// failure if the most derived array lacks a size.
340 uint64_t getMostDerivedArraySize() const {
341 assert(!isMostDerivedAnUnsizedArray() && "Unsized array has no size");
342 return MostDerivedArraySize;
345 /// Determine whether this is a one-past-the-end pointer.
346 bool isOnePastTheEnd() const {
// An index equal to the array bound designates the one-past-the-end
// position even when IsOnePastTheEnd wasn't set explicitly.
350 if (!isMostDerivedAnUnsizedArray() && MostDerivedIsArrayElement &&
351 Entries[MostDerivedPathLength - 1].getAsArrayIndex() ==
352 MostDerivedArraySize)
357 /// Get the range of valid index adjustments in the form
358 /// {maximum value that can be subtracted from this pointer,
359 /// maximum value that can be added to this pointer}
360 std::pair<uint64_t, uint64_t> validIndexAdjustments() {
361 if (Invalid || isMostDerivedAnUnsizedArray())
364 // [expr.add]p4: For the purposes of these operators, a pointer to a
365 // nonarray object behaves the same as a pointer to the first element of
366 // an array of length one with the type of the object as its element type.
367 bool IsArray = MostDerivedPathLength == Entries.size() &&
368 MostDerivedIsArrayElement;
369 uint64_t ArrayIndex = IsArray ? Entries.back().getAsArrayIndex()
370 : (uint64_t)IsOnePastTheEnd;
372 IsArray ? getMostDerivedArraySize() : (uint64_t)1;
373 return {ArrayIndex, ArraySize - ArrayIndex};
376 /// Check that this refers to a valid subobject.
377 bool isValidSubobject() const {
380 return !isOnePastTheEnd();
382 /// Check that this refers to a valid subobject, and if not, produce a
383 /// relevant diagnostic and set the designator as invalid.
384 bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);
386 /// Get the type of the designated object.
387 QualType getType(ASTContext &Ctx) const {
388 assert(!Invalid && "invalid designator has no subobject type");
// If the path ends inside a base-class chain, the designated type is the
// base class record, not the most-derived subobject type.
389 return MostDerivedPathLength == Entries.size()
391 : Ctx.getRecordType(getAsBaseClass(Entries.back()));
394 /// Update this designator to refer to the first element within this array.
395 void addArrayUnchecked(const ConstantArrayType *CAT) {
396 Entries.push_back(PathEntry::ArrayIndex(0));
398 // This is a most-derived object.
399 MostDerivedType = CAT->getElementType();
400 MostDerivedIsArrayElement = true;
401 MostDerivedArraySize = CAT->getSize().getZExtValue();
402 MostDerivedPathLength = Entries.size();
404 /// Update this designator to refer to the first element within the array of
405 /// elements of type T. This is an array of unknown size.
406 void addUnsizedArrayUnchecked(QualType ElemTy) {
407 Entries.push_back(PathEntry::ArrayIndex(0));
409 MostDerivedType = ElemTy;
410 MostDerivedIsArrayElement = true;
411 // The value in MostDerivedArraySize is undefined in this case. So, set it
412 // to an arbitrary value that's likely to loudly break things if it's
414 MostDerivedArraySize = AssumedSizeForUnsizedArray;
415 MostDerivedPathLength = Entries.size();
417 /// Update this designator to refer to the given base or member of this
419 void addDeclUnchecked(const Decl *D, bool Virtual = false) {
420 Entries.push_back(APValue::BaseOrMemberType(D, Virtual));
422 // If this isn't a base class, it's a new most-derived object.
423 if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
424 MostDerivedType = FD->getType();
425 MostDerivedIsArrayElement = false;
426 MostDerivedArraySize = 0;
427 MostDerivedPathLength = Entries.size();
430 /// Update this designator to refer to the given complex component.
431 void addComplexUnchecked(QualType EltTy, bool Imag) {
// Real component is index 0, imaginary is index 1.
432 Entries.push_back(PathEntry::ArrayIndex(Imag));
434 // This is technically a most-derived object, though in practice this
435 // is unlikely to matter.
436 MostDerivedType = EltTy;
437 MostDerivedIsArrayElement = true;
438 MostDerivedArraySize = 2;
439 MostDerivedPathLength = Entries.size();
// Out-of-line diagnostics helpers (defined later in the file).
441 void diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info, const Expr *E);
442 void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E,
444 /// Add N to the address of this subobject.
445 void adjustIndex(EvalInfo &Info, const Expr *E, APSInt N) {
446 if (Invalid || !N) return;
447 uint64_t TruncatedN = N.extOrTrunc(64).getZExtValue();
448 if (isMostDerivedAnUnsizedArray()) {
449 diagnoseUnsizedArrayPointerArithmetic(Info, E);
450 // Can't verify -- trust that the user is doing the right thing (or if
451 // not, trust that the caller will catch the bad behavior).
452 // FIXME: Should we reject if this overflows, at least?
453 Entries.back() = PathEntry::ArrayIndex(
454 Entries.back().getAsArrayIndex() + TruncatedN);
458 // [expr.add]p4: For the purposes of these operators, a pointer to a
459 // nonarray object behaves the same as a pointer to the first element of
460 // an array of length one with the type of the object as its element type.
461 bool IsArray = MostDerivedPathLength == Entries.size() &&
462 MostDerivedIsArrayElement;
463 uint64_t ArrayIndex = IsArray ? Entries.back().getAsArrayIndex()
464 : (uint64_t)IsOnePastTheEnd;
466 IsArray ? getMostDerivedArraySize() : (uint64_t)1;
// Bounds check: the adjusted index must land in [0, ArraySize].
468 if (N < -(int64_t)ArrayIndex || N > ArraySize - ArrayIndex) {
469 // Calculate the actual index in a wide enough type, so we can include
471 N = N.extend(std::max<unsigned>(N.getBitWidth() + 1, 65));
472 (llvm::APInt&)N += ArrayIndex;
473 assert(N.ugt(ArraySize) && "bounds check failed for in-bounds index");
474 diagnosePointerArithmetic(Info, E, N);
479 ArrayIndex += TruncatedN;
480 assert(ArrayIndex <= ArraySize &&
481 "bounds check succeeded for out-of-bounds index");
484 Entries.back() = PathEntry::ArrayIndex(ArrayIndex);
// For the non-array case, a nonzero index means one-past-the-end.
486 IsOnePastTheEnd = (ArrayIndex != 0);
490 /// A stack frame in the constexpr call stack.
// NOTE(review): several member declarations are elided in this listing
// (e.g. the `This`, `Arguments`, `Temporaries`, and `Index` members whose
// doc comments appear below without the declaration itself) — confirm
// against the upstream file.
491 class CallStackFrame : public interp::Frame {
495 /// Parent - The caller of this stack frame.
496 CallStackFrame *Caller;
498 /// Callee - The function which was called.
499 const FunctionDecl *Callee;
501 /// This - The binding for the this pointer in this call, if any.
504 /// Arguments - Parameter bindings for this function call, indexed by
505 /// parameters' function scope indices.
508 /// Source location information about the default argument or default
509 /// initializer expression we're evaluating, if any.
510 CurrentSourceLocExprScope CurSourceLocExprScope;
512 // Note that we intentionally use std::map here so that references to
513 // values are stable.
514 typedef std::pair<const void *, unsigned> MapKeyTy;
515 typedef std::map<MapKeyTy, APValue> MapTy;
516 /// Temporaries - Temporary lvalues materialized within this stack frame.
519 /// CallLoc - The location of the call expression for this call.
520 SourceLocation CallLoc;
522 /// Index - The call index of this call.
525 /// The stack of integers for tracking version numbers for temporaries.
526 SmallVector<unsigned, 2> TempVersionStack = {1};
527 unsigned CurTempVersion = TempVersionStack.back();
529 unsigned getTempVersion() const { return TempVersionStack.back(); }
// Enter a new temporary-version scope (versions are monotonically
// increasing so reused keys get distinct map entries).
531 void pushTempVersion() {
532 TempVersionStack.push_back(++CurTempVersion);
535 void popTempVersion() {
536 TempVersionStack.pop_back();
539 // FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
540 // on the overall stack usage of deeply-recursing constexpr evaluations.
541 // (We should cache this map rather than recomputing it repeatedly.)
542 // But let's try this and see how it goes; we can look into caching the map
543 // as a later change.
545 /// LambdaCaptureFields - Mapping from captured variables/this to
546 /// corresponding data members in the closure class.
547 llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
548 FieldDecl *LambdaThisCaptureField;
550 CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
551 const FunctionDecl *Callee, const LValue *This,
555 // Return the temporary for Key whose version number is Version.
556 APValue *getTemporary(const void *Key, unsigned Version) {
557 MapKeyTy KV(Key, Version);
558 auto LB = Temporaries.lower_bound(KV);
559 if (LB != Temporaries.end() && LB->first == KV)
561 // Pair (Key,Version) wasn't found in the map. Check that no elements
562 // in the map have 'Key' as their key.
563 assert((LB == Temporaries.end() || LB->first.first != Key) &&
564 (LB == Temporaries.begin() || std::prev(LB)->first.first != Key) &&
565 "Element with key 'Key' found in map");
569 // Return the current temporary for Key in the map.
// Highest-version entry for Key, found via upper_bound on (Key, UINT_MAX).
570 APValue *getCurrentTemporary(const void *Key) {
571 auto UB = Temporaries.upper_bound(MapKeyTy(Key, UINT_MAX));
572 if (UB != Temporaries.begin() && std::prev(UB)->first.first == Key)
573 return &std::prev(UB)->second;
577 // Return the version number of the current temporary for Key.
578 unsigned getCurrentTemporaryVersion(const void *Key) const {
579 auto UB = Temporaries.upper_bound(MapKeyTy(Key, UINT_MAX));
580 if (UB != Temporaries.begin() && std::prev(UB)->first.first == Key)
581 return std::prev(UB)->first.second;
585 /// Allocate storage for an object of type T in this stack frame.
586 /// Populates LV with a handle to the created object. Key identifies
587 /// the temporary within the stack frame, and must not be reused without
588 /// bumping the temporary version number.
589 template<typename KeyT>
590 APValue &createTemporary(const KeyT *Key, QualType T,
591 bool IsLifetimeExtended, LValue &LV);
// interp::Frame interface (used by diagnostics / backtraces).
593 void describe(llvm::raw_ostream &OS) override;
595 Frame *getCaller() const override { return Caller; }
596 SourceLocation getCallLocation() const override { return CallLoc; }
597 const FunctionDecl *getCallee() const override { return Callee; }
// True if the callee is lexically inside namespace std.
599 bool isStdFunction() const {
600 for (const DeclContext *DC = Callee; DC; DC = DC->getParent())
601 if (DC->isStdNamespace())
607 /// Temporarily override 'this'.
// RAII guard: swaps Frame.This for NewThis on construction (guarded by
// Enable, whose check line is elided in this listing) and restores the old
// value on destruction.
608 class ThisOverrideRAII {
610 ThisOverrideRAII(CallStackFrame &Frame, const LValue *NewThis, bool Enable)
611 : Frame(Frame), OldThis(Frame.This) {
613 Frame.This = NewThis;
615 ~ThisOverrideRAII() {
616 Frame.This = OldThis;
619 CallStackFrame &Frame;
620 const LValue *OldThis;
624 static bool HandleDestruction(EvalInfo &Info, const Expr *E,
625 const LValue &This, QualType ThisType);
626 static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc,
627 APValue::LValueBase LVBase, APValue &Value,
631 /// A cleanup, and a flag indicating whether it is lifetime-extended.
// NOTE(review): the class header line and some member declarations are
// elided in this listing; the lifetime-extended flag rides in the int bit
// of the PointerIntPair below.
633 llvm::PointerIntPair<APValue*, 1, bool> Value;
634 APValue::LValueBase Base;
638 Cleanup(APValue *Val, APValue::LValueBase Base, QualType T,
639 bool IsLifetimeExtended)
640 : Value(Val, IsLifetimeExtended), Base(Base), T(T) {}
642 bool isLifetimeExtended() const { return Value.getInt(); }
// End the lifetime of the tracked value: run its destructor (when
// requested) with the best available source location, otherwise just
// reset the stored APValue.
643 bool endLifetime(EvalInfo &Info, bool RunDestructors) {
644 if (RunDestructors) {
646 if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
647 Loc = VD->getLocation();
648 else if (const Expr *E = Base.dyn_cast<const Expr*>())
649 Loc = E->getExprLoc();
650 return HandleDestruction(Info, Loc, Base, *Value.getPointer(), T);
652 *Value.getPointer() = APValue();
// A cleanup has an observable side effect iff the type has a nontrivial
// destructor.
656 bool hasSideEffect() {
657 return T.isDestructedType();
661 /// A reference to an object whose construction we are currently evaluating.
// Identified by its lvalue base plus subobject path; used as a DenseMap key
// (see the DenseMapInfo specialization below), hence the == and hash_value
// friends.
662 struct ObjectUnderConstruction {
663 APValue::LValueBase Base;
664 ArrayRef<APValue::LValuePathEntry> Path;
665 friend bool operator==(const ObjectUnderConstruction &LHS,
666 const ObjectUnderConstruction &RHS) {
667 return LHS.Base == RHS.Base && LHS.Path == RHS.Path;
669 friend llvm::hash_code hash_value(const ObjectUnderConstruction &Obj) {
670 return llvm::hash_combine(Obj.Base, Obj.Path);
// Phases of construction/destruction tracked per object.
// NOTE(review): the enumerators are elided in this listing; usage below
// shows at least Bases, AfterBases, Destroying, DestroyingBases.
673 enum class ConstructionPhase {
// DenseMap key traits for ObjectUnderConstruction: empty/tombstone keys are
// built from the LValueBase traits plus an empty path.
683 template<> struct DenseMapInfo<ObjectUnderConstruction> {
684 using Base = DenseMapInfo<APValue::LValueBase>;
685 static ObjectUnderConstruction getEmptyKey() {
686 return {Base::getEmptyKey(), {}}; }
687 static ObjectUnderConstruction getTombstoneKey() {
688 return {Base::getTombstoneKey(), {}};
690 static unsigned getHashValue(const ObjectUnderConstruction &Object) {
691 return hash_value(Object);
693 static bool isEqual(const ObjectUnderConstruction &LHS,
694 const ObjectUnderConstruction &RHS) {
701 /// A dynamically-allocated heap object.
// NOTE(review): the struct header, the APValue member, and the Kind enum
// declaration are elided in this listing — confirm upstream. getKind()
// below implies at least New, ArrayNew, and a CallExpr-based kind.
703 /// The value of this heap-allocated object.
705 /// The allocating expression; used for diagnostics. Either a CXXNewExpr
706 /// or a CallExpr (the latter is for direct calls to operator new inside
707 /// std::allocator<T>::allocate).
708 const Expr *AllocExpr = nullptr;
716 /// Get the kind of the allocation. This must match between allocation
717 /// and deallocation.
718 Kind getKind() const {
719 if (auto *NE = dyn_cast<CXXNewExpr>(AllocExpr))
720 return NE->isArray() ? ArrayNew : New;
// Anything that isn't a new-expression must be the std::allocator call.
721 assert(isa<CallExpr>(AllocExpr));
// Strict-weak ordering for DynamicAllocLValue keys in the HeapAllocs map,
// ordered by allocation index.
726 struct DynAllocOrder {
727 bool operator()(DynamicAllocLValue L, DynamicAllocLValue R) const {
728 return L.getIndex() < R.getIndex();
732 /// EvalInfo - This is a private struct used by the evaluator to capture
733 /// information about a subexpression as it is folded. It retains information
734 /// about the AST context, but also maintains information about the folded
737 /// If an expression could be evaluated, it is still possible it is not a C
738 /// "integer constant expression" or constant expression. If not, this struct
739 /// captures information about how and why not.
741 /// One bit of information passed *into* the request for constant folding
742 /// indicates whether the subexpression is "evaluated" or not according to C
743 /// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
744 /// evaluate the expression regardless of what the RHS is, but C only allows
745 /// certain things in certain situations.
746 class EvalInfo : public interp::State {
750 /// EvalStatus - Contains information about the evaluation.
751 Expr::EvalStatus &EvalStatus;
753 /// CurrentCall - The top of the constexpr call stack.
754 CallStackFrame *CurrentCall;
756 /// CallStackDepth - The number of calls in the call stack right now.
757 unsigned CallStackDepth;
759 /// NextCallIndex - The next call index to assign.
760 unsigned NextCallIndex;
762 /// StepsLeft - The remaining number of evaluation steps we're permitted
763 /// to perform. This is essentially a limit for the number of statements
764 /// we will evaluate.
767 /// Enable the experimental new constant interpreter. If an expression is
768 /// not supported by the interpreter, an error is triggered.
769 bool EnableNewConstInterp;
771 /// BottomFrame - The frame in which evaluation started. This must be
772 /// initialized after CurrentCall and CallStackDepth.
773 CallStackFrame BottomFrame;
775 /// A stack of values whose lifetimes end at the end of some surrounding
776 /// evaluation frame.
777 llvm::SmallVector<Cleanup, 16> CleanupStack;
779 /// EvaluatingDecl - This is the declaration whose initializer is being
780 /// evaluated, if any.
781 APValue::LValueBase EvaluatingDecl;
783 enum class EvaluatingDeclKind {
785 /// We're evaluating the construction of EvaluatingDecl.
787 /// We're evaluating the destruction of EvaluatingDecl.
790 EvaluatingDeclKind IsEvaluatingDecl = EvaluatingDeclKind::None;
792 /// EvaluatingDeclValue - This is the value being constructed for the
793 /// declaration whose initializer is being evaluated, if any.
794 APValue *EvaluatingDeclValue;
796 /// Set of objects that are currently being constructed.
797 llvm::DenseMap<ObjectUnderConstruction, ConstructionPhase>
798 ObjectsUnderConstruction;
800 /// Current heap allocations, along with the location where each was
801 /// allocated. We use std::map here because we need stable addresses
802 /// for the stored APValues.
803 std::map<DynamicAllocLValue, DynAlloc, DynAllocOrder> HeapAllocs;
805 /// The number of heap allocations performed so far in this evaluation.
806 unsigned NumHeapAllocs = 0;
808 struct EvaluatingConstructorRAII {
810 ObjectUnderConstruction Object;
812 EvaluatingConstructorRAII(EvalInfo &EI, ObjectUnderConstruction Object,
814 : EI(EI), Object(Object) {
816 EI.ObjectsUnderConstruction
817 .insert({Object, HasBases ? ConstructionPhase::Bases
818 : ConstructionPhase::AfterBases})
821 void finishedConstructingBases() {
822 EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterBases;
824 ~EvaluatingConstructorRAII() {
825 if (DidInsert) EI.ObjectsUnderConstruction.erase(Object);
829 struct EvaluatingDestructorRAII {
831 ObjectUnderConstruction Object;
833 EvaluatingDestructorRAII(EvalInfo &EI, ObjectUnderConstruction Object)
834 : EI(EI), Object(Object) {
835 DidInsert = EI.ObjectsUnderConstruction
836 .insert({Object, ConstructionPhase::Destroying})
839 void startedDestroyingBases() {
840 EI.ObjectsUnderConstruction[Object] =
841 ConstructionPhase::DestroyingBases;
843 ~EvaluatingDestructorRAII() {
845 EI.ObjectsUnderConstruction.erase(Object);
850 isEvaluatingCtorDtor(APValue::LValueBase Base,
851 ArrayRef<APValue::LValuePathEntry> Path) {
852 return ObjectsUnderConstruction.lookup({Base, Path});
855 /// If we're currently speculatively evaluating, the outermost call stack
856 /// depth at which we can mutate state, otherwise 0.
857 unsigned SpeculativeEvaluationDepth = 0;
859 /// The current array initialization index, if we're performing array
861 uint64_t ArrayInitIndex = -1;
863 /// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
864 /// notes attached to it will also be stored, otherwise they will not be.
865 bool HasActiveDiagnostic;
867 /// Have we emitted a diagnostic explaining why we couldn't constant
868 /// fold (not just why it's not strictly a constant expression)?
869 bool HasFoldFailureDiagnostic;
871 /// Whether or not we're in a context where the front end requires a
873 bool InConstantContext;
875 /// Whether we're checking that an expression is a potential constant
876 /// expression. If so, do not fail on constructs that could become constant
877 /// later on (such as a use of an undefined global).
878 bool CheckingPotentialConstantExpression = false;
880 /// Whether we're checking for an expression that has undefined behavior.
881 /// If so, we will produce warnings if we encounter an operation that is
882 /// always undefined.
883 bool CheckingForUndefinedBehavior = false;
885 enum EvaluationMode {
886 /// Evaluate as a constant expression. Stop if we find that the expression
887 /// is not a constant expression.
888 EM_ConstantExpression,
890 /// Evaluate as a constant expression. Stop if we find that the expression
891 /// is not a constant expression. Some expressions can be retried in the
892 /// optimizer if we don't constant fold them here, but in an unevaluated
893 /// context we try to fold them immediately since the optimizer never
894 /// gets a chance to look at it.
895 EM_ConstantExpressionUnevaluated,
897 /// Fold the expression to a constant. Stop if we hit a side-effect that
901 /// Evaluate in any way we know how. Don't worry about side-effects that
902 /// can't be modeled.
903 EM_IgnoreSideEffects,
906 /// Are we checking whether the expression is a potential constant
908 bool checkingPotentialConstantExpression() const override {
909 return CheckingPotentialConstantExpression;
912 /// Are we checking an expression for overflow?
913 // FIXME: We should check for any kind of undefined or suspicious behavior
914 // in such constructs, not just overflow.
915 bool checkingForUndefinedBehavior() const override {
916 return CheckingForUndefinedBehavior;
919 EvalInfo(const ASTContext &C, Expr::EvalStatus &S, EvaluationMode Mode)
920 : Ctx(const_cast<ASTContext &>(C)), EvalStatus(S), CurrentCall(nullptr),
921 CallStackDepth(0), NextCallIndex(1),
922 StepsLeft(C.getLangOpts().ConstexprStepLimit),
923 EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp),
924 BottomFrame(*this, SourceLocation(), nullptr, nullptr, nullptr),
925 EvaluatingDecl((const ValueDecl *)nullptr),
926 EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
927 HasFoldFailureDiagnostic(false), InConstantContext(false),
// Record the declaration whose initializer (or destruction) we are currently
// evaluating, together with the APValue slot the result is being built into.
934 void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value,
935 EvaluatingDeclKind EDK = EvaluatingDeclKind::Ctor) {
936 EvaluatingDecl = Base;
937 IsEvaluatingDecl = EDK;
938 EvaluatingDeclValue = &Value;
// Enforce the -fconstexpr-depth call-depth limit and guard against call-index
// wrap-around. NOTE(review): the return statements between the visible lines
// are elided from this excerpt.
941 bool CheckCallLimit(SourceLocation Loc) {
942 // Don't perform any constexpr calls (other than the call we're checking)
943 // when checking a potential constant expression.
944 if (checkingPotentialConstantExpression() && CallStackDepth > 1)
946 if (NextCallIndex == 0) {
947 // NextCallIndex has wrapped around.
948 FFDiag(Loc, diag::note_constexpr_call_limit_exceeded);
951 if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
953 FFDiag(Loc, diag::note_constexpr_depth_limit_exceeded)
954 << getLangOpts().ConstexprCallDepth;
// Map a call index to its live stack frame (and that frame's depth) by
// walking the Caller chain from the innermost frame outwards.
958 std::pair<CallStackFrame *, unsigned>
959 getCallFrameAndDepth(unsigned CallIndex) {
960 assert(CallIndex && "no call index in getCallFrameAndDepth");
961 // We will eventually hit BottomFrame, which has Index 1, so Frame can't
962 // be null in this loop.
963 unsigned Depth = CallStackDepth;
964 CallStackFrame *Frame = CurrentCall;
965 while (Frame->Index > CallIndex) {
966 Frame = Frame->Caller;
969 if (Frame->Index == CallIndex)
970 return {Frame, Depth};
// Account for one evaluation step against the -fconstexpr-steps budget;
// diagnoses when the limit is exceeded (decrement/early-return lines elided).
974 bool nextStep(const Stmt *S) {
976 FFDiag(S->getBeginLoc(), diag::note_constexpr_step_limit_exceeded);
983 APValue *createHeapAlloc(const Expr *E, QualType T, LValue &LV);
// Look up a constexpr dynamic allocation; Result stays None when DA is not
// in HeapAllocs (e.g. the allocation is unknown to this evaluation).
985 Optional<DynAlloc*> lookupDynamicAlloc(DynamicAllocLValue DA) {
986 Optional<DynAlloc*> Result;
987 auto It = HeapAllocs.find(DA);
988 if (It != HeapAllocs.end())
989 Result = &It->second;
993 /// Information about a stack frame for std::allocator<T>::[de]allocate.
994 struct StdAllocatorCaller {
// A FrameIndex of 0 means "no such caller was found".
997 explicit operator bool() const { return FrameIndex != 0; };
// Search the active call stack for a frame that is a call to
// std::allocator<T>::<FnName>, returning that frame's index and T.
1000 StdAllocatorCaller getStdAllocatorCaller(StringRef FnName) const {
1001 for (const CallStackFrame *Call = CurrentCall; Call != &BottomFrame;
1002 Call = Call->Caller) {
1003 const auto *MD = dyn_cast_or_null<CXXMethodDecl>(Call->Callee);
1006 const IdentifierInfo *FnII = MD->getIdentifier();
1007 if (!FnII || !FnII->isStr(FnName))
1011 dyn_cast<ClassTemplateSpecializationDecl>(MD->getParent());
1015 const IdentifierInfo *ClassII = CTSD->getIdentifier();
1016 const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
1017 if (CTSD->isInStdNamespace() && ClassII &&
1018 ClassII->isStr("allocator") && TAL.size() >= 1 &&
1019 TAL[0].getKind() == TemplateArgument::Type)
1020 return {Call->Index, TAL[0].getAsType()};
1026 void performLifetimeExtension() {
1027 // Disable the cleanups for lifetime-extended temporaries.
// Erase-remove idiom; the 'CleanupStack.erase(' head of the statement is on
// an elided line of this excerpt.
1029 std::remove_if(CleanupStack.begin(), CleanupStack.end(),
1030 [](Cleanup &C) { return C.isLifetimeExtended(); }),
1031 CleanupStack.end());
1034 /// Throw away any remaining cleanups at the end of evaluation. If any
1035 /// cleanups would have had a side-effect, note that as an unmodeled
1036 /// side-effect and return false. Otherwise, return true.
1037 bool discardCleanups() {
1038 for (Cleanup &C : CleanupStack) {
1039 if (C.hasSideEffect() && !noteSideEffect()) {
1040 CleanupStack.clear()
1044 CleanupStack.clear();
// interp::State overrides: expose this evaluation's frames, diagnostic flags,
// status, and ASTContext to code shared with the new constant interpreter.
1049 interp::Frame *getCurrentFrame() override { return CurrentCall; }
1050 const interp::Frame *getBottomFrame() const override { return &BottomFrame; }
1052 bool hasActiveDiagnostic() override { return HasActiveDiagnostic; }
1053 void setActiveDiagnostic(bool Flag) override { HasActiveDiagnostic = Flag; }
1055 void setFoldFailureDiagnostic(bool Flag) override {
1056 HasFoldFailureDiagnostic = Flag;
1059 Expr::EvalStatus &getEvalStatus() const override { return EvalStatus; }
1061 ASTContext &getCtx() const override { return Ctx; }
1063 // If we have a prior diagnostic, it will be noting that the expression
1064 // isn't a constant expression. This diagnostic is more important,
1065 // unless we require this evaluation to produce a constant expression.
1067 // FIXME: We might want to show both diagnostics to the user in
1068 // EM_ConstantFold mode.
// Decide whether an existing diagnostic should win over a new one. In the
// folding modes an earlier fold-failure diagnostic is kept; in the strict
// constant-expression modes the stale diagnostic is deactivated so the new,
// more precise one can be emitted. (switch header / returns elided here.)
1069 bool hasPriorDiagnostic() override {
1070 if (!EvalStatus.Diag->empty()) {
1072 case EM_ConstantFold:
1073 case EM_IgnoreSideEffects:
1074 if (!HasFoldFailureDiagnostic)
1076 // We've already failed to fold something. Keep that diagnostic.
1078 case EM_ConstantExpression:
1079 case EM_ConstantExpressionUnevaluated:
1080 setActiveDiagnostic(false);
1087 unsigned getCallStackDepth() override { return CallStackDepth; }
1090 /// Should we continue evaluation after encountering a side-effect that we
1092 bool keepEvaluatingAfterSideEffect() {
1094 case EM_IgnoreSideEffects:
1097 case EM_ConstantExpression:
1098 case EM_ConstantExpressionUnevaluated:
1099 case EM_ConstantFold:
1100 // By default, assume any side effect might be valid in some other
1101 // evaluation of this expression from a different context.
1102 return checkingPotentialConstantExpression() ||
1103 checkingForUndefinedBehavior();
1105 llvm_unreachable("Missed EvalMode case");
1108 /// Note that we have had a side-effect, and determine whether we should
1109 /// keep evaluating.
1110 bool noteSideEffect() {
1111 EvalStatus.HasSideEffects = true;
1112 return keepEvaluatingAfterSideEffect();
1115 /// Should we continue evaluation after encountering undefined behavior?
1116 bool keepEvaluatingAfterUndefinedBehavior() {
1118 case EM_IgnoreSideEffects:
1119 case EM_ConstantFold:
1122 case EM_ConstantExpression:
1123 case EM_ConstantExpressionUnevaluated:
1124 return checkingForUndefinedBehavior();
1126 llvm_unreachable("Missed EvalMode case");
1129 /// Note that we hit something that was technically undefined behavior, but
1130 /// that we can evaluate past it (such as signed overflow or floating-point
1131 /// division by zero.)
1132 bool noteUndefinedBehavior() override {
1133 EvalStatus.HasUndefinedBehavior = true;
1134 return keepEvaluatingAfterUndefinedBehavior();
1137 /// Should we continue evaluation as much as possible after encountering a
1138 /// construct which can't be reduced to a value?
1139 bool keepEvaluatingAfterFailure() const override {
1144 case EM_ConstantExpression:
1145 case EM_ConstantExpressionUnevaluated:
1146 case EM_ConstantFold:
1147 case EM_IgnoreSideEffects:
1148 return checkingPotentialConstantExpression() ||
1149 checkingForUndefinedBehavior();
1151 llvm_unreachable("Missed EvalMode case");
1154 /// Notes that we failed to evaluate an expression that other expressions
1155 /// directly depend on, and determine if we should keep evaluating. This
1156 /// should only be called if we actually intend to keep evaluating.
1158 /// Call noteSideEffect() instead if we may be able to ignore the value that
1159 /// we failed to evaluate, e.g. if we failed to evaluate Foo() in:
1161 /// (Foo(), 1) // use noteSideEffect
1162 /// (Foo() || true) // use noteSideEffect
1163 /// Foo() + 1 // use noteFailure
1164 LLVM_NODISCARD bool noteFailure() {
1165 // Failure when evaluating some expression often means there is some
1166 // subexpression whose evaluation was skipped. Therefore, (because we
1167 // don't track whether we skipped an expression when unwinding after an
1168 // evaluation failure) every evaluation failure that bubbles up from a
1169 // subexpression implies that a side-effect has potentially happened. We
1170 // skip setting the HasSideEffects flag to true until we decide to
1171 // continue evaluating after that point, which happens here.
1172 bool KeepGoing = keepEvaluatingAfterFailure();
1173 EvalStatus.HasSideEffects |= KeepGoing;
// RAII helper for ArrayInitLoopExpr evaluation: saves the enclosing loop's
// array-init index, resets it to 0 for the nested loop, and restores it on
// destruction. The implicit conversion lets the object be used directly as
// the current index (a reference into Info.ArrayInitIndex).
1177 class ArrayInitLoopIndex {
1179 uint64_t OuterIndex;
1182 ArrayInitLoopIndex(EvalInfo &Info)
1183 : Info(Info), OuterIndex(Info.ArrayInitIndex) {
1184 Info.ArrayInitIndex = 0;
1186 ~ArrayInitLoopIndex() { Info.ArrayInitIndex = OuterIndex; }
1188 operator uint64_t&() { return Info.ArrayInitIndex; }
1192 /// Object used to treat all foldable expressions as constant expressions.
1193 struct FoldConstant {
// HadNoPriorDiags remembers whether this evaluation was diagnostic-free on
// entry so the destructor can discard diagnostics produced purely while
// folding (unless keepDiagnostics() was called).
1196 bool HadNoPriorDiags;
1197 EvalInfo::EvaluationMode OldMode;
1199 explicit FoldConstant(EvalInfo &Info, bool Enabled)
1202 HadNoPriorDiags(Info.EvalStatus.Diag &&
1203 Info.EvalStatus.Diag->empty() &&
1204 !Info.EvalStatus.HasSideEffects),
1205 OldMode(Info.EvalMode) {
// Switch to fold mode for the duration of this object's lifetime
// (conditional guard on 'Enabled' elided from this excerpt).
1207 Info.EvalMode = EvalInfo::EM_ConstantFold;
1209 void keepDiagnostics() { Enabled = false; }
// Destructor body: drop fold-only diagnostics and restore the prior mode
// (the '~FoldConstant() {' line is elided from this excerpt).
1211 if (Enabled && HadNoPriorDiags && !Info.EvalStatus.Diag->empty() &&
1212 !Info.EvalStatus.HasSideEffects)
1213 Info.EvalStatus.Diag->clear();
1214 Info.EvalMode = OldMode;
1218 /// RAII object used to set the current evaluation mode to ignore
1220 struct IgnoreSideEffectsRAII {
1222 EvalInfo::EvaluationMode OldMode;
// Save the current mode and switch to EM_IgnoreSideEffects; the destructor
// restores the saved mode.
1223 explicit IgnoreSideEffectsRAII(EvalInfo &Info)
1224 : Info(Info), OldMode(Info.EvalMode) {
1225 Info.EvalMode = EvalInfo::EM_IgnoreSideEffects;
1228 ~IgnoreSideEffectsRAII() { Info.EvalMode = OldMode; }
1231 /// RAII object used to optionally suppress diagnostics and side-effects from
1232 /// a speculative evaluation.
1233 class SpeculativeEvaluationRAII {
1234 EvalInfo *Info = nullptr;
1235 Expr::EvalStatus OldStatus;
1236 unsigned OldSpeculativeEvaluationDepth;
// Move support: steal Other's saved state and cancel Other's restore by
// nulling its Info pointer (a null Info means "nothing to restore").
1238 void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) {
1240 OldStatus = Other.OldStatus;
1241 OldSpeculativeEvaluationDepth = Other.OldSpeculativeEvaluationDepth;
1242 Other.Info = nullptr;
// Restore the pre-speculation status and depth, unless this object was
// moved from (guard on Info elided from this excerpt).
1245 void maybeRestoreState() {
1249 Info->EvalStatus = OldStatus;
1250 Info->SpeculativeEvaluationDepth = OldSpeculativeEvaluationDepth;
1254 SpeculativeEvaluationRAII() = default;
// Begin a speculative evaluation: diagnostics are redirected to NewDiag (or
// discarded when null), and the speculation depth is set one past the
// current call depth so temporaries created inside can be identified.
1256 SpeculativeEvaluationRAII(
1257 EvalInfo &Info, SmallVectorImpl<PartialDiagnosticAt> *NewDiag = nullptr)
1258 : Info(&Info), OldStatus(Info.EvalStatus),
1259 OldSpeculativeEvaluationDepth(Info.SpeculativeEvaluationDepth) {
1260 Info.EvalStatus.Diag = NewDiag;
1261 Info.SpeculativeEvaluationDepth = Info.CallStackDepth + 1;
1264 SpeculativeEvaluationRAII(const SpeculativeEvaluationRAII &Other) = delete;
1265 SpeculativeEvaluationRAII(SpeculativeEvaluationRAII &&Other) {
1266 moveFromAndCancel(std::move(Other));
1269 SpeculativeEvaluationRAII &operator=(SpeculativeEvaluationRAII &&Other) {
1270 maybeRestoreState();
1271 moveFromAndCancel(std::move(Other));
1275 ~SpeculativeEvaluationRAII() { maybeRestoreState(); }
1278 /// RAII object wrapping a full-expression or block scope, and handling
1279 /// the ending of the lifetime of temporaries created within it.
1280 template<bool IsFullExpression>
// OldStackSize records the cleanup-stack watermark on entry; everything
// pushed above it belongs to this scope. A value of -1U marks a scope whose
// cleanups were already run explicitly via destroy().
1283 unsigned OldStackSize;
1285 ScopeRAII(EvalInfo &Info)
1286 : Info(Info), OldStackSize(Info.CleanupStack.size()) {
1287 // Push a new temporary version. This is needed to distinguish between
1288 // temporaries created in different iterations of a loop.
1289 Info.CurrentCall->pushTempVersion();
// Explicitly end this scope, optionally running destructors; returns false
// if any cleanup failed.
1291 bool destroy(bool RunDestructors = true) {
1292 bool OK = cleanup(Info, RunDestructors, OldStackSize);
// Destructor: run any cleanups destroy() did not already handle.
1297 if (OldStackSize != -1U)
1299 // Body moved to a static method to encourage the compiler to inline away
1300 // instances of this class.
1301 Info.CurrentCall->popTempVersion();
1304 static bool cleanup(EvalInfo &Info, bool RunDestructors,
1305 unsigned OldStackSize) {
1306 assert(OldStackSize <= Info.CleanupStack.size() &&
1307 "running cleanups out of order?");
1309 // Run all cleanups for a block scope, and non-lifetime-extended cleanups
1310 // for a full-expression scope.
1311 bool Success = true;
// Iterate top-down so cleanups run in reverse construction order.
1312 for (unsigned I = Info.CleanupStack.size(); I > OldStackSize; --I) {
1313 if (!(IsFullExpression &&
1314 Info.CleanupStack[I - 1].isLifetimeExtended())) {
1315 if (!Info.CleanupStack[I - 1].endLifetime(Info, RunDestructors)) {
1322 // Compact lifetime-extended cleanups.
1323 auto NewEnd = Info.CleanupStack.begin() + OldStackSize;
1324 if (IsFullExpression)
// Keep only the lifetime-extended cleanups above the watermark (the
// 'NewEnd =' head of this erase-remove idiom is on an elided line).
1326 std::remove_if(NewEnd, Info.CleanupStack.end(),
1327 [](Cleanup &C) { return !C.isLifetimeExtended(); });
1328 Info.CleanupStack.erase(NewEnd, Info.CleanupStack.end());
1332 typedef ScopeRAII<false> BlockScopeRAII;
1333 typedef ScopeRAII<true> FullExpressionRAII;
// Validate a subobject adjustment of kind CSK against this designator,
// diagnosing navigation past the end of an object.
1336 bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
1337 CheckSubobjectKind CSK) {
1340 if (isOnePastTheEnd()) {
1341 Info.CCEDiag(E, diag::note_constexpr_past_end_subobject)
1346 // Note, we do not diagnose if isMostDerivedAnUnsizedArray(), because there
1347 // must actually be at least one array element; even a VLA cannot have a
1348 // bound of zero. And if our index is nonzero, we already had a CCEDiag.
// Diagnose arithmetic on a pointer into an array of unknown bound.
1352 void SubobjectDesignator::diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info,
1354 Info.CCEDiag(E, diag::note_constexpr_unsized_array_indexed);
1355 // Do not set the designator as invalid: we can represent this situation,
1356 // and correct handling of __builtin_object_size requires us to do so.
// Diagnose out-of-bounds pointer arithmetic; the message differs depending
// on whether the most-derived object is an array (so a bound can be shown)
// or a non-array object.
1359 void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
1362 // If we're complaining, we must be able to statically determine the size of
1363 // the most derived array.
1364 if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement)
1365 Info.CCEDiag(E, diag::note_constexpr_array_index)
1367 << static_cast<unsigned>(getMostDerivedArraySize());
1369 Info.CCEDiag(E, diag::note_constexpr_array_index)
1370 << N << /*non-array*/ 1;
// Push a new constexpr call frame: link into the caller chain, take the next
// call index, and make this the current frame.
1374 CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
1375 const FunctionDecl *Callee, const LValue *This,
1377 : Info(Info), Caller(Info.CurrentCall), Callee(Callee), This(This),
1378 Arguments(Arguments), CallLoc(CallLoc), Index(Info.NextCallIndex++) {
1379 Info.CurrentCall = this;
1380 ++Info.CallStackDepth;
// Pop this frame; frames must be destroyed strictly LIFO.
1383 CallStackFrame::~CallStackFrame() {
1384 assert(Info.CurrentCall == this && "calls retired out of order");
1385 --Info.CallStackDepth;
1386 Info.CurrentCall = Caller;
// Classify an AccessKinds value as a read of the object's value.
1389 static bool isRead(AccessKinds AK) {
1390 return AK == AK_Read || AK == AK_ReadObjectRepresentation;
// Classify an AccessKinds value as a modification of the object (the switch
// cases for the modifying kinds fall in elided lines of this excerpt).
1393 static bool isModification(AccessKinds AK) {
1396 case AK_ReadObjectRepresentation:
1398 case AK_DynamicCast:
1408 llvm_unreachable("unknown access kind");
1411 static bool isAnyAccess(AccessKinds AK) {
1412 return isRead(AK) || isModification(AK);
1415 /// Is this an access per the C++ definition?
1416 static bool isFormalAccess(AccessKinds AK) {
1417 return isAnyAccess(AK) && AK != AK_Construct && AK != AK_Destroy;
// A complex value during evaluation: either a pair of APSInts or a pair of
// APFloats, discriminated by IsInt. Only the active pair is meaningful.
1421 struct ComplexValue {
1426 APSInt IntReal, IntImag;
1427 APFloat FloatReal, FloatImag;
// APFloat has no default constructor, so initialize with the Bogus
// semantics until setFrom()/makeComplexFloat() supplies real values.
1429 ComplexValue() : FloatReal(APFloat::Bogus()), FloatImag(APFloat::Bogus()) {}
1431 void makeComplexFloat() { IsInt = false; }
1432 bool isComplexFloat() const { return !IsInt; }
1433 APFloat &getComplexFloatReal() { return FloatReal; }
1434 APFloat &getComplexFloatImag() { return FloatImag; }
1436 void makeComplexInt() { IsInt = true; }
1437 bool isComplexInt() const { return IsInt; }
1438 APSInt &getComplexIntReal() { return IntReal; }
1439 APSInt &getComplexIntImag() { return IntImag; }
// Export this value into an APValue.
1441 void moveInto(APValue &v) const {
1442 if (isComplexFloat())
1443 v = APValue(FloatReal, FloatImag)
1445 v = APValue(IntReal, IntImag);
// Import a complex APValue into this representation.
1447 void setFrom(const APValue &v) {
1448 assert(v.isComplexFloat() || v.isComplexInt());
1449 if (v.isComplexFloat()) {
1451 FloatReal = v.getComplexFloatReal();
1452 FloatImag = v.getComplexFloatImag();
1455 IntReal = v.getComplexIntReal();
1456 IntImag = v.getComplexIntImag();
// LValue state (the 'struct LValue {' header is elided from this excerpt):
// Base identifies the complete object, Designator the subobject within it.
// InvalidBase marks the few tolerated invalid bases (see set() below).
1462 APValue::LValueBase Base;
1464 SubobjectDesignator Designator;
1466 bool InvalidBase : 1;
1468 const APValue::LValueBase getLValueBase() const { return Base; }
1469 CharUnits &getLValueOffset() { return Offset; }
1470 const CharUnits &getLValueOffset() const { return Offset; }
1471 SubobjectDesignator &getLValueDesignator() { return Designator; }
1472 const SubobjectDesignator &getLValueDesignator() const { return Designator;}
1473 bool isNullPointer() const { return IsNullPtr;}
// Call index / version live inside the LValueBase.
1475 unsigned getLValueCallIndex() const { return Base.getCallIndex(); }
1476 unsigned getLValueVersion() const { return Base.getVersion(); }
// Export this lvalue as an APValue; an invalid designator is exported
// without an lvalue path.
1478 void moveInto(APValue &V) const {
1479 if (Designator.Invalid)
1480 V = APValue(Base, Offset, APValue::NoLValuePath(), IsNullPtr);
1482 assert(!InvalidBase && "APValues can't handle invalid LValue bases");
1483 V = APValue(Base, Offset, Designator.Entries,
1484 Designator.IsOnePastTheEnd, IsNullPtr);
// Import an lvalue APValue into this representation.
1487 void setFrom(ASTContext &Ctx, const APValue &V) {
1488 assert(V.isLValue() && "Setting LValue from a non-LValue?");
1489 Base = V.getLValueBase();
1490 Offset = V.getLValueOffset();
1491 InvalidBase = false;
1492 Designator = SubobjectDesignator(Ctx, V);
1493 IsNullPtr = V.isNullPointer();
// Point this lvalue at the start of object B. BInvalid marks a base we
// cannot fully model; only MemberExprs and alloc_size calls are tolerated.
1496 void set(APValue::LValueBase B, bool BInvalid = false) {
1498 // We only allow a few types of invalid bases. Enforce that here.
1500 const auto *E = B.get<const Expr *>();
1501 assert((isa<MemberExpr>(E) || tryUnwrapAllocSizeCall(E)) &&
1502 "Unexpected type of invalid base");
1507 Offset = CharUnits::fromQuantity(0);
1508 InvalidBase = BInvalid;
1509 Designator = SubobjectDesignator(getType(B));
// Form a null pointer of the given type; the offset is the target's null
// pointer representation value.
1513 void setNull(ASTContext &Ctx, QualType PointerTy) {
1514 Base = (Expr *)nullptr;
1516 CharUnits::fromQuantity(Ctx.getTargetNullPointerValue(PointerTy));
1517 InvalidBase = false;
1518 Designator = SubobjectDesignator(PointerTy->getPointeeType());
1522 void setInvalid(APValue::LValueBase B, unsigned I = 0) {
// Render this lvalue as source-like text for diagnostics.
1526 std::string toString(ASTContext &Ctx, QualType T) const {
1528 moveInto(Printable);
1529 return Printable.getAsString(Ctx, T);
1533 // Check that this LValue is not based on a null pointer. If it is, produce
1534 // a diagnostic and mark the designator as invalid.
1535 template <typename GenDiagType>
1536 bool checkNullPointerDiagnosingWith(const GenDiagType &GenDiag) {
1537 if (Designator.Invalid)
1541 Designator.setInvalid();
// Null-pointer check emitting the "null subobject" CCE note.
1548 bool checkNullPointer(EvalInfo &Info, const Expr *E,
1549 CheckSubobjectKind CSK) {
1550 return checkNullPointerDiagnosingWith([&Info, E, CSK] {
1551 Info.CCEDiag(E, diag::note_constexpr_null_subobject) << CSK;
// Null-pointer check emitting the stronger fold-failure diagnostic, for
// actual accesses rather than mere subobject navigation.
1555 bool checkNullPointerForFoldAccess(EvalInfo &Info, const Expr *E,
1557 return checkNullPointerDiagnosingWith([&Info, E, AK] {
1558 Info.FFDiag(E, diag::note_constexpr_access_null) << AK;
1562 // Check this LValue refers to an object. If not, set the designator to be
1563 // invalid and emit a diagnostic.
1564 bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
// Array-to-pointer decay is always permitted, even on a null pointer.
1565 return (CSK == CSK_ArrayToPointer || checkNullPointer(Info, E, CSK)) &&
1566 Designator.checkSubobject(Info, E, CSK);
// The add* helpers extend the designator one level deeper after validating
// the step; on failure the designator is left marked invalid.
1569 void addDecl(EvalInfo &Info, const Expr *E,
1570 const Decl *D, bool Virtual = false) {
1571 if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
1572 Designator.addDeclUnchecked(D, Virtual);
1574 void addUnsizedArray(EvalInfo &Info, const Expr *E, QualType ElemTy) {
// An array of unknown bound can only appear as the first designator entry.
1575 if (!Designator.Entries.empty()) {
1576 Info.CCEDiag(E, diag::note_constexpr_unsupported_unsized_array);
1577 Designator.setInvalid();
1580 if (checkSubobject(Info, E, CSK_ArrayToPointer)) {
1581 assert(getType(Base)->isPointerType() || getType(Base)->isArrayType());
1582 Designator.FirstEntryIsAnUnsizedArray = true;
1583 Designator.addUnsizedArrayUnchecked(ElemTy);
1586 void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
1587 if (checkSubobject(Info, E, CSK_ArrayToPointer))
1588 Designator.addArrayUnchecked(CAT);
1590 void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) {
1591 if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
1592 Designator.addComplexUnchecked(EltTy, Imag);
1594 void clearIsNullPointer() {
// Apply pointer arithmetic: advance by Index elements of ElementSize and
// update the designator's array index accordingly.
1597 void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E,
1598 const APSInt &Index, CharUnits ElementSize) {
1599 // An index of 0 has no effect. (In C, adding 0 to a null pointer is UB,
1600 // but we're not required to diagnose it and it's valid in C++.)
1604 // Compute the new offset in the appropriate width, wrapping at 64 bits.
1605 // FIXME: When compiling for a 32-bit target, we should use 32-bit
1607 uint64_t Offset64 = Offset.getQuantity();
1608 uint64_t ElemSize64 = ElementSize.getQuantity();
1609 uint64_t Index64 = Index.extOrTrunc(64).getZExtValue();
1610 Offset = CharUnits::fromQuantity(Offset64 + ElemSize64 * Index64);
1612 if (checkNullPointer(Info, E, CSK_ArrayIndex))
1613 Designator.adjustIndex(Info, E, Index);
// A nonzero adjustment means this can no longer be a null pointer.
1614 clearIsNullPointer();
1616 void adjustOffset(CharUnits N) {
1618 if (N.getQuantity())
1619 clearIsNullPointer();
// MemberPtr interior (the class header is elided from this excerpt): a
// member-pointer value, tracking the declared member plus the base/derived
// path applied by casts.
1625 explicit MemberPtr(const ValueDecl *Decl) :
1626 DeclAndIsDerivedMember(Decl, false), Path() {}
1628 /// The member or (direct or indirect) field referred to by this member
1629 /// pointer, or 0 if this is a null member pointer.
1630 const ValueDecl *getDecl() const {
1631 return DeclAndIsDerivedMember.getPointer();
1633 /// Is this actually a member of some type derived from the relevant class?
1634 bool isDerivedMember() const {
1635 return DeclAndIsDerivedMember.getInt();
1637 /// Get the class which the declaration actually lives in.
1638 const CXXRecordDecl *getContainingRecord() const {
1639 return cast<CXXRecordDecl>(
1640 DeclAndIsDerivedMember.getPointer()->getDeclContext());
// Export this member pointer as an APValue.
1643 void moveInto(APValue &V) const {
1644 V = APValue(getDecl(), isDerivedMember(), Path);
// Import a member-pointer APValue.
1646 void setFrom(const APValue &V) {
1647 assert(V.isMemberPointer());
1648 DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl());
1649 DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember());
1651 ArrayRef<const CXXRecordDecl*> P = V.getMemberPointerPath();
1652 Path.insert(Path.end(), P.begin(), P.end());
1655 /// DeclAndIsDerivedMember - The member declaration, and a flag indicating
1656 /// whether the member is a member of some class derived from the class type
1657 /// of the member pointer.
1658 llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember;
1659 /// Path - The path of base/derived classes from the member declaration's
1660 /// class (exclusive) to the class type of the member pointer (inclusive).
1661 SmallVector<const CXXRecordDecl*, 4> Path;
1663 /// Perform a cast towards the class of the Decl (either up or down the
// Pops the last Path entry after verifying Class matches what the path (or
// the containing record, for a now-empty path) says it should be.
1665 bool castBack(const CXXRecordDecl *Class) {
1666 assert(!Path.empty());
1667 const CXXRecordDecl *Expected;
1668 if (Path.size() >= 2)
1669 Expected = Path[Path.size() - 2];
1671 Expected = getContainingRecord();
1672 if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) {
1673 // C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*),
1674 // if B does not contain the original member and is not a base or
1675 // derived class of the class containing the original member, the result
1676 // of the cast is undefined.
1677 // C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to
1678 // (D::*). We consider that to be a language defect.
1684 /// Perform a base-to-derived member pointer cast.
1685 bool castToDerived(const CXXRecordDecl *Derived) {
// Casting to a class not (yet) on the path pushes it; casting back towards
// a derived member pops it and clears the derived-member flag.
1688 if (!isDerivedMember()) {
1689 Path.push_back(Derived);
1692 if (!castBack(Derived))
1695 DeclAndIsDerivedMember.setInt(false);
1698 /// Perform a derived-to-base member pointer cast.
1699 bool castToBase(const CXXRecordDecl *Base) {
1703 DeclAndIsDerivedMember.setInt(true);
1704 if (isDerivedMember()) {
1705 Path.push_back(Base);
1708 return castBack(Base);
1712 /// Compare two member pointers, which are assumed to be of the same type.
// Two null member pointers compare equal; otherwise the canonical member
// declarations and the base/derived paths must both match.
1713 static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) {
1714 if (!LHS.getDecl() || !RHS.getDecl())
1715 return !LHS.getDecl() && !RHS.getDecl();
1716 if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl())
1718 return LHS.Path == RHS.Path;
1722 static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E);
1723 static bool EvaluateInPlace(APValue &Result, EvalInfo &Info,
1724 const LValue &This, const Expr *E,
1725 bool AllowNonLiteralTypes = false);
1726 static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
1727 bool InvalidBaseOK = false);
1728 static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info,
1729 bool InvalidBaseOK = false);
1730 static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
1732 static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info);
1733 static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
1734 static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
1736 static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
1737 static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
1738 static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
1740 static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
1742 /// Evaluate an integer or fixed point expression into an APResult.
1743 static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
1746 /// Evaluate only a fixed point expression into an APResult.
1747 static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result,
1750 //===----------------------------------------------------------------------===//
1752 //===----------------------------------------------------------------------===//
1754 /// Negate an APSInt in place, converting it to a signed form if necessary, and
1755 /// preserving its value (by extending by up to one bit as needed).
1756 static void negateAsSigned(APSInt &Int) {
// Widening by one bit ensures both that an unsigned value fits as signed
// and that negating the minimum signed value cannot overflow. (The actual
// negation statement follows on an elided line of this excerpt.)
1757 if (Int.isUnsigned() || Int.isMinSignedValue()) {
1758 Int = Int.extend(Int.getBitWidth() + 1);
1759 Int.setIsSigned(true);
// Create storage for a temporary (or local) keyed by Key within this frame,
// at the frame's current temporary version, and set LV to refer to it.
1764 template<typename KeyT>
1765 APValue &CallStackFrame::createTemporary(const KeyT *Key, QualType T,
1766 bool IsLifetimeExtended, LValue &LV) {
1767 unsigned Version = getTempVersion();
1768 APValue::LValueBase Base(Key, Index, Version);
1770 APValue &Result = Temporaries[MapKeyTy(Key, Version)];
1771 assert(Result.isAbsent() && "temporary created multiple times");
1773 // If we're creating a temporary immediately in the operand of a speculative
1774 // evaluation, don't register a cleanup to be run outside the speculative
1775 // evaluation context, since we won't actually be able to initialize this
1777 if (Index <= Info.SpeculativeEvaluationDepth) {
// A destructor we cannot run counts as an unmodeled side-effect.
1778 if (T.isDestructedType())
1779 Info.noteSideEffect();
1781 Info.CleanupStack.push_back(Cleanup(&Result, Base, T, IsLifetimeExtended));
// Create a new constexpr dynamic allocation of type T, recording the
// allocating expression E for diagnostics, and point LV at it. Returns the
// APValue slot for the allocation's contents (failure path elided here).
1786 APValue *EvalInfo::createHeapAlloc(const Expr *E, QualType T, LValue &LV) {
1787 if (NumHeapAllocs > DynamicAllocLValue::getMaxIndex()) {
1788 FFDiag(E, diag::note_constexpr_heap_alloc_limit_exceeded);
1792 DynamicAllocLValue DA(NumHeapAllocs++);
1793 LV.set(APValue::LValueBase::getDynamicAlloc(DA, T));
// piecewise_construct with an empty tuple default-constructs the DynAlloc
// in place; no copy/move of the mapped value is needed.
1794 auto Result = HeapAllocs.emplace(std::piecewise_construct,
1795 std::forward_as_tuple(DA), std::tuple<>());
1796 assert(Result.second && "reused a heap alloc index?");
1797 Result.first->second.AllocExpr = E;
1798 return &Result.first->second.Value;
1801 /// Produce a string describing the given constexpr call.
1802 void CallStackFrame::describe(raw_ostream &Out) {
1803 unsigned ArgIndex = 0;
// Non-static member calls (other than constructors) are printed as
// "this->Callee(args...)" rather than "Callee(args...)".
1804 bool IsMemberCall = isa<CXXMethodDecl>(Callee) &&
1805 !isa<CXXConstructorDecl>(Callee) &&
1806 cast<CXXMethodDecl>(Callee)->isInstance();
1809 Out << *Callee << '(';
1811 if (This && IsMemberCall) {
1813 This->moveInto(Val);
1814 Val.printPretty(Out, Info.Ctx,
1815 This->Designator.MostDerivedType);
1816 // FIXME: Add parens around Val if needed.
1817 Out << "->" << *Callee << '(';
1818 IsMemberCall = false;
// Print each argument; for a member call the implicit object argument
// occupies Arguments[0] and is skipped in the parenthesized list.
1821 for (FunctionDecl::param_const_iterator I = Callee->param_begin(),
1822 E = Callee->param_end(); I != E; ++I, ++ArgIndex) {
1823 if (ArgIndex > (unsigned)IsMemberCall)
1826 const ParmVarDecl *Param = *I;
1827 const APValue &Arg = Arguments[ArgIndex];
1828 Arg.printPretty(Out, Info.Ctx, Param->getType());
1830 if (ArgIndex == 0 && IsMemberCall)
1831 Out << "->" << *Callee << '(';
1837 /// Evaluate an expression to see if it had side-effects, and discard its
1839 /// \return \c true if the caller should keep evaluating.
1840 static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) {
// The result is discarded; only the success/side-effect outcome matters.
1842 if (!Evaluate(Scratch, Info, E))
1843 // We don't need the value, but we might have skipped a side effect here.
1844 return Info.noteSideEffect();
1848 /// Should this call expression be treated as a string literal?
// True for the CFString/NSString constant-string builtins, whose results
// behave like string literals (static storage duration).
1849 static bool IsStringLiteralCall(const CallExpr *E) {
1850 unsigned Builtin = E->getBuiltinCallee();
1851 return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
1852 Builtin == Builtin::BI__builtin___NSStringMakeConstantString);
// Determine whether an lvalue base denotes an object with static storage
// duration (or a function), i.e. one whose address is a valid address
// constant. Several 'return' lines are elided from this excerpt.
1855 static bool IsGlobalLValue(APValue::LValueBase B) {
1856 // C++11 [expr.const]p3 An address constant expression is a prvalue core
1857 // constant expression of pointer type that evaluates to...
1859 // ... a null pointer value, or a prvalue core constant expression of type
1861 if (!B) return true;
1863 if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
1864 // ... the address of an object with static storage duration,
1865 if (const VarDecl *VD = dyn_cast<VarDecl>(D))
1866 return VD->hasGlobalStorage();
1867 // ... the address of a function,
1868 return isa<FunctionDecl>(D);
1871 if (B.is<TypeInfoLValue>() || B.is<DynamicAllocLValue>())
1874 const Expr *E = B.get<const Expr*>();
1875 switch (E->getStmtClass()) {
1878 case Expr::CompoundLiteralExprClass: {
1879 const CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
1880 return CLE->isFileScope() && CLE->isLValue();
1882 case Expr::MaterializeTemporaryExprClass:
1883 // A materialized temporary might have been lifetime-extended to static
1884 // storage duration.
1885 return cast<MaterializeTemporaryExpr>(E)->getStorageDuration() == SD_Static;
1886 // A string literal has static storage duration.
1887 case Expr::StringLiteralClass:
1888 case Expr::PredefinedExprClass:
1889 case Expr::ObjCStringLiteralClass:
1890 case Expr::ObjCEncodeExprClass:
1891 case Expr::CXXUuidofExprClass:
1893 case Expr::ObjCBoxedExprClass:
1894 return cast<ObjCBoxedExpr>(E)->isExpressibleAsConstantInitializer();
1895 case Expr::CallExprClass:
1896 return IsStringLiteralCall(cast<CallExpr>(E));
1897 // For GCC compatibility, &&label has static storage duration.
1898 case Expr::AddrLabelExprClass:
1900 // A Block literal expression may be used as the initialization value for
1901 // Block variables at global or local static scope.
1902 case Expr::BlockExprClass:
1903 return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures();
1904 case Expr::ImplicitValueInitExprClass:
1906 // We can never form an lvalue with an implicit value initialization as its
1907 // base through expression evaluation, so these only appear in one case: the
1908 // implicit variable declaration we invent when checking whether a constexpr
1909 // constructor can produce a constant expression. We must assume that such
1910 // an expression might be a global lvalue.
// Return the ValueDecl underlying this lvalue's base, or null if the base
// is an expression, typeid object, dynamic allocation, etc.
1915 static const ValueDecl *GetLValueBaseDecl(const LValue &LVal) {
1916 return LVal.Base.dyn_cast<const ValueDecl*>();
// A "literal" lvalue here is one based on an expression (other than a
// materialized temporary) outside any call frame.
1919 static bool IsLiteralLValue(const LValue &Value) {
1920 if (Value.getLValueCallIndex())
1922 const Expr *E = Value.Base.dyn_cast<const Expr*>();
1923 return E && !isa<MaterializeTemporaryExpr>(E);
// Weak declarations may be interposed at link time, so their addresses are
// not usable in constant comparisons.
1926 static bool IsWeakLValue(const LValue &Value) {
1927 const ValueDecl *Decl = GetLValueBaseDecl(Value);
1928 return Decl && Decl->isWeak();
// Detect variables of zero size (incomplete or zero-sized array types),
// whose addresses may coincide with those of other objects.
1931 static bool isZeroSized(const LValue &Value) {
1932 const ValueDecl *Decl = GetLValueBaseDecl(Value);
1933 if (Decl && isa<VarDecl>(Decl)) {
1934 QualType Ty = Decl->getType();
1935 if (Ty->isArrayType())
1936 return Ty->isIncompleteType() ||
1937 Decl->getASTContext().getTypeSize(Ty) == 0;
// Do two lvalues refer into the same complete object? Distinct bases that
// canonicalize to the same declaration (e.g. redeclarations) still match;
// matching opaque bases must also agree on call index and version so that
// locals from different calls/iterations are kept distinct.
1942 static bool HasSameBase(const LValue &A, const LValue &B) {
1943 if (!A.getLValueBase())
1944 return !B.getLValueBase();
1945 if (!B.getLValueBase())
1948 if (A.getLValueBase().getOpaqueValue() !=
1949 B.getLValueBase().getOpaqueValue()) {
1950 const Decl *ADecl = GetLValueBaseDecl(A);
1953 const Decl *BDecl = GetLValueBaseDecl(B);
1954 if (!BDecl || ADecl->getCanonicalDecl() != BDecl->getCanonicalDecl())
1958 return IsGlobalLValue(A.getLValueBase()) ||
1959 (A.getLValueCallIndex() == B.getLValueCallIndex() &&
1960 A.getLValueVersion() == B.getLValueVersion());
// Emit a "declared here" / "temporary created here" / "allocated here" note
// pointing at the source location of an lvalue's base object.
1963 static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
1964 assert(Base && "no location for a null lvalue");
1965 const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
1967 Info.Note(VD->getLocation(), diag::note_declared_at);
1968 else if (const Expr *E = Base.dyn_cast<const Expr*>())
1969 Info.Note(E->getExprLoc(), diag::note_constexpr_temporary_here);
1970 else if (DynamicAllocLValue DA = Base.dyn_cast<DynamicAllocLValue>()) {
1971 // FIXME: Produce a note for dangling pointers too.
1972 if (Optional<DynAlloc*> Alloc = Info.lookupDynamicAlloc(DA))
1973 Info.Note((*Alloc)->AllocExpr->getExprLoc(),
1974 diag::note_constexpr_dynamic_alloc_here)
1976 // We have no information to show for a typeid(T) object.
/// Selects which check CheckEvaluationResult performs: full
/// constant-expression validation, or the weaker fully-initialized check.
1979 enum class CheckEvaluationResultKind {
1984 /// Materialized temporaries that we've already checked to determine if they're
1985 /// initialized by a constant expression.
1986 using CheckedTemporaries =
1987 llvm::SmallPtrSet<const MaterializeTemporaryExpr *, 8>;
// Forward declaration: defined below, after the lvalue and member-pointer
// checks it mutually recurses with.
1989 static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
1990 EvalInfo &Info, SourceLocation DiagLoc,
1991 QualType Type, const APValue &Value,
1992 Expr::ConstExprUsage Usage,
1993 SourceLocation SubobjectLoc,
1994 CheckedTemporaries &CheckedTemps);
1996 /// Check that this reference or pointer core constant expression is a valid
1997 /// value for an address or reference constant expression. Return true if we
1998 /// can fold this expression, whether or not it's a constant expression.
1999 static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
2000 QualType Type, const LValue &LVal,
2001 Expr::ConstExprUsage Usage,
2002 CheckedTemporaries &CheckedTemps) {
2003 bool IsReferenceType = Type->isReferenceType();
2005 APValue::LValueBase Base = LVal.getLValueBase();
2006 const SubobjectDesignator &Designator = LVal.getLValueDesignator();
2008 // Check that the object is a global. Note that the fake 'this' object we
2009 // manufacture when checking potential constant expressions is conservatively
2010 // assumed to be global here.
2011 if (!IsGlobalLValue(Base)) {
2012 if (Info.getLangOpts().CPlusPlus11) {
2013 const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
2014 Info.FFDiag(Loc, diag::note_constexpr_non_global, 1)
2015 << IsReferenceType << !Designator.Entries.empty()
2017 NoteLValueLocation(Info, Base);
2021 // Don't allow references to temporaries to escape.
2024 assert((Info.checkingPotentialConstantExpression() ||
2025 LVal.getLValueCallIndex() == 0) &&
2026 "have call index for global lvalue");
2028 if (Base.is<DynamicAllocLValue>()) {
2029 Info.FFDiag(Loc, diag::note_constexpr_dynamic_alloc)
2030 << IsReferenceType << !Designator.Entries.empty();
2031 NoteLValueLocation(Info, Base);
2035 if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
2036 if (const VarDecl *Var = dyn_cast<const VarDecl>(VD)) {
2037 // Check if this is a thread-local variable.
2038 if (Var->getTLSKind())
2039 // FIXME: Diagnostic!
2042 // A dllimport variable never acts like a constant.
2043 if (Usage == Expr::EvaluateForCodeGen && Var->hasAttr<DLLImportAttr>())
2044 // FIXME: Diagnostic!
2047 if (const auto *FD = dyn_cast<const FunctionDecl>(VD)) {
2048 // __declspec(dllimport) must be handled very carefully:
2049 // We must never initialize an expression with the thunk in C++.
2050 // Doing otherwise would allow the same id-expression to yield
2051 // different addresses for the same function in different translation
2052 // units. However, this means that we must dynamically initialize the
2053 // expression with the contents of the import address table at runtime.
2055 // The C language has no notion of ODR; furthermore, it has no notion of
2056 // dynamic initialization. This means that we are permitted to
2057 // perform initialization with the address of the thunk.
2058 if (Info.getLangOpts().CPlusPlus && Usage == Expr::EvaluateForCodeGen &&
2059 FD->hasAttr<DLLImportAttr>())
2060 // FIXME: Diagnostic!
2063 } else if (const auto *MTE = dyn_cast_or_null<MaterializeTemporaryExpr>(
2064 Base.dyn_cast<const Expr *>())) {
2065 if (CheckedTemps.insert(MTE).second) {
2066 QualType TempType = getType(Base);
2067 if (TempType.isDestructedType()) {
2068 Info.FFDiag(MTE->getExprLoc(),
// NOTE: the diagnostic ID spelling below must match its declaration in
// DiagnosticASTKinds.td; do not "fix" it here alone.
2069 diag::note_constexpr_unsupported_tempoarary_nontrivial_dtor)
2074 APValue *V = MTE->getOrCreateValue(false);
2075 assert(V && "evaluation result refers to uninitialized temporary");
2076 if (!CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
2077 Info, MTE->getExprLoc(), TempType, *V,
2078 Usage, SourceLocation(), CheckedTemps))
2083 // Allow address constant expressions to be past-the-end pointers. This is
2084 // an extension: the standard requires them to point to an object.
2085 if (!IsReferenceType)
2088 // A reference constant expression must refer to an object.
2090 // FIXME: diagnostic
2095 // Does this refer one past the end of some object?
2096 if (!Designator.Invalid && Designator.isOnePastTheEnd()) {
2097 const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
2098 Info.FFDiag(Loc, diag::note_constexpr_past_end, 1)
2099 << !Designator.Entries.empty() << !!VD << VD;
2100 NoteLValueLocation(Info, Base);
2106 /// Member pointers are constant expressions unless they point to a
2107 /// non-virtual dllimport member function.
// (A non-virtual dllimport member's address would resolve to a per-TU
// thunk, so it cannot be a constant; virtual members go through the vtable.)
2108 static bool CheckMemberPointerConstantExpression(EvalInfo &Info,
2111 const APValue &Value,
2112 Expr::ConstExprUsage Usage) {
2113 const ValueDecl *Member = Value.getMemberPointerDecl();
2114 const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Member);
2117 return Usage == Expr::EvaluateForMangling || FD->isVirtual() ||
2118 !FD->hasAttr<DLLImportAttr>();
2121 /// Check that this core constant expression is of literal type, and if not,
2122 /// produce an appropriate diagnostic.
2123 static bool CheckLiteralType(EvalInfo &Info, const Expr *E,
2124 const LValue *This = nullptr) {
// Only prvalues are subject to the literal-type requirement.
2125 if (!E->isRValue() || E->getType()->isLiteralType(Info.Ctx))
2128 // C++1y: A constant initializer for an object o [...] may also invoke
2129 // constexpr constructors for o and its subobjects even if those objects
2130 // are of non-literal class types.
2132 // C++11 missed this detail for aggregates, so classes like this:
2133 // struct foo_t { union { int i; volatile int j; } u; };
2134 // are not (obviously) initializable like so:
2135 // __attribute__((__require_constant_initialization__))
2136 // static const foo_t x = {{0}};
2137 // because "i" is a subobject with non-literal initialization (due to the
2138 // volatile member of the union). See:
2139 // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1677
2140 // Therefore, we use the C++1y behavior.
2141 if (This && Info.EvaluatingDecl == This->getLValueBase())
2144 // Prvalue constant expressions must be of literal types.
2145 if (Info.getLangOpts().CPlusPlus11)
2146 Info.FFDiag(E, diag::note_constexpr_nonliteral)
2149 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
/// Recursively validate an evaluated APValue: reject uninitialized values
/// and, for arrays, unions and structs, check every subobject. When CERK is
/// ConstantExpression, lvalues and member pointers are additionally checked
/// for constant-expression validity.
2153 static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
2154 EvalInfo &Info, SourceLocation DiagLoc,
2155 QualType Type, const APValue &Value,
2156 Expr::ConstExprUsage Usage,
2157 SourceLocation SubobjectLoc,
2158 CheckedTemporaries &CheckedTemps) {
2159 if (!Value.hasValue()) {
2160 Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized)
2162 if (SubobjectLoc.isValid())
2163 Info.Note(SubobjectLoc, diag::note_constexpr_subobject_declared_here);
2167 // We allow _Atomic(T) to be initialized from anything that T can be
2168 // initialized from.
2169 if (const AtomicType *AT = Type->getAs<AtomicType>())
2170 Type = AT->getValueType();
2172 // Core issue 1454: For a literal constant expression of array or class type,
2173 // each subobject of its value shall have been initialized by a constant
2175 if (Value.isArray()) {
2176 QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType();
2177 for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
2178 if (!CheckEvaluationResult(CERK, Info, DiagLoc, EltTy,
2179 Value.getArrayInitializedElt(I), Usage,
2180 SubobjectLoc, CheckedTemps))
// The filler (if any) covers all remaining elements; one check suffices.
2183 if (!Value.hasArrayFiller())
2185 return CheckEvaluationResult(CERK, Info, DiagLoc, EltTy,
2186 Value.getArrayFiller(), Usage, SubobjectLoc,
2189 if (Value.isUnion() && Value.getUnionField()) {
2190 return CheckEvaluationResult(
2191 CERK, Info, DiagLoc, Value.getUnionField()->getType(),
2192 Value.getUnionValue(), Usage, Value.getUnionField()->getLocation(),
2195 if (Value.isStruct()) {
2196 RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
2197 if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
2198 unsigned BaseIndex = 0;
2199 for (const CXXBaseSpecifier &BS : CD->bases()) {
2200 if (!CheckEvaluationResult(CERK, Info, DiagLoc, BS.getType(),
2201 Value.getStructBase(BaseIndex), Usage,
2202 BS.getBeginLoc(), CheckedTemps))
2207 for (const auto *I : RD->fields()) {
// Unnamed bitfields are padding; they carry no value to check.
2208 if (I->isUnnamedBitfield())
2211 if (!CheckEvaluationResult(CERK, Info, DiagLoc, I->getType(),
2212 Value.getStructField(I->getFieldIndex()),
2213 Usage, I->getLocation(), CheckedTemps))
2218 if (Value.isLValue() &&
2219 CERK == CheckEvaluationResultKind::ConstantExpression) {
2221 LVal.setFrom(Info.Ctx, Value);
2222 return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal, Usage,
2226 if (Value.isMemberPointer() &&
2227 CERK == CheckEvaluationResultKind::ConstantExpression)
2228 return CheckMemberPointerConstantExpression(Info, DiagLoc, Type, Value, Usage);
2230 // Everything else is fine.
2234 /// Check that this core constant expression value is a valid value for a
2235 /// constant expression. If not, report an appropriate diagnostic. Does not
2236 /// check that the expression is of literal type.
// Thin wrapper over CheckEvaluationResult in ConstantExpression mode, with
// a fresh set of checked temporaries per top-level query.
2238 CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc, QualType Type,
2239 const APValue &Value,
2240 Expr::ConstExprUsage Usage = Expr::EvaluateForCodeGen) {
2241 CheckedTemporaries CheckedTemps;
2242 return CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
2243 Info, DiagLoc, Type, Value, Usage,
2244 SourceLocation(), CheckedTemps);
2247 /// Check that this evaluated value is fully-initialized and can be loaded by
2248 /// an lvalue-to-rvalue conversion.
// Unlike CheckConstantExpression, this does not require lvalues or member
// pointers in the value to themselves be constant expressions.
2249 static bool CheckFullyInitialized(EvalInfo &Info, SourceLocation DiagLoc,
2250 QualType Type, const APValue &Value) {
2251 CheckedTemporaries CheckedTemps;
2252 return CheckEvaluationResult(
2253 CheckEvaluationResultKind::FullyInitialized, Info, DiagLoc, Type, Value,
2254 Expr::EvaluateForCodeGen, SourceLocation(), CheckedTemps);
2257 /// Enforce C++2a [expr.const]/4.17, which disallows new-expressions unless
2258 /// "the allocated storage is deallocated within the evaluation".
2259 static bool CheckMemoryLeaks(EvalInfo &Info) {
2260 if (!Info.HeapAllocs.empty()) {
2261 // We can still fold to a constant despite a compile-time memory leak,
2262 // so long as the heap allocation isn't referenced in the result (we check
2263 // that in CheckConstantExpression).
// Note only the first leaked allocation; report how many more there are.
2264 Info.CCEDiag(Info.HeapAllocs.begin()->second.AllocExpr,
2265 diag::note_constexpr_memory_leak)
2266 << unsigned(Info.HeapAllocs.size() - 1);
/// Convert an evaluated pointer value to bool. Returns false (evaluation
/// failure) only when the truth of the pointer cannot be determined.
2271 static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
2272 // A null base expression indicates a null pointer. These are always
2273 // evaluatable, and they are false unless the offset is nonzero.
2274 if (!Value.getLValueBase()) {
2275 Result = !Value.getLValueOffset().isZero();
2279 // We have a non-null base. These are generally known to be true, but if it's
2280 // a weak declaration it can be null at runtime.
2282 const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>();
2283 return !Decl || !Decl->isWeak();
/// Convert an evaluated value of any APValue kind to bool, per the contextual
/// conversion rules. Returns false if the kind has no boolean conversion
/// (aggregates, vectors, address-label differences, indeterminate values).
2286 static bool HandleConversionToBool(const APValue &Val, bool &Result) {
2287 switch (Val.getKind()) {
2289 case APValue::Indeterminate:
2292 Result = Val.getInt().getBoolValue();
2294 case APValue::FixedPoint:
2295 Result = Val.getFixedPoint().getBoolValue();
2297 case APValue::Float:
2298 Result = !Val.getFloat().isZero();
2300 case APValue::ComplexInt:
// A complex value is true if either component is nonzero.
2301 Result = Val.getComplexIntReal().getBoolValue() ||
2302 Val.getComplexIntImag().getBoolValue();
2304 case APValue::ComplexFloat:
2305 Result = !Val.getComplexFloatReal().isZero() ||
2306 !Val.getComplexFloatImag().isZero();
2308 case APValue::LValue:
2309 return EvalPointerValueAsBool(Val, Result);
2310 case APValue::MemberPointer:
// A member pointer is true iff it points at a member (non-null).
2311 Result = Val.getMemberPointerDecl();
2313 case APValue::Vector:
2314 case APValue::Array:
2315 case APValue::Struct:
2316 case APValue::Union:
2317 case APValue::AddrLabelDiff:
2321 llvm_unreachable("unknown APValue kind");
/// Evaluate E (already converted to an rvalue) and contextually convert the
/// result to bool.
2324 static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
2326 assert(E->isRValue() && "missing lvalue-to-rvalue conv in bool condition");
2328 if (!Evaluate(Val, Info, E))
2330 return HandleConversionToBool(Val, Result);
/// Diagnose arithmetic overflow during evaluation. Returns true only when we
/// are permitted to continue folding despite the undefined behavior.
2333 template<typename T>
2334 static bool HandleOverflow(EvalInfo &Info, const Expr *E,
2335 const T &SrcValue, QualType DestType) {
2336 Info.CCEDiag(E, diag::note_constexpr_overflow)
2337 << SrcValue << DestType;
2338 return Info.noteUndefinedBehavior();
/// Convert a floating-point value to an integer of the destination type,
/// truncating toward zero; out-of-range conversions are overflow (UB).
2341 static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
2342 QualType SrcType, const APFloat &Value,
2343 QualType DestType, APSInt &Result) {
2344 unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
2345 // Determine whether we are converting to unsigned or signed.
2346 bool DestSigned = DestType->isSignedIntegerOrEnumerationType();
2348 Result = APSInt(DestWidth, !DestSigned);
// opInvalidOp is set when the (truncated) value is unrepresentable.
2350 if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored)
2351 & APFloat::opInvalidOp)
2352 return HandleOverflow(Info, E, Value, DestType)
/// Convert a floating-point value to another floating-point type, in place,
/// rounding to nearest (ties to even).
2356 static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
2357 QualType SrcType, QualType DestType,
2359 APFloat Value = Result;
2361 Result.convert(Info.Ctx.getFloatTypeSemantics(DestType),
2362 APFloat::rmNearestTiesToEven, &ignored);
/// Convert an integer value to another integer type: sign- or zero-extend,
/// truncate, or pass through, then adopt the destination's signedness.
/// Conversion to bool collapses to the value's truth.
2366 static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
2367 QualType DestType, QualType SrcType,
2368 const APSInt &Value) {
2369 unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
2370 // Figure out if this is a truncate, extend or noop cast.
2371 // If the input is signed, do a sign extend, noop, or truncate.
2372 APSInt Result = Value.extOrTrunc(DestWidth);
2373 Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType());
2374 if (DestType->isBooleanType())
2375 Result = Value.getBoolValue();
/// Convert an integer value to the destination floating-point type, rounding
/// to nearest (ties to even).
2379 static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
2380 QualType SrcType, const APSInt &Value,
2381 QualType DestType, APFloat &Result) {
2382 Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1);
2383 Result.convertFromAPInt(Value, Value.isSigned(),
2384 APFloat::rmNearestTiesToEven);
/// Truncate an integer value being stored into a bitfield to the field's
/// declared width, sign/zero-extending back to the original width so the
/// stored value reflects what a load of the bitfield would produce.
2388 static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E,
2389 APValue &Value, const FieldDecl *FD) {
2390 assert(FD->isBitField() && "truncateBitfieldValue on non-bitfield");
2392 if (!Value.isInt()) {
2393 // Trying to store a pointer-cast-to-integer into a bitfield.
2394 // FIXME: In this case, we should provide the diagnostic for casting
2395 // a pointer to an integer.
2396 assert(Value.isLValue() && "integral value neither int nor lvalue?");
2401 APSInt &Int = Value.getInt();
2402 unsigned OldBitWidth = Int.getBitWidth();
2403 unsigned NewBitWidth = FD->getBitWidthValue(Info.Ctx);
2404 if (NewBitWidth < OldBitWidth)
2405 Int = Int.trunc(NewBitWidth).extend(OldBitWidth);
/// Evaluate E and reinterpret its bits as a single APInt. Handles integer,
/// floating-point, and vector operands (vectors of int or float elements);
/// anything else is rejected as a non-constant subexpression.
2409 static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E,
2412 if (!Evaluate(SVal, Info, E))
2415 Res = SVal.getInt();
2418 if (SVal.isFloat()) {
2419 Res = SVal.getFloat().bitcastToAPInt();
2422 if (SVal.isVector()) {
2423 QualType VecTy = E->getType();
2424 unsigned VecSize = Info.Ctx.getTypeSize(VecTy);
2425 QualType EltTy = VecTy->castAs<VectorType>()->getElementType();
2426 unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
2427 bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
2428 Res = llvm::APInt::getNullValue(VecSize);
2429 for (unsigned i = 0; i < SVal.getVectorLength(); i++) {
2430 APValue &Elt = SVal.getVectorElt(i);
2431 llvm::APInt EltAsInt;
2433 EltAsInt = Elt.getInt();
2434 } else if (Elt.isFloat()) {
2435 EltAsInt = Elt.getFloat().bitcastToAPInt();
2437 // Don't try to handle vectors of anything other than int or float
2438 // (not sure if it's possible to hit this case).
2439 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
2442 unsigned BaseEltSize = EltAsInt.getBitWidth();
// Rotate each element's bits into position: endianness determines whether
// element i lands at the high or low end of the result.
2444 Res |= EltAsInt.zextOrTrunc(VecSize).rotr(i*EltSize+BaseEltSize);
2446 Res |= EltAsInt.zextOrTrunc(VecSize).rotl(i*EltSize);
2450 // Give up if the input isn't an int, float, or vector. For example, we
2451 // reject "(v4i16)(intptr_t)&a".
2452 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
2456 /// Perform the given integer operation, which is known to need at most BitWidth
2457 /// bits, and check for overflow in the original type (if that type was not an
2459 template<typename Operation>
2460 static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
2461 const APSInt &LHS, const APSInt &RHS,
2462 unsigned BitWidth, Operation Op,
// Unsigned arithmetic wraps by definition and cannot overflow (UB-wise).
2464 if (LHS.isUnsigned()) {
2465 Result = Op(LHS, RHS);
// Compute in a wider signed type, then detect overflow by checking that the
// truncated result round-trips back to the wide value.
2469 APSInt Value(Op(LHS.extend(BitWidth), RHS.extend(BitWidth)), false);
2470 Result = Value.trunc(LHS.getBitWidth());
2471 if (Result.extend(BitWidth) != Value) {
2472 if (Info.checkingForUndefinedBehavior())
2473 Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
2474 diag::warn_integer_constant_overflow)
2475 << Result.toString(10) << E->getType();
2477 return HandleOverflow(Info, E, Value, E->getType());
2482 /// Perform the given binary integer operation.
// Diagnoses and handles overflow, division by zero, and out-of-range or
// negative shifts according to the active language mode.
2483 static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
2484 BinaryOperatorKind Opcode, APSInt RHS,
2491 return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() * 2,
2492 std::multiplies<APSInt>(), Result);
2494 return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1,
2495 std::plus<APSInt>(), Result);
2497 return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1,
2498 std::minus<APSInt>(), Result);
2499 case BO_And: Result = LHS & RHS; return true;
2500 case BO_Xor: Result = LHS ^ RHS; return true;
2501 case BO_Or: Result = LHS | RHS; return true;
2505 Info.FFDiag(E, diag::note_expr_divide_by_zero);
2508 Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS);
2509 // Check for overflow case: INT_MIN / -1 or INT_MIN % -1. APSInt supports
2510 // this operation and gives the two's complement result.
2511 if (RHS.isNegative() && RHS.isAllOnesValue() &&
2512 LHS.isSigned() && LHS.isMinSignedValue())
2513 return HandleOverflow(Info, E, -LHS.extend(LHS.getBitWidth() + 1),
2517 if (Info.getLangOpts().OpenCL)
2518 // OpenCL 6.3j: shift values are effectively % word size of LHS.
2519 RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
2520 static_cast<uint64_t>(LHS.getBitWidth() - 1)),
2522 else if (RHS.isSigned() && RHS.isNegative()) {
2523 // During constant-folding, a negative shift is an opposite shift. Such
2524 // a shift is not a constant expression.
2525 Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
2530 // C++11 [expr.shift]p1: Shift width must be less than the bit width of
2531 // the shifted type.
2532 unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
2534 Info.CCEDiag(E, diag::note_constexpr_large_shift)
2535 << RHS << E->getType() << LHS.getBitWidth();
2536 } else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus2a) {
2537 // C++11 [expr.shift]p2: A signed left shift must have a non-negative
2538 // operand, and must not overflow the corresponding unsigned type.
2539 // C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to
2540 // E1 x 2^E2 modulo 2^N.
2541 if (LHS.isNegative())
2542 Info.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS;
2543 else if (LHS.countLeadingZeros() < SA)
2544 Info.CCEDiag(E, diag::note_constexpr_lshift_discards);
2550 if (Info.getLangOpts().OpenCL)
2551 // OpenCL 6.3j: shift values are effectively % word size of LHS.
2552 RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
2553 static_cast<uint64_t>(LHS.getBitWidth() - 1)),
2555 else if (RHS.isSigned() && RHS.isNegative()) {
2556 // During constant-folding, a negative shift is an opposite shift. Such a
2557 // shift is not a constant expression.
2558 Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
2563 // C++11 [expr.shift]p1: Shift width must be less than the bit width of the
2565 unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
2567 Info.CCEDiag(E, diag::note_constexpr_large_shift)
2568 << RHS << E->getType() << LHS.getBitWidth();
2573 case BO_LT: Result = LHS < RHS; return true;
2574 case BO_GT: Result = LHS > RHS; return true;
2575 case BO_LE: Result = LHS <= RHS; return true;
2576 case BO_GE: Result = LHS >= RHS; return true;
2577 case BO_EQ: Result = LHS == RHS; return true;
2578 case BO_NE: Result = LHS != RHS; return true;
2580 llvm_unreachable("BO_Cmp should be handled elsewhere");
2584 /// Perform the given binary floating-point operation, in-place, on LHS.
// Uses round-to-nearest (ties to even); a NaN result is diagnosed as
// mathematically undefined per C++ [expr]/4.
2585 static bool handleFloatFloatBinOp(EvalInfo &Info, const Expr *E,
2586 APFloat &LHS, BinaryOperatorKind Opcode,
2587 const APFloat &RHS) {
2593 LHS.multiply(RHS, APFloat::rmNearestTiesToEven);
2596 LHS.add(RHS, APFloat::rmNearestTiesToEven);
2599 LHS.subtract(RHS, APFloat::rmNearestTiesToEven);
2603 // If the second operand of / or % is zero the behavior is undefined.
2605 Info.CCEDiag(E, diag::note_expr_divide_by_zero);
2606 LHS.divide(RHS, APFloat::rmNearestTiesToEven);
2611 // If during the evaluation of an expression, the result is not
2612 // mathematically defined [...], the behavior is undefined.
2613 // FIXME: C++ rules require us to not conform to IEEE 754 here.
2615 Info.CCEDiag(E, diag::note_constexpr_float_arithmetic) << LHS.isNaN();
2616 return Info.noteUndefinedBehavior();
2621 /// Cast an lvalue referring to a base subobject to a derived class, by
2622 /// truncating the lvalue's path to the given length.
2623 static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
2624 const RecordDecl *TruncatedType,
2625 unsigned TruncatedElements) {
2626 SubobjectDesignator &D = Result.Designator;
2628 // Check we actually point to a derived class object.
2629 if (TruncatedElements == D.Entries.size())
2631 assert(TruncatedElements >= D.MostDerivedPathLength &&
2632 "not casting to a derived class");
2633 if (!Result.checkSubobject(Info, E, CSK_Derived))
2636 // Truncate the path to the subobject, and remove any derived-to-base offsets.
2637 const RecordDecl *RD = TruncatedType;
2638 for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) {
2639 if (RD->isInvalidDecl()) return false;
2640 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
2641 const CXXRecordDecl *Base = getAsBaseClass(D.Entries[I]);
// Subtract each base-class offset accumulated on the way down, so Offset
// again designates the derived object.
2642 if (isVirtualBaseClass(D.Entries[I]))
2643 Result.Offset -= Layout.getVBaseClassOffset(Base);
2645 Result.Offset -= Layout.getBaseClassOffset(Base);
2648 D.Entries.resize(TruncatedElements);
/// Update Obj to refer to the given direct (non-virtual) base class subobject
/// of Derived, adjusting the offset and the designator path.
2652 static bool HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj,
2653 const CXXRecordDecl *Derived,
2654 const CXXRecordDecl *Base,
2655 const ASTRecordLayout *RL = nullptr) {
2657 if (Derived->isInvalidDecl()) return false;
2658 RL = &Info.Ctx.getASTRecordLayout(Derived);
2661 Obj.getLValueOffset() += RL->getBaseClassOffset(Base);
2662 Obj.addDecl(Info, E, Base, /*Virtual*/ false);
/// Update Obj to refer to the given base class subobject, handling both
/// direct and virtual bases. A virtual base is located by first casting back
/// to the most-derived object and consulting its layout.
2666 static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
2667 const CXXRecordDecl *DerivedDecl,
2668 const CXXBaseSpecifier *Base) {
2669 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
2671 if (!Base->isVirtual())
2672 return HandleLValueDirectBase(Info, E, Obj, DerivedDecl, BaseDecl);
2674 SubobjectDesignator &D = Obj.Designator;
2678 // Extract most-derived object and corresponding type.
2679 DerivedDecl = D.MostDerivedType->getAsCXXRecordDecl();
2680 if (!CastToDerivedClass(Info, E, Obj, DerivedDecl, D.MostDerivedPathLength))
2683 // Find the virtual base class.
2684 if (DerivedDecl->isInvalidDecl()) return false;
2685 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(DerivedDecl);
2686 Obj.getLValueOffset() += Layout.getVBaseClassOffset(BaseDecl);
2687 Obj.addDecl(Info, E, BaseDecl, /*Virtual*/ true);
/// Walk the base-class path recorded on a cast expression, updating Result to
/// refer to the corresponding base subobject step by step.
2691 static bool HandleLValueBasePath(EvalInfo &Info, const CastExpr *E,
2692 QualType Type, LValue &Result) {
2693 for (CastExpr::path_const_iterator PathI = E->path_begin(),
2694 PathE = E->path_end();
2695 PathI != PathE; ++PathI) {
2696 if (!HandleLValueBase(Info, E, Result, Type->getAsCXXRecordDecl(),
2699 Type = (*PathI)->getType();
2704 /// Cast an lvalue referring to a derived class to a known base subobject.
// Computes a (unique, non-virtual-aware) inheritance path and applies each
// step with HandleLValueBase.
2705 static bool CastToBaseClass(EvalInfo &Info, const Expr *E, LValue &Result,
2706 const CXXRecordDecl *DerivedRD,
2707 const CXXRecordDecl *BaseRD) {
2708 CXXBasePaths Paths(/*FindAmbiguities=*/false,
2709 /*RecordPaths=*/true, /*DetectVirtual=*/false);
2710 if (!DerivedRD->isDerivedFrom(BaseRD, Paths))
2711 llvm_unreachable("Class must be derived from the passed in base class!");
2713 for (CXXBasePathElement &Elem : Paths.front())
2714 if (!HandleLValueBase(Info, E, Result, Elem.Class, Elem.Base))
2719 /// Update LVal to refer to the given field, which must be a member of the type
2720 /// currently described by LVal.
2721 static bool HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
2722 const FieldDecl *FD,
2723 const ASTRecordLayout *RL = nullptr) {
2725 if (FD->getParent()->isInvalidDecl()) return false;
2726 RL = &Info.Ctx.getASTRecordLayout(FD->getParent());
// Advance the offset by the field's position and extend the designator.
2729 unsigned I = FD->getFieldIndex();
2730 LVal.adjustOffset(Info.Ctx.toCharUnitsFromBits(RL->getFieldOffset(I)));
2731 LVal.addDecl(Info, E, FD);
2735 /// Update LVal to refer to the given indirect field.
// An indirect field (member of an anonymous struct/union) is reached by
// applying each field in its chain in turn.
2736 static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
2738 const IndirectFieldDecl *IFD) {
2739 for (const auto *C : IFD->chain())
2740 if (!HandleLValueMember(Info, E, LVal, cast<FieldDecl>(C)))
2745 /// Get the size of the given type in char units.
2746 static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc,
2747 QualType Type, CharUnits &Size) {
2748 // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc
2750 if (Type->isVoidType() || Type->isFunctionType()) {
2751 Size = CharUnits::One();
2755 if (Type->isDependentType()) {
2760 if (!Type->isConstantSizeType()) {
2761 // sizeof(vla) is not a constantexpr: C99 6.5.3.4p2.
2762 // FIXME: Better diagnostic.
2767 Size = Info.Ctx.getTypeSizeInChars(Type);
2771 /// Update a pointer value to model pointer arithmetic.
2772 /// \param Info - Information about the ongoing evaluation.
2773 /// \param E - The expression being evaluated, for diagnostic purposes.
2774 /// \param LVal - The pointer value to be updated.
2775 /// \param EltTy - The pointee type represented by LVal.
2776 /// \param Adjustment - The adjustment, in objects of type EltTy, to add.
2777 static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
2778 LValue &LVal, QualType EltTy,
2779 APSInt Adjustment) {
2780 CharUnits SizeOfPointee;
2781 if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfPointee))
2784 LVal.adjustOffsetAndIndex(Info, E, Adjustment, SizeOfPointee);
/// Convenience overload of the above taking the adjustment as a plain
/// 64-bit integer.
2788 static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
2789 LValue &LVal, QualType EltTy,
2790 int64_t Adjustment) {
2791 return HandleLValueArrayAdjustment(Info, E, LVal, EltTy,
2792 APSInt::get(Adjustment));
2795 /// Update an lvalue to refer to a component of a complex number.
2796 /// \param Info - Information about the ongoing evaluation.
2797 /// \param LVal - The lvalue to be updated.
2798 /// \param EltTy - The complex number's component type.
2799 /// \param Imag - False for the real component, true for the imaginary.
2800 static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
2801 LValue &LVal, QualType EltTy,
2804 CharUnits SizeOfComponent;
2805 if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfComponent))
// The imaginary component is laid out immediately after the real one.
2807 LVal.Offset += SizeOfComponent;
2809 LVal.addComplex(Info, E, EltTy, Imag);
2813 /// Try to evaluate the initializer for a variable declaration.
2815 /// \param Info Information about the ongoing evaluation.
2816 /// \param E An expression to be used when printing diagnostics.
2817 /// \param VD The variable whose initializer should be obtained.
2818 /// \param Frame The frame in which the variable was created. Must be null
2819 /// if this variable is not local to the evaluation.
2820 /// \param Result Filled in with a pointer to the value of the variable.
2821 static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
2822 const VarDecl *VD, CallStackFrame *Frame,
2823 APValue *&Result, const LValue *LVal) {
2825 // If this is a parameter to an active constexpr function call, perform
2826 // argument substitution.
2827 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) {
2828 // Assume arguments of a potential constant expression are unknown
2829 // constant expressions.
2830 if (Info.checkingPotentialConstantExpression())
2832 if (!Frame || !Frame->Arguments) {
2833 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
2836 Result = &Frame->Arguments[PVD->getFunctionScopeIndex()];
2840 // If this is a local variable, dig out its value.
2842 Result = LVal ? Frame->getTemporary(VD, LVal->getLValueVersion())
2843 : Frame->getCurrentTemporary(VD);
2845 // Assume variables referenced within a lambda's call operator that were
2846 // not declared within the call operator are captures and during checking
2847 // of a potential constant expression, assume they are unknown constant
2849 assert(isLambdaCallOperator(Frame->Callee) &&
2850 (VD->getDeclContext() != Frame->Callee || VD->isInitCapture()) &&
2851 "missing value for local variable");
2852 if (Info.checkingPotentialConstantExpression())
2854 // FIXME: implement capture evaluation during constant expr evaluation.
2855 Info.FFDiag(E->getBeginLoc(),
2856 diag::note_unimplemented_constexpr_lambda_feature_ast)
2857 << "captures not currently allowed";
2863 // Dig out the initializer, and use the declaration which it's attached to.
2864 const Expr *Init = VD->getAnyInitializer(VD);
2865 if (!Init || Init->isValueDependent()) {
2866 // If we're checking a potential constant expression, the variable could be
2867 // initialized later.
2868 if (!Info.checkingPotentialConstantExpression())
2869 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
2873 // If we're currently evaluating the initializer of this declaration, use that
2875 if (Info.EvaluatingDecl.dyn_cast<const ValueDecl*>() == VD) {
2876 Result = Info.EvaluatingDeclValue;
2880 // Never evaluate the initializer of a weak variable. We can't be sure that
2881 // this is the definition which will be used.
2883 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
2887 // Check that we can fold the initializer. In C++, we will have already done
2888 // this in the cases where it matters for conformance.
2889 SmallVector<PartialDiagnosticAt, 8> Notes;
2890 if (!VD->evaluateValue(Notes)) {
2891 Info.FFDiag(E, diag::note_constexpr_var_init_non_constant,
2892 Notes.size() + 1) << VD;
2893 Info.Note(VD->getLocation(), diag::note_declared_at);
2894 Info.addNotes(Notes);
// Foldable but not an ICE: diagnose (continuable) rather than fail.
2896 } else if (!VD->checkInitIsICE()) {
2897 Info.CCEDiag(E, diag::note_constexpr_var_init_non_constant,
2898 Notes.size() + 1) << VD;
2899 Info.Note(VD->getLocation(), diag::note_declared_at);
2900 Info.addNotes(Notes);
2903 Result = VD->getEvaluatedValue();
/// Is this type const-qualified and not volatile-qualified?
2907 static bool IsConstNonVolatile(QualType T) {
2908 Qualifiers Quals = T.getQualifiers();
2909 return Quals.hasConst() && !Quals.hasVolatile();
2912 /// Get the base index of the given base class within an APValue representing
2913 /// the given derived class.
2914 static unsigned getBaseIndex(const CXXRecordDecl *Derived,
2915 const CXXRecordDecl *Base) {
// Compare canonical declarations so redeclarations match.
2916 Base = Base->getCanonicalDecl();
2918 for (CXXRecordDecl::base_class_const_iterator I = Derived->bases_begin(),
2919 E = Derived->bases_end(); I != E; ++I, ++Index) {
2920 if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base)
2924 llvm_unreachable("base class missing from derived class's bases list");
2927 /// Extract the value of a character from a string literal.
2928 static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
2930 assert(!isa<SourceLocExpr>(Lit) &&
2931 "SourceLocExpr should have already been converted to a StringLiteral");
2933 // FIXME: Support MakeStringConstant
2934 if (const auto *ObjCEnc = dyn_cast<ObjCEncodeExpr>(Lit)) {
2936 Info.Ctx.getObjCEncodingForType(ObjCEnc->getEncodedType(), Str);
2937 assert(Index <= Str.size() && "Index too large");
2938 return APSInt::getUnsigned(Str.c_str()[Index]);
2941 if (auto PE = dyn_cast<PredefinedExpr>(Lit))
2942 Lit = PE->getFunctionName();
2943 const StringLiteral *S = cast<StringLiteral>(Lit);
2944 const ConstantArrayType *CAT =
2945 Info.Ctx.getAsConstantArrayType(S->getType());
2946 assert(CAT && "string literal isn't an array");
2947 QualType CharType = CAT->getElementType();
2948 assert(CharType->isIntegerType() && "unexpected character type");
2950 APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(),
2951 CharType->isUnsignedIntegerType());
2952 if (Index < S->getLength())
2953 Value = S->getCodeUnit(Index);
2957 // Expand a string literal into an array of characters.
2959 // FIXME: This is inefficient; we should probably introduce something similar
2960 // to the LLVM ConstantDataArray to make this cheaper.
2961 static void expandStringLiteral(EvalInfo &Info, const StringLiteral *S,
2963 QualType AllocType = QualType()) {
2964 const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(
2965 AllocType.isNull() ? S->getType() : AllocType);
2966 assert(CAT && "string literal isn't an array");
2967 QualType CharType = CAT->getElementType();
2968 assert(CharType->isIntegerType() && "unexpected character type");
2970 unsigned Elts = CAT->getSize().getZExtValue();
2971 Result = APValue(APValue::UninitArray(),
2972 std::min(S->getLength(), Elts), Elts);
2973 APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(),
2974 CharType->isUnsignedIntegerType());
2975 if (Result.hasArrayFiller())
2976 Result.getArrayFiller() = APValue(Value);
2977 for (unsigned I = 0, N = Result.getArrayInitializedElts(); I != N; ++I) {
2978 Value = S->getCodeUnit(I);
2979 Result.getArrayInitializedElt(I) = APValue(Value);
2983 // Expand an array so that it has more than Index filled elements.
2984 static void expandArray(APValue &Array, unsigned Index) {
2985 unsigned Size = Array.getArraySize();
2986 assert(Index < Size);
2988 // Always at least double the number of elements for which we store a value.
2989 unsigned OldElts = Array.getArrayInitializedElts();
2990 unsigned NewElts = std::max(Index+1, OldElts * 2);
2991 NewElts = std::min(Size, std::max(NewElts, 8u));
2993 // Copy the data across.
2994 APValue NewValue(APValue::UninitArray(), NewElts, Size);
2995 for (unsigned I = 0; I != OldElts; ++I)
2996 NewValue.getArrayInitializedElt(I).swap(Array.getArrayInitializedElt(I));
2997 for (unsigned I = OldElts; I != NewElts; ++I)
2998 NewValue.getArrayInitializedElt(I) = Array.getArrayFiller();
2999 if (NewValue.hasArrayFiller())
3000 NewValue.getArrayFiller() = Array.getArrayFiller();
3001 Array.swap(NewValue);
3004 /// Determine whether a type would actually be read by an lvalue-to-rvalue
3005 /// conversion. If it's of class type, we may assume that the copy operation
3006 /// is trivial. Note that this is never true for a union type with fields
3007 /// (because the copy always "reads" the active member) and always true for
3008 /// a non-class type.
3009 static bool isReadByLvalueToRvalueConversion(QualType T) {
3010 CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
3011 if (!RD || (RD->isUnion() && !RD->field_empty()))
3016 for (auto *Field : RD->fields())
3017 if (isReadByLvalueToRvalueConversion(Field->getType()))
3020 for (auto &BaseSpec : RD->bases())
3021 if (isReadByLvalueToRvalueConversion(BaseSpec.getType()))
3027 /// Diagnose an attempt to read from any unreadable field within the specified
3028 /// type, which might be a class type.
3029 static bool diagnoseMutableFields(EvalInfo &Info, const Expr *E, AccessKinds AK,
3031 CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
3035 if (!RD->hasMutableFields())
3038 for (auto *Field : RD->fields()) {
3039 // If we're actually going to read this field in some way, then it can't
3040 // be mutable. If we're in a union, then assigning to a mutable field
3041 // (even an empty one) can change the active member, so that's not OK.
3042 // FIXME: Add core issue number for the union case.
3043 if (Field->isMutable() &&
3044 (RD->isUnion() || isReadByLvalueToRvalueConversion(Field->getType()))) {
3045 Info.FFDiag(E, diag::note_constexpr_access_mutable, 1) << AK << Field;
3046 Info.Note(Field->getLocation(), diag::note_declared_at);
3050 if (diagnoseMutableFields(Info, E, AK, Field->getType()))
3054 for (auto &BaseSpec : RD->bases())
3055 if (diagnoseMutableFields(Info, E, AK, BaseSpec.getType()))
3058 // All mutable fields were empty, and thus not actually read.
/// Determine whether the lifetime of the object designated by Base began
/// within the current constant evaluation (relevant for what C++14/C++2a
/// permit to be read or modified during evaluation).
static bool lifetimeStartedInEvaluation(EvalInfo &Info,
                                        APValue::LValueBase Base,
                                        bool MutableSubobject = false) {
  // A temporary we created.
  if (Base.getCallIndex())

  // The declaration whose initializer/destructor is currently being evaluated
  // (if any).
  auto *Evaluating = Info.EvaluatingDecl.dyn_cast<const ValueDecl*>();

  auto *BaseD = Base.dyn_cast<const ValueDecl*>();

  switch (Info.IsEvaluatingDecl) {
  case EvalInfo::EvaluatingDeclKind::None:

  case EvalInfo::EvaluatingDeclKind::Ctor:
    // The variable whose initializer we're evaluating.
      return declaresSameEntity(Evaluating, BaseD);

    // A temporary lifetime-extended by the variable whose initializer we're
    if (auto *BaseE = Base.dyn_cast<const Expr *>())
      if (auto *BaseMTE = dyn_cast<MaterializeTemporaryExpr>(BaseE))
        return declaresSameEntity(BaseMTE->getExtendingDecl(), Evaluating);

  case EvalInfo::EvaluatingDeclKind::Dtor:
    // C++2a [expr.const]p6:
    //   [during constant destruction] the lifetime of a and its non-mutable
    //   subobjects (but not its mutable subobjects) [are] considered to start
    //
    // FIXME: We can meaningfully extend this to cover non-const objects, but
    // we will need special handling: we should be able to access only
    // subobjects of such objects that are themselves declared const.
        // Only const (or reference-typed) bases qualify here.
        !(BaseD->getType().isConstQualified() ||
          BaseD->getType()->isReferenceType()) ||
    return declaresSameEntity(Evaluating, BaseD);

  llvm_unreachable("unknown evaluating decl kind");
/// A handle to a complete object (an object that is not a subobject of
/// another object).
struct CompleteObject {
  /// The identity of the object.
  APValue::LValueBase Base;
  /// The value of the complete object.
  /// The type of the complete object.

  // Default state: null value; Type stays null so operator bool (below)
  // yields false.
  CompleteObject() : Value(nullptr) {}
  CompleteObject(APValue::LValueBase Base, APValue *Value, QualType Type)
      : Base(Base), Value(Value), Type(Type) {}

  // Whether 'mutable' members of this object may be accessed for the given
  // access kind during the current evaluation.
  bool mayAccessMutableMembers(EvalInfo &Info, AccessKinds AK) const {
    // In C++14 onwards, it is permitted to read a mutable member whose
    // lifetime began within the evaluation.
    // FIXME: Should we also allow this in C++11?
    if (!Info.getLangOpts().CPlusPlus14)
    return lifetimeStartedInEvaluation(Info, Base, /*MutableSubobject*/true);

  // True when this handle designates a real object (Type was set).
  explicit operator bool() const { return !Type.isNull(); }
3137 } // end anonymous namespace
3139 static QualType getSubobjectType(QualType ObjType, QualType SubobjType,
3140 bool IsMutable = false) {
3141 // C++ [basic.type.qualifier]p1:
3142 // - A const object is an object of type const T or a non-mutable subobject
3143 // of a const object.
3144 if (ObjType.isConstQualified() && !IsMutable)
3145 SubobjType.addConst();
3146 // - A volatile object is an object of type const T or a subobject of a
3148 if (ObjType.isVolatileQualified())
3149 SubobjType.addVolatile();
/// Find the designated sub-object of an rvalue.
///
/// Walks Sub.Entries (array indices, complex components, fields, and base
/// classes) from the complete object Obj down to the designated subobject,
/// then hands it to the handler. The handler supplies an AccessKind, a
/// failed() result, and found() overloads for APValue, APSInt and APFloat.
template<typename SubobjectHandler>
typename SubobjectHandler::result_type
findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
              const SubobjectDesignator &Sub, SubobjectHandler &handler) {
    // A diagnostic will have already been produced.
    return handler.failed();
  if (Sub.isOnePastTheEnd() || Sub.isMostDerivedAnUnsizedArray()) {
    if (Info.getLangOpts().CPlusPlus11)
      Info.FFDiag(E, Sub.isOnePastTheEnd()
                         ? diag::note_constexpr_access_past_end
                         : diag::note_constexpr_access_unsized_array)
          << handler.AccessKind;
    return handler.failed();

  // Current subobject value and its (qualified) type as we descend.
  APValue *O = Obj.Value;
  QualType ObjType = Obj.Type;
  const FieldDecl *LastField = nullptr;
  const FieldDecl *VolatileField = nullptr;

  // Walk the designator's path to find the subobject.
  for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) {
    // Reading an indeterminate value is undefined, but assigning over one is OK.
    if ((O->isAbsent() && !(handler.AccessKind == AK_Construct && I == N)) ||
        (O->isIndeterminate() && handler.AccessKind != AK_Construct &&
         handler.AccessKind != AK_Assign &&
         handler.AccessKind != AK_ReadObjectRepresentation)) {
      if (!Info.checkingPotentialConstantExpression())
        Info.FFDiag(E, diag::note_constexpr_access_uninit)
            << handler.AccessKind << O->isIndeterminate();
      return handler.failed();

    // C++ [class.ctor]p5, C++ [class.dtor]p5:
    //    const and volatile semantics are not applied on an object under
    //    {con,de}struction.
    if ((ObjType.isConstQualified() || ObjType.isVolatileQualified()) &&
        ObjType->isRecordType() &&
        Info.isEvaluatingCtorDtor(
            Obj.Base, llvm::makeArrayRef(Sub.Entries.begin(),
                                         Sub.Entries.begin() + I)) !=
            ConstructionPhase::None) {
      ObjType = Info.Ctx.getCanonicalType(ObjType);
      ObjType.removeLocalConst();
      ObjType.removeLocalVolatile();

    // If this is our last pass, check that the final object type is OK.
    if (I == N || (I == N - 1 && ObjType->isAnyComplexType())) {
      // Accesses to volatile objects are prohibited.
      if (ObjType.isVolatileQualified() && isFormalAccess(handler.AccessKind)) {
        if (Info.getLangOpts().CPlusPlus) {
          // Pick the most precise location/entity to blame for the volatile.
          const NamedDecl *Decl = nullptr;
          if (VolatileField) {
            Loc = VolatileField->getLocation();
            Decl = VolatileField;
          } else if (auto *VD = Obj.Base.dyn_cast<const ValueDecl*>()) {
            Loc = VD->getLocation();
          if (auto *E = Obj.Base.dyn_cast<const Expr *>())
            Loc = E->getExprLoc();
          Info.FFDiag(E, diag::note_constexpr_access_volatile_obj, 1)
              << handler.AccessKind << DiagKind << Decl;
          Info.Note(Loc, diag::note_constexpr_volatile_here) << DiagKind;
          Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
        return handler.failed();

      // If we are reading an object of class type, there may still be more
      // things we need to check: if there are any mutable subobjects, we
      // cannot perform this read. (This only happens when performing a trivial
      // copy or assignment.)
      if (ObjType->isRecordType() &&
          !Obj.mayAccessMutableMembers(Info, handler.AccessKind) &&
          diagnoseMutableFields(Info, E, handler.AccessKind, ObjType))
        return handler.failed();

      if (!handler.found(*O, ObjType))

      // If we modified a bit-field, truncate it to the right width.
      if (isModification(handler.AccessKind) &&
          LastField && LastField->isBitField() &&
          !truncateBitfieldValue(Info, E, *O, LastField))

    LastField = nullptr;
    if (ObjType->isArrayType()) {
      // Next subobject is an array element.
      const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(ObjType);
      assert(CAT && "vla in literal type?");
      uint64_t Index = Sub.Entries[I].getAsArrayIndex();
      if (CAT->getSize().ule(Index)) {
        // Note, it should not be possible to form a pointer with a valid
        // designator which points more than one past the end of the array.
        if (Info.getLangOpts().CPlusPlus11)
          Info.FFDiag(E, diag::note_constexpr_access_past_end)
              << handler.AccessKind;
        return handler.failed();

      ObjType = CAT->getElementType();

      // Descend into the element: expand the array if a write needs a slot
      // that only exists as filler so far.
      if (O->getArrayInitializedElts() > Index)
        O = &O->getArrayInitializedElt(Index);
      else if (!isRead(handler.AccessKind)) {
        expandArray(*O, Index);
        O = &O->getArrayInitializedElt(Index);
        O = &O->getArrayFiller();
    } else if (ObjType->isAnyComplexType()) {
      // Next subobject is a complex number.
      uint64_t Index = Sub.Entries[I].getAsArrayIndex();
        if (Info.getLangOpts().CPlusPlus11)
          Info.FFDiag(E, diag::note_constexpr_access_past_end)
              << handler.AccessKind;
        return handler.failed();

      // Index 0 designates the real component, 1 the imaginary component.
      ObjType = getSubobjectType(
          ObjType, ObjType->castAs<ComplexType>()->getElementType());

      assert(I == N - 1 && "extracting subobject of scalar?");
      if (O->isComplexInt()) {
        return handler.found(Index ? O->getComplexIntImag()
                                   : O->getComplexIntReal(), ObjType);
        assert(O->isComplexFloat());
        return handler.found(Index ? O->getComplexFloatImag()
                                   : O->getComplexFloatReal(), ObjType);
    } else if (const FieldDecl *Field = getAsField(Sub.Entries[I])) {
      // Mutable fields may only be accessed when this object permits it.
      if (Field->isMutable() &&
          !Obj.mayAccessMutableMembers(Info, handler.AccessKind)) {
        Info.FFDiag(E, diag::note_constexpr_access_mutable, 1)
            << handler.AccessKind << Field;
        Info.Note(Field->getLocation(), diag::note_declared_at);
        return handler.failed();

      // Next subobject is a class, struct or union field.
      RecordDecl *RD = ObjType->castAs<RecordType>()->getDecl();
      if (RD->isUnion()) {
        const FieldDecl *UnionField = O->getUnionField();
            UnionField->getCanonicalDecl() != Field->getCanonicalDecl()) {
          if (I == N - 1 && handler.AccessKind == AK_Construct) {
            // Placement new onto an inactive union member makes it active.
            O->setUnion(Field, APValue());
            // FIXME: If O->getUnionValue() is absent, report that there's no
            // active union member rather than reporting the prior active union
            // member. We'll need to fix nullptr_t to not use APValue() as its
            // representation first.
            Info.FFDiag(E, diag::note_constexpr_access_inactive_union_member)
                << handler.AccessKind << Field << !UnionField << UnionField;
            return handler.failed();
        O = &O->getUnionValue();
        O = &O->getStructField(Field->getFieldIndex());

      ObjType = getSubobjectType(ObjType, Field->getType(), Field->isMutable());

      // Remember the innermost volatile field we pass, for the diagnostic
      // above on the final access check.
      if (Field->getType().isVolatileQualified())
        VolatileField = Field;
      // Next subobject is a base class.
      const CXXRecordDecl *Derived = ObjType->getAsCXXRecordDecl();
      const CXXRecordDecl *Base = getAsBaseClass(Sub.Entries[I]);
      O = &O->getStructBase(getBaseIndex(Derived, Base));

      ObjType = getSubobjectType(ObjType, Info.Ctx.getRecordType(Base));
// findSubobject handler that extracts (reads) the designated subobject's
// value into Result.
struct ExtractSubobjectHandler {
  const AccessKinds AccessKind;

  typedef bool result_type;
  bool failed() { return false; }
  bool found(APValue &Subobj, QualType SubobjType) {
    // An object-representation read does not require full initialization.
    if (AccessKind == AK_ReadObjectRepresentation)
    // A value read does: reject partially-initialized results.
    return CheckFullyInitialized(Info, E->getExprLoc(), SubobjType, Result);
  bool found(APSInt &Value, QualType SubobjType) {
    Result = APValue(Value);
  bool found(APFloat &Value, QualType SubobjType) {
    Result = APValue(Value);
3378 } // end anonymous namespace
3380 /// Extract the designated sub-object of an rvalue.
3381 static bool extractSubobject(EvalInfo &Info, const Expr *E,
3382 const CompleteObject &Obj,
3383 const SubobjectDesignator &Sub, APValue &Result,
3384 AccessKinds AK = AK_Read) {
3385 assert(AK == AK_Read || AK == AK_ReadObjectRepresentation);
3386 ExtractSubobjectHandler Handler = {Info, E, Result, AK};
3387 return findSubobject(Info, E, Obj, Sub, Handler);
// findSubobject handler that overwrites the designated subobject with NewVal
// (of which it has been given ownership).
struct ModifySubobjectHandler {

  typedef bool result_type;
  static const AccessKinds AccessKind = AK_Assign;

  // Reject writes through a const-qualified path.
  bool checkConst(QualType QT) {
    // Assigning to a const object has undefined behavior.
    if (QT.isConstQualified()) {
      Info.FFDiag(E, diag::note_constexpr_modify_const_type) << QT;

  bool failed() { return false; }
  bool found(APValue &Subobj, QualType SubobjType) {
    if (!checkConst(SubobjType))
    // We've been given ownership of NewVal, so just swap it in.
    Subobj.swap(NewVal);
  bool found(APSInt &Value, QualType SubobjType) {
    if (!checkConst(SubobjType))
    if (!NewVal.isInt()) {
      // Maybe trying to write a cast pointer value into a complex?
    Value = NewVal.getInt();
  bool found(APFloat &Value, QualType SubobjType) {
    if (!checkConst(SubobjType))
    Value = NewVal.getFloat();
3434 } // end anonymous namespace
// Out-of-line definition of the in-class 'static const' member (required for
// odr-use prior to C++17 inline variables).
const AccessKinds ModifySubobjectHandler::AccessKind;
3438 /// Update the designated sub-object of an rvalue to the given value.
3439 static bool modifySubobject(EvalInfo &Info, const Expr *E,
3440 const CompleteObject &Obj,
3441 const SubobjectDesignator &Sub,
3443 ModifySubobjectHandler Handler = { Info, NewVal, E };
3444 return findSubobject(Info, E, Obj, Sub, Handler);
3447 /// Find the position where two subobject designators diverge, or equivalently
3448 /// the length of the common initial subsequence.
3449 static unsigned FindDesignatorMismatch(QualType ObjType,
3450 const SubobjectDesignator &A,
3451 const SubobjectDesignator &B,
3452 bool &WasArrayIndex) {
3453 unsigned I = 0, N = std::min(A.Entries.size(), B.Entries.size());
3454 for (/**/; I != N; ++I) {
3455 if (!ObjType.isNull() &&
3456 (ObjType->isArrayType() || ObjType->isAnyComplexType())) {
3457 // Next subobject is an array element.
3458 if (A.Entries[I].getAsArrayIndex() != B.Entries[I].getAsArrayIndex()) {
3459 WasArrayIndex = true;
3462 if (ObjType->isAnyComplexType())
3463 ObjType = ObjType->castAs<ComplexType>()->getElementType();
3465 ObjType = ObjType->castAsArrayTypeUnsafe()->getElementType();
3467 if (A.Entries[I].getAsBaseOrMember() !=
3468 B.Entries[I].getAsBaseOrMember()) {
3469 WasArrayIndex = false;
3472 if (const FieldDecl *FD = getAsField(A.Entries[I]))
3473 // Next subobject is a field.
3474 ObjType = FD->getType();
3476 // Next subobject is a base class.
3477 ObjType = QualType();
3480 WasArrayIndex = false;
3484 /// Determine whether the given subobject designators refer to elements of the
3485 /// same array object.
3486 static bool AreElementsOfSameArray(QualType ObjType,
3487 const SubobjectDesignator &A,
3488 const SubobjectDesignator &B) {
3489 if (A.Entries.size() != B.Entries.size())
3492 bool IsArray = A.MostDerivedIsArrayElement;
3493 if (IsArray && A.MostDerivedPathLength != A.Entries.size())
3494 // A is a subobject of the array element.
3497 // If A (and B) designates an array element, the last entry will be the array
3498 // index. That doesn't have to match. Otherwise, we're in the 'implicit array
3499 // of length 1' case, and the entire path must match.
3501 unsigned CommonLength = FindDesignatorMismatch(ObjType, A, B, WasArrayIndex);
3502 return CommonLength >= A.Entries.size() - IsArray;
/// Find the complete object to which an LValue refers.
///
/// Locates the APValue storage and the type of the complete (outermost)
/// object designated by LVal, diagnosing accesses that are not permitted in
/// a constant expression for the given access kind AK.
static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
                                         AccessKinds AK, const LValue &LVal,
                                         QualType LValType) {
  if (LVal.InvalidBase) {
    return CompleteObject();

    // Null pointer dereference.
    Info.FFDiag(E, diag::note_constexpr_access_null) << AK;
    return CompleteObject();

  CallStackFrame *Frame = nullptr;
  if (LVal.getLValueCallIndex()) {
    std::tie(Frame, Depth) =
        Info.getCallFrameAndDepth(LVal.getLValueCallIndex());
      // The frame that created this automatic object is gone: lifetime ended.
      Info.FFDiag(E, diag::note_constexpr_lifetime_ended, 1)
          << AK << LVal.Base.is<const ValueDecl*>();
      NoteLValueLocation(Info, LVal.Base);
      return CompleteObject();

  bool IsAccess = isAnyAccess(AK);

  // C++11 DR1311: An lvalue-to-rvalue conversion on a volatile-qualified type
  // is not a constant expression (even if the object is non-volatile). We also
  // apply this rule to C++98, in order to conform to the expected 'volatile'
  if (isFormalAccess(AK) && LValType.isVolatileQualified()) {
    if (Info.getLangOpts().CPlusPlus)
      Info.FFDiag(E, diag::note_constexpr_access_volatile_type)
    return CompleteObject();

  // Compute value storage location and type of base object.
  APValue *BaseVal = nullptr;
  QualType BaseType = getType(LVal.Base);

  if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl*>()) {
    // In C++98, const, non-volatile integers initialized with ICEs are ICEs.
    // In C++11, constexpr, non-volatile variables initialized with constant
    // expressions are constant expressions too. Inside constexpr functions,
    // parameters are constant expressions even if they're non-const.
    // In C++1y, objects local to a constant expression (those with a Frame) are
    // both readable and writable inside constant expressions.
    // In C, such things can also be folded, although they are not ICEs.
    const VarDecl *VD = dyn_cast<VarDecl>(D);
      if (const VarDecl *VDef = VD->getDefinition(Info.Ctx))

    if (!VD || VD->isInvalidDecl()) {
      return CompleteObject();

    // Unless we're looking at a local variable or argument in a constexpr call,
    // the variable we're reading must be const.
      if (Info.getLangOpts().CPlusPlus14 &&
          lifetimeStartedInEvaluation(Info, LVal.Base)) {
        // OK, we can read and modify an object if we're in the process of
        // evaluating its initializer, because its lifetime began in this
      } else if (isModification(AK)) {
        // All the remaining cases do not permit modification of the object.
        Info.FFDiag(E, diag::note_constexpr_modify_global);
        return CompleteObject();
      } else if (VD->isConstexpr()) {
        // OK, we can read this variable.
      } else if (BaseType->isIntegralOrEnumerationType()) {
        // In OpenCL if a variable is in constant address space it is a const
        if (!(BaseType.isConstQualified() ||
              (Info.getLangOpts().OpenCL &&
               BaseType.getAddressSpace() == LangAS::opencl_constant))) {
            return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
          if (Info.getLangOpts().CPlusPlus) {
            Info.FFDiag(E, diag::note_constexpr_ltor_non_const_int, 1) << VD;
            Info.Note(VD->getLocation(), diag::note_declared_at);
          return CompleteObject();
      } else if (!IsAccess) {
        return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
      } else if (BaseType->isFloatingType() && BaseType.isConstQualified()) {
        // We support folding of const floating-point types, in order to make
        // static const data members of such types (supported as an extension)
        if (Info.getLangOpts().CPlusPlus11) {
          Info.CCEDiag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
          Info.Note(VD->getLocation(), diag::note_declared_at);
      } else if (BaseType.isConstQualified() && VD->hasDefinition(Info.Ctx)) {
        Info.CCEDiag(E, diag::note_constexpr_ltor_non_constexpr) << VD;
        // Keep evaluating to see what we can do.
        // FIXME: Allow folding of values of any literal type in all languages.
        if (Info.checkingPotentialConstantExpression() &&
            VD->getType().isConstQualified() && !VD->hasDefinition(Info.Ctx)) {
          // The definition of this variable could be constexpr. We can't
          // access it right now, but may be able to in future.
        } else if (Info.getLangOpts().CPlusPlus11) {
          Info.FFDiag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
          Info.Note(VD->getLocation(), diag::note_declared_at);
        return CompleteObject();

    if (!evaluateVarDeclInit(Info, E, VD, Frame, BaseVal, &LVal))
      return CompleteObject();
  } else if (DynamicAllocLValue DA = LVal.Base.dyn_cast<DynamicAllocLValue>()) {
    // A heap allocation created during this evaluation.
    Optional<DynAlloc*> Alloc = Info.lookupDynamicAlloc(DA);
      Info.FFDiag(E, diag::note_constexpr_access_deleted_object) << AK;
      return CompleteObject();
    return CompleteObject(LVal.Base, &(*Alloc)->Value,
                          LVal.Base.getDynamicAllocType());
    const Expr *Base = LVal.Base.dyn_cast<const Expr*>();

    if (const MaterializeTemporaryExpr *MTE =
            dyn_cast_or_null<MaterializeTemporaryExpr>(Base)) {
      assert(MTE->getStorageDuration() == SD_Static &&
             "should have a frame for a non-global materialized temporary");

      // Per C++1y [expr.const]p2:
      //  an lvalue-to-rvalue conversion [is not allowed unless it applies to]
      //   - a [...] glvalue of integral or enumeration type that refers to
      //     a non-volatile const object [...]
      //   - a [...] glvalue of literal type that refers to a non-volatile
      //     object whose lifetime began within the evaluation of e.
      //
      // C++11 misses the 'began within the evaluation of e' check and
      // instead allows all temporaries, including things like:
      //   constexpr int k = r;
      // Therefore we use the C++14 rules in C++11 too.
      //
      // Note that temporaries whose lifetimes began while evaluating a
      // variable's constructor are not usable while evaluating the
      // corresponding destructor, not even if they're of const-qualified
      if (!(BaseType.isConstQualified() &&
            BaseType->isIntegralOrEnumerationType()) &&
          !lifetimeStartedInEvaluation(Info, LVal.Base)) {
          return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
        Info.FFDiag(E, diag::note_constexpr_access_static_temporary, 1) << AK;
        Info.Note(MTE->getExprLoc(), diag::note_constexpr_temporary_here);
        return CompleteObject();

      BaseVal = MTE->getOrCreateValue(false);
      assert(BaseVal && "got reference to unevaluated temporary");
        return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);

        Info.FFDiag(E, diag::note_constexpr_access_unreadable_object)
            << Val.getAsString(Info.Ctx,
                               Info.Ctx.getLValueReferenceType(LValType));
        NoteLValueLocation(Info, LVal.Base);
        return CompleteObject();

      // A frame-local temporary.
      BaseVal = Frame->getTemporary(Base, LVal.Base.getVersion());
      assert(BaseVal && "missing value for temporary");

  // In C++14, we can't safely access any mutable state when we might be
  // evaluating after an unmodeled side effect.
  //
  // FIXME: Not all local state is mutable. Allow local constant subobjects
  // to be read here (but take care with 'mutable' fields).
  if ((Frame && Info.getLangOpts().CPlusPlus14 &&
       Info.EvalStatus.HasSideEffects) ||
      (isModification(AK) && Depth < Info.SpeculativeEvaluationDepth))
    return CompleteObject();

  return CompleteObject(LVal.getLValueBase(), BaseVal, BaseType);
/// Perform an lvalue-to-rvalue conversion on the given glvalue. This
/// can also be used for 'lvalue-to-lvalue' conversions for looking up the
/// glvalue referred to by an entity of reference type.
///
/// \param Info - Information about the ongoing evaluation.
/// \param Conv - The expression for which we are performing the conversion.
///               Used for diagnostics.
/// \param Type - The type of the glvalue (before stripping cv-qualifiers in the
///               case of a non-class type).
/// \param LVal - The glvalue on which we are attempting to perform this action.
/// \param RVal - The produced value will be placed here.
/// \param WantObjectRepresentation - If true, we're looking for the object
///               representation rather than the value, and in particular,
///               there is no requirement that the result be fully initialized.
handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv, QualType Type,
                               const LValue &LVal, APValue &RVal,
                               bool WantObjectRepresentation = false) {
  if (LVal.Designator.Invalid)

  // Check for special cases where there is no existing APValue to look at.
  const Expr *Base = LVal.Base.dyn_cast<const Expr*>();

      WantObjectRepresentation ? AK_ReadObjectRepresentation : AK_Read;

  if (Base && !LVal.getLValueCallIndex() && !Type.isVolatileQualified()) {
    if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(Base)) {
      // In C99, a CompoundLiteralExpr is an lvalue, and we defer evaluating the
      // initializer until now for such expressions. Such an expression can't be
      // an ICE in C, so this only matters for fold.
      if (Type.isVolatileQualified()) {

      // Evaluate the initializer lazily, then extract from it.
      if (!Evaluate(Lit, Info, CLE->getInitializer()))
      CompleteObject LitObj(LVal.Base, &Lit, Base->getType());
      return extractSubobject(Info, Conv, LitObj, LVal.Designator, RVal, AK);
    } else if (isa<StringLiteral>(Base) || isa<PredefinedExpr>(Base)) {
      // Special-case character extraction so we don't have to construct an
      // APValue for the whole string.
      assert(LVal.Designator.Entries.size() <= 1 &&
             "Can only read characters from string literals");
      if (LVal.Designator.Entries.empty()) {
        // Fail for now for LValue to RValue conversion of an array.
        // (This shouldn't show up in C/C++, but it could be triggered by a
        // weird EvaluateAsRValue call from a tool.)

      if (LVal.Designator.isOnePastTheEnd()) {
        if (Info.getLangOpts().CPlusPlus11)
          Info.FFDiag(Conv, diag::note_constexpr_access_past_end) << AK;
      uint64_t CharIndex = LVal.Designator.Entries[0].getAsArrayIndex();
      RVal = APValue(extractStringLiteralCharacter(Info, Base, CharIndex));

  // General case: find the complete object and extract the subobject from it.
  CompleteObject Obj = findCompleteObject(Info, Conv, AK, LVal, Type);
  return Obj && extractSubobject(Info, Conv, Obj, LVal.Designator, RVal, AK);
3781 /// Perform an assignment of Val to LVal. Takes ownership of Val.
3782 static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal,
3783 QualType LValType, APValue &Val) {
3784 if (LVal.Designator.Invalid)
3787 if (!Info.getLangOpts().CPlusPlus14) {
3792 CompleteObject Obj = findCompleteObject(Info, E, AK_Assign, LVal, LValType);
3793 return Obj && modifySubobject(Info, E, Obj, LVal.Designator, Val);
// findSubobject handler implementing compound assignment
// (subobject = subobject <Opcode> RHS) on the designated subobject.
struct CompoundAssignSubobjectHandler {
  QualType PromotedLHSType;  // type the LHS is converted to before the binop
  BinaryOperatorKind Opcode; // the underlying (non-compound) operator

  static const AccessKinds AccessKind = AK_Assign;

  typedef bool result_type;

  // Reject writes through a const-qualified path.
  bool checkConst(QualType QT) {
    // Assigning to a const object has undefined behavior.
    if (QT.isConstQualified()) {
      Info.FFDiag(E, diag::note_constexpr_modify_const_type) << QT;

  bool failed() { return false; }
  bool found(APValue &Subobj, QualType SubobjType) {
    // Dispatch on the kind of value stored in the subobject.
    switch (Subobj.getKind()) {
      return found(Subobj.getInt(), SubobjType);
    case APValue::Float:
      return found(Subobj.getFloat(), SubobjType);
    case APValue::ComplexInt:
    case APValue::ComplexFloat:
      // FIXME: Implement complex compound assignment.
    case APValue::LValue:
      return foundPointer(Subobj, SubobjType);
      // FIXME: can this happen?

  bool found(APSInt &Value, QualType SubobjType) {
    if (!checkConst(SubobjType))

    if (!SubobjType->isIntegerType()) {
      // We don't support compound assignment on integer-cast-to-pointer

      // Promote to the computation type, operate, convert back.
          HandleIntToIntCast(Info, E, PromotedLHSType, SubobjType, Value);
      if (!handleIntIntBinOp(Info, E, LHS, Opcode, RHS.getInt(), LHS))
      Value = HandleIntToIntCast(Info, E, SubobjType, PromotedLHSType, LHS);
    } else if (RHS.isFloat()) {
      APFloat FValue(0.0);
      return HandleIntToFloatCast(Info, E, SubobjType, Value, PromotedLHSType,
             handleFloatFloatBinOp(Info, E, FValue, Opcode, RHS.getFloat()) &&
             HandleFloatToIntCast(Info, E, PromotedLHSType, FValue, SubobjType,

  bool found(APFloat &Value, QualType SubobjType) {
    // Promote, operate in the promoted type, and convert back.
    return checkConst(SubobjType) &&
           HandleFloatToFloatCast(Info, E, SubobjType, PromotedLHSType,
           handleFloatFloatBinOp(Info, E, Value, Opcode, RHS.getFloat()) &&
           HandleFloatToFloatCast(Info, E, PromotedLHSType, SubobjType, Value);

  bool foundPointer(APValue &Subobj, QualType SubobjType) {
    if (!checkConst(SubobjType))

    QualType PointeeType;
    if (const PointerType *PT = SubobjType->getAs<PointerType>())
      PointeeType = PT->getPointeeType();

    // Only pointer += integer and pointer -= integer are handled here.
    if (PointeeType.isNull() || !RHS.isInt() ||
        (Opcode != BO_Add && Opcode != BO_Sub)) {

    APSInt Offset = RHS.getInt();
    if (Opcode == BO_Sub)
      negateAsSigned(Offset);

    LVal.setFrom(Info.Ctx, Subobj);
    if (!HandleLValueArrayAdjustment(Info, E, LVal, PointeeType, Offset))
    LVal.moveInto(Subobj);
3900 } // end anonymous namespace
3902 const AccessKinds CompoundAssignSubobjectHandler::AccessKind;
3904 /// Perform a compound assignment of LVal <op>= RVal.
3905 /// Locates the complete object for LVal, then walks its designator with a
3906 /// CompoundAssignSubobjectHandler which mutates the subobject in place.
3907 /// Returns false (with a diagnostic) on failure. Compound assignment on an
3908 /// lvalue is only a constant expression in C++14 and later.
3905 static bool handleCompoundAssignment(
3906 EvalInfo &Info, const Expr *E,
3907 const LValue &LVal, QualType LValType, QualType PromotedLValType,
3908 BinaryOperatorKind Opcode, const APValue &RVal) {
3909 if (LVal.Designator.Invalid)
3912 if (!Info.getLangOpts().CPlusPlus14) {
3917 CompleteObject Obj = findCompleteObject(Info, E, AK_Assign, LVal, LValType);
3918 CompoundAssignSubobjectHandler Handler = { Info, E, PromotedLValType, Opcode,
3920 return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler);
// Subobject handler invoked by findSubobject() to apply ++/-- to the
// located subobject. If Old is non-null (post-inc/dec), the previous
// value is stashed there before mutation.
3924 struct IncDecSubobjectHandler {
3926 const UnaryOperator *E;
3927 AccessKinds AccessKind;
3930 typedef bool result_type;
// Incrementing/decrementing a const object is UB; diagnose and fail.
3932 bool checkConst(QualType QT) {
3933 // Assigning to a const object has undefined behavior.
3934 if (QT.isConstQualified()) {
3935 Info.FFDiag(E, diag::note_constexpr_modify_const_type) << QT;
3941 bool failed() { return false; }
// Dispatch on the APValue kind. For complex values only the real
// component is incremented, carrying the element type with the
// enclosing CVR qualifiers.
3942 bool found(APValue &Subobj, QualType SubobjType) {
3943 // Stash the old value. Also clear Old, so we don't clobber it later
3944 // if we're post-incrementing a complex.
3950 switch (Subobj.getKind()) {
3952 return found(Subobj.getInt(), SubobjType);
3953 case APValue::Float:
3954 return found(Subobj.getFloat(), SubobjType);
3955 case APValue::ComplexInt:
3956 return found(Subobj.getComplexIntReal(),
3957 SubobjType->castAs<ComplexType>()->getElementType()
3958 .withCVRQualifiers(SubobjType.getCVRQualifiers()));
3959 case APValue::ComplexFloat:
3960 return found(Subobj.getComplexFloatReal(),
3961 SubobjType->castAs<ComplexType>()->getElementType()
3962 .withCVRQualifiers(SubobjType.getCVRQualifiers()));
3963 case APValue::LValue:
3964 return foundPointer(Subobj, SubobjType);
3966 // FIXME: can this happen?
// Integer subobject: apply +1/-1 and detect signed overflow.
3971 bool found(APSInt &Value, QualType SubobjType) {
3972 if (!checkConst(SubobjType))
3975 if (!SubobjType->isIntegerType()) {
3976 // We don't support increment / decrement on integer-cast-to-pointer
3982 if (Old) *Old = APValue(Value);
3984 // bool arithmetic promotes to int, and the conversion back to bool
3985 // doesn't reduce mod 2^n, so special-case it.
3986 if (SubobjType->isBooleanType()) {
3987 if (AccessKind == AK_Increment)
// Overflow check: a sign flip in the "wrong" direction on a UnaryOperator
// that canOverflow() means UB; report the mathematically-correct value
// via HandleOverflow (widening as needed for the decrement case).
3994 bool WasNegative = Value.isNegative();
3995 if (AccessKind == AK_Increment) {
3998 if (!WasNegative && Value.isNegative() && E->canOverflow()) {
3999 APSInt ActualValue(Value, /*IsUnsigned*/true);
4000 return HandleOverflow(Info, E, ActualValue, SubobjType);
4005 if (WasNegative && !Value.isNegative() && E->canOverflow()) {
4006 unsigned BitWidth = Value.getBitWidth();
4007 APSInt ActualValue(Value.sext(BitWidth + 1), /*IsUnsigned*/false);
4008 ActualValue.setBit(BitWidth);
4009 return HandleOverflow(Info, E, ActualValue, SubobjType);
// Float subobject: add or subtract 1.0 in the value's own semantics,
// round-to-nearest-even.
4014 bool found(APFloat &Value, QualType SubobjType) {
4015 if (!checkConst(SubobjType))
4018 if (Old) *Old = APValue(Value);
4020 APFloat One(Value.getSemantics(), 1);
4021 if (AccessKind == AK_Increment)
4022 Value.add(One, APFloat::rmNearestTiesToEven);
4024 Value.subtract(One, APFloat::rmNearestTiesToEven);
// Pointer subobject: adjust the lvalue by +/-1 element and write back.
4027 bool foundPointer(APValue &Subobj, QualType SubobjType) {
4028 if (!checkConst(SubobjType))
4031 QualType PointeeType;
4032 if (const PointerType *PT = SubobjType->getAs<PointerType>())
4033 PointeeType = PT->getPointeeType();
4040 LVal.setFrom(Info.Ctx, Subobj);
4041 if (!HandleLValueArrayAdjustment(Info, E, LVal, PointeeType,
4042 AccessKind == AK_Increment ? 1 : -1))
4044 LVal.moveInto(Subobj);
4048 } // end anonymous namespace
4050 /// Perform an increment or decrement on LVal.
4051 /// If Old is non-null, receives the pre-mutation value (post-inc/dec).
4052 /// Like compound assignment, mutation requires C++14 constexpr rules.
4051 static bool handleIncDec(EvalInfo &Info, const Expr *E, const LValue &LVal,
4052 QualType LValType, bool IsIncrement, APValue *Old) {
4053 if (LVal.Designator.Invalid)
4056 if (!Info.getLangOpts().CPlusPlus14) {
4061 AccessKinds AK = IsIncrement ? AK_Increment : AK_Decrement;
4062 CompleteObject Obj = findCompleteObject(Info, E, AK, LVal, LValType);
4063 IncDecSubobjectHandler Handler = {Info, cast<UnaryOperator>(E), AK, Old};
4064 return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler);
4067 /// Build an lvalue for the object argument of a member function call.
4068 /// A pointer rvalue is dereferenced implicitly; a glvalue is evaluated
4069 /// directly; a literal-type prvalue is materialized as a temporary.
4070 /// Anything else (a non-literal prvalue) cannot appear in a constant
4071 /// expression and is diagnosed.
4068 static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object,
4070 if (Object->getType()->isPointerType() && Object->isRValue())
4071 return EvaluatePointer(Object, This, Info);
4073 if (Object->isGLValue())
4074 return EvaluateLValue(Object, This, Info);
4076 if (Object->getType()->isLiteralType(Info.Ctx))
4077 return EvaluateTemporary(Object, This, Info);
4079 Info.FFDiag(Object, diag::note_constexpr_nonliteral) << Object->getType();
4083 /// HandleMemberPointerAccess - Evaluate a member access operation and build an
4084 /// lvalue referring to the result.
4086 /// \param Info - Information about the ongoing evaluation.
4087 /// \param LV - An lvalue referring to the base of the member pointer.
4088 /// \param RHS - The member pointer expression.
4089 /// \param IncludeMember - Specifies whether the member itself is included in
4090 /// the resulting LValue subobject designator. This is not possible when
4091 /// creating a bound member function.
4092 /// \return The field or method declaration to which the member pointer refers,
4093 /// or 0 if evaluation fails.
4094 static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
4098 bool IncludeMember = true) {
4100 if (!EvaluateMemberPointer(RHS, MemPtr, Info))
4103 // C++11 [expr.mptr.oper]p6: If the second operand is the null pointer to
4104 // member value, the behavior is undefined.
4105 if (!MemPtr.getDecl()) {
4106 // FIXME: Specific diagnostic.
// Case 1: member of a class derived from LV's class. The tail of LV's
// derived-to-base path must mirror the member pointer's path; if so,
// truncate LV back down to the containing (derived) class.
4111 if (MemPtr.isDerivedMember()) {
4112 // This is a member of some derived class. Truncate LV appropriately.
4113 // The end of the derived-to-base path for the base object must match the
4114 // derived-to-base path for the member pointer.
4115 if (LV.Designator.MostDerivedPathLength + MemPtr.Path.size() >
4116 LV.Designator.Entries.size()) {
4120 unsigned PathLengthToMember =
4121 LV.Designator.Entries.size() - MemPtr.Path.size();
// Verify each base class along the overlapping path matches (by
// canonical declaration) before truncating.
4122 for (unsigned I = 0, N = MemPtr.Path.size(); I != N; ++I) {
4123 const CXXRecordDecl *LVDecl = getAsBaseClass(
4124 LV.Designator.Entries[PathLengthToMember + I]);
4125 const CXXRecordDecl *MPDecl = MemPtr.Path[I];
4126 if (LVDecl->getCanonicalDecl() != MPDecl->getCanonicalDecl()) {
4132 // Truncate the lvalue to the appropriate derived class.
4133 if (!CastToDerivedClass(Info, RHS, LV, MemPtr.getContainingRecord(),
4134 PathLengthToMember))
// Case 2: member of a base class of LV's class. Extend LV's designator
// by walking down the member pointer's base path (stored derived-first,
// hence the reversed index N - I - 1).
4136 } else if (!MemPtr.Path.empty()) {
4137 // Extend the LValue path with the member pointer's path.
4138 LV.Designator.Entries.reserve(LV.Designator.Entries.size() +
4139 MemPtr.Path.size() + IncludeMember);
4141 // Walk down to the appropriate base class.
4142 if (const PointerType *PT = LVType->getAs<PointerType>())
4143 LVType = PT->getPointeeType();
4144 const CXXRecordDecl *RD = LVType->getAsCXXRecordDecl();
4145 assert(RD && "member pointer access on non-class-type expression");
4146 // The first class in the path is that of the lvalue.
4147 for (unsigned I = 1, N = MemPtr.Path.size(); I != N; ++I) {
4148 const CXXRecordDecl *Base = MemPtr.Path[N - I - 1];
4149 if (!HandleLValueDirectBase(Info, RHS, LV, RD, Base))
4153 // Finally cast to the class containing the member.
4154 if (!HandleLValueDirectBase(Info, RHS, LV, RD,
4155 MemPtr.getContainingRecord()))
4159 // Add the member. Note that we cannot build bound member functions here.
4160 if (IncludeMember) {
4161 if (const FieldDecl *FD = dyn_cast<FieldDecl>(MemPtr.getDecl())) {
4162 if (!HandleLValueMember(Info, RHS, LV, FD))
4164 } else if (const IndirectFieldDecl *IFD =
4165 dyn_cast<IndirectFieldDecl>(MemPtr.getDecl())) {
4166 if (!HandleLValueIndirectMember(Info, RHS, LV, IFD))
4169 llvm_unreachable("can't construct reference to bound member function");
4173 return MemPtr.getDecl();
// Convenience overload for a .* or ->* BinaryOperator: evaluates the LHS
// object argument, then defers to the main HandleMemberPointerAccess.
// On LHS failure, the RHS member pointer is still evaluated (result
// discarded) when noteFailure() asks for continued diagnostics.
4176 static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
4177 const BinaryOperator *BO,
4179 bool IncludeMember = true) {
4180 assert(BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI);
4182 if (!EvaluateObjectArgument(Info, BO->getLHS(), LV)) {
4183 if (Info.noteFailure()) {
4185 EvaluateMemberPointer(BO->getRHS(), MemPtr, Info);
4190 return HandleMemberPointerAccess(Info, BO->getLHS()->getType(), LV,
4191 BO->getRHS(), IncludeMember);
4194 /// HandleBaseToDerivedCast - Apply the given base-to-derived cast operation on
4195 /// the provided lvalue, which currently refers to the base object.
4196 /// Fails (with a note_constexpr_invalid_downcast) if the cast does not land
4197 /// inside the lvalue's recorded derived-to-base path, or if the class at the
4198 /// landing point is not the cast's target class.
4196 static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E,
4198 SubobjectDesignator &D = Result.Designator;
4199 if (D.Invalid || !Result.checkNullPointer(Info, E, CSK_Derived))
4202 QualType TargetQT = E->getType();
4203 if (const PointerType *PT = TargetQT->getAs<PointerType>())
4204 TargetQT = PT->getPointeeType();
4206 // Check this cast lands within the final derived-to-base subobject path.
4207 if (D.MostDerivedPathLength + E->path_size() > D.Entries.size()) {
4208 Info.CCEDiag(E, diag::note_constexpr_invalid_downcast)
4209 << D.MostDerivedType << TargetQT;
4213 // Check the type of the final cast. We don't need to check the path,
4214 // since a cast can only be formed if the path is unique.
4215 unsigned NewEntriesSize = D.Entries.size() - E->path_size();
4216 const CXXRecordDecl *TargetType = TargetQT->getAsCXXRecordDecl();
4217 const CXXRecordDecl *FinalType;
4218 if (NewEntriesSize == D.MostDerivedPathLength)
4219 FinalType = D.MostDerivedType->getAsCXXRecordDecl();
4221 FinalType = getAsBaseClass(D.Entries[NewEntriesSize - 1]);
4222 if (FinalType->getCanonicalDecl() != TargetType->getCanonicalDecl()) {
4223 Info.CCEDiag(E, diag::note_constexpr_invalid_downcast)
4224 << D.MostDerivedType << TargetQT;
4228 // Truncate the lvalue to the appropriate derived class.
4229 return CastToDerivedClass(Info, E, Result, TargetType, NewEntriesSize);
4232 /// Get the value to use for a default-initialized object of type T.
4233 /// Unions get a "no active member" value; structs/classes recurse into every
4234 /// base and every (named) field; constant arrays use a single recursive
4235 /// filler value; everything else is an indeterminate value.
4233 static APValue getDefaultInitValue(QualType T) {
4234 if (auto *RD = T->getAsCXXRecordDecl()) {
4236 return APValue((const FieldDecl*)nullptr);
4238 APValue Struct(APValue::UninitStruct(), RD->getNumBases(),
4239 std::distance(RD->field_begin(), RD->field_end()));
4242 for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
4243 End = RD->bases_end(); I != End; ++I, ++Index)
4244 Struct.getStructBase(Index) = getDefaultInitValue(I->getType());
// Unnamed bitfields are not objects and take no value.
4246 for (const auto *I : RD->fields()) {
4247 if (I->isUnnamedBitfield())
4249 Struct.getStructField(I->getFieldIndex()) =
4250 getDefaultInitValue(I->getType());
4256 dyn_cast_or_null<ConstantArrayType>(T->getAsArrayTypeUnsafe())) {
4257 APValue Array(APValue::UninitArray(), 0, AT->getSize().getZExtValue());
4258 if (Array.hasArrayFiller())
4259 Array.getArrayFiller() = getDefaultInitValue(AT->getElementType());
4263 return APValue::IndeterminateValue();
// Outcome of evaluating a single statement during constexpr evaluation.
// Drives control flow in EvaluateStmt/EvaluateLoopBody/EvaluateSwitch.
4267 enum EvalStmtResult {
4268 /// Evaluation failed.
4270 /// Hit a 'return' statement.
4272 /// Evaluation succeeded.
4274 /// Hit a 'continue' statement.
4276 /// Hit a 'break' statement.
4278 /// Still scanning for 'case' or 'default' statement.
// Evaluate a local variable declaration: create a temporary slot in the
// current call frame and evaluate the initializer into it (or use the
// default-init value when there is no initializer).
4283 static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) {
4284 // We don't need to evaluate the initializer for a static local.
4285 if (!VD->hasLocalStorage())
4290 Info.CurrentCall->createTemporary(VD, VD->getType(), true, Result);
4292 const Expr *InitE = VD->getInit();
4294 Val = getDefaultInitValue(VD->getType());
4298 if (InitE->isValueDependent())
4301 if (!EvaluateInPlace(Val, Info, Result, InitE)) {
4302 // Wipe out any partially-computed value, to allow tracking that this
4303 // evaluation failed.
// Evaluate a declaration. Handles VarDecls directly; for a structured
// binding (DecompositionDecl), also evaluates each binding's holding
// variable. Accumulates success across all of them in OK.
4311 static bool EvaluateDecl(EvalInfo &Info, const Decl *D) {
4314 if (const VarDecl *VD = dyn_cast<VarDecl>(D))
4315 OK &= EvaluateVarDecl(Info, VD);
4317 if (const DecompositionDecl *DD = dyn_cast<DecompositionDecl>(D))
4318 for (auto *BD : DD->bindings())
4319 if (auto *VD = BD->getHoldingVar())
4320 OK &= EvaluateDecl(Info, VD);
4326 /// Evaluate a condition (either a variable declaration or an expression).
4327 /// The whole condition is one full-expression; CondDecl, if present, is
4328 /// evaluated first, then Cond as a boolean, then the scope is destroyed.
4327 static bool EvaluateCond(EvalInfo &Info, const VarDecl *CondDecl,
4328 const Expr *Cond, bool &Result) {
4329 FullExpressionRAII Scope(Info);
4330 if (CondDecl && !EvaluateDecl(Info, CondDecl))
4332 if (!EvaluateAsBooleanCondition(Cond, Result, Info))
4334 return Scope.destroy();
4338 /// A location where the result (returned value) of evaluating a
4339 /// statement should be stored.
4341 /// The APValue that should be filled in with the returned value.
4343 /// The location containing the result, if any (used to support RVO).
// RAII object that pushes a new temporary version onto the call frame on
// construction and pops it on destruction.
4347 struct TempVersionRAII {
4348 CallStackFrame &Frame;
4350 TempVersionRAII(CallStackFrame &Frame) : Frame(Frame) {
4351 Frame.pushTempVersion();
4354 ~TempVersionRAII() {
4355 Frame.popTempVersion();
4361 static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
4363 const SwitchCase *SC = nullptr);
4365 /// Evaluate the body of a loop, and translate the result as appropriate.
4366 /// A 'continue' or normal completion becomes ESR_Continue (the loop keeps
4367 /// going); break/return/failure propagate to the caller. The body runs in
4368 /// its own block scope, which must be destroyed successfully unless
4369 /// evaluation failed or the sought case label was not found.
4366 static EvalStmtResult EvaluateLoopBody(StmtResult &Result, EvalInfo &Info,
4368 const SwitchCase *Case = nullptr) {
4369 BlockScopeRAII Scope(Info);
4371 EvalStmtResult ESR = EvaluateStmt(Result, Info, Body, Case);
4372 if (ESR != ESR_Failed && ESR != ESR_CaseNotFound && !Scope.destroy())
4377 return ESR_Succeeded;
4380 return ESR_Continue;
4383 case ESR_CaseNotFound:
4386 llvm_unreachable("Invalid EvalStmtResult!");
4389 /// Evaluate a switch statement.
4390 /// Evaluates the init-statement and condition, linearly scans the case list
4391 /// for a matching 'case' (or remembers 'default'), then re-enters the switch
4392 /// body via EvaluateStmt hunting for that label.
4390 static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
4391 const SwitchStmt *SS) {
4392 BlockScopeRAII Scope(Info);
4394 // Evaluate the switch condition.
4397 if (const Stmt *Init = SS->getInit()) {
4398 EvalStmtResult ESR = EvaluateStmt(Result, Info, Init);
4399 if (ESR != ESR_Succeeded) {
4400 if (ESR != ESR_Failed && !Scope.destroy())
4406 FullExpressionRAII CondScope(Info);
4407 if (SS->getConditionVariable() &&
4408 !EvaluateDecl(Info, SS->getConditionVariable()))
4410 if (!EvaluateInteger(SS->getCond(), Value, Info))
4412 if (!CondScope.destroy())
4416 // Find the switch case corresponding to the value of the condition.
4417 // FIXME: Cache this lookup.
4418 const SwitchCase *Found = nullptr;
4419 for (const SwitchCase *SC = SS->getSwitchCaseList(); SC;
4420 SC = SC->getNextSwitchCase()) {
4421 if (isa<DefaultStmt>(SC)) {
// GNU case ranges: a CaseStmt may have both an LHS and an RHS bound;
// a plain case has only LHS.
4426 const CaseStmt *CS = cast<CaseStmt>(SC);
4427 APSInt LHS = CS->getLHS()->EvaluateKnownConstInt(Info.Ctx);
4428 APSInt RHS = CS->getRHS() ? CS->getRHS()->EvaluateKnownConstInt(Info.Ctx)
4430 if (LHS <= Value && Value <= RHS) {
// No matching case and no default: the switch completes normally.
4437 return Scope.destroy() ? ESR_Succeeded : ESR_Failed;
4439 // Search the switch body for the switch case and evaluate it from there.
4440 EvalStmtResult ESR = EvaluateStmt(Result, Info, SS->getBody(), Found);
4441 if (ESR != ESR_Failed && ESR != ESR_CaseNotFound && !Scope.destroy())
4446 return ESR_Succeeded;
4452 case ESR_CaseNotFound:
4453 // This can only happen if the switch case is nested within a statement
4454 // expression. We have no intention of supporting that.
4455 Info.FFDiag(Found->getBeginLoc(),
4456 diag::note_constexpr_stmt_expr_unsupported);
4459 llvm_unreachable("Invalid EvalStmtResult!");
4462 // Evaluate a statement.
4463 // If Case is non-null we are hunting for that 'case'/'default' label: the
4464 // first switch recurses through label-transparent statements until the label
4465 // is reached (returning ESR_CaseNotFound from branches that don't contain
4466 // it), then normal statement evaluation takes over in the second switch.
4463 static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
4464 const Stmt *S, const SwitchCase *Case) {
4465 if (!Info.nextStep(S))
4468 // If we're hunting down a 'case' or 'default' label, recurse through
4469 // substatements until we hit the label.
4471 switch (S->getStmtClass()) {
4472 case Stmt::CompoundStmtClass:
4473 // FIXME: Precompute which substatement of a compound statement we
4474 // would jump to, and go straight there rather than performing a
4475 // linear scan each time.
4476 case Stmt::LabelStmtClass:
4477 case Stmt::AttributedStmtClass:
4478 case Stmt::DoStmtClass:
4481 case Stmt::CaseStmtClass:
4482 case Stmt::DefaultStmtClass:
4487 case Stmt::IfStmtClass: {
4488 // FIXME: Precompute which side of an 'if' we would jump to, and go
4489 // straight there rather than scanning both sides.
4490 const IfStmt *IS = cast<IfStmt>(S);
4492 // Wrap the evaluation in a block scope, in case it's a DeclStmt
4493 // preceded by our switch label.
4494 BlockScopeRAII Scope(Info);
4496 // Step into the init statement in case it brings an (uninitialized)
4497 // variable into scope.
4498 if (const Stmt *Init = IS->getInit()) {
4499 EvalStmtResult ESR = EvaluateStmt(Result, Info, Init, Case);
4500 if (ESR != ESR_CaseNotFound) {
4501 assert(ESR != ESR_Succeeded);
4506 // Condition variable must be initialized if it exists.
4507 // FIXME: We can skip evaluating the body if there's a condition
4508 // variable, as there can't be any case labels within it.
4509 // (The same is true for 'for' statements.)
// Hunt in the 'then' branch first, then (if not found there) the 'else'.
4511 EvalStmtResult ESR = EvaluateStmt(Result, Info, IS->getThen(), Case);
4512 if (ESR == ESR_Failed)
4514 if (ESR != ESR_CaseNotFound)
4515 return Scope.destroy() ? ESR : ESR_Failed;
4517 return ESR_CaseNotFound;
4519 ESR = EvaluateStmt(Result, Info, IS->getElse(), Case);
4520 if (ESR == ESR_Failed)
4522 if (ESR != ESR_CaseNotFound)
4523 return Scope.destroy() ? ESR : ESR_Failed;
4524 return ESR_CaseNotFound;
4527 case Stmt::WhileStmtClass: {
4528 EvalStmtResult ESR =
4529 EvaluateLoopBody(Result, Info, cast<WhileStmt>(S)->getBody(), Case);
4530 if (ESR != ESR_Continue)
4535 case Stmt::ForStmtClass: {
4536 const ForStmt *FS = cast<ForStmt>(S);
4537 BlockScopeRAII Scope(Info);
4539 // Step into the init statement in case it brings an (uninitialized)
4540 // variable into scope.
4541 if (const Stmt *Init = FS->getInit()) {
4542 EvalStmtResult ESR = EvaluateStmt(Result, Info, Init, Case);
4543 if (ESR != ESR_CaseNotFound) {
4544 assert(ESR != ESR_Succeeded);
4549 EvalStmtResult ESR =
4550 EvaluateLoopBody(Result, Info, FS->getBody(), Case);
4551 if (ESR != ESR_Continue)
4554 FullExpressionRAII IncScope(Info);
4555 if (!EvaluateIgnoredValue(Info, FS->getInc()) || !IncScope.destroy())
4561 case Stmt::DeclStmtClass: {
4562 // Start the lifetime of any uninitialized variables we encounter. They
4563 // might be used by the selected branch of the switch.
4564 const DeclStmt *DS = cast<DeclStmt>(S);
4565 for (const auto *D : DS->decls()) {
4566 if (const auto *VD = dyn_cast<VarDecl>(D)) {
4567 if (VD->hasLocalStorage() && !VD->getInit())
4568 if (!EvaluateVarDecl(Info, VD))
4570 // FIXME: If the variable has initialization that can't be jumped
4571 // over, bail out of any immediately-surrounding compound-statement
4572 // too. There can't be any case labels here.
4575 return ESR_CaseNotFound;
4579 return ESR_CaseNotFound;
// Normal (non-label-hunting) statement evaluation.
4583 switch (S->getStmtClass()) {
// Default: a bare expression statement; evaluate it for side effects.
4585 if (const Expr *E = dyn_cast<Expr>(S)) {
4586 // Don't bother evaluating beyond an expression-statement which couldn't
4588 // FIXME: Do we need the FullExpressionRAII object here?
4589 // VisitExprWithCleanups should create one when necessary.
4590 FullExpressionRAII Scope(Info);
4591 if (!EvaluateIgnoredValue(Info, E) || !Scope.destroy())
4593 return ESR_Succeeded;
4596 Info.FFDiag(S->getBeginLoc());
4599 case Stmt::NullStmtClass:
4600 return ESR_Succeeded;
4602 case Stmt::DeclStmtClass: {
4603 const DeclStmt *DS = cast<DeclStmt>(S);
4604 for (const auto *D : DS->decls()) {
4605 // Each declaration initialization is its own full-expression.
4606 FullExpressionRAII Scope(Info);
4607 if (!EvaluateDecl(Info, D) && !Info.noteFailure())
4609 if (!Scope.destroy())
4612 return ESR_Succeeded;
4615 case Stmt::ReturnStmtClass: {
4616 const Expr *RetExpr = cast<ReturnStmt>(S)->getRetValue();
4617 FullExpressionRAII Scope(Info);
// If a result slot exists (RVO), evaluate directly into it; otherwise
// evaluate into Result.Value.
4620 ? EvaluateInPlace(Result.Value, Info, *Result.Slot, RetExpr)
4621 : Evaluate(Result.Value, Info, RetExpr)))
4623 return Scope.destroy() ? ESR_Returned : ESR_Failed;
4626 case Stmt::CompoundStmtClass: {
4627 BlockScopeRAII Scope(Info);
4629 const CompoundStmt *CS = cast<CompoundStmt>(S);
4630 for (const auto *BI : CS->body()) {
4631 EvalStmtResult ESR = EvaluateStmt(Result, Info, BI, Case);
4632 if (ESR == ESR_Succeeded)
4634 else if (ESR != ESR_CaseNotFound) {
4635 if (ESR != ESR_Failed && !Scope.destroy())
4641 return ESR_CaseNotFound;
4642 return Scope.destroy() ? ESR_Succeeded : ESR_Failed;
4645 case Stmt::IfStmtClass: {
4646 const IfStmt *IS = cast<IfStmt>(S);
4648 // Evaluate the condition, as either a var decl or as an expression.
4649 BlockScopeRAII Scope(Info);
4650 if (const Stmt *Init = IS->getInit()) {
4651 EvalStmtResult ESR = EvaluateStmt(Result, Info, Init);
4652 if (ESR != ESR_Succeeded) {
4653 if (ESR != ESR_Failed && !Scope.destroy())
4659 if (!EvaluateCond(Info, IS->getConditionVariable(), IS->getCond(), Cond))
4662 if (const Stmt *SubStmt = Cond ? IS->getThen() : IS->getElse()) {
4663 EvalStmtResult ESR = EvaluateStmt(Result, Info, SubStmt);
4664 if (ESR != ESR_Succeeded) {
4665 if (ESR != ESR_Failed && !Scope.destroy())
4670 return Scope.destroy() ? ESR_Succeeded : ESR_Failed;
4673 case Stmt::WhileStmtClass: {
4674 const WhileStmt *WS = cast<WhileStmt>(S);
4676 BlockScopeRAII Scope(Info);
4678 if (!EvaluateCond(Info, WS->getConditionVariable(), WS->getCond(),
4684 EvalStmtResult ESR = EvaluateLoopBody(Result, Info, WS->getBody());
4685 if (ESR != ESR_Continue) {
4686 if (ESR != ESR_Failed && !Scope.destroy())
4690 if (!Scope.destroy())
4693 return ESR_Succeeded;
4696 case Stmt::DoStmtClass: {
4697 const DoStmt *DS = cast<DoStmt>(S);
// Note: Case is forwarded into the do-body, since a do-statement is
// label-transparent for switch hunting.
4700 EvalStmtResult ESR = EvaluateLoopBody(Result, Info, DS->getBody(), Case);
4701 if (ESR != ESR_Continue)
4705 FullExpressionRAII CondScope(Info);
4706 if (!EvaluateAsBooleanCondition(DS->getCond(), Continue, Info) ||
4707 !CondScope.destroy())
4710 return ESR_Succeeded;
4713 case Stmt::ForStmtClass: {
4714 const ForStmt *FS = cast<ForStmt>(S);
4715 BlockScopeRAII ForScope(Info);
4716 if (FS->getInit()) {
4717 EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getInit());
4718 if (ESR != ESR_Succeeded) {
4719 if (ESR != ESR_Failed && !ForScope.destroy())
// Each iteration (condition + body + increment) gets its own scope.
4725 BlockScopeRAII IterScope(Info);
4726 bool Continue = true;
4727 if (FS->getCond() && !EvaluateCond(Info, FS->getConditionVariable(),
4728 FS->getCond(), Continue))
4733 EvalStmtResult ESR = EvaluateLoopBody(Result, Info, FS->getBody());
4734 if (ESR != ESR_Continue) {
4735 if (ESR != ESR_Failed && (!IterScope.destroy() || !ForScope.destroy()))
4741 FullExpressionRAII IncScope(Info);
4742 if (!EvaluateIgnoredValue(Info, FS->getInc()) || !IncScope.destroy())
4746 if (!IterScope.destroy())
4749 return ForScope.destroy() ? ESR_Succeeded : ESR_Failed;
4752 case Stmt::CXXForRangeStmtClass: {
4753 const CXXForRangeStmt *FS = cast<CXXForRangeStmt>(S);
4754 BlockScopeRAII Scope(Info);
4756 // Evaluate the init-statement if present.
4757 if (FS->getInit()) {
4758 EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getInit());
4759 if (ESR != ESR_Succeeded) {
4760 if (ESR != ESR_Failed && !Scope.destroy())
4766 // Initialize the __range variable.
4767 EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getRangeStmt());
4768 if (ESR != ESR_Succeeded) {
4769 if (ESR != ESR_Failed && !Scope.destroy())
4774 // Create the __begin and __end iterators.
4775 ESR = EvaluateStmt(Result, Info, FS->getBeginStmt());
4776 if (ESR != ESR_Succeeded) {
4777 if (ESR != ESR_Failed && !Scope.destroy())
4781 ESR = EvaluateStmt(Result, Info, FS->getEndStmt());
4782 if (ESR != ESR_Succeeded) {
4783 if (ESR != ESR_Failed && !Scope.destroy())
4789 // Condition: __begin != __end.
4791 bool Continue = true;
4792 FullExpressionRAII CondExpr(Info);
4793 if (!EvaluateAsBooleanCondition(FS->getCond(), Continue, Info))
4799 // User's variable declaration, initialized by *__begin.
4800 BlockScopeRAII InnerScope(Info);
4801 ESR = EvaluateStmt(Result, Info, FS->getLoopVarStmt());
4802 if (ESR != ESR_Succeeded) {
4803 if (ESR != ESR_Failed && (!InnerScope.destroy() || !Scope.destroy()))
4809 ESR = EvaluateLoopBody(Result, Info, FS->getBody());
4810 if (ESR != ESR_Continue) {
4811 if (ESR != ESR_Failed && (!InnerScope.destroy() || !Scope.destroy()))
4816 // Increment: ++__begin
4817 if (!EvaluateIgnoredValue(Info, FS->getInc()))
4820 if (!InnerScope.destroy())
4824 return Scope.destroy() ? ESR_Succeeded : ESR_Failed;
4827 case Stmt::SwitchStmtClass:
4828 return EvaluateSwitch(Result, Info, cast<SwitchStmt>(S));
4830 case Stmt::ContinueStmtClass:
4831 return ESR_Continue;
4833 case Stmt::BreakStmtClass:
4836 case Stmt::LabelStmtClass:
4837 return EvaluateStmt(Result, Info, cast<LabelStmt>(S)->getSubStmt(), Case);
4839 case Stmt::AttributedStmtClass:
4840 // As a general principle, C++11 attributes can be ignored without
4841 // any semantic impact.
4842 return EvaluateStmt(Result, Info, cast<AttributedStmt>(S)->getSubStmt(),
4845 case Stmt::CaseStmtClass:
4846 case Stmt::DefaultStmtClass:
4847 return EvaluateStmt(Result, Info, cast<SwitchCase>(S)->getSubStmt(), Case);
4848 case Stmt::CXXTryStmtClass:
4849 // Evaluate try blocks by evaluating all sub statements.
4850 return EvaluateStmt(Result, Info, cast<CXXTryStmt>(S)->getTryBlock(), Case);
4854 /// CheckTrivialDefaultConstructor - Check whether a constructor is a trivial
4855 /// default constructor. If so, we'll fold it whether or not it's marked as
4856 /// constexpr. If it is marked as constexpr, we will never implicitly define it,
4857 /// so we need special handling.
4858 /// Emits a C++11 "not constexpr" note (or the generic invalid-subexpression
4859 /// note pre-C++11) when folding a non-constexpr trivial default constructor
4860 /// outside value-initialization.
4858 static bool CheckTrivialDefaultConstructor(EvalInfo &Info, SourceLocation Loc,
4859 const CXXConstructorDecl *CD,
4860 bool IsValueInitialization) {
4861 if (!CD->isTrivial() || !CD->isDefaultConstructor())
4864 // Value-initialization does not call a trivial default constructor, so such a
4865 // call is a core constant expression whether or not the constructor is
4867 if (!CD->isConstexpr() && !IsValueInitialization) {
4868 if (Info.getLangOpts().CPlusPlus11) {
4869 // FIXME: If DiagDecl is an implicitly-declared special member function,
4870 // we should be much more explicit about why it's not constexpr.
4871 Info.CCEDiag(Loc, diag::note_constexpr_invalid_function, 1)
4872 << /*IsConstexpr*/0 << /*IsConstructor*/1 << CD;
4873 Info.Note(CD->getLocation(), diag::note_declared_at);
4875 Info.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
4881 /// CheckConstexprFunction - Check that a function can be called in a constant
4882 /// expression: the declaration is valid, a definition with a body exists and
4883 /// is constexpr, and (pre-C++20, per DR1872) the function is not virtual.
4883 static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
4884 const FunctionDecl *Declaration,
4885 const FunctionDecl *Definition,
4887 // Potential constant expressions can contain calls to declared, but not yet
4888 // defined, constexpr functions.
4889 if (Info.checkingPotentialConstantExpression() && !Definition &&
4890 Declaration->isConstexpr())
4893 // Bail out if the function declaration itself is invalid. We will
4894 // have produced a relevant diagnostic while parsing it, so just
4895 // note the problematic sub-expression.
4896 if (Declaration->isInvalidDecl()) {
4897 Info.FFDiag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
4901 // DR1872: An instantiated virtual constexpr function can't be called in a
4902 // constant expression (prior to C++20). We can still constant-fold such a
4904 if (!Info.Ctx.getLangOpts().CPlusPlus2a && isa<CXXMethodDecl>(Declaration) &&
4905 cast<CXXMethodDecl>(Declaration)->isVirtual())
4906 Info.CCEDiag(CallLoc, diag::note_constexpr_virtual_call);
4908 if (Definition && Definition->isInvalidDecl()) {
4909 Info.FFDiag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
4913 // Can we evaluate this function call?
4914 if (Definition && Definition->isConstexpr() && Body)
// Otherwise diagnose: in C++11+, point at the most useful declaration.
4917 if (Info.getLangOpts().CPlusPlus11) {
4918 const FunctionDecl *DiagDecl = Definition ? Definition : Declaration;
4920 // If this function is not constexpr because it is an inherited
4921 // non-constexpr constructor, diagnose that directly.
4922 auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl);
4923 if (CD && CD->isInheritingConstructor()) {
4924 auto *Inherited = CD->getInheritedConstructor().getConstructor();
4925 if (!Inherited->isConstexpr())
4926 DiagDecl = CD = Inherited;
4929 // FIXME: If DiagDecl is an implicitly-declared special member function
4930 // or an inheriting constructor, we should be much more explicit about why
4931 // it's not constexpr.
4932 if (CD && CD->isInheritingConstructor())
4933 Info.FFDiag(CallLoc, diag::note_constexpr_invalid_inhctor, 1)
4934 << CD->getInheritedConstructor().getConstructor()->getParent();
4936 Info.FFDiag(CallLoc, diag::note_constexpr_invalid_function, 1)
4937 << DiagDecl->isConstexpr() << (bool)CD << DiagDecl;
4938 Info.Note(DiagDecl->getLocation(), diag::note_declared_at);
4940 Info.FFDiag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
// No-op subobject handler: findSubobject() with this handler performs all
// of its access checks but accepts every subobject it reaches. Used by
// checkDynamicType() below purely for the side-effect of the checks.
4946 struct CheckDynamicTypeHandler {
4947 AccessKinds AccessKind;
4948 typedef bool result_type;
4949 bool failed() { return false; }
4950 bool found(APValue &Subobj, QualType SubobjType) { return true; }
4951 bool found(APSInt &Value, QualType SubobjType) { return true; }
4952 bool found(APFloat &Value, QualType SubobjType) { return true; }
4954 } // end anonymous namespace
4956 /// Check that we can access the notional vptr of an object / determine its
4957 /// dynamic type. Fails on invalid designators, one-past-the-end lvalues,
4958 /// unsized-array lvalues, and (for polymorphic operations) objects whose
4959 /// value is not readable in this constant evaluation.
4958 static bool checkDynamicType(EvalInfo &Info, const Expr *E, const LValue &This,
4959 AccessKinds AK, bool Polymorphic) {
4960 if (This.Designator.Invalid)
4963 CompleteObject Obj = findCompleteObject(Info, E, AK, This, QualType());
4969 // The object is not usable in constant expressions, so we can't inspect
4970 // its value to see if it's in-lifetime or what the active union members
4971 // are. We can still check for a one-past-the-end lvalue.
4972 if (This.Designator.isOnePastTheEnd() ||
4973 This.Designator.isMostDerivedAnUnsizedArray()) {
4974 Info.FFDiag(E, This.Designator.isOnePastTheEnd()
4975 ? diag::note_constexpr_access_past_end
4976 : diag::note_constexpr_access_unsized_array)
4979 } else if (Polymorphic) {
4980 // Conservatively refuse to perform a polymorphic operation if we would
4981 // not be able to read a notional 'vptr' value.
4984 QualType StarThisType =
4985 Info.Ctx.getLValueReferenceType(This.Designator.getType(Info.Ctx));
4986 Info.FFDiag(E, diag::note_constexpr_polymorphic_unknown_dynamic_type)
4987 << AK << Val.getAsString(Info.Ctx, StarThisType);
// Run findSubobject with a no-op handler purely for its access checks.
4993 CheckDynamicTypeHandler Handler{AK};
4994 return Obj && findSubobject(Info, E, Obj, This.Designator, Handler);
4997 /// Check that the pointee of the 'this' pointer in a member function call is
4998 /// either within its lifetime or in its period of construction or destruction.
4999 /// Destructor calls are checked as AK_Destroy; everything else as a
5000 /// non-polymorphic AK_MemberCall.
5000 checkNonVirtualMemberCallThisPointer(EvalInfo &Info, const Expr *E,
5002 const CXXMethodDecl *NamedMember) {
5003 return checkDynamicType(
5005 isa<CXXDestructorDecl>(NamedMember) ? AK_Destroy : AK_MemberCall, false);
5008 struct DynamicType {
5009 /// The dynamic class type of the object.
5010 const CXXRecordDecl *Type;
5011 /// The corresponding path length in the lvalue.
5012 unsigned PathLength;
// Return the class type at the given depth of the designator's
// derived-to-base path: the most-derived class itself when PathLength is
// the most-derived path length, otherwise the base-class path entry.
5015 static const CXXRecordDecl *getBaseClassType(SubobjectDesignator &Designator,
5016 unsigned PathLength) {
5017 assert(PathLength >= Designator.MostDerivedPathLength && PathLength <=
5018 Designator.Entries.size() && "invalid path length");
5019 return (PathLength == Designator.MostDerivedPathLength)
5020 ? Designator.MostDerivedType->getAsCXXRecordDecl()
5021 : getAsBaseClass(Designator.Entries[PathLength - 1]);
5024 /// Determine the dynamic type of an object.
5025 /// Walks the lvalue's derived-to-base path outward from the most-derived
5026 /// object, consulting the evaluator's in-flight constructor/destructor
5027 /// records: the dynamic type is the innermost class whose bases are fully
5028 /// constructed and not yet being destroyed (CWG 1517 semantics otherwise).
5025 static Optional<DynamicType> ComputeDynamicType(EvalInfo &Info, const Expr *E,
5026 LValue &This, AccessKinds AK) {
5027 // If we don't have an lvalue denoting an object of class type, there is no
5028 // meaningful dynamic type. (We consider objects of non-class type to have no
5030 if (!checkDynamicType(Info, E, This, AK, true))
5033 // Refuse to compute a dynamic type in the presence of virtual bases. This
5034 // shouldn't happen other than in constant-folding situations, since literal
5035 // types can't have virtual bases.
5037 // Note that consumers of DynamicType assume that the type has no virtual
5038 // bases, and will need modifications if this restriction is relaxed.
5039 const CXXRecordDecl *Class =
5040 This.Designator.MostDerivedType->getAsCXXRecordDecl();
5041 if (!Class || Class->getNumVBases()) {
5046 // FIXME: For very deep class hierarchies, it might be beneficial to use a
5047 // binary search here instead. But the overwhelmingly common case is that
5048 // we're not in the middle of a constructor, so it probably doesn't matter
5050 ArrayRef<APValue::LValuePathEntry> Path = This.Designator.Entries;
5051 for (unsigned PathLength = This.Designator.MostDerivedPathLength;
5052 PathLength <= Path.size(); ++PathLength) {
5053 switch (Info.isEvaluatingCtorDtor(This.getLValueBase(),
5054 Path.slice(0, PathLength))) {
5055 case ConstructionPhase::Bases:
5056 case ConstructionPhase::DestroyingBases:
5057 // We're constructing or destroying a base class. This is not the dynamic
5061 case ConstructionPhase::None:
5062 case ConstructionPhase::AfterBases:
5063 case ConstructionPhase::Destroying:
5064 // We've finished constructing the base classes and not yet started
5065 // destroying them again, so this is the dynamic type.
5066 return DynamicType{getBaseClassType(This.Designator, PathLength),
5071 // CWG issue 1517: we're constructing a base class of the object described by
5072 // 'This', so that object has not yet begun its period of construction and
5073 // any polymorphic operation on it results in undefined behavior.
5078 /// Perform virtual dispatch.
// Resolves the virtual callee 'Found' against the dynamic type of 'This',
// returning the final overrider (nullptr paths are on lines elided from this
// listing). On success, 'This' is adjusted to the overrider's class, and
// CovariantAdjustmentPath is filled with the chain of return types to apply
// if the overrider's return type differs covariantly from the static one.
5079 static const CXXMethodDecl *HandleVirtualDispatch(
5080 EvalInfo &Info, const Expr *E, LValue &This, const CXXMethodDecl *Found,
5081 llvm::SmallVectorImpl<QualType> &CovariantAdjustmentPath) {
5082 Optional<DynamicType> DynType = ComputeDynamicType(
5084 isa<CXXDestructorDecl>(Found) ? AK_Destroy : AK_MemberCall);
5088 // Find the final overrider. It must be declared in one of the classes on the
5089 // path from the dynamic type to the static type.
5090 // FIXME: If we ever allow literal types to have virtual base classes, that
5092 const CXXMethodDecl *Callee = Found;
5093 unsigned PathLength = DynType->PathLength;
// Walk from the dynamic type towards the static type; the first class that
// declares a corresponding method provides the final overrider.
5094 for (/**/; PathLength <= This.Designator.Entries.size(); ++PathLength) {
5095 const CXXRecordDecl *Class = getBaseClassType(This.Designator, PathLength);
5096 const CXXMethodDecl *Overrider =
5097 Found->getCorrespondingMethodDeclaredInClass(Class, false);
5104 // C++2a [class.abstract]p6:
5105 // the effect of making a virtual call to a pure virtual function [...] is
5107 if (Callee->isPure()) {
5108 Info.FFDiag(E, diag::note_constexpr_pure_virtual_call, 1) << Callee;
5109 Info.Note(Callee->getLocation(), diag::note_declared_at);
5113 // If necessary, walk the rest of the path to determine the sequence of
5114 // covariant adjustment steps to apply.
5115 if (!Info.Ctx.hasSameUnqualifiedType(Callee->getReturnType(),
5116 Found->getReturnType())) {
// Record each distinct return type between the overrider and the statically
// named function; the returned value will be upcast through these in order.
5117 CovariantAdjustmentPath.push_back(Callee->getReturnType());
5118 for (unsigned CovariantPathLength = PathLength + 1;
5119 CovariantPathLength != This.Designator.Entries.size();
5120 ++CovariantPathLength) {
5121 const CXXRecordDecl *NextClass =
5122 getBaseClassType(This.Designator, CovariantPathLength);
5123 const CXXMethodDecl *Next =
5124 Found->getCorrespondingMethodDeclaredInClass(NextClass, false);
5125 if (Next && !Info.Ctx.hasSameUnqualifiedType(
5126 Next->getReturnType(), CovariantAdjustmentPath.back()))
5127 CovariantAdjustmentPath.push_back(Next->getReturnType());
5129 if (!Info.Ctx.hasSameUnqualifiedType(Found->getReturnType(),
5130 CovariantAdjustmentPath.back()))
5131 CovariantAdjustmentPath.push_back(Found->getReturnType());
5134 // Perform 'this' adjustment.
5135 if (!CastToDerivedClass(Info, E, This, Callee->getParent(), PathLength))
5141 /// Perform the adjustment from a value returned by a virtual function to
5142 /// a value of the statically expected type, which may be a pointer or
5143 /// reference to a base class of the returned type.
// 'Path' is the CovariantAdjustmentPath built by HandleVirtualDispatch; each
// element is a pointer/reference type, and the lvalue is upcast step by step
// from Path[0]'s pointee class to the last element's pointee class.
5144 static bool HandleCovariantReturnAdjustment(EvalInfo &Info, const Expr *E,
5146 ArrayRef<QualType> Path) {
// A null pointer needs no base-class adjustment (the early return is on an
// elided line).
5147 assert(Result.isLValue() &&
5148 "unexpected kind of APValue for covariant return");
5149 if (Result.isNullPointer())
5153 LVal.setFrom(Info.Ctx, Result);
5155 const CXXRecordDecl *OldClass = Path[0]->getPointeeCXXRecordDecl();
5156 for (unsigned I = 1; I != Path.size(); ++I) {
5157 const CXXRecordDecl *NewClass = Path[I]->getPointeeCXXRecordDecl();
5158 assert(OldClass && NewClass && "unexpected kind of covariant return");
// Skip identity steps; otherwise perform one derived-to-base hop.
5159 if (OldClass != NewClass &&
5160 !CastToBaseClass(Info, E, LVal, OldClass, NewClass))
5162 OldClass = NewClass;
5165 LVal.moveInto(Result);
5169 /// Determine whether \p Base, which is known to be a direct base class of
5170 /// \p Derived, is a public base class.
5171 static bool isBaseClassPublic(const CXXRecordDecl *Derived,
5172 const CXXRecordDecl *Base) {
// Linear scan of the direct bases; declaresSameEntity compares canonical
// declarations, so any redeclaration of Base matches.
5173 for (const CXXBaseSpecifier &BaseSpec : Derived->bases()) {
5174 auto *BaseClass = BaseSpec.getType()->getAsCXXRecordDecl();
5175 if (BaseClass && declaresSameEntity(BaseClass, Base))
5176 return BaseSpec.getAccessSpecifier() == AS_public;
// The precondition (Base is a direct base) guarantees the loop returns.
5178 llvm_unreachable("Base is not a direct base of Derived");
5181 /// Apply the given dynamic cast operation on the provided lvalue.
5183 /// This implements the hard case of dynamic_cast, requiring a "runtime check"
5184 /// to find a suitable target subobject.
5185 static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E,
5187 // We can't do anything with a non-symbolic pointer value.
5188 SubobjectDesignator &D = Ptr.Designator;
5192 // C++ [expr.dynamic.cast]p6:
5193 // If v is a null pointer value, the result is a null pointer value.
5194 if (Ptr.isNullPointer() && !E->isGLValue())
5197 // For all the other cases, we need the pointer to point to an object within
5198 // its lifetime / period of construction / destruction, and we need to know
5199 // its dynamic type.
5200 Optional<DynamicType> DynType =
5201 ComputeDynamicType(Info, E, Ptr, AK_DynamicCast);
5205 // C++ [expr.dynamic.cast]p7:
5206 // If T is "pointer to cv void", then the result is a pointer to the most
5208 if (E->getType()->isVoidPointerType())
5209 return CastToDerivedClass(Info, E, Ptr, DynType->Type, DynType->PathLength);
5211 const CXXRecordDecl *C = E->getTypeAsWritten()->getPointeeCXXRecordDecl();
5212 assert(C && "dynamic_cast target is not void pointer nor class");
5213 CanQualType CQT = Info.Ctx.getCanonicalType(Info.Ctx.getRecordType(C));
// On failure: pointer casts yield null; reference casts are ill-formed in a
// constant expression (they would throw std::bad_cast) and get a diagnostic
// whose kind depends on why the cast failed. The DiagKind selection lines
// are partially elided from this listing.
5215 auto RuntimeCheckFailed = [&] (CXXBasePaths *Paths) {
5216 // C++ [expr.dynamic.cast]p9:
5217 if (!E->isGLValue()) {
5218 // The value of a failed cast to pointer type is the null pointer value
5219 // of the required result type.
5220 Ptr.setNull(Info.Ctx, E->getType());
5224 // A failed cast to reference type throws [...] std::bad_cast.
5226 if (!Paths && (declaresSameEntity(DynType->Type, C) ||
5227 DynType->Type->isDerivedFrom(C)))
5229 else if (!Paths || Paths->begin() == Paths->end())
5231 else if (Paths->isAmbiguous(CQT))
5234 assert(Paths->front().Access != AS_public && "why did the cast fail?");
5237 Info.FFDiag(E, diag::note_constexpr_dynamic_cast_to_reference_failed)
5238 << DiagKind << Ptr.Designator.getType(Info.Ctx)
5239 << Info.Ctx.getRecordType(DynType->Type)
5240 << E->getType().getUnqualifiedType();
5244 // Runtime check, phase 1:
5245 // Walk from the base subobject towards the derived object looking for the
5247 for (int PathLength = Ptr.Designator.Entries.size();
5248 PathLength >= (int)DynType->PathLength; --PathLength) {
5249 const CXXRecordDecl *Class = getBaseClassType(Ptr.Designator, PathLength);
5250 if (declaresSameEntity(Class, C))
5251 return CastToDerivedClass(Info, E, Ptr, Class, PathLength);
5252 // We can only walk across public inheritance edges.
5253 if (PathLength > (int)DynType->PathLength &&
5254 !isBaseClassPublic(getBaseClassType(Ptr.Designator, PathLength - 1),
5256 return RuntimeCheckFailed(nullptr);
5259 // Runtime check, phase 2:
5260 // Search the dynamic type for an unambiguous public base of type C.
5261 CXXBasePaths Paths(/*FindAmbiguities=*/true,
5262 /*RecordPaths=*/true, /*DetectVirtual=*/false);
5263 if (DynType->Type->isDerivedFrom(C, Paths) && !Paths.isAmbiguous(CQT) &&
5264 Paths.front().Access == AS_public) {
5265 // Downcast to the dynamic type...
5266 if (!CastToDerivedClass(Info, E, Ptr, DynType->Type, DynType->PathLength))
5268 // ... then upcast to the chosen base class subobject.
5269 for (CXXBasePathElement &Elem : Paths.front())
5270 if (!HandleLValueBase(Info, E, Ptr, Elem.Class, Elem.Base))
5275 // Otherwise, the runtime check fails.
5276 return RuntimeCheckFailed(&Paths);
// findSubobject handler that activates 'Field' as a union's active member,
// giving it a default-initialized value, without evaluating any initializer.
// Used by HandleUnionActiveMemberChange for C++20 [class.union]p5.
5280 struct StartLifetimeOfUnionMemberHandler {
5281 const FieldDecl *Field;
5283 static const AccessKinds AccessKind = AK_Assign;
5285 typedef bool result_type;
5286 bool failed() { return false; }
5287 bool found(APValue &Subobj, QualType SubobjType) {
5288 // We are supposed to perform no initialization but begin the lifetime of
5289 // the object. We interpret that as meaning to do what default
5290 // initialization of the object would do if all constructors involved were
5292 // * All base, non-variant member, and array element subobjects' lifetimes
5294 // * No variant members' lifetimes begin
5295 // * All scalar subobjects whose lifetimes begin have indeterminate values
5296 assert(SubobjType->isUnionType());
// Only switch the active member if it differs or has no value yet; if the
// right member is already alive with a value, leave it untouched.
5297 if (!declaresSameEntity(Subobj.getUnionField(), Field) ||
5298 !Subobj.getUnionValue().hasValue())
5299 Subobj.setUnion(Field, getDefaultInitValue(Field->getType()));
// Scalar (non-union) subobjects should never reach this handler.
5302 bool found(APSInt &Value, QualType SubobjType) {
5303 llvm_unreachable("wrong value kind for union object");
5305 bool found(APFloat &Value, QualType SubobjType) {
5306 llvm_unreachable("wrong value kind for union object");
5309 } // end anonymous namespace
// Out-of-line definition so the in-class static const member has a unique
// definition if it is odr-used (required before C++17 inline variables).
5311 const AccessKinds StartLifetimeOfUnionMemberHandler::AccessKind;
5313 /// Handle a builtin simple-assignment or a call to a trivial assignment
5314 /// operator whose left-hand side might involve a union member access. If it
5315 /// does, implicitly start the lifetime of any accessed union elements per
5316 /// C++20 [class.union]5.
5317 static bool HandleUnionActiveMemberChange(EvalInfo &Info, const Expr *LHSExpr,
5318 const LValue &LHS) {
5319 if (LHS.InvalidBase || LHS.Designator.Invalid)
// Each recorded pair is (designator path length of the union object,
// the union member being activated), collected outside-in as we strip the
// LHS expression; they are replayed innermost-last below.
5322 llvm::SmallVector<std::pair<unsigned, const FieldDecl*>, 4> UnionPathLengths;
5323 // C++ [class.union]p5:
5324 // define the set S(E) of subexpressions of E as follows:
// Walk the LHS expression from the full access down to its base, keeping
// PathLength in sync with the designator entry being stripped.
5325 unsigned PathLength = LHS.Designator.Entries.size();
5326 for (const Expr *E = LHSExpr; E != nullptr;) {
5327 // -- If E is of the form A.B, S(E) contains the elements of S(A)...
5328 if (auto *ME = dyn_cast<MemberExpr>(E)) {
5329 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
5330 // Note that we can't implicitly start the lifetime of a reference,
5331 // so we don't need to proceed any further if we reach one.
5332 if (!FD || FD->getType()->isReferenceType())
5335 // ... and also contains A.B if B names a union member ...
5336 if (FD->getParent()->isUnion()) {
5337 // ... of a non-class, non-array type, or of a class type with a
5338 // trivial default constructor that is not deleted, or an array of
5341 FD->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
5342 if (!RD || RD->hasTrivialDefaultConstructor())
5343 UnionPathLengths.push_back({PathLength - 1, FD});
5348 assert(declaresSameEntity(FD,
5349 LHS.Designator.Entries[PathLength]
5350 .getAsBaseOrMember().getPointer()));
5352 // -- If E is of the form A[B] and is interpreted as a built-in array
5353 // subscripting operator, S(E) is [S(the array operand, if any)].
5354 } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(E)) {
5355 // Step over an ArrayToPointerDecay implicit cast.
5356 auto *Base = ASE->getBase()->IgnoreImplicit();
5357 if (!Base->getType()->isArrayType())
5363 } else if (auto *ICE = dyn_cast<ImplicitCastExpr>(E)) {
5364 // Step over a derived-to-base conversion.
5365 E = ICE->getSubExpr();
5366 if (ICE->getCastKind() == CK_NoOp)
5368 if (ICE->getCastKind() != CK_DerivedToBase &&
5369 ICE->getCastKind() != CK_UncheckedDerivedToBase)
5371 // Walk path backwards as we walk up from the base to the derived class.
5372 for (const CXXBaseSpecifier *Elt : llvm::reverse(ICE->path())) {
5375 assert(declaresSameEntity(Elt->getType()->getAsCXXRecordDecl(),
5376 LHS.Designator.Entries[PathLength]
5377 .getAsBaseOrMember().getPointer()));
5380 // -- Otherwise, S(E) is empty.
5386 // Common case: no unions' lifetimes are started.
5387 if (UnionPathLengths.empty())
5390 // if modification of X [would access an inactive union member], an object
5391 // of the type of X is implicitly created
5392 CompleteObject Obj =
5393 findCompleteObject(Info, LHSExpr, AK_Assign, LHS, LHSExpr->getType());
// Activate each union member outermost-first, so enclosing unions are alive
// before their nested members are started.
5396 for (std::pair<unsigned, const FieldDecl *> LengthAndField :
5397 llvm::reverse(UnionPathLengths)) {
5398 // Form a designator for the union object.
5399 SubobjectDesignator D = LHS.Designator;
5400 D.truncate(Info.Ctx, LHS.Base, LengthAndField.first);
5402 StartLifetimeOfUnionMemberHandler StartLifetime{LengthAndField.second};
5403 if (!findSubobject(Info, LHSExpr, Obj, D, StartLifetime))
5410 /// Determine if a class has any fields that might need to be copied by a
5411 /// trivial copy or move operation.
// Unnamed bitfields are pure padding and are skipped; bases are searched
// recursively. Used to decide whether a defaulted trivial copy/move actually
// reads the source object.
5412 static bool hasFields(const CXXRecordDecl *RD) {
5413 if (!RD || RD->isEmpty())
5415 for (auto *FD : RD->fields()) {
5416 if (FD->isUnnamedBitfield())
5420 for (auto &Base : RD->bases())
5421 if (hasFields(Base.getType()->getAsCXXRecordDecl()))
5427 typedef SmallVector<APValue, 8> ArgVector;
5430 /// EvaluateArgs - Evaluate the arguments to a function call.
// Evaluates every argument into ArgValues (continuing past failures when
// we're only checking for a potential constant expression), and diagnoses
// null pointers passed for parameters marked [[gnu::nonnull]].
5431 static bool EvaluateArgs(ArrayRef<const Expr *> Args, ArgVector &ArgValues,
5432 EvalInfo &Info, const FunctionDecl *Callee) {
5433 bool Success = true;
// Bit N set => argument N must not be a null pointer (from NonNullAttr).
5434 llvm::SmallBitVector ForbiddenNullArgs;
5435 if (Callee->hasAttr<NonNullAttr>()) {
5436 ForbiddenNullArgs.resize(Args.size());
5437 for (const auto *Attr : Callee->specific_attrs<NonNullAttr>()) {
// An index-less nonnull attribute applies to every pointer argument.
5438 if (!Attr->args_size()) {
5439 ForbiddenNullArgs.set();
5442 for (auto Idx : Attr->args()) {
5443 unsigned ASTIdx = Idx.getASTIndex();
5444 if (ASTIdx >= Args.size())
5446 ForbiddenNullArgs[ASTIdx] = 1;
5450 for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
5451 if (!Evaluate(ArgValues[Idx], Info, Args[Idx])) {
5452 // If we're checking for a potential constant expression, evaluate all
5453 // initializers even if some of them fail.
5454 if (!Info.noteFailure())
5457 } else if (!ForbiddenNullArgs.empty() &&
5458 ForbiddenNullArgs[Idx] &&
5459 ArgValues[Idx].isLValue() &&
5460 ArgValues[Idx].isNullPointer()) {
// Passing null to a nonnull parameter is UB, hence not a constant
// expression; CCEDiag allows folding to continue in non-strict modes.
5461 Info.CCEDiag(Args[Idx], diag::note_non_null_attribute_failed);
5462 if (!Info.noteFailure())
5470 /// Evaluate a function call.
// Evaluates the arguments, pushes a call frame, then either short-circuits a
// defaulted trivial copy/move assignment as an APValue copy or evaluates the
// body statement by statement.
5471 static bool HandleFunctionCall(SourceLocation CallLoc,
5472 const FunctionDecl *Callee, const LValue *This,
5473 ArrayRef<const Expr*> Args, const Stmt *Body,
5474 EvalInfo &Info, APValue &Result,
5475 const LValue *ResultSlot) {
5476 ArgVector ArgValues(Args.size());
5477 if (!EvaluateArgs(Args, ArgValues, Info, Callee))
5480 if (!Info.CheckCallLimit(CallLoc))
5483 CallStackFrame Frame(Info, CallLoc, Callee, This, ArgValues.data());
5485 // For a trivial copy or move assignment, perform an APValue copy. This is
5486 // essential for unions, where the operations performed by the assignment
5487 // operator cannot be represented as statements.
5489 // Skip this for non-union classes with no fields; in that case, the defaulted
5490 // copy/move does not actually read the object.
5491 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Callee);
5492 if (MD && MD->isDefaulted() &&
5493 (MD->getParent()->isUnion() ||
5494 (MD->isTrivial() && hasFields(MD->getParent())))) {
5496 (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()));
// ArgValues[0] is the RHS of the assignment (the single parameter).
5498 RHS.setFrom(Info.Ctx, ArgValues[0]);
5500 if (!handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(), RHS,
5501 RHSValue, MD->getParent()->isUnion()))
// C++20: assigning through a trivial operator may implicitly change the
// active member of any union along the LHS path.
5503 if (Info.getLangOpts().CPlusPlus2a && MD->isTrivial() &&
5504 !HandleUnionActiveMemberChange(Info, Args[0], *This))
5506 if (!handleAssignment(Info, Args[0], *This, MD->getThisType(),
// A copy/move assignment operator returns *this.
5509 This->moveInto(Result);
5511 } else if (MD && isLambdaCallOperator(MD)) {
5512 // We're in a lambda; determine the lambda capture field maps unless we're
5513 // just constexpr checking a lambda's call operator. constexpr checking is
5514 // done before the captures have been added to the closure object (unless
5515 // we're inferring constexpr-ness), so we don't have access to them in this
5516 // case. But since we don't need the captures to constexpr check, we can
5517 // just ignore them.
5518 if (!Info.checkingPotentialConstantExpression())
5519 MD->getParent()->getCaptureFields(Frame.LambdaCaptureFields,
5520 Frame.LambdaThisCaptureField);
5523 StmtResult Ret = {Result, ResultSlot};
5524 EvalStmtResult ESR = EvaluateStmt(Ret, Info, Body);
5525 if (ESR == ESR_Succeeded) {
// Flowing off the end is fine for void (and constructors); for any other
// return type it means no 'return' was executed.
5526 if (Callee->getReturnType()->isVoidType())
5528 Info.FFDiag(Callee->getEndLoc(), diag::note_constexpr_no_return);
5530 return ESR == ESR_Returned;
5533 /// Evaluate a constructor call.
// Core constructor evaluation: handles delegating constructors, defaulted
// trivial copy/move as an APValue copy, then evaluates each ctor-initializer
// in declaration order (default-initializing any skipped fields) and finally
// the constructor body.
5534 static bool HandleConstructorCall(const Expr *E, const LValue &This,
5536 const CXXConstructorDecl *Definition,
5537 EvalInfo &Info, APValue &Result) {
5538 SourceLocation CallLoc = E->getExprLoc();
5539 if (!Info.CheckCallLimit(CallLoc))
5542 const CXXRecordDecl *RD = Definition->getParent();
// Virtual bases are unsupported in constant evaluation (literal types can't
// have them); diagnose and bail out.
5543 if (RD->getNumVBases()) {
5544 Info.FFDiag(CallLoc, diag::note_constexpr_virtual_base) << RD;
// Registers 'This' as an object under construction so that dynamic-type
// queries (ComputeDynamicType) see the correct construction phase.
5548 EvalInfo::EvaluatingConstructorRAII EvalObj(
5550 ObjectUnderConstruction{This.getLValueBase(), This.Designator.Entries},
5552 CallStackFrame Frame(Info, CallLoc, Definition, &This, ArgValues);
5554 // FIXME: Creating an APValue just to hold a nonexistent return value is
5557 StmtResult Ret = {RetVal, nullptr};
5559 // If it's a delegating constructor, delegate.
5560 if (Definition->isDelegatingConstructor()) {
5561 CXXConstructorDecl::init_const_iterator I = Definition->init_begin();
5563 FullExpressionRAII InitScope(Info);
5564 if (!EvaluateInPlace(Result, Info, This, (*I)->getInit()) ||
5565 !InitScope.destroy())
5568 return EvaluateStmt(Ret, Info, Definition->getBody()) != ESR_Failed;
5571 // For a trivial copy or move constructor, perform an APValue copy. This is
5572 // essential for unions (or classes with anonymous union members), where the
5573 // operations performed by the constructor cannot be represented by
5574 // ctor-initializers.
5576 // Skip this for empty non-union classes; we should not perform an
5577 // lvalue-to-rvalue conversion on them because their copy constructor does not
5578 // actually read them.
5579 if (Definition->isDefaulted() && Definition->isCopyOrMoveConstructor() &&
5580 (Definition->getParent()->isUnion() ||
5581 (Definition->isTrivial() && hasFields(Definition->getParent())))) {
5583 RHS.setFrom(Info.Ctx, ArgValues[0]);
5584 return handleLValueToRValueConversion(
5585 Info, E, Definition->getParamDecl(0)->getType().getNonReferenceType(),
5586 RHS, Result, Definition->getParent()->isUnion());
5589 // Reserve space for the struct members.
5590 if (!RD->isUnion() && !Result.hasValue())
5591 Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
5592 std::distance(RD->field_begin(), RD->field_end()));
5594 if (RD->isInvalidDecl()) return false;
5595 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
5597 // A scope for temporaries lifetime-extended by reference members.
5598 BlockScopeRAII LifetimeExtendedScope(Info);
5600 bool Success = true;
5601 unsigned BasesSeen = 0;
5603 CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin();
5605 CXXRecordDecl::field_iterator FieldIt = RD->field_begin();
// Advance FieldIt to FD, default-initializing every field passed over that
// had no explicit mem-initializer.
5606 auto SkipToField = [&](FieldDecl *FD, bool Indirect) {
5607 // We might be initializing the same field again if this is an indirect
5608 // field initialization.
5609 if (FieldIt == RD->field_end() ||
5610 FieldIt->getFieldIndex() > FD->getFieldIndex()) {
5611 assert(Indirect && "fields out of order?");
5615 // Default-initialize any fields with no explicit initializer.
5616 for (; !declaresSameEntity(*FieldIt, FD); ++FieldIt) {
5617 assert(FieldIt != RD->field_end() && "missing field?");
5618 if (!FieldIt->isUnnamedBitfield())
5619 Result.getStructField(FieldIt->getFieldIndex()) =
5620 getDefaultInitValue(FieldIt->getType());
5624 for (const auto *I : Definition->inits()) {
5625 LValue Subobject = This;
5626 LValue SubobjectParent = This;
5627 APValue *Value = &Result;
5629 // Determine the subobject to initialize.
5630 FieldDecl *FD = nullptr;
5631 if (I->isBaseInitializer()) {
5632 QualType BaseType(I->getBaseClass(), 0);
5634 // Non-virtual base classes are initialized in the order in the class
5635 // definition. We have already checked for virtual base classes.
5636 assert(!BaseIt->isVirtual() && "virtual base for literal type");
5637 assert(Info.Ctx.hasSameType(BaseIt->getType(), BaseType) &&
5638 "base class initializers not in expected order");
5641 if (!HandleLValueDirectBase(Info, I->getInit(), Subobject, RD,
5642 BaseType->getAsCXXRecordDecl(), &Layout))
5644 Value = &Result.getStructBase(BasesSeen++);
5645 } else if ((FD = I->getMember())) {
5646 if (!HandleLValueMember(Info, I->getInit(), Subobject, FD, &Layout))
5648 if (RD->isUnion()) {
// Initializing a union member makes it the active member.
5649 Result = APValue(FD);
5650 Value = &Result.getUnionValue();
5652 SkipToField(FD, false);
5653 Value = &Result.getStructField(FD->getFieldIndex());
5655 } else if (IndirectFieldDecl *IFD = I->getIndirectMember()) {
5656 // Walk the indirect field decl's chain to find the object to initialize,
5657 // and make sure we've initialized every step along it.
5658 auto IndirectFieldChain = IFD->chain();
5659 for (auto *C : IndirectFieldChain) {
5660 FD = cast<FieldDecl>(C);
5661 CXXRecordDecl *CD = cast<CXXRecordDecl>(FD->getParent());
5662 // Switch the union field if it differs. This happens if we had
5663 // preceding zero-initialization, and we're now initializing a union
5664 // subobject other than the first.
5665 // FIXME: In this case, the values of the other subobjects are
5666 // specified, since zero-initialization sets all padding bits to zero.
5667 if (!Value->hasValue() ||
5668 (Value->isUnion() && Value->getUnionField() != FD)) {
5670 *Value = APValue(FD);
5672 // FIXME: This immediately starts the lifetime of all members of an
5673 // anonymous struct. It would be preferable to strictly start member
5674 // lifetime in initialization order.
5675 *Value = getDefaultInitValue(Info.Ctx.getRecordType(CD));
5677 // Store Subobject as its parent before updating it for the last element
5679 if (C == IndirectFieldChain.back())
5680 SubobjectParent = Subobject;
5681 if (!HandleLValueMember(Info, I->getInit(), Subobject, FD))
5684 Value = &Value->getUnionValue();
5686 if (C == IndirectFieldChain.front() && !RD->isUnion())
5687 SkipToField(FD, true);
5688 Value = &Value->getStructField(FD->getFieldIndex());
5692 llvm_unreachable("unknown base initializer kind");
5695 // Need to override This for implicit field initializers as in this case
5696 // This refers to innermost anonymous struct/union containing initializer,
5697 // not to currently constructed class.
5698 const Expr *Init = I->getInit();
5699 ThisOverrideRAII ThisOverride(*Info.CurrentCall, &SubobjectParent,
5700 isa<CXXDefaultInitExpr>(Init));
5701 FullExpressionRAII InitScope(Info);
5702 if (!EvaluateInPlace(*Value, Info, Subobject, Init) ||
5703 (FD && FD->isBitField() &&
5704 !truncateBitfieldValue(Info, Init, *Value, FD))) {
5705 // If we're checking for a potential constant expression, evaluate all
5706 // initializers even if some of them fail.
5707 if (!Info.noteFailure())
5712 // This is the point at which the dynamic type of the object becomes this
5714 if (I->isBaseInitializer() && BasesSeen == RD->getNumBases())
5715 EvalObj.finishedConstructingBases();
5718 // Default-initialize any remaining fields.
5719 if (!RD->isUnion()) {
5720 for (; FieldIt != RD->field_end(); ++FieldIt) {
5721 if (!FieldIt->isUnnamedBitfield())
5722 Result.getStructField(FieldIt->getFieldIndex()) =
5723 getDefaultInitValue(FieldIt->getType());
// Finally run the constructor body; destroying the lifetime-extended scope
// must also succeed for the whole call to succeed.
5728 EvaluateStmt(Ret, Info, Definition->getBody()) != ESR_Failed &&
5729 LifetimeExtendedScope.destroy();
// Convenience overload: evaluate the argument expressions first, then forward
// to the APValue-based HandleConstructorCall above.
5732 static bool HandleConstructorCall(const Expr *E, const LValue &This,
5733 ArrayRef<const Expr*> Args,
5734 const CXXConstructorDecl *Definition,
5735 EvalInfo &Info, APValue &Result) {
5736 ArgVector ArgValues(Args.size());
5737 if (!EvaluateArgs(Args, ArgValues, Info, Definition))
5740 return HandleConstructorCall(E, This, ArgValues.data(), Definition,
// Destroy the object at 'This' (whose current value is 'Value', of type 'T'):
// recursively destroys array elements right-to-left, then runs the class
// destructor body (if non-trivial), then members and bases in reverse order,
// finally ending the object's lifetime.
5744 static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
5745 const LValue &This, APValue &Value,
5747 // Objects can only be destroyed while they're within their lifetimes.
5748 // FIXME: We have no representation for whether an object of type nullptr_t
5749 // is in its lifetime; it usually doesn't matter. Perhaps we should model it
5750 // as indeterminate instead?
5751 if (Value.isAbsent() && !T->isNullPtrType()) {
5753 This.moveInto(Printable);
5754 Info.FFDiag(CallLoc, diag::note_constexpr_destroy_out_of_lifetime)
5755 << Printable.getAsString(Info.Ctx, Info.Ctx.getLValueReferenceType(T));
5759 // Invent an expression for location purposes.
5760 // FIXME: We shouldn't need to do this.
5761 OpaqueValueExpr LocE(CallLoc, Info.Ctx.IntTy, VK_RValue);
5763 // For arrays, destroy elements right-to-left.
5764 if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T)) {
5765 uint64_t Size = CAT->getSize().getZExtValue();
5766 QualType ElemT = CAT->getElementType();
// Start one-past-the-end and step backwards one element per iteration.
5768 LValue ElemLV = This;
5769 ElemLV.addArray(Info, &LocE, CAT);
5770 if (!HandleLValueArrayAdjustment(Info, &LocE, ElemLV, ElemT, Size))
5773 // Ensure that we have actual array elements available to destroy; the
5774 // destructors might mutate the value, so we can't run them on the array
5776 if (Size && Size > Value.getArrayInitializedElts())
5777 expandArray(Value, Value.getArraySize() - 1);
5779 for (; Size != 0; --Size) {
5780 APValue &Elem = Value.getArrayInitializedElt(Size - 1);
5781 if (!HandleLValueArrayAdjustment(Info, &LocE, ElemLV, ElemT, -1) ||
5782 !HandleDestructionImpl(Info, CallLoc, ElemLV, Elem, ElemT))
5786 // End the lifetime of this array now.
5791 const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
// Non-class, non-array types with a destructed type (e.g. ObjC lifetime
// qualifiers) are not supported here; plain scalars fall through to the
// trivial "end lifetime" handling on elided lines.
5793 if (T.isDestructedType()) {
5794 Info.FFDiag(CallLoc, diag::note_constexpr_unsupported_destruction) << T;
5802 if (RD->getNumVBases()) {
5803 Info.FFDiag(CallLoc, diag::note_constexpr_virtual_base) << RD;
5807 const CXXDestructorDecl *DD = RD->getDestructor();
5808 if (!DD && !RD->hasTrivialDestructor()) {
5809 Info.FFDiag(CallLoc);
5813 if (!DD || DD->isTrivial() ||
5814 (RD->isAnonymousStructOrUnion() && RD->isUnion())) {
5815 // A trivial destructor just ends the lifetime of the object. Check for
5816 // this case before checking for a body, because we might not bother
5817 // building a body for a trivial destructor. Note that it doesn't matter
5818 // whether the destructor is constexpr in this case; all trivial
5819 // destructors are constexpr.
5821 // If an anonymous union would be destroyed, some enclosing destructor must
5822 // have been explicitly defined, and the anonymous union destruction should
5828 if (!Info.CheckCallLimit(CallLoc))
5831 const FunctionDecl *Definition = nullptr;
5832 const Stmt *Body = DD->getBody(Definition);
5834 if (!CheckConstexprFunction(Info, CallLoc, DD, Definition, Body))
5837 CallStackFrame Frame(Info, CallLoc, Definition, &This, nullptr);
5839 // We're now in the period of destruction of this object.
5840 unsigned BasesLeft = RD->getNumBases();
5841 EvalInfo::EvaluatingDestructorRAII EvalObj(
5843 ObjectUnderConstruction{This.getLValueBase(), This.Designator.Entries});
5844 if (!EvalObj.DidInsert) {
5845 // C++2a [class.dtor]p19:
5846 // the behavior is undefined if the destructor is invoked for an object
5847 // whose lifetime has ended
5848 // (Note that formally the lifetime ends when the period of destruction
5849 // begins, even though certain uses of the object remain valid until the
5850 // period of destruction ends.)
5851 Info.FFDiag(CallLoc, diag::note_constexpr_double_destroy);
5855 // FIXME: Creating an APValue just to hold a nonexistent return value is
5858 StmtResult Ret = {RetVal, nullptr};
5859 if (EvaluateStmt(Ret, Info, Definition->getBody()) == ESR_Failed)
5862 // A union destructor does not implicitly destroy its members.
5866 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
5868 // We don't have a good way to iterate fields in reverse, so collect all the
5869 // fields first and then walk them backwards.
5870 SmallVector<FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end());
5871 for (const FieldDecl *FD : llvm::reverse(Fields)) {
5872 if (FD->isUnnamedBitfield())
5875 LValue Subobject = This;
5876 if (!HandleLValueMember(Info, &LocE, Subobject, FD, &Layout))
5879 APValue *SubobjectValue = &Value.getStructField(FD->getFieldIndex());
5880 if (!HandleDestructionImpl(Info, CallLoc, Subobject, *SubobjectValue,
// After the members, the dynamic type reverts as bases are torn down.
5886 EvalObj.startedDestroyingBases();
5888 // Destroy base classes in reverse order.
5889 for (const CXXBaseSpecifier &Base : llvm::reverse(RD->bases())) {
5892 QualType BaseType = Base.getType();
5893 LValue Subobject = This;
5894 if (!HandleLValueDirectBase(Info, &LocE, Subobject, RD,
5895 BaseType->getAsCXXRecordDecl(), &Layout))
5898 APValue *SubobjectValue = &Value.getStructBase(BasesLeft);
5899 if (!HandleDestructionImpl(Info, CallLoc, Subobject, *SubobjectValue,
5903 assert(BasesLeft == 0 && "NumBases was wrong?");
5905 // The period of destruction ends now. The object is gone.
// findSubobject handler that runs HandleDestructionImpl on the located
// subobject. Destruction of a lone complex-number element is not supported.
5911 struct DestroyObjectHandler {
5915 const AccessKinds AccessKind;
5917 typedef bool result_type;
5918 bool failed() { return false; }
5919 bool found(APValue &Subobj, QualType SubobjType) {
5920 return HandleDestructionImpl(Info, E->getExprLoc(), This, Subobj,
5923 bool found(APSInt &Value, QualType SubobjType) {
5924 Info.FFDiag(E, diag::note_constexpr_destroy_complex_elem);
5927 bool found(APFloat &Value, QualType SubobjType) {
5928 Info.FFDiag(E, diag::note_constexpr_destroy_complex_elem);
5934 /// Perform a destructor or pseudo-destructor call on the given object, which
5935 /// might in general not be a complete object.
// Locates the complete object containing 'This' with AK_Destroy access, then
// dispatches to DestroyObjectHandler for the designated subobject.
5936 static bool HandleDestruction(EvalInfo &Info, const Expr *E,
5937 const LValue &This, QualType ThisType) {
5938 CompleteObject Obj = findCompleteObject(Info, E, AK_Destroy, This, ThisType);
5939 DestroyObjectHandler Handler = {Info, E, This, AK_Destroy};
5940 return Obj && findSubobject(Info, E, Obj, This.Designator, Handler);
5943 /// Destroy and end the lifetime of the given complete object.
5944 static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc,
5945 APValue::LValueBase LVBase, APValue &Value,
5947 // If we've had an unmodeled side-effect, we can't rely on mutable state
5948 // (such as the object we're about to destroy) being correct.
5949 if (Info.EvalStatus.HasSideEffects)
// LV is built from LVBase on lines elided from this listing.
5954 return HandleDestructionImpl(Info, Loc, LV, Value, T);
5957 /// Perform a call to 'operator new' or to `__builtin_operator_new'.
// Only valid inside std::allocator<T>::allocate (C++20 constexpr new): the
// requested byte count must be an exact multiple of sizeof(T). On success,
// creates a heap-allocated uninitialized array of T and points Result at its
// first element.
5958 static bool HandleOperatorNewCall(EvalInfo &Info, const CallExpr *E,
5960 if (Info.checkingPotentialConstantExpression() ||
5961 Info.SpeculativeEvaluationDepth)
5964 // This is permitted only within a call to std::allocator<T>::allocate.
5965 auto Caller = Info.getStdAllocatorCaller("allocate");
5967 Info.FFDiag(E->getExprLoc(), Info.getLangOpts().CPlusPlus2a
5968 ? diag::note_constexpr_new_untyped
5969 : diag::note_constexpr_new);
5973 QualType ElemType = Caller.ElemType;
5974 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
5975 Info.FFDiag(E->getExprLoc(),
5976 diag::note_constexpr_new_not_complete_object_type)
5977 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
5982 if (!EvaluateInteger(E->getArg(0), ByteSize, Info))
5984 bool IsNothrow = false;
5985 for (unsigned I = 1, N = E->getNumArgs(); I != N; ++I) {
5986 EvaluateIgnoredValue(Info, E->getArg(I));
// NOTE(review): this tests the call expression's own type (the result of
// operator new) rather than the placement argument's type; upstream clang
// uses E->getArg(I)->getType()->isNothrowT() here, so as written IsNothrow
// can never become true — verify against the intended revision.
5987 IsNothrow |= E->getType()->isNothrowT();
5991 if (!HandleSizeof(Info, E->getExprLoc(), ElemType, ElemSize))
// Byte count must split exactly into whole elements of ElemType.
5993 APInt Size, Remainder;
5994 APInt ElemSizeAP(ByteSize.getBitWidth(), ElemSize.getQuantity());
5995 APInt::udivrem(ByteSize, ElemSizeAP, Size, Remainder);
5996 if (Remainder != 0) {
5997 // This likely indicates a bug in the implementation of 'std::allocator'.
5998 Info.FFDiag(E->getExprLoc(), diag::note_constexpr_operator_new_bad_size)
5999 << ByteSize << APSInt(ElemSizeAP, true) << ElemType;
// Oversized requests: a nothrow form yields null (handling partially on
// elided lines); otherwise this is not a constant expression.
6003 if (ByteSize.getActiveBits() > ConstantArrayType::getMaxSizeBits(Info.Ctx)) {
6005 Result.setNull(Info.Ctx, E->getType());
6009 Info.FFDiag(E, diag::note_constexpr_new_too_large) << APSInt(Size, true);
6013 QualType AllocType = Info.Ctx.getConstantArrayType(ElemType, Size, nullptr,
6014 ArrayType::Normal, 0);
6015 APValue *Val = Info.createHeapAlloc(E, AllocType, Result);
// Start with zero initialized elements; they are filled in by construction.
6016 *Val = APValue(APValue::UninitArray(), 0, Size.getZExtValue());
6017 Result.addArray(Info, E, cast<ConstantArrayType>(AllocType));
// Returns true if T is a class type whose destructor is declared virtual.
6021 static bool hasVirtualDestructor(QualType T) {
6022 if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
6023 if (CXXDestructorDecl *DD = RD->getDestructor())
6024 return DD->isVirtual();
// If T is a class type with a virtual destructor, return the 'operator
// delete' selected for that destructor; otherwise return null.
6028 static const FunctionDecl *getVirtualOperatorDelete(QualType T) {
6029 if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
6030 if (CXXDestructorDecl *DD = RD->getDestructor())
6031 return DD->isVirtual() ? DD->getOperatorDelete() : nullptr;
6035 /// Check that the given object is a suitable pointer to a heap allocation that
6036 /// still exists and is of the right kind for the purpose of a deletion.
6038 /// On success, returns the heap allocation to deallocate. On failure, produces
6039 /// a diagnostic and returns None.
6040 static Optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E,
6041 const LValue &Pointer,
6042 DynAlloc::Kind DeallocKind) {
// Lazily render the pointer for use in diagnostics.
6043 auto PointerAsString = [&] {
6044 return Pointer.toString(Info.Ctx, Info.Ctx.VoidPtrTy);
// The pointer must refer to an allocation made during this evaluation.
6047 DynamicAllocLValue DA = Pointer.Base.dyn_cast<DynamicAllocLValue>();
6049 Info.FFDiag(E, diag::note_constexpr_delete_not_heap_alloc)
6050 << PointerAsString();
6052 NoteLValueLocation(Info, Pointer.Base);
// The allocation must still be live — a failed lookup means double-delete.
6056 Optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA);
6058 Info.FFDiag(E, diag::note_constexpr_double_delete);
// The deallocation form must match the allocation form.
6062 QualType AllocType = Pointer.Base.getDynamicAllocType();
6063 if (DeallocKind != (*Alloc)->getKind()) {
6064 Info.FFDiag(E, diag::note_constexpr_new_delete_mismatch)
6065 << DeallocKind << (*Alloc)->getKind() << AllocType;
6066 NoteLValueLocation(Info, Pointer.Base);
// The pointer must designate the complete allocated object, not a
// subobject: for 'new', the most-derived object (and not one-past-the-end);
// otherwise, array element 0 exactly.
6070 bool Subobject = false;
6071 if (DeallocKind == DynAlloc::New) {
6072 Subobject = Pointer.Designator.MostDerivedPathLength != 0 ||
6073 Pointer.Designator.isOnePastTheEnd();
6075 Subobject = Pointer.Designator.Entries.size() != 1 ||
6076 Pointer.Designator.Entries[0].getAsArrayIndex() != 0;
6079 Info.FFDiag(E, diag::note_constexpr_delete_subobject)
6080 << PointerAsString() << Pointer.Designator.isOnePastTheEnd();
6087 // Perform a call to 'operator delete' or '__builtin_operator_delete'.
6088 bool HandleOperatorDeleteCall(EvalInfo &Info, const CallExpr *E) {
// Don't speculate about dynamic deallocation.
6089 if (Info.checkingPotentialConstantExpression() ||
6090 Info.SpeculativeEvaluationDepth)
6093 // This is permitted only within a call to std::allocator<T>::deallocate.
6094 if (!Info.getStdAllocatorCaller("deallocate")) {
6095 Info.FFDiag(E->getExprLoc());
// Evaluate the pointer operand; the remaining arguments are evaluated only
// for their side-effects.
6100 if (!EvaluatePointer(E->getArg(0), Pointer, Info))
6102 for (unsigned I = 1, N = E->getNumArgs(); I != N; ++I)
6103 EvaluateIgnoredValue(Info, E->getArg(I));
6105 if (Pointer.Designator.Invalid)
6108 // Deleting a null pointer has no effect.
6109 if (Pointer.isNullPointer())
6112 if (!CheckDeleteKind(Info, E, Pointer, DynAlloc::StdAllocator))
// Drop the allocation from the live-heap map, ending its lifetime.
6115 Info.HeapAllocs.erase(Pointer.Base.get<DynamicAllocLValue>());
6119 //===----------------------------------------------------------------------===//
6120 // Generic Evaluation
6121 //===----------------------------------------------------------------------===//
// A fixed-width buffer of optional bytes modeling an object representation
// during a bit_cast; an unset byte is an uninitialized (indeterminate) byte.
6124 class BitCastBuffer {
6125 // FIXME: We're going to need bit-level granularity when we support
6127 // FIXME: It's possible under the C++ standard for 'char' to not be 8 bits, but
6128 // we don't support a host or target where that is the case. Still, we should
6129 // use a more generic type in case we ever do.
6130 SmallVector<Optional<unsigned char>, 32> Bytes;
6132 static_assert(std::numeric_limits<unsigned char>::digits >= 8,
6133 "Need at least 8 bit unsigned char");
6135 bool TargetIsLittleEndian;
6138 BitCastBuffer(CharUnits Width, bool TargetIsLittleEndian)
6139 : Bytes(Width.getQuantity()),
6140 TargetIsLittleEndian(TargetIsLittleEndian) {}
// Copy the bytes in [Offset, Offset+Width) into Output, converted to host
// byte order; fails if any byte in the range is uninitialized.
6143 bool readObject(CharUnits Offset, CharUnits Width,
6144 SmallVectorImpl<unsigned char> &Output) const {
6145 for (CharUnits I = Offset, E = Offset + Width; I != E; ++I) {
6146 // If a byte of an integer is uninitialized, then the whole integer is
6148 if (!Bytes[I.getQuantity()])
6150 Output.push_back(*Bytes[I.getQuantity()]);
6152 if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
6153 std::reverse(Output.begin(), Output.end())
// Store Input (host byte order) at Offset in target byte order. Note that
// Input may be reversed in place.
6157 void writeObject(CharUnits Offset, SmallVectorImpl<unsigned char> &Input) {
6158 if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
6159 std::reverse(Input.begin(), Input.end());
6162 for (unsigned char Byte : Input) {
6163 assert(!Bytes[Offset.getQuantity() + Index] && "overwriting a byte?");
6164 Bytes[Offset.getQuantity() + Index] = Byte;
6169 size_t size() { return Bytes.size(); }
6172 /// Traverse an APValue to produce a BitCastBuffer, emulating how the current
6173 /// target would represent the value at runtime.
6174 class APValueToBufferConverter {
6176 BitCastBuffer Buffer;
6177 const CastExpr *BCE;
6179 APValueToBufferConverter(EvalInfo &Info, CharUnits ObjectWidth,
6180 const CastExpr *BCE)
6182 Buffer(ObjectWidth, Info.Ctx.getTargetInfo().isLittleEndian()),
// Serialize Val (of type Ty) at the start of the buffer.
6185 bool visit(const APValue &Val, QualType Ty) {
6186 return visit(Val, Ty, CharUnits::fromQuantity(0));
6189 // Write out Val with type Ty into Buffer starting at Offset.
6190 bool visit(const APValue &Val, QualType Ty, CharUnits Offset) {
6191 assert((size_t)Offset.getQuantity() <= Buffer.size());
6193 // As a special case, nullptr_t has an indeterminate value.
6194 if (Ty->isNullPtrType())
6197 // Dig through Src to find the byte at SrcOffset.
6198 switch (Val.getKind()) {
6199 case APValue::Indeterminate:
6204 return visitInt(Val.getInt(), Ty, Offset);
6205 case APValue::Float:
6206 return visitFloat(Val.getFloat(), Ty, Offset);
6207 case APValue::Array:
6208 return visitArray(Val, Ty, Offset);
6209 case APValue::Struct:
6210 return visitRecord(Val, Ty, Offset);
6212 case APValue::ComplexInt:
6213 case APValue::ComplexFloat:
6214 case APValue::Vector:
6215 case APValue::FixedPoint:
6216 // FIXME: We should support these.
6218 case APValue::Union:
6219 case APValue::MemberPointer:
6220 case APValue::AddrLabelDiff: {
6221 Info.FFDiag(BCE->getBeginLoc(),
6222 diag::note_constexpr_bit_cast_unsupported_type)
6227 case APValue::LValue:
6228 llvm_unreachable("LValue subobject in bit_cast?");
6230 llvm_unreachable("Unhandled APValue::ValueKind");
// Serialize a struct/class value: bases first, then fields at their
// record-layout offsets. Bit-fields are not yet supported.
6233 bool visitRecord(const APValue &Val, QualType Ty, CharUnits Offset) {
6234 const RecordDecl *RD = Ty->getAsRecordDecl();
6235 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
6237 // Visit the base classes.
6238 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
6239 for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
6240 const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
6241 CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
6243 if (!visitRecord(Val.getStructBase(I), BS.getType(),
6244 Layout.getBaseClassOffset(BaseDecl) + Offset))
6249 // Visit the fields.
6250 unsigned FieldIdx = 0;
6251 for (FieldDecl *FD : RD->fields()) {
6252 if (FD->isBitField()) {
6253 Info.FFDiag(BCE->getBeginLoc(),
6254 diag::note_constexpr_bit_cast_unsupported_bitfield);
6258 uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx);
6260 assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0 &&
6261 "only bit-fields can have sub-char alignment");
6262 CharUnits FieldOffset =
6263 Info.Ctx.toCharUnitsFromBits(FieldOffsetBits) + Offset;
6264 QualType FieldTy = FD->getType();
6265 if (!visit(Val.getStructField(FieldIdx), FieldTy, FieldOffset))
// Serialize a constant array: initialized elements, then the filler (if
// any) for the remaining elements.
6273 bool visitArray(const APValue &Val, QualType Ty, CharUnits Offset) {
6275 dyn_cast_or_null<ConstantArrayType>(Ty->getAsArrayTypeUnsafe());
6279 CharUnits ElemWidth = Info.Ctx.getTypeSizeInChars(CAT->getElementType());
6280 unsigned NumInitializedElts = Val.getArrayInitializedElts();
6281 unsigned ArraySize = Val.getArraySize();
6282 // First, initialize the initialized elements.
6283 for (unsigned I = 0; I != NumInitializedElts; ++I) {
6284 const APValue &SubObj = Val.getArrayInitializedElt(I);
6285 if (!visit(SubObj, CAT->getElementType(), Offset + I * ElemWidth))
6289 // Next, initialize the rest of the array using the filler.
6290 if (Val.hasArrayFiller()) {
6291 const APValue &Filler = Val.getArrayFiller();
6292 for (unsigned I = NumInitializedElts; I != ArraySize; ++I) {
6293 if (!visit(Filler, CAT->getElementType(), Offset + I * ElemWidth))
// Serialize an integer in target byte order via StoreIntToMemory.
6301 bool visitInt(const APSInt &Val, QualType Ty, CharUnits Offset) {
6302 CharUnits Width = Info.Ctx.getTypeSizeInChars(Ty);
6303 SmallVector<unsigned char, 8> Bytes(Width.getQuantity());
6304 llvm::StoreIntToMemory(Val, &*Bytes.begin(), Width.getQuantity());
6305 Buffer.writeObject(Offset, Bytes);
// Serialize a float via its underlying bit pattern.
6309 bool visitFloat(const APFloat &Val, QualType Ty, CharUnits Offset) {
6310 APSInt AsInt(Val.bitcastToAPInt());
6311 return visitInt(AsInt, Ty, Offset);
// Entry point: produce a buffer of the destination type's size, filled from
// the source value; None on failure (a diagnostic has been emitted).
6315 static Optional<BitCastBuffer> convert(EvalInfo &Info, const APValue &Src,
6316 const CastExpr *BCE) {
6317 CharUnits DstSize = Info.Ctx.getTypeSizeInChars(BCE->getType());
6318 APValueToBufferConverter Converter(Info, DstSize, BCE);
6319 if (!Converter.visit(Src, BCE->getSubExpr()->getType()))
6321 return Converter.Buffer;
6325 /// Write a BitCastBuffer into an APValue.
6326 class BufferToAPValueConverter {
6328 const BitCastBuffer &Buffer;
6329 const CastExpr *BCE;
6331 BufferToAPValueConverter(EvalInfo &Info, const BitCastBuffer &Buffer,
6332 const CastExpr *BCE)
6333 : Info(Info), Buffer(Buffer), BCE(BCE) {}
6335 // Emit an unsupported bit_cast type error. Sema refuses to build a bit_cast
6336 // with an invalid type, so anything left is a deficiency on our part (FIXME).
6337 // Ideally this will be unreachable.
6338 llvm::NoneType unsupportedType(QualType Ty) {
6339 Info.FFDiag(BCE->getBeginLoc(),
6340 diag::note_constexpr_bit_cast_unsupported_type)
// Reconstruct a scalar of builtin type from the buffer bytes at Offset.
// EnumSugar, when set, is the enum type this builtin represents (used for
// diagnostics and the std::byte special case).
6345 Optional<APValue> visit(const BuiltinType *T, CharUnits Offset,
6346 const EnumType *EnumSugar = nullptr) {
6347 if (T->isNullPtrType()) {
6348 uint64_t NullValue = Info.Ctx.getTargetNullPointerValue(QualType(T, 0));
6349 return APValue((Expr *)nullptr,
6350 /*Offset=*/CharUnits::fromQuantity(NullValue),
6351 APValue::NoLValuePath{}, /*IsNullPtr=*/true);
6354 CharUnits SizeOf = Info.Ctx.getTypeSizeInChars(T);
6355 SmallVector<uint8_t, 8> Bytes;
6356 if (!Buffer.readObject(Offset, SizeOf, Bytes)) {
6357 // If this is std::byte or unsigned char, then it's okay to store an
6358 // indeterminate value.
6359 bool IsStdByte = EnumSugar && EnumSugar->isStdByteType();
6361 !EnumSugar && (T->isSpecificBuiltinType(BuiltinType::UChar) ||
6362 T->isSpecificBuiltinType(BuiltinType::Char_U));
6363 if (!IsStdByte && !IsUChar) {
6364 QualType DisplayType(EnumSugar ? (const Type *)EnumSugar : T, 0);
6365 Info.FFDiag(BCE->getExprLoc(),
6366 diag::note_constexpr_bit_cast_indet_dest)
6367 << DisplayType << Info.Ctx.getLangOpts().CharIsSigned;
6371 return APValue::IndeterminateValue();
// Load the raw bits, then interpret them per the type category.
6374 APSInt Val(SizeOf.getQuantity() * Info.Ctx.getCharWidth(), true);
6375 llvm::LoadIntFromMemory(Val, &*Bytes.begin(), Bytes.size());
6377 if (T->isIntegralOrEnumerationType()) {
6378 Val.setIsSigned(T->isSignedIntegerOrEnumerationType());
6379 return APValue(Val);
6382 if (T->isRealFloatingType()) {
6383 const llvm::fltSemantics &Semantics =
6384 Info.Ctx.getFloatTypeSemantics(QualType(T, 0));
6385 return APValue(APFloat(Semantics, Val));
6388 return unsupportedType(QualType(T, 0));
// Rebuild a struct/class value: bases first, then fields at their
// record-layout offsets. Bit-fields are not yet supported.
6391 Optional<APValue> visit(const RecordType *RTy, CharUnits Offset) {
6392 const RecordDecl *RD = RTy->getAsRecordDecl();
6393 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
6395 unsigned NumBases = 0;
6396 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6397 NumBases = CXXRD->getNumBases();
6399 APValue ResultVal(APValue::UninitStruct(), NumBases,
6400 std::distance(RD->field_begin(), RD->field_end()));
6402 // Visit the base classes.
6403 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
6404 for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
6405 const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
6406 CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
// Empty (or empty-sized) bases occupy no storage; skip them.
6407 if (BaseDecl->isEmpty() ||
6408 Info.Ctx.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
6411 Optional<APValue> SubObj = visitType(
6412 BS.getType(), Layout.getBaseClassOffset(BaseDecl) + Offset);
6415 ResultVal.getStructBase(I) = *SubObj;
6419 // Visit the fields.
6420 unsigned FieldIdx = 0;
6421 for (FieldDecl *FD : RD->fields()) {
6422 // FIXME: We don't currently support bit-fields. A lot of the logic for
6423 // this is in CodeGen, so we need to factor it around.
6424 if (FD->isBitField()) {
6425 Info.FFDiag(BCE->getBeginLoc(),
6426 diag::note_constexpr_bit_cast_unsupported_bitfield);
6430 uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx);
6431 assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0);
6433 CharUnits FieldOffset =
6434 CharUnits::fromQuantity(FieldOffsetBits / Info.Ctx.getCharWidth()) +
6436 QualType FieldTy = FD->getType();
6437 Optional<APValue> SubObj = visitType(FieldTy, FieldOffset);
6440 ResultVal.getStructField(FieldIdx) = *SubObj;
// Rebuild an enum value via its (builtin) underlying representation type.
6447 Optional<APValue> visit(const EnumType *Ty, CharUnits Offset) {
6448 QualType RepresentationType = Ty->getDecl()->getIntegerType();
6449 assert(!RepresentationType.isNull() &&
6450 "enum forward decl should be caught by Sema");
6451 const auto *AsBuiltin =
6452 RepresentationType.getCanonicalType()->castAs<BuiltinType>();
6453 // Recurse into the underlying type. Treat std::byte transparently as
6455 return visit(AsBuiltin, Offset, /*EnumTy=*/Ty);
// Rebuild a constant array element-by-element.
6458 Optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) {
6459 size_t Size = Ty->getSize().getLimitedValue();
6460 CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(Ty->getElementType());
6462 APValue ArrayValue(APValue::UninitArray(), Size, Size);
6463 for (size_t I = 0; I != Size; ++I) {
6464 Optional<APValue> ElementValue =
6465 visitType(Ty->getElementType(), Offset + I * ElementWidth);
6468 ArrayValue.getArrayInitializedElt(I) = std::move(*ElementValue);
// Fallback for any type class not handled above.
6474 Optional<APValue> visit(const Type *Ty, CharUnits Offset) {
6475 return unsupportedType(QualType(Ty, 0));
// Dispatch on the canonical type class to the appropriate visit overload.
6478 Optional<APValue> visitType(QualType Ty, CharUnits Offset) {
6479 QualType Can = Ty.getCanonicalType();
6481 switch (Can->getTypeClass()) {
6482 #define TYPE(Class, Base) \
6484 return visit(cast<Class##Type>(Can.getTypePtr()), Offset);
6485 #define ABSTRACT_TYPE(Class, Base)
6486 #define NON_CANONICAL_TYPE(Class, Base) \
6488 llvm_unreachable("non-canonical type should be impossible!");
6489 #define DEPENDENT_TYPE(Class, Base) \
6492 "dependent types aren't supported in the constant evaluator!");
6493 #define NON_CANONICAL_UNLESS_DEPENDENT(Class, Base) \
6495 llvm_unreachable("either dependent or not canonical!");
6496 #include "clang/AST/TypeNodes.inc"
6498 llvm_unreachable("Unhandled Type::TypeClass");
6502 // Pull out a full value of type DstType.
6503 static Optional<APValue> convert(EvalInfo &Info, BitCastBuffer &Buffer,
6504 const CastExpr *BCE) {
6505 BufferToAPValueConverter Converter(Info, Buffer, BCE);
6506 return Converter.visitType(BCE->getType(), CharUnits::fromQuantity(0));
// Check (recursively) whether Ty may appear as the source or destination
// type of a constexpr bit_cast. Info may be null, in which case no
// diagnostics are produced. CheckingDest selects the diagnostic wording.
6510 static bool checkBitCastConstexprEligibilityType(SourceLocation Loc,
6511 QualType Ty, EvalInfo *Info,
6512 const ASTContext &Ctx,
6513 bool CheckingDest) {
6514 Ty = Ty.getCanonicalType();
// Emit the top-level "invalid type" diagnostic for the given reason code.
6516 auto diag = [&](int Reason) {
6518 Info->FFDiag(Loc, diag::note_constexpr_bit_cast_invalid_type)
6519 << CheckingDest << (Reason == 4) << Reason;
// Attach a note pointing at the offending base class or member.
6522 auto note = [&](int Construct, QualType NoteTy, SourceLocation NoteLoc) {
6524 Info->Note(NoteLoc, diag::note_constexpr_bit_cast_invalid_subtype)
6525 << NoteTy << Construct << Ty;
// Categories that are never eligible: unions, pointers, member pointers,
// and volatile-qualified types.
6529 if (Ty->isUnionType())
6531 if (Ty->isPointerType())
6533 if (Ty->isMemberPointerType())
6535 if (Ty.isVolatileQualified())
// Recurse through the bases and fields of record types.
6538 if (RecordDecl *Record = Ty->getAsRecordDecl()) {
6539 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Record)) {
6540 for (CXXBaseSpecifier &BS : CXXRD->bases())
6541 if (!checkBitCastConstexprEligibilityType(Loc, BS.getType(), Info, Ctx,
6543 return note(1, BS.getType(), BS.getBeginLoc());
6545 for (FieldDecl *FD : Record->fields()) {
6546 if (FD->getType()->isReferenceType())
6548 if (!checkBitCastConstexprEligibilityType(Loc, FD->getType(), Info, Ctx,
6550 return note(0, FD->getType(), FD->getBeginLoc());
// Arrays are eligible iff their element type is.
6554 if (Ty->isArrayType() &&
6555 !checkBitCastConstexprEligibilityType(Loc, Ctx.getBaseElementType(Ty),
6556 Info, Ctx, CheckingDest))
// Check both sides of a bit_cast for constexpr eligibility. The destination
// type is checked first; the source type is only checked if it passed.
6562 static bool checkBitCastConstexprEligibility(EvalInfo *Info,
6563 const ASTContext &Ctx,
6564 const CastExpr *BCE) {
6565 bool DestOK = checkBitCastConstexprEligibilityType(
6566 BCE->getBeginLoc(), BCE->getType(), Info, Ctx, true);
6567 bool SourceOK = DestOK && checkBitCastConstexprEligibilityType(
6569 BCE->getSubExpr()->getType(), Info, Ctx, false);
// Evaluate an LValueToRValueBitCast: load the source object's full object
// representation into a byte buffer, then rebuild a value of the
// destination type from those bytes.
6573 static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue,
6574 APValue &SourceValue,
6575 const CastExpr *BCE) {
6576 assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 &&
6577 "no host or target supports non 8-bit chars");
6578 assert(SourceValue.isLValue() &&
6579 "LValueToRValueBitcast requires an lvalue operand!");
// Reject source/destination types that aren't bit_cast-eligible.
6581 if (!checkBitCastConstexprEligibility(&Info, Info.Ctx, BCE))
6584 LValue SourceLValue;
6585 APValue SourceRValue;
6586 SourceLValue.setFrom(Info.Ctx, SourceValue);
// Load the object representation (hence WantObjectRepresentation=true) of
// the source as an rvalue.
6587 if (!handleLValueToRValueConversion(
6588 Info, BCE, BCE->getSubExpr()->getType().withConst(), SourceLValue,
6589 SourceRValue, /*WantObjectRepresentation=*/true))
6592 // Read out SourceValue into a char buffer.
6593 Optional<BitCastBuffer> Buffer =
6594 APValueToBufferConverter::convert(Info, SourceRValue, BCE);
6598 // Write out the buffer into a new APValue.
6599 Optional<APValue> MaybeDestValue =
6600 BufferToAPValueConverter::convert(Info, *Buffer, BCE);
6601 if (!MaybeDestValue)
6604 DestValue = std::move(*MaybeDestValue);
6608 template <class Derived>
6609 class ExprEvaluatorBase
6610 : public ConstStmtVisitor<Derived, bool> {
6612 Derived &getDerived() { return static_cast<Derived&>(*this); }
6613 bool DerivedSuccess(const APValue &V, const Expr *E) {
6614 return getDerived().Success(V, E);
6616 bool DerivedZeroInitialization(const Expr *E) {
6617 return getDerived().ZeroInitialization(E);
6620 // Check whether a conditional operator with a non-constant condition is a
6621 // potential constant expression. If neither arm is a potential constant
6622 // expression, then the conditional operator is not either.
6623 template<typename ConditionalOperator>
6624 void CheckPotentialConstantConditional(const ConditionalOperator *E) {
6625 assert(Info.checkingPotentialConstantExpression());
6627 // Speculatively evaluate both arms.
6628 SmallVector<PartialDiagnosticAt, 8> Diag;
6630 SpeculativeEvaluationRAII Speculate(Info, &Diag);
6631 StmtVisitorTy::Visit(E->getFalseExpr());
6637 SpeculativeEvaluationRAII Speculate(Info, &Diag);
6639 StmtVisitorTy::Visit(E->getTrueExpr());
6644 Error(E, diag::note_constexpr_conditional_never_const);
6648 template<typename ConditionalOperator>
6649 bool HandleConditionalOperator(const ConditionalOperator *E) {
6651 if (!EvaluateAsBooleanCondition(E->getCond(), BoolResult, Info)) {
6652 if (Info.checkingPotentialConstantExpression() && Info.noteFailure()) {
6653 CheckPotentialConstantConditional(E);
6656 if (Info.noteFailure()) {
6657 StmtVisitorTy::Visit(E->getTrueExpr());
6658 StmtVisitorTy::Visit(E->getFalseExpr());
6663 Expr *EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr();
6664 return StmtVisitorTy::Visit(EvalExpr);
6669 typedef ConstStmtVisitor<Derived, bool> StmtVisitorTy;
6670 typedef ExprEvaluatorBase ExprEvaluatorBaseTy;
6672 OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
6673 return Info.CCEDiag(E, D);
6676 bool ZeroInitialization(const Expr *E) { return Error(E); }
6679 ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {}
6681 EvalInfo &getEvalInfo() { return Info; }
6683 /// Report an evaluation error. This should only be called when an error is
6684 /// first discovered. When propagating an error, just return false.
6685 bool Error(const Expr *E, diag::kind D) {
6689 bool Error(const Expr *E) {
6690 return Error(E, diag::note_invalid_subexpr_in_const_expr);
6693 bool VisitStmt(const Stmt *) {
6694 llvm_unreachable("Expression evaluator should not be called on stmts");
6696 bool VisitExpr(const Expr *E) {
6700 bool VisitConstantExpr(const ConstantExpr *E)
6701 { return StmtVisitorTy::Visit(E->getSubExpr()); }
6702 bool VisitParenExpr(const ParenExpr *E)
6703 { return StmtVisitorTy::Visit(E->getSubExpr()); }
6704 bool VisitUnaryExtension(const UnaryOperator *E)
6705 { return StmtVisitorTy::Visit(E->getSubExpr()); }
6706 bool VisitUnaryPlus(const UnaryOperator *E)
6707 { return StmtVisitorTy::Visit(E->getSubExpr()); }
6708 bool VisitChooseExpr(const ChooseExpr *E)
6709 { return StmtVisitorTy::Visit(E->getChosenSubExpr()); }
6710 bool VisitGenericSelectionExpr(const GenericSelectionExpr *E)
6711 { return StmtVisitorTy::Visit(E->getResultExpr()); }
6712 bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
6713 { return StmtVisitorTy::Visit(E->getReplacement()); }
6714 bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) {
6715 TempVersionRAII RAII(*Info.CurrentCall);
6716 SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope);
6717 return StmtVisitorTy::Visit(E->getExpr());
6719 bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
6720 TempVersionRAII RAII(*Info.CurrentCall);
6721 // The initializer may not have been parsed yet, or might be erroneous.
6724 SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope);
6725 return StmtVisitorTy::Visit(E->getExpr());
6728 bool VisitExprWithCleanups(const ExprWithCleanups *E) {
6729 FullExpressionRAII Scope(Info);
6730 return StmtVisitorTy::Visit(E->getSubExpr()) && Scope.destroy();
6733 // Temporaries are registered when created, so we don't care about
6734 // CXXBindTemporaryExpr.
6735 bool VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *E) {
6736 return StmtVisitorTy::Visit(E->getSubExpr());
6739 bool VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
6740 CCEDiag(E, diag::note_constexpr_invalid_cast) << 0;
6741 return static_cast<Derived*>(this)->VisitCastExpr(E);
6743 bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
6744 if (!Info.Ctx.getLangOpts().CPlusPlus2a)
6745 CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
6746 return static_cast<Derived*>(this)->VisitCastExpr(E);
6748 bool VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *E) {
6749 return static_cast<Derived*>(this)->VisitCastExpr(E);
6752 bool VisitBinaryOperator(const BinaryOperator *E) {
6753 switch (E->getOpcode()) {
6758 VisitIgnoredValue(E->getLHS());
6759 return StmtVisitorTy::Visit(E->getRHS());
6764 if (!HandleMemberPointerAccess(Info, E, Obj))
6767 if (!handleLValueToRValueConversion(Info, E, E->getType(), Obj, Result))
6769 return DerivedSuccess(Result, E);
6774 bool VisitCXXRewrittenBinaryOperator(const CXXRewrittenBinaryOperator *E) {
6775 return StmtVisitorTy::Visit(E->getSemanticForm());
6778 bool VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
6779 // Evaluate and cache the common expression. We treat it as a temporary,
6780 // even though it's not quite the same thing.
6782 if (!Evaluate(Info.CurrentCall->createTemporary(
6783 E->getOpaqueValue(),
6784 getStorageType(Info.Ctx, E->getOpaqueValue()), false,
6786 Info, E->getCommon()))
6789 return HandleConditionalOperator(E);
6792 bool VisitConditionalOperator(const ConditionalOperator *E) {
6793 bool IsBcpCall = false;
6794 // If the condition (ignoring parens) is a __builtin_constant_p call,
6795 // the result is a constant expression if it can be folded without
6796 // side-effects. This is an important GNU extension. See GCC PR38377
6798 if (const CallExpr *CallCE =
6799 dyn_cast<CallExpr>(E->getCond()->IgnoreParenCasts()))
6800 if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
6803 // Always assume __builtin_constant_p(...) ? ... : ... is a potential
6804 // constant expression; we can't check whether it's potentially foldable.
6805 // FIXME: We should instead treat __builtin_constant_p as non-constant if
6806 // it would return 'false' in this mode.
6807 if (Info.checkingPotentialConstantExpression() && IsBcpCall)
6810 FoldConstant Fold(Info, IsBcpCall);
6811 if (!HandleConditionalOperator(E)) {
6812 Fold.keepDiagnostics();
6819 bool VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
6820 if (APValue *Value = Info.CurrentCall->getCurrentTemporary(E))
6821 return DerivedSuccess(*Value, E);
6823 const Expr *Source = E->getSourceExpr();
6826 if (Source == E) { // sanity checking.
6827 assert(0 && "OpaqueValueExpr recursively refers to itself");
6830 return StmtVisitorTy::Visit(Source);
6833 bool VisitPseudoObjectExpr(const PseudoObjectExpr *E) {
6834 for (const Expr *SemE : E->semantics()) {
6835 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SemE)) {
6836 // FIXME: We can't handle the case where an OpaqueValueExpr is also the
6837 // result expression: there could be two different LValues that would
6838 // refer to the same object in that case, and we can't model that.
6839 if (SemE == E->getResultExpr())
6842 // Unique OVEs get evaluated if and when we encounter them when
6843 // emitting the rest of the semantic form, rather than eagerly.
6844 if (OVE->isUnique())
6848 if (!Evaluate(Info.CurrentCall->createTemporary(
6849 OVE, getStorageType(Info.Ctx, OVE), false, LV),
6850 Info, OVE->getSourceExpr()))
6852 } else if (SemE == E->getResultExpr()) {
6853 if (!StmtVisitorTy::Visit(SemE))
6856 if (!EvaluateIgnoredValue(Info, SemE))
6863 bool VisitCallExpr(const CallExpr *E) {
6865 if (!handleCallExpr(E, Result, nullptr))
6867 return DerivedSuccess(Result, E);
6870 bool handleCallExpr(const CallExpr *E, APValue &Result,
6871 const LValue *ResultSlot) {
6872 const Expr *Callee = E->getCallee()->IgnoreParens();
6873 QualType CalleeType = Callee->getType();
6875 const FunctionDecl *FD = nullptr;
6876 LValue *This = nullptr, ThisVal;
6877 auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
6878 bool HasQualifier = false;
6880 // Extract function decl and 'this' pointer from the callee.
6881 if (CalleeType->isSpecificBuiltinType(BuiltinType::BoundMember)) {
6882 const CXXMethodDecl *Member = nullptr;
6883 if (const MemberExpr *ME = dyn_cast<MemberExpr>(Callee)) {
6884 // Explicit bound member calls, such as x.f() or p->g();
6885 if (!EvaluateObjectArgument(Info, ME->getBase(), ThisVal))
6887 Member = dyn_cast<CXXMethodDecl>(ME->getMemberDecl());
6889 return Error(Callee);
6891 HasQualifier = ME->hasQualifier();
6892 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Callee)) {
6893 // Indirect bound member calls ('.*' or '->*').
6894 const ValueDecl *D =
6895 HandleMemberPointerAccess(Info, BE, ThisVal, false);
6898 Member = dyn_cast<CXXMethodDecl>(D);
6900 return Error(Callee);
6902 } else if (const auto *PDE = dyn_cast<CXXPseudoDestructorExpr>(Callee)) {
6903 if (!Info.getLangOpts().CPlusPlus2a)
6904 Info.CCEDiag(PDE, diag::note_constexpr_pseudo_destructor);
6905 // FIXME: If pseudo-destructor calls ever start ending the lifetime of
6906 // their callee, we should start calling HandleDestruction here.
6907 // For now, we just evaluate the object argument and discard it.
6908 return EvaluateObjectArgument(Info, PDE->getBase(), ThisVal);
6910 return Error(Callee);
6912 } else if (CalleeType->isFunctionPointerType()) {
6914 if (!EvaluatePointer(Callee, Call, Info))
6917 if (!Call.getLValueOffset().isZero())
6918 return Error(Callee);
6919 FD = dyn_cast_or_null<FunctionDecl>(
6920 Call.getLValueBase().dyn_cast<const ValueDecl*>());
6922 return Error(Callee);
6923 // Don't call function pointers which have been cast to some other type.
6924 // Per DR (no number yet), the caller and callee can differ in noexcept.
6925 if (!Info.Ctx.hasSameFunctionTypeIgnoringExceptionSpec(
6926 CalleeType->getPointeeType(), FD->getType())) {
6930 // Overloaded operator calls to member functions are represented as normal
6931 // calls with '*this' as the first argument.
6932 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
6933 if (MD && !MD->isStatic()) {
6934 // FIXME: When selecting an implicit conversion for an overloaded
6935 // operator delete, we sometimes try to evaluate calls to conversion
6936 // operators without a 'this' parameter!
6940 if (!EvaluateObjectArgument(Info, Args[0], ThisVal))
6943 Args = Args.slice(1);
6944 } else if (MD && MD->isLambdaStaticInvoker()) {
6945 // Map the static invoker for the lambda back to the call operator.
6946 // Conveniently, we don't have to slice out the 'this' argument (as is
6947 // being done for the non-static case), since a static member function
6948 // doesn't have an implicit argument passed in.
6949 const CXXRecordDecl *ClosureClass = MD->getParent();
6951 ClosureClass->captures_begin() == ClosureClass->captures_end() &&
6952 "Number of captures must be zero for conversion to function-ptr");
6954 const CXXMethodDecl *LambdaCallOp =
6955 ClosureClass->getLambdaCallOperator();
6957 // Set 'FD', the function that will be called below, to the call
6958 // operator. If the closure object represents a generic lambda, find
6959 // the corresponding specialization of the call operator.
6961 if (ClosureClass->isGenericLambda()) {
6962 assert(MD->isFunctionTemplateSpecialization() &&
6963 "A generic lambda's static-invoker function must be a "
6964 "template specialization");
6965 const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
6966 FunctionTemplateDecl *CallOpTemplate =
6967 LambdaCallOp->getDescribedFunctionTemplate();
6968 void *InsertPos = nullptr;
6969 FunctionDecl *CorrespondingCallOpSpecialization =
6970 CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
6971 assert(CorrespondingCallOpSpecialization &&
6972 "We must always have a function call operator specialization "
6973 "that corresponds to our static invoker specialization");
6974 FD = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
6977 } else if (FD->isReplaceableGlobalAllocationFunction()) {
6978 if (FD->getDeclName().getCXXOverloadedOperator() == OO_New ||
6979 FD->getDeclName().getCXXOverloadedOperator() == OO_Array_New) {
6981 if (!HandleOperatorNewCall(Info, E, Ptr))
6983 Ptr.moveInto(Result);
6986 return HandleOperatorDeleteCall(Info, E);
6992 SmallVector<QualType, 4> CovariantAdjustmentPath;
6994 auto *NamedMember = dyn_cast<CXXMethodDecl>(FD);
6995 if (NamedMember && NamedMember->isVirtual() && !HasQualifier) {
6996 // Perform virtual dispatch, if necessary.
6997 FD = HandleVirtualDispatch(Info, E, *This, NamedMember,
6998 CovariantAdjustmentPath);
7002 // Check that the 'this' pointer points to an object of the right type.
7003 // FIXME: If this is an assignment operator call, we may need to change
7004 // the active union member before we check this.
7005 if (!checkNonVirtualMemberCallThisPointer(Info, E, *This, NamedMember))
7010 // Destructor calls are different enough that they have their own codepath.
7011 if (auto *DD = dyn_cast<CXXDestructorDecl>(FD)) {
7012 assert(This && "no 'this' pointer for destructor call");
7013 return HandleDestruction(Info, E, *This,
7014 Info.Ctx.getRecordType(DD->getParent()));
7017 const FunctionDecl *Definition = nullptr;
7018 Stmt *Body = FD->getBody(Definition);
7020 if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition, Body) ||
7021 !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Body, Info,
7022 Result, ResultSlot))
7025 if (!CovariantAdjustmentPath.empty() &&
7026 !HandleCovariantReturnAdjustment(Info, E, Result,
7027 CovariantAdjustmentPath))
// A compound literal evaluated as a prvalue has the value of its
// initializer expression.
7033 bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
7034 return StmtVisitorTy::Visit(E->getInitializer());
// An initializer list with no elements zero-initializes the result; a
// single-element list evaluates as that element.
// NOTE(review): handling for other arities is elided from this excerpt.
7036 bool VisitInitListExpr(const InitListExpr *E) {
7037 if (E->getNumInits() == 0)
7038 return DerivedZeroInitialization(E);
7039 if (E->getNumInits() == 1)
7040 return StmtVisitorTy::Visit(E->getInit(0));
// Implicit value-initialization is zero-initialization for the purposes
// of constant evaluation.
7043 bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
7044 return DerivedZeroInitialization(E);
// T() for scalar T value-initializes, i.e. zero-initializes.
7046 bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
7047 return DerivedZeroInitialization(E);
// nullptr is represented as the zero-initialized value of its type.
7049 bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
7050 return DerivedZeroInitialization(E);
7053 /// A member expression where the object is a prvalue is itself a prvalue.
7054 bool VisitMemberExpr(const MemberExpr *E) {
// Only reachable pre-C++11: later standards materialize a temporary for
// the base, so the base is never a prvalue here.
7055 assert(!Info.Ctx.getLangOpts().CPlusPlus11 &&
7056 "missing temporary materialization conversion");
7057 assert(!E->isArrow() && "missing call to bound member function?");
// Evaluate the complete base object as an rvalue APValue.
7060 if (!Evaluate(Val, Info, E->getBase()))
7063 QualType BaseTy = E->getBase()->getType();
// Only field accesses are supported here; anything else is an error.
7065 const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
7066 if (!FD) return Error(E);
7067 assert(!FD->getType()->isReferenceType() && "prvalue reference?");
7068 assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() ==
7069 FD->getParent()->getCanonicalDecl() && "record / field mismatch");
7071 // Note: there is no lvalue base here. But this case should only ever
7072 // happen in C or in C++98, where we cannot be evaluating a constexpr
7073 // constructor, which is the only case the base matters.
7074 CompleteObject Obj(APValue::LValueBase(), &Val, BaseTy);
// Build a designator naming the field, then pull the subobject out of
// the evaluated base value.
7075 SubobjectDesignator Designator(BaseTy);
7076 Designator.addDeclUnchecked(FD);
7079 return extractSubobject(Info, E, Obj, Designator, Result) &&
7080 DerivedSuccess(Result, E);
// Evaluate an ext-vector element access (e.g. v.xyzw). A one-element
// access yields that scalar element; a multi-element access builds a new
// vector APValue from the selected elements.
7083 bool VisitExtVectorElementExpr(const ExtVectorElementExpr *E) {
7085 if (!Evaluate(Val, Info, E->getBase()))
7088 if (Val.isVector()) {
// Decode the accessor string into element indices.
7089 SmallVector<uint32_t, 4> Indices;
7090 E->getEncodedElementAccess(Indices);
7091 if (Indices.size() == 1) {
7093 return DerivedSuccess(Val.getVectorElt(Indices[0]), E);
7095 // Construct new APValue vector.
7096 SmallVector<APValue, 4> Elts;
7097 for (unsigned I = 0; I < Indices.size(); ++I) {
7098 Elts.push_back(Val.getVectorElt(Indices[I]));
7100 APValue VecResult(Elts.data(), Indices.size());
7101 return DerivedSuccess(VecResult, E);
// Default cast handling shared by all the concrete evaluators. Each case
// evaluates the operand in the appropriate value category and reports the
// result via DerivedSuccess.
7108 bool VisitCastExpr(const CastExpr *E) {
7109 switch (E->getCastKind()) {
7113 case CK_AtomicToNonAtomic: {
7115 // This does not need to be done in place even for class/array types:
7116 // atomic-to-non-atomic conversion implies copying the object
7118 if (!Evaluate(AtomicVal, Info, E->getSubExpr()))
7120 return DerivedSuccess(AtomicVal, E);
// No-op conversions and user-defined conversions just evaluate the
// operand directly.
7124 case CK_UserDefinedConversion:
7125 return StmtVisitorTy::Visit(E->getSubExpr());
7127 case CK_LValueToRValue: {
// Evaluate the operand as an lvalue, then load from it.
7129 if (!EvaluateLValue(E->getSubExpr(), LVal, Info))
7132 // Note, we use the subexpression's type in order to retain cv-qualifiers.
7133 if (!handleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(),
7136 return DerivedSuccess(RVal, E);
7138 case CK_LValueToRValueBitCast: {
// Reinterpret the object representation of the operand (std::bit_cast).
7139 APValue DestValue, SourceValue;
7140 if (!Evaluate(SourceValue, Info, E->getSubExpr()))
7142 if (!handleLValueToRValueBitCast(Info, DestValue, SourceValue, E))
7144 return DerivedSuccess(DestValue, E);
7147 case CK_AddressSpaceConversion: {
// Address-space conversion does not change the value.
7149 if (!Evaluate(Value, Info, E->getSubExpr()))
7151 return DerivedSuccess(Value, E);
// Post-increment: forwarded to the shared post-inc/dec handler.
7158 bool VisitUnaryPostInc(const UnaryOperator *UO) {
7159 return VisitUnaryPostIncDec(UO);
// Post-decrement: forwarded to the shared post-inc/dec handler.
7161 bool VisitUnaryPostDec(const UnaryOperator *UO) {
7162 return VisitUnaryPostIncDec(UO);
// Evaluate x++ / x--. The result of the expression is the *old* value,
// which handleIncDec writes into RVal. Mutation during constant
// evaluation is only permitted in C++14 and later.
7164 bool VisitUnaryPostIncDec(const UnaryOperator *UO) {
7165 if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
// Evaluate the operand as an lvalue, then apply the side effect.
7169 if (!EvaluateLValue(UO->getSubExpr(), LVal, Info))
7172 if (!handleIncDec(this->Info, UO, LVal, UO->getSubExpr()->getType(),
7173 UO->isIncrementOp(), &RVal))
7175 return DerivedSuccess(RVal, UO);
// Evaluate a GNU statement expression ({ stmt; ...; expr; }). All but the
// last statement are executed via EvaluateStmt; the trailing expression
// supplies the value of the whole statement expression.
7178 bool VisitStmtExpr(const StmtExpr *E) {
7179 // We will have checked the full-expressions inside the statement expression
7180 // when they were completed, and don't need to check them again now.
7181 if (Info.checkingForUndefinedBehavior())
7184 const CompoundStmt *CS = E->getSubStmt();
7185 if (CS->body_empty())
// Run the body statements inside their own scope so locals are cleaned
// up (Scope.destroy) before we return the final value.
7188 BlockScopeRAII Scope(Info);
7189 for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
7190 BE = CS->body_end();
// The last statement must be an expression; anything else is
// unsupported in a constant expression.
7193 const Expr *FinalExpr = dyn_cast<Expr>(*BI);
7195 Info.FFDiag((*BI)->getBeginLoc(),
7196 diag::note_constexpr_stmt_expr_unsupported);
7199 return this->Visit(FinalExpr) && Scope.destroy();
// Not the last statement: execute it for its effects only.
7202 APValue ReturnValue;
7203 StmtResult Result = { ReturnValue, nullptr };
7204 EvalStmtResult ESR = EvaluateStmt(Result, Info, *BI);
7205 if (ESR != ESR_Succeeded) {
7206 // FIXME: If the statement-expression terminated due to 'return',
7207 // 'break', or 'continue', it would be nice to propagate that to
7208 // the outer statement evaluation rather than bailing out.
7209 if (ESR != ESR_Failed)
7210 Info.FFDiag((*BI)->getBeginLoc(),
7211 diag::note_constexpr_stmt_expr_unsupported);
7216 llvm_unreachable("Return from function from the loop above.");
7219 /// Visit a value which is evaluated, but whose value is ignored.
7220 void VisitIgnoredValue(const Expr *E) {
7221 EvaluateIgnoredValue(Info, E);
7224 /// Potentially visit a MemberExpr's base expression.
7225 void VisitIgnoredBaseExpression(const Expr *E) {
7226 // While MSVC doesn't evaluate the base expression, it does diagnose the
7227 // presence of side-effecting behavior.
// In MSVC-compatibility mode a side-effect-free base is skipped entirely.
7228 if (Info.getLangOpts().MSVCCompat && !E->HasSideEffects(Info.Ctx))
7230 VisitIgnoredValue(E);
7236 //===----------------------------------------------------------------------===//
7237 // Common base class for lvalue and temporary evaluation.
7238 //===----------------------------------------------------------------------===//
/// CRTP base shared by the lvalue and temporary evaluators. 'Result' is
/// the LValue being built; 'InvalidBaseOK' permits bases we cannot fully
/// model (e.g. alloc_size results).
7240 template<class Derived>
7241 class LValueExprEvaluatorBase
7242 : public ExprEvaluatorBase<Derived> {
7246 typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy;
7247 typedef ExprEvaluatorBase<Derived> ExprEvaluatorBaseTy;
// Record a successful evaluation whose result is the given lvalue base.
7249 bool Success(APValue::LValueBase B) {
// Helper forwarding to the file-level EvaluatePointer with our flags.
7254 bool evaluatePointer(const Expr *E, LValue &Result) {
7255 return EvaluatePointer(E, Result, this->Info, InvalidBaseOK);
7259 LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result, bool InvalidBaseOK)
7260 : ExprEvaluatorBaseTy(Info), Result(Result),
7261 InvalidBaseOK(InvalidBaseOK) {}
// Success overload taking a full APValue: rebuild the LValue from it.
7263 bool Success(const APValue &V, const Expr *E) {
7264 Result.setFrom(this->Info.Ctx, V);
7268 bool VisitMemberExpr(const MemberExpr *E) {
7269 // Handle non-static data members.
// Evaluate the base: through '->' as a pointer, a prvalue record as a
// temporary, otherwise as an lvalue.
7273 EvalOK = evaluatePointer(E->getBase(), Result);
7274 BaseTy = E->getBase()->getType()->castAs<PointerType>()->getPointeeType();
7275 } else if (E->getBase()->isRValue()) {
7276 assert(E->getBase()->getType()->isRecordType());
7277 EvalOK = EvaluateTemporary(E->getBase(), Result, this->Info);
7278 BaseTy = E->getBase()->getType();
7280 EvalOK = this->Visit(E->getBase());
7281 BaseTy = E->getBase()->getType();
// Base evaluation failed; an invalid base may still be acceptable.
7286 Result.setInvalid(E);
// Adjust the lvalue to designate the named member (direct field,
// anonymous-struct indirect field, or error otherwise).
7290 const ValueDecl *MD = E->getMemberDecl();
7291 if (const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl())) {
7292 assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() ==
7293 FD->getParent()->getCanonicalDecl() && "record / field mismatch");
7295 if (!HandleLValueMember(this->Info, E, Result, FD))
7297 } else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(MD)) {
7298 if (!HandleLValueIndirectMember(this->Info, E, Result, IFD))
7301 return this->Error(E);
// A reference member yields the lvalue it refers to, so load it.
7303 if (MD->getType()->isReferenceType()) {
7305 if (!handleLValueToRValueConversion(this->Info, E, MD->getType(), Result,
7308 return Success(RefValue, E);
// .* / ->* member-pointer access produces an lvalue; other binary
// operators are handled by the base class.
7313 bool VisitBinaryOperator(const BinaryOperator *E) {
7314 switch (E->getOpcode()) {
7316 return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
7320 return HandleMemberPointerAccess(this->Info, E, Result);
7324 bool VisitCastExpr(const CastExpr *E) {
7325 switch (E->getCastKind()) {
7327 return ExprEvaluatorBaseTy::VisitCastExpr(E);
7329 case CK_DerivedToBase:
7330 case CK_UncheckedDerivedToBase:
// Evaluate the derived-class lvalue first...
7331 if (!this->Visit(E->getSubExpr()))
7334 // Now figure out the necessary offset to add to the base LV to get from
7335 // the derived class to the base class.
7336 return HandleLValueBasePath(this->Info, E, E->getSubExpr()->getType(),
7343 //===----------------------------------------------------------------------===//
7344 // LValue Evaluation
7346 // This is used for evaluating lvalues (in C and C++), xvalues (in C++11),
7347 // function designators (in C), decl references to void objects (in C), and
7348 // temporaries (if building with -Wno-address-of-temporary).
7350 // LValue evaluation produces values comprising a base expression of one of the
7356 // * CompoundLiteralExpr in C (and in global scope in C++)
7359 // * ObjCStringLiteralExpr
7363 // * CallExpr for a MakeStringConstant builtin
7364 // - typeid(T) expressions, as TypeInfoLValues
7365 // - Locals and temporaries
7366 // * MaterializeTemporaryExpr
7367 // * Any Expr, with a CallIndex indicating the function in which the temporary
7368 // was evaluated, for cases where the MaterializeTemporaryExpr is missing
7369 // from the AST (FIXME).
7370 // * A MaterializeTemporaryExpr that has static storage duration, with no
7371 // CallIndex, for a lifetime-extended temporary.
7372 // plus an offset in bytes.
7373 //===----------------------------------------------------------------------===//
/// Concrete evaluator for glvalue expressions. Most handlers either
/// record the expression itself as the lvalue base (literals, predefined
/// expressions) or are defined out of line below.
7375 class LValueExprEvaluator
7376 : public LValueExprEvaluatorBase<LValueExprEvaluator> {
7378 LValueExprEvaluator(EvalInfo &Info, LValue &Result, bool InvalidBaseOK) :
7379 LValueExprEvaluatorBaseTy(Info, Result, InvalidBaseOK) {}
7381 bool VisitVarDecl(const Expr *E, const VarDecl *VD);
7382 bool VisitUnaryPreIncDec(const UnaryOperator *UO);
7384 bool VisitDeclRefExpr(const DeclRefExpr *E);
// __func__ and friends, string literals and @encode are lvalues whose
// base is the expression itself.
7385 bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(E); }
7386 bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
7387 bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
7388 bool VisitMemberExpr(const MemberExpr *E);
7389 bool VisitStringLiteral(const StringLiteral *E) { return Success(E); }
7390 bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return Success(E); }
7391 bool VisitCXXTypeidExpr(const CXXTypeidExpr *E);
7392 bool VisitCXXUuidofExpr(const CXXUuidofExpr *E);
7393 bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E);
7394 bool VisitUnaryDeref(const UnaryOperator *E);
7395 bool VisitUnaryReal(const UnaryOperator *E);
7396 bool VisitUnaryImag(const UnaryOperator *E);
// Pre-inc/dec share one out-of-line handler.
7397 bool VisitUnaryPreInc(const UnaryOperator *UO) {
7398 return VisitUnaryPreIncDec(UO);
7400 bool VisitUnaryPreDec(const UnaryOperator *UO) {
7401 return VisitUnaryPreIncDec(UO);
7403 bool VisitBinAssign(const BinaryOperator *BO);
7404 bool VisitCompoundAssignOperator(const CompoundAssignOperator *CAO);
7406 bool VisitCastExpr(const CastExpr *E) {
7407 switch (E->getCastKind()) {
7409 return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
// An lvalue bitcast is not a constant expression (diagnosed), and we
// can no longer track which subobject is designated.
7411 case CK_LValueBitCast:
7412 this->CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
7413 if (!Visit(E->getSubExpr()))
7415 Result.Designator.setInvalid();
7418 case CK_BaseToDerived:
7419 if (!Visit(E->getSubExpr()))
7421 return HandleBaseToDerivedCast(Info, E, Result);
// dynamic_cast of an lvalue.
7424 if (!Visit(E->getSubExpr()))
7426 return HandleDynamicCast(Info, cast<ExplicitCastExpr>(E), Result);
7430 } // end anonymous namespace
7432 /// Evaluate an expression as an lvalue. This can be legitimately called on
7433 /// expressions which are not glvalues, in three cases:
7434 /// * function designators in C, and
7435 /// * "extern void" objects
7436 /// * @selector() expressions in Objective-C
7437 static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
7438 bool InvalidBaseOK) {
// The assertion below enumerates exactly the cases listed above.
7439 assert(E->isGLValue() || E->getType()->isFunctionType() ||
7440 E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E));
7441 return LValueExprEvaluator(Info, Result, InvalidBaseOK).Visit(E);
// Dispatch a DeclRefExpr by the kind of declaration it names: functions
// are lvalue bases themselves, variables go through VisitVarDecl, and a
// structured binding evaluates as the expression it is bound to.
7444 bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
7445 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl()))
7447 if (const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
7448 return VisitVarDecl(E, VD);
7449 if (const BindingDecl *BD = dyn_cast<BindingDecl>(E->getDecl()))
7450 return Visit(BD->getBinding());
// Form an lvalue designating the variable 'VD' referenced by 'E',
// handling lambda captures, frame-local variables, and references.
7455 bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
7457 // If we are within a lambda's call operator, check whether the 'VD' referred
7458 // to within 'E' actually represents a lambda-capture that maps to a
7459 // data-member/field within the closure object, and if so, evaluate to the
7460 // field or what the field refers to.
7461 if (Info.CurrentCall && isLambdaCallOperator(Info.CurrentCall->Callee) &&
7462 isa<DeclRefExpr>(E) &&
7463 cast<DeclRefExpr>(E)->refersToEnclosingVariableOrCapture()) {
7464 // We don't always have a complete capture-map when checking or inferring if
7465 // the function call operator meets the requirements of a constexpr function
7466 // - but we don't need to evaluate the captures to determine constexprness
7467 // (dcl.constexpr C++17).
7468 if (Info.checkingPotentialConstantExpression())
7471 if (auto *FD = Info.CurrentCall->LambdaCaptureFields.lookup(VD)) {
7472 // Start with 'Result' referring to the complete closure object...
7473 Result = *Info.CurrentCall->This;
7474 // ... then update it to refer to the field of the closure object
7475 // that represents the capture.
7476 if (!HandleLValueMember(Info, E, Result, FD))
7478 // And if the field is of reference type, update 'Result' to refer to what
7479 // the field refers to.
7480 if (FD->getType()->isReferenceType()) {
7482 if (!handleLValueToRValueConversion(Info, E, FD->getType(), Result,
7485 Result.setFrom(Info.Ctx, RVal);
// Locate the call frame that owns a local variable, if any.
7490 CallStackFrame *Frame = nullptr;
7491 if (VD->hasLocalStorage() && Info.CurrentCall->Index > 1) {
7492 // Only if a local variable was declared in the function currently being
7493 // evaluated, do we expect to be able to find its value in the current
7494 // frame. (Otherwise it was likely declared in an enclosing context and
7495 // could either have a valid evaluatable value (for e.g. a constexpr
7496 // variable) or be ill-formed (and trigger an appropriate evaluation
7498 if (Info.CurrentCall->Callee &&
7499 Info.CurrentCall->Callee->Equals(VD->getDeclContext())) {
7500 Frame = Info.CurrentCall;
// Non-reference variables are themselves the designated object; the
// version number distinguishes successive lifetimes within one frame.
7504 if (!VD->getType()->isReferenceType()) {
7506 Result.set({VD, Frame->Index,
7507 Info.CurrentCall->getCurrentTemporaryVersion(VD)});
// Reference variables: evaluate the initializer to find the referent.
7514 if (!evaluateVarDeclInit(Info, E, VD, Frame, V, nullptr))
7516 if (!V->hasValue()) {
7517 // FIXME: Is it possible for V to be indeterminate here? If so, we should
7518 // adjust the diagnostic to say that.
7519 if (!Info.checkingPotentialConstantExpression())
7520 Info.FFDiag(E, diag::note_constexpr_use_uninit_reference);
7523 return Success(*V, E);
// Materialize a temporary object and produce an lvalue designating it
// (or the subobject selected by any stripped adjustments).
7526 bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
7527 const MaterializeTemporaryExpr *E) {
7528 // Walk through the expression to find the materialized temporary itself.
7529 SmallVector<const Expr *, 2> CommaLHSs;
7530 SmallVector<SubobjectAdjustment, 2> Adjustments;
7532 E->getSubExpr()->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
7534 // If we passed any comma operators, evaluate their LHSs.
7535 for (unsigned I = 0, N = CommaLHSs.size(); I != N; ++I)
7536 if (!EvaluateIgnoredValue(Info, CommaLHSs[I]))
7539 // A materialized temporary with static storage duration can appear within the
7540 // result of a constant expression evaluation, so we need to preserve its
7541 // value for use outside this evaluation.
7543 if (E->getStorageDuration() == SD_Static) {
7544 Value = E->getOrCreateValue(true);
// Otherwise the temporary lives in the current call frame.
7548 Value = &Info.CurrentCall->createTemporary(
7549 E, E->getType(), E->getStorageDuration() == SD_Automatic, Result);
7552 QualType Type = Inner->getType();
7554 // Materialize the temporary itself.
7555 if (!EvaluateInPlace(*Value, Info, Result, Inner)) {
7560 // Adjust our lvalue to refer to the desired subobject.
// Adjustments were collected outside-in, so apply them in reverse.
7561 for (unsigned I = Adjustments.size(); I != 0; /**/) {
7563 switch (Adjustments[I].Kind) {
7564 case SubobjectAdjustment::DerivedToBaseAdjustment:
7565 if (!HandleLValueBasePath(Info, Adjustments[I].DerivedToBase.BasePath,
7568 Type = Adjustments[I].DerivedToBase.BasePath->getType();
7571 case SubobjectAdjustment::FieldAdjustment:
7572 if (!HandleLValueMember(Info, E, Result, Adjustments[I].Field))
7574 Type = Adjustments[I].Field->getType();
7577 case SubobjectAdjustment::MemberPointerAdjustment:
7578 if (!HandleMemberPointerAccess(this->Info, Type, Result,
7579 Adjustments[I].Ptr.RHS))
7581 Type = Adjustments[I].Ptr.MPT->getPointeeType();
// An lvalue compound literal (C, or file scope in C++): record the
// expression as the lvalue base without evaluating its initializer yet.
7590 LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
7591 assert((!Info.getLangOpts().CPlusPlus || E->isFileScope()) &&
7592 "lvalue compound literal in c++?");
7593 // Defer visiting the literal until the lvalue-to-rvalue conversion. We can
7594 // only see this when folding in C, so there's no standard to follow here.
// Evaluate typeid(...) to an lvalue denoting a std::type_info object,
// modeled as a TypeInfoLValue base.
7598 bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
7599 TypeInfoLValue TypeInfo;
// Non-polymorphic operand: the type is known statically.
7601 if (!E->isPotentiallyEvaluated()) {
7602 if (E->isTypeOperand())
7603 TypeInfo = TypeInfoLValue(E->getTypeOperand(Info.Ctx).getTypePtr());
7605 TypeInfo = TypeInfoLValue(E->getExprOperand()->getType().getTypePtr());
// Polymorphic glvalue operand: only a constant expression in C++20;
// diagnose in earlier modes, then compute the dynamic type.
7607 if (!Info.Ctx.getLangOpts().CPlusPlus2a) {
7608 Info.CCEDiag(E, diag::note_constexpr_typeid_polymorphic)
7609 << E->getExprOperand()->getType()
7610 << E->getExprOperand()->getSourceRange();
7613 if (!Visit(E->getExprOperand()))
7616 Optional<DynamicType> DynType =
7617 ComputeDynamicType(Info, E, Result, AK_TypeId);
7622 TypeInfoLValue(Info.Ctx.getRecordType(DynType->Type).getTypePtr());
7625 return Success(APValue::LValueBase::getTypeInfo(TypeInfo, E->getType()));
// __uuidof(...) handler. NOTE(review): the body is elided from this
// excerpt; only the signature is visible here.
7628 bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
// Member access as an lvalue: static members ignore the base object
// (but may diagnose its side effects); non-static data members go
// through the shared base-class handler.
7632 bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
7633 // Handle static data members.
7634 if (const VarDecl *VD = dyn_cast<VarDecl>(E->getMemberDecl())) {
7635 VisitIgnoredBaseExpression(E->getBase());
7636 return VisitVarDecl(E, VD);
7639 // Handle static member functions.
7640 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl())) {
7641 if (MD->isStatic()) {
7642 VisitIgnoredBaseExpression(E->getBase());
7647 // Handle non-static data members.
7648 return LValueExprEvaluatorBaseTy::VisitMemberExpr(E);
// a[i] as an lvalue: evaluate the base pointer and the index, then
// adjust the lvalue by the index. Both operands are evaluated even if
// one fails, so that all diagnostics are produced.
7651 bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
7652 // FIXME: Deal with vectors as array subscript bases.
7653 if (E->getBase()->getType()->isVectorType())
7656 bool Success = true;
7657 if (!evaluatePointer(E->getBase(), Result)) {
7658 if (!Info.noteFailure())
7664 if (!EvaluateInteger(E->getIdx(), Index, Info))
7668 HandleLValueArrayAdjustment(Info, E, Result, E->getType(), Index);
// *p as an lvalue: the lvalue is just the evaluated pointer operand.
7671 bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) {
7672 return evaluatePointer(E->getSubExpr(), Result);
// __real x as an lvalue: designate the real part of a complex lvalue.
7675 bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
7676 if (!Visit(E->getSubExpr()))
7678 // __real is a no-op on scalar lvalues.
7679 if (E->getSubExpr()->getType()->isAnyComplexType())
7680 HandleLValueComplexElement(Info, E, Result, E->getType(), false);
// __imag x as an lvalue: designate the imaginary part; only valid on
// complex operands (asserted).
7684 bool LValueExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
7685 assert(E->getSubExpr()->getType()->isAnyComplexType() &&
7686 "lvalue __imag__ on scalar?");
7687 if (!Visit(E->getSubExpr()))
7689 HandleLValueComplexElement(Info, E, Result, E->getType(), true);
// ++x / --x: the result lvalue is the operand itself; handleIncDec
// applies the mutation (no old value needed, hence nullptr). Mutation is
// only a constant expression in C++14 and later.
7693 bool LValueExprEvaluator::VisitUnaryPreIncDec(const UnaryOperator *UO) {
7694 if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
7697 if (!this->Visit(UO->getSubExpr()))
7700 return handleIncDec(
7701 this->Info, UO, Result, UO->getSubExpr()->getType(),
7702 UO->isIncrementOp(), nullptr);
// x op= y: evaluate the LHS as an lvalue (which is also the result),
// evaluate the RHS, then perform the compound assignment. On LHS failure
// the RHS is still evaluated when we are collecting further diagnostics.
7705 bool LValueExprEvaluator::VisitCompoundAssignOperator(
7706 const CompoundAssignOperator *CAO) {
7707 if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
7712 // The overall lvalue result is the result of evaluating the LHS.
7713 if (!this->Visit(CAO->getLHS())) {
7714 if (Info.noteFailure())
7715 Evaluate(RHS, this->Info, CAO->getRHS());
7719 if (!Evaluate(RHS, this->Info, CAO->getRHS()))
7722 return handleCompoundAssignment(
7724 Result, CAO->getLHS()->getType(), CAO->getComputationLHSType(),
7725 CAO->getOpForCompoundAssignment(CAO->getOpcode()), RHS);
// x = y: evaluate LHS lvalue and RHS value, optionally switch the active
// union member (a C++20 feature), then store. Mirrors the compound-
// assignment handler's keep-evaluating behavior on LHS failure.
7728 bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
7729 if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
7734 if (!this->Visit(E->getLHS())) {
7735 if (Info.noteFailure())
7736 Evaluate(NewVal, this->Info, E->getRHS());
7740 if (!Evaluate(NewVal, this->Info, E->getRHS()))
// C++20 permits changing the active member of a union via assignment.
7743 if (Info.getLangOpts().CPlusPlus2a &&
7744 !HandleUnionActiveMemberChange(Info, E->getLHS(), Result))
7747 return handleAssignment(this->Info, E, Result, E->getLHS()->getType(),
7751 //===----------------------------------------------------------------------===//
7752 // Pointer Evaluation
7753 //===----------------------------------------------------------------------===//
7755 /// Attempts to compute the number of bytes available at the pointer
7756 /// returned by a function with the alloc_size attribute. Returns true if we
7757 /// were successful. Places an unsigned number into `Result`.
7759 /// This expects the given CallExpr to be a call to a function with an
7760 /// alloc_size attribute.
7761 static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
7762 const CallExpr *Call,
7763 llvm::APInt &Result) {
7764 const AllocSizeAttr *AllocSize = getAllocSizeAttr(Call);
7766 assert(AllocSize && AllocSize->getElemSizeParam().isValid());
// Guard against a call with too few arguments for the attribute's index.
7767 unsigned SizeArgNo = AllocSize->getElemSizeParam().getASTIndex();
7768 unsigned BitsInSizeT = Ctx.getTypeSize(Ctx.getSizeType());
7769 if (Call->getNumArgs() <= SizeArgNo)
// Evaluate an argument as a size_t-ranged value: it must fold to a
// non-negative integer that fits in size_t.
7772 auto EvaluateAsSizeT = [&](const Expr *E, APSInt &Into) {
7773 Expr::EvalResult ExprResult;
7774 if (!E->EvaluateAsInt(ExprResult, Ctx, Expr::SE_AllowSideEffects))
7776 Into = ExprResult.Val.getInt();
7777 if (Into.isNegative() || !Into.isIntN(BitsInSizeT))
7779 Into = Into.zextOrSelf(BitsInSizeT);
7784 if (!EvaluateAsSizeT(Call->getArg(SizeArgNo), SizeOfElem))
// One-argument form: the size argument alone is the byte count.
7787 if (!AllocSize->getNumElemsParam().isValid()) {
7788 Result = std::move(SizeOfElem);
// Two-argument form (calloc-style): bytes = elem size * element count,
// rejecting unsigned multiplication overflow.
7792 APSInt NumberOfElems;
7793 unsigned NumArgNo = AllocSize->getNumElemsParam().getASTIndex();
7794 if (!EvaluateAsSizeT(Call->getArg(NumArgNo), NumberOfElems))
7798 llvm::APInt BytesAvailable = SizeOfElem.umul_ov(NumberOfElems, Overflow);
7802 Result = std::move(BytesAvailable);
7806 /// Convenience function. LVal's base must be a call to an alloc_size
// function; unwraps the base back to that CallExpr and delegates to the
// CallExpr overload above.
7808 static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
7810 llvm::APInt &Result) {
7811 assert(isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
7812 "Can't get the size of a non alloc_size function");
7813 const auto *Base = LVal.getLValueBase().get<const Expr *>();
7814 const CallExpr *CE = tryUnwrapAllocSizeCall(Base);
7815 return getBytesReturnedByAllocSizeCall(Ctx, CE, Result);
7818 /// Attempts to evaluate the given LValueBase as the result of a call to
7819 /// a function with the alloc_size attribute. If it was possible to do so, this
7820 /// function will return true, make Result's Base point to said function call,
7821 /// and mark Result's Base as invalid.
7822 static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base,
7827 // Because we do no form of static analysis, we only support const variables.
7829 // Additionally, we can't support parameters, nor can we support static
7830 // variables (in the latter case, use-before-assign isn't UB; in the former,
7831 // we have no clue what they'll be assigned to).
7833 dyn_cast_or_null<VarDecl>(Base.dyn_cast<const ValueDecl *>());
7834 if (!VD || !VD->isLocalVarDecl() || !VD->getType().isConstQualified())
// The variable's initializer (modulo parens) must be an alloc_size call.
7837 const Expr *Init = VD->getAnyInitializer();
7841 const Expr *E = Init->IgnoreParens();
7842 if (!tryUnwrapAllocSizeCall(E))
7845 // Store E instead of E unwrapped so that the type of the LValue's base is
7846 // what the user wanted.
7847 Result.setInvalid(E);
// Treat the result as a pointer into an array of unknown bound of the
// pointee type.
7849 QualType Pointee = E->getType()->castAs<PointerType>()->getPointeeType();
7850 Result.addUnsizedArray(Info, E, Pointee);
/// Evaluator for rvalue expressions of pointer type. The resulting
/// pointer is represented as an LValue (base + offset + designator).
7855 class PointerExprEvaluator
7856 : public ExprEvaluatorBase<PointerExprEvaluator> {
// Record success with the given expression as the pointer's base.
7860 bool Success(const Expr *E) {
// Helpers forwarding to the file-level entry points with our
// InvalidBaseOK flag.
7865 bool evaluateLValue(const Expr *E, LValue &Result) {
7866 return EvaluateLValue(E, Result, Info, InvalidBaseOK);
7869 bool evaluatePointer(const Expr *E, LValue &Result) {
7870 return EvaluatePointer(E, Result, Info, InvalidBaseOK);
7873 bool visitNonBuiltinCallExpr(const CallExpr *E);
7876 PointerExprEvaluator(EvalInfo &info, LValue &Result, bool InvalidBaseOK)
7877 : ExprEvaluatorBaseTy(info), Result(Result),
7878 InvalidBaseOK(InvalidBaseOK) {}
7880 bool Success(const APValue &V, const Expr *E) {
7881 Result.setFrom(Info.Ctx, V);
// Zero-initializing a pointer produces a null pointer of the right type.
7884 bool ZeroInitialization(const Expr *E) {
7885 Result.setNull(Info.Ctx, E->getType());
7889 bool VisitBinaryOperator(const BinaryOperator *E);
7890 bool VisitCastExpr(const CastExpr* E);
7891 bool VisitUnaryAddrOf(const UnaryOperator *E);
7892 bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
7893 { return Success(E); }
// A boxed expression is only constant if expressible as a constant
// initializer; otherwise evaluate the operand for diagnostics only.
7894 bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
7895 if (E->isExpressibleAsConstantInitializer())
7897 if (Info.noteFailure())
7898 EvaluateIgnoredValue(Info, E->getSubExpr())
7901 bool VisitAddrLabelExpr(const AddrLabelExpr *E)
7902 { return Success(E); }
7903 bool VisitCallExpr(const CallExpr *E);
7904 bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp);
// Only capture-less blocks are constant.
7905 bool VisitBlockExpr(const BlockExpr *E) {
7906 if (!E->getBlockDecl()->hasCaptures())
7910 bool VisitCXXThisExpr(const CXXThisExpr *E) {
7911 // Can't look at 'this' when checking a potential constant expression.
7912 if (Info.checkingPotentialConstantExpression())
7914 if (!Info.CurrentCall->This) {
7915 if (Info.getLangOpts().CPlusPlus11)
7916 Info.FFDiag(E, diag::note_constexpr_this) << E->isImplicit();
7921 Result = *Info.CurrentCall->This;
7922 // If we are inside a lambda's call operator, the 'this' expression refers
7923 // to the enclosing '*this' object (either by value or reference) which is
7924 // either copied into the closure object's field that represents the '*this'
7925 // or refers to '*this'.
7926 if (isLambdaCallOperator(Info.CurrentCall->Callee)) {
7927 // Ensure we actually have captured 'this'. (an error will have
7928 // been previously reported if not).
7929 if (!Info.CurrentCall->LambdaThisCaptureField)
7932 // Update 'Result' to refer to the data member/field of the closure object
7933 // that represents the '*this' capture.
7934 if (!HandleLValueMember(Info, E, Result,
7935 Info.CurrentCall->LambdaThisCaptureField))
7937 // If we captured '*this' by reference, replace the field with its referent.
7938 if (Info.CurrentCall->LambdaThisCaptureField->getType()
7939 ->isPointerType()) {
7941 if (!handleLValueToRValueConversion(Info, E, E->getType(), Result,
7945 Result.setFrom(Info.Ctx, RVal);
7951 bool VisitCXXNewExpr(const CXXNewExpr *E);
// __builtin_source_location-style expressions: evaluate in the scope of
// the current default-argument/initializer context.
7953 bool VisitSourceLocExpr(const SourceLocExpr *E) {
7954 assert(E->isStringType() && "SourceLocExpr isn't a pointer type?");
7955 APValue LValResult = E->EvaluateInContext(
7956 Info.Ctx, Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr());
7957 Result.setFrom(Info.Ctx, LValResult);
7961 // FIXME: Missing: @protocol, @selector
7963 } // end anonymous namespace
// Entry point: evaluate an rvalue of pointer type into 'Result'.
7965 static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info,
7966 bool InvalidBaseOK) {
7967 assert(E->isRValue() && E->getType()->hasPointerRepresentation());
7968 return PointerExprEvaluator(Info, Result, InvalidBaseOK).Visit(E);
// Pointer arithmetic: ptr + int and ptr - int (the operands may appear in
// either order for '+'; they are swapped so PExp is always the pointer).
7971 bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
7972 if (E->getOpcode() != BO_Add &&
7973 E->getOpcode() != BO_Sub)
7974 return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
7976 const Expr *PExp = E->getLHS();
7977 const Expr *IExp = E->getRHS();
7978 if (IExp->getType()->isPointerType())
7979 std::swap(PExp, IExp);
// Evaluate both operands even if the pointer fails, to gather notes.
7981 bool EvalPtrOK = evaluatePointer(PExp, Result);
7982 if (!EvalPtrOK && !Info.noteFailure())
7985 llvm::APSInt Offset;
7986 if (!EvaluateInteger(IExp, Offset, Info) || !EvalPtrOK)
// Subtraction adjusts by the negated (signed) offset.
7989 if (E->getOpcode() == BO_Sub)
7990 negateAsSigned(Offset);
7992 QualType Pointee = PExp->getType()->castAs<PointerType>()->getPointeeType();
7993 return HandleLValueArrayAdjustment(Info, E, Result, Pointee, Offset);
// &x: the pointer value is the lvalue of the operand.
7996 bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
7997 return evaluateLValue(E->getSubExpr(), Result);
// Cast handling for pointer-typed results. Covers bitcasts, hierarchy
// casts, null/integral conversions, and the array/function decays.
8000 bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
8001 const Expr *SubExpr = E->getSubExpr();
8003 switch (E->getCastKind()) {
8007 case CK_CPointerToObjCPointerCast:
8008 case CK_BlockPointerToObjCPointerCast:
8009 case CK_AnyPointerToBlockPointerCast:
8010 case CK_AddressSpaceConversion:
8011 if (!Visit(SubExpr))
8013 // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
8014 // permitted in constant expressions in C++11. Bitcasts from cv void* are
8015 // also static_casts, but we disallow them as a resolution to DR1312.
8016 if (!E->getType()->isVoidPointerType()) {
8017 if (!Result.InvalidBase && !Result.Designator.Invalid &&
8018 !Result.IsNullPtr &&
8019 Info.Ctx.hasSameUnqualifiedType(Result.Designator.getType(Info.Ctx),
8020 E->getType()->getPointeeType()) &&
8021 Info.getStdAllocatorCaller("allocate")) {
8022 // Inside a call to std::allocator::allocate and friends, we permit
8023 // casting from void* back to cv1 T* for a pointer that points to a
// Otherwise, diagnose the cast and invalidate the designator.
8026 Result.Designator.setInvalid();
8027 if (SubExpr->getType()->isVoidPointerType())
8028 CCEDiag(E, diag::note_constexpr_invalid_cast)
8029 << 3 << SubExpr->getType();
8031 CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
// A null pointer remains null across an address-space conversion.
8034 if (E->getCastKind() == CK_AddressSpaceConversion && Result.IsNullPtr)
8035 ZeroInitialization(E);
8038 case CK_DerivedToBase:
8039 case CK_UncheckedDerivedToBase:
8040 if (!evaluatePointer(E->getSubExpr(), Result))
// A null pointer converts to a null pointer of the base type.
8042 if (!Result.Base && Result.Offset.isZero())
8045 // Now figure out the necessary offset to add to the base LV to get from
8046 // the derived class to the base class.
8047 return HandleLValueBasePath(Info, E, E->getSubExpr()->getType()->
8048 castAs<PointerType>()->getPointeeType(),
8051 case CK_BaseToDerived:
8052 if (!Visit(E->getSubExpr()))
8054 if (!Result.Base && Result.Offset.isZero())
8056 return HandleBaseToDerivedCast(Info, E, Result);
// dynamic_cast of a pointer.
8059 if (!Visit(E->getSubExpr()))
8061 return HandleDynamicCast(Info, cast<ExplicitCastExpr>(E), Result);
8063 case CK_NullToPointer:
8064 VisitIgnoredValue(E->getSubExpr());
8065 return ZeroInitialization(E);
8067 case CK_IntegralToPointer: {
// Not a constant expression, but foldable: an integer constant becomes
// a baseless pointer at that byte offset; an lvalue passes through.
8068 CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
8071 if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
8074 if (Value.isInt()) {
8075 unsigned Size = Info.Ctx.getTypeSize(E->getType());
8076 uint64_t N = Value.getInt().extOrTrunc(Size).getZExtValue();
8077 Result.Base = (Expr*)nullptr;
8078 Result.InvalidBase = false;
8079 Result.Offset = CharUnits::fromQuantity(N);
8080 Result.Designator.setInvalid();
8081 Result.IsNullPtr = false;
8084 // Cast is of an lvalue, no need to change value.
8085 Result.setFrom(Info.Ctx, Value);
8090 case CK_ArrayToPointerDecay: {
// Decay a glvalue array directly; a prvalue array is first materialized
// into a temporary in the current frame.
8091 if (SubExpr->isGLValue()) {
8092 if (!evaluateLValue(SubExpr, Result))
8095 APValue &Value = Info.CurrentCall->createTemporary(
8096 SubExpr, SubExpr->getType(), false, Result);
8097 if (!EvaluateInPlace(Value, Info, Result, SubExpr))
8100 // The result is a pointer to the first element of the array.
8101 auto *AT = Info.Ctx.getAsArrayType(SubExpr->getType());
8102 if (auto *CAT = dyn_cast<ConstantArrayType>(AT))
8103 Result.addArray(Info, E, CAT);
8105 Result.addUnsizedArray(Info, E, AT->getElementType());
8109 case CK_FunctionToPointerDecay:
8110 return evaluateLValue(SubExpr, Result);
8112 case CK_LValueToRValue: {
// Load a pointer value; on failure, fall back to modeling the pointer
// as the result of an alloc_size call if that is permitted.
8114 if (!evaluateLValue(E->getSubExpr(), LVal))
8118 // Note, we use the subexpression's type in order to retain cv-qualifiers.
8119 if (!handleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(),
8121 return InvalidBaseOK &&
8122 evaluateLValueAsAllocSize(Info, LVal.Base, Result);
8123 return Success(RVal, E);
8127 return ExprEvaluatorBaseTy::VisitCastExpr(E);
// Compute the alignment of type T as queried by alignof/_Alignof/__alignof.
// ExprKind selects ABI alignment (UETT_AlignOf) vs. preferred alignment
// (UETT_PreferredAlignOf); pre-Clang-8 ABI compatibility makes alignof also
// return the preferred alignment.
8130 static CharUnits GetAlignOfType(EvalInfo &Info, QualType T,
8131 UnaryExprOrTypeTrait ExprKind) {
8132 // C++ [expr.alignof]p3:
8133 // When alignof is applied to a reference type, the result is the
8134 // alignment of the referenced type.
8135 if (const ReferenceType *Ref = T->getAs<ReferenceType>())
8136 T = Ref->getPointeeType();
// __attribute__((aligned(1)))-style "unaligned" qualification forces 1.
8138 if (T.getQualifiers().hasUnaligned())
8139 return CharUnits::One();
8141 const bool AlignOfReturnsPreferred =
8142 Info.Ctx.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver7;
8144 // __alignof is defined to return the preferred alignment.
8145 // Before 8, clang returned the preferred alignment for alignof and _Alignof
8147 if (ExprKind == UETT_PreferredAlignOf || AlignOfReturnsPreferred)
8148 return Info.Ctx.toCharUnitsFromBits(
8149 Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
8150 // alignof and _Alignof are defined to return the ABI alignment.
8151 else if (ExprKind == UETT_AlignOf)
8152 return Info.Ctx.getTypeAlignInChars(T.getTypePtr());
8154 llvm_unreachable("GetAlignOfType on a non-alignment ExprKind");
// Compute the alignment of the object named by expression E (for
// alignof(expr)). Decl references and member accesses use the declaration's
// alignment (treating references as their pointees); anything else falls
// back to the alignment of the expression's type.
8157 static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E,
8158 UnaryExprOrTypeTrait ExprKind) {
8159 E = E->IgnoreParens();
8161 // The kinds of expressions that we have special-case logic here for
8162 // should be kept up to date with the special checks for those
8163 // expressions in Sema.
8165 // alignof decl is always accepted, even if it doesn't make sense: we default
8166 // to 1 in those cases.
8167 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
8168 return Info.Ctx.getDeclAlign(DRE->getDecl(),
8169 /*RefAsPointee*/true);
8171 if (const MemberExpr *ME = dyn_cast<MemberExpr>(E))
8172 return Info.Ctx.getDeclAlign(ME->getMemberDecl(),
8173 /*RefAsPointee*/true);
8175 return GetAlignOfType(Info, E->getType(), ExprKind);
// Return the statically-known alignment of an lvalue's base: a declaration's
// alignment, an expression's alignof, or (for typeinfo-like bases) the
// alignment of the base's recorded type.
8178 static CharUnits getBaseAlignment(EvalInfo &Info, const LValue &Value) {
8179 if (const auto *VD = Value.Base.dyn_cast<const ValueDecl *>())
8180 return Info.Ctx.getDeclAlign(VD);
8181 if (const auto *E = Value.Base.dyn_cast<const Expr *>())
8182 return GetAlignOfExpr(Info, E, UETT_AlignOf);
8183 return GetAlignOfType(Info, Value.Base.getTypeInfoType(), UETT_AlignOf);
8186 /// Evaluate the value of the alignment argument to __builtin_align_{up,down},
8187 /// __builtin_is_aligned and __builtin_assume_aligned.
8188 static bool getAlignmentArgument(const Expr *E, QualType ForType,
8189 EvalInfo &Info, APSInt &Alignment) {
8190 if (!EvaluateInteger(E, Alignment, Info))
// Alignments must be positive powers of two.
8192 if (Alignment < 0 || !Alignment.isPowerOf2()) {
8193 Info.FFDiag(E, diag::note_constexpr_invalid_alignment) << Alignment;
// Reject alignments >= 2^(width-1) of the value type being aligned.
8196 unsigned SrcWidth = Info.Ctx.getIntWidth(ForType);
8197 APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
8198 if (APSInt::compareValues(Alignment, MaxValue) > 0) {
8199 Info.FFDiag(E, diag::note_constexpr_alignment_too_big)
8200 << MaxValue << ForType << Alignment;
8203 // Ensure both alignment and source value have the same bit width so that we
8204 // don't assert when computing the resulting value.
8205 APSInt ExtAlignment =
8206 APSInt(Alignment.zextOrTrunc(SrcWidth), /*isUnsigned=*/true);
8207 assert(APSInt::compareValues(Alignment, ExtAlignment) == 0 &&
8208 "Alignment should not be changed by ext/trunc");
8209 Alignment = ExtAlignment;
8210 assert(Alignment.getBitWidth() == SrcWidth);
8214 // To be clear: this happily visits unsupported builtins. Better name welcomed.
// Try the normal call-expression path first; if that fails, fold calls to
// alloc_size-attributed functions into an invalid-base pointer (usable only
// for __builtin_object_size-style reasoning) when InvalidBaseOK permits.
8215 bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) {
8216 if (ExprEvaluatorBaseTy::VisitCallExpr(E))
8219 if (!(InvalidBaseOK && getAllocSizeAttr(E)))
// Mark the base invalid and treat the result as a pointer into an
// unsized array of the pointee type.
8222 Result.setInvalid(E);
8223 QualType PointeeTy = E->getType()->castAs<PointerType>()->getPointeeType();
8224 Result.addUnsizedArray(Info, E, PointeeTy);
// Evaluate a call that yields a pointer: string-literal pseudo-calls are
// handled specially, builtins dispatch to VisitBuiltinCallExpr, and
// everything else goes through visitNonBuiltinCallExpr.
8228 bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
8229 if (IsStringLiteralCall(E))
8232 if (unsigned BuiltinOp = E->getBuiltinCallee())
8233 return VisitBuiltinCallExpr(E, BuiltinOp);
8235 return visitNonBuiltinCallExpr(E);
// Evaluate a builtin call that produces a pointer. Handles address/alignment
// builtins, the str*/mem* character-search family, and the mem{cpy,move}
// family; unrecognized builtins fall through to visitNonBuiltinCallExpr.
// NOTE(review): many source lines (error returns, some braces/labels) are
// elided from this excerpt; comments describe the visible logic only.
8238 bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
8239 unsigned BuiltinOp) {
8240 switch (BuiltinOp) {
8241 case Builtin::BI__builtin_addressof:
8242 return evaluateLValue(E->getArg(0), Result);
8243 case Builtin::BI__builtin_assume_aligned: {
8244 // We need to be very careful here because: if the pointer does not have the
8245 // asserted alignment, then the behavior is undefined, and undefined
8246 // behavior is non-constant.
8247 if (!evaluatePointer(E->getArg(0), Result))
8250 LValue OffsetResult(Result);
8252 if (!getAlignmentArgument(E->getArg(1), E->getArg(0)->getType(), Info,
8255 CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
// Optional third argument: a misalignment offset subtracted before the
// alignment check (per __builtin_assume_aligned's contract).
8257 if (E->getNumArgs() > 2) {
8259 if (!EvaluateInteger(E->getArg(2), Offset, Info))
8262 int64_t AdditionalOffset = -Offset.getZExtValue();
8263 OffsetResult.Offset += CharUnits::fromQuantity(AdditionalOffset);
8266 // If there is a base object, then it must have the correct alignment.
8267 if (OffsetResult.Base) {
8268 CharUnits BaseAlignment = getBaseAlignment(Info, OffsetResult);
8270 if (BaseAlignment < Align) {
8271 Result.Designator.setInvalid();
8272 // FIXME: Add support to Diagnostic for long / long long.
8273 CCEDiag(E->getArg(0),
8274 diag::note_constexpr_baa_insufficient_alignment) << 0
8275 << (unsigned)BaseAlignment.getQuantity()
8276 << (unsigned)Align.getQuantity();
8281 // The offset must also have the correct alignment.
8282 if (OffsetResult.Offset.alignTo(Align) != OffsetResult.Offset) {
8283 Result.Designator.setInvalid();
// Diagnostic choice (elided condition above) selects between the
// "insufficient alignment" and "value insufficient alignment" notes.
8286 ? CCEDiag(E->getArg(0),
8287 diag::note_constexpr_baa_insufficient_alignment) << 1
8288 : CCEDiag(E->getArg(0),
8289 diag::note_constexpr_baa_value_insufficient_alignment))
8290 << (int)OffsetResult.Offset.getQuantity()
8291 << (unsigned)Align.getQuantity();
8297 case Builtin::BI__builtin_align_up:
8298 case Builtin::BI__builtin_align_down: {
8299 if (!evaluatePointer(E->getArg(0), Result))
8302 if (!getAlignmentArgument(E->getArg(1), E->getArg(0)->getType(), Info,
8305 CharUnits BaseAlignment = getBaseAlignment(Info, Result);
8306 CharUnits PtrAlign = BaseAlignment.alignmentAtOffset(Result.Offset);
8307 // For align_up/align_down, we can return the same value if the alignment
8308 // is known to be greater or equal to the requested value.
8309 if (PtrAlign.getQuantity() >= Alignment)
8312 // The alignment could be greater than the minimum at run-time, so we cannot
8313 // infer much about the resulting pointer value. One case is possible:
8314 // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
8315 // can infer the correct index if the requested alignment is smaller than
8316 // the base alignment so we can perform the computation on the offset.
8317 if (BaseAlignment.getQuantity() >= Alignment) {
8318 assert(Alignment.getBitWidth() <= 64 &&
8319 "Cannot handle > 64-bit address-space");
8320 uint64_t Alignment64 = Alignment.getZExtValue();
8321 CharUnits NewOffset = CharUnits::fromQuantity(
8322 BuiltinOp == Builtin::BI__builtin_align_down
8323 ? llvm::alignDown(Result.Offset.getQuantity(), Alignment64)
8324 : llvm::alignTo(Result.Offset.getQuantity(), Alignment64));
8325 Result.adjustOffset(NewOffset - Result.Offset);
8326 // TODO: diagnose out-of-bounds values/only allow for arrays?
8329 // Otherwise, we cannot constant-evaluate the result.
8330 Info.FFDiag(E->getArg(0), diag::note_constexpr_alignment_adjust)
8334 case Builtin::BI__builtin_operator_new:
8335 return HandleOperatorNewCall(Info, E, Result);
8336 case Builtin::BI__builtin_launder:
8337 return evaluatePointer(E->getArg(0), Result);
// Library forms of the search builtins: note they are not constexpr
// functions (C++11 onwards), then fall through to the shared handler.
8338 case Builtin::BIstrchr:
8339 case Builtin::BIwcschr:
8340 case Builtin::BImemchr:
8341 case Builtin::BIwmemchr:
8342 if (Info.getLangOpts().CPlusPlus11)
8343 Info.CCEDiag(E, diag::note_constexpr_invalid_function)
8344 << /*isConstexpr*/0 << /*isConstructor*/0
8345 << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
8347 Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
8349 case Builtin::BI__builtin_strchr:
8350 case Builtin::BI__builtin_wcschr:
8351 case Builtin::BI__builtin_memchr:
8352 case Builtin::BI__builtin_char_memchr:
8353 case Builtin::BI__builtin_wmemchr: {
8354 if (!Visit(E->getArg(0)))
8357 if (!EvaluateInteger(E->getArg(1), Desired, Info))
// str*-style searches have no length bound; mem*-style take arg 2.
8359 uint64_t MaxLength = uint64_t(-1);
8360 if (BuiltinOp != Builtin::BIstrchr &&
8361 BuiltinOp != Builtin::BIwcschr &&
8362 BuiltinOp != Builtin::BI__builtin_strchr &&
8363 BuiltinOp != Builtin::BI__builtin_wcschr) {
8365 if (!EvaluateInteger(E->getArg(2), N, Info))
8367 MaxLength = N.getExtValue();
8369 // We cannot find the value if there are no candidates to match against.
8370 if (MaxLength == 0u)
8371 return ZeroInitialization(E);
8372 if (!Result.checkNullPointerForFoldAccess(Info, E, AK_Read) ||
8373 Result.Designator.Invalid)
8375 QualType CharTy = Result.Designator.getType(Info.Ctx);
8376 bool IsRawByte = BuiltinOp == Builtin::BImemchr ||
8377 BuiltinOp == Builtin::BI__builtin_memchr;
8379 Info.Ctx.hasSameUnqualifiedType(
8380 CharTy, E->getArg(0)->getType()->getPointeeType()));
8381 // Pointers to const void may point to objects of incomplete type.
8382 if (IsRawByte && CharTy->isIncompleteType()) {
8383 Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy;
8386 // Give up on byte-oriented matching against multibyte elements.
8387 // FIXME: We can compare the bytes in the correct order.
8388 if (IsRawByte && Info.Ctx.getTypeSizeInChars(CharTy) != CharUnits::One())
8390 // Figure out what value we're actually looking for (after converting to
8391 // the corresponding unsigned type if necessary).
8392 uint64_t DesiredVal;
8393 bool StopAtNull = false;
8394 switch (BuiltinOp) {
8395 case Builtin::BIstrchr:
8396 case Builtin::BI__builtin_strchr:
8397 // strchr compares directly to the passed integer, and therefore
8398 // always fails if given an int that is not a char.
8399 if (!APSInt::isSameValue(HandleIntToIntCast(Info, E, CharTy,
8400 E->getArg(1)->getType(),
8403 return ZeroInitialization(E);
8406 case Builtin::BImemchr:
8407 case Builtin::BI__builtin_memchr:
8408 case Builtin::BI__builtin_char_memchr:
8409 // memchr compares by converting both sides to unsigned char. That's also
8410 // correct for strchr if we get this far (to cope with plain char being
8411 // unsigned in the strchr case).
8412 DesiredVal = Desired.trunc(Info.Ctx.getCharWidth()).getZExtValue();
8415 case Builtin::BIwcschr:
8416 case Builtin::BI__builtin_wcschr:
8419 case Builtin::BIwmemchr:
8420 case Builtin::BI__builtin_wmemchr:
8421 // wcschr and wmemchr are given a wchar_t to look for. Just use it.
8422 DesiredVal = Desired.getZExtValue();
// Walk the array element by element, reading each character, stopping
// on a match (or on NUL for the strchr/wcschr family via StopAtNull).
8426 for (; MaxLength; --MaxLength) {
8428 if (!handleLValueToRValueConversion(Info, E, CharTy, Result, Char) ||
8431 if (Char.getInt().getZExtValue() == DesiredVal)
8433 if (StopAtNull && !Char.getInt())
8435 if (!HandleLValueArrayAdjustment(Info, E, Result, CharTy, 1))
8438 // Not found: return nullptr.
8439 return ZeroInitialization(E);
// Library forms of the copy builtins: same "not constexpr" notes, then
// fall through to the shared implementation below.
8442 case Builtin::BImemcpy:
8443 case Builtin::BImemmove:
8444 case Builtin::BIwmemcpy:
8445 case Builtin::BIwmemmove:
8446 if (Info.getLangOpts().CPlusPlus11)
8447 Info.CCEDiag(E, diag::note_constexpr_invalid_function)
8448 << /*isConstexpr*/0 << /*isConstructor*/0
8449 << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
8451 Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
8453 case Builtin::BI__builtin_memcpy:
8454 case Builtin::BI__builtin_memmove:
8455 case Builtin::BI__builtin_wmemcpy:
8456 case Builtin::BI__builtin_wmemmove: {
8457 bool WChar = BuiltinOp == Builtin::BIwmemcpy ||
8458 BuiltinOp == Builtin::BIwmemmove ||
8459 BuiltinOp == Builtin::BI__builtin_wmemcpy ||
8460 BuiltinOp == Builtin::BI__builtin_wmemmove;
8461 bool Move = BuiltinOp == Builtin::BImemmove ||
8462 BuiltinOp == Builtin::BIwmemmove ||
8463 BuiltinOp == Builtin::BI__builtin_memmove ||
8464 BuiltinOp == Builtin::BI__builtin_wmemmove;
8466 // The result of mem* is the first argument.
8467 if (!Visit(E->getArg(0)))
8469 LValue Dest = Result;
8472 if (!EvaluatePointer(E->getArg(1), Src, Info))
8476 if (!EvaluateInteger(E->getArg(2), N, Info))
8478 assert(!N.isSigned() && "memcpy and friends take an unsigned size");
8480 // If the size is zero, we treat this as always being a valid no-op.
8481 // (Even if one of the src and dest pointers is null.)
8485 // Otherwise, if either of the operands is null, we can't proceed. Don't
8486 // try to determine the type of the copied objects, because there aren't
8488 if (!Src.Base || !Dest.Base) {
8490 (!Src.Base ? Src : Dest).moveInto(Val);
8491 Info.FFDiag(E, diag::note_constexpr_memcpy_null)
8492 << Move << WChar << !!Src.Base
8493 << Val.getAsString(Info.Ctx, E->getArg(0)->getType());
8496 if (Src.Designator.Invalid || Dest.Designator.Invalid)
8499 // We require that Src and Dest are both pointers to arrays of
8500 // trivially-copyable type. (For the wide version, the designator will be
8501 // invalid if the designated object is not a wchar_t.)
8502 QualType T = Dest.Designator.getType(Info.Ctx);
8503 QualType SrcT = Src.Designator.getType(Info.Ctx);
8504 if (!Info.Ctx.hasSameUnqualifiedType(T, SrcT)) {
8505 Info.FFDiag(E, diag::note_constexpr_memcpy_type_pun) << Move << SrcT << T;
8508 if (T->isIncompleteType()) {
8509 Info.FFDiag(E, diag::note_constexpr_memcpy_incomplete_type) << Move << T;
8512 if (!T.isTriviallyCopyableType(Info.Ctx)) {
8513 Info.FFDiag(E, diag::note_constexpr_memcpy_nontrivial) << Move << T;
8517 // Figure out how many T's we're copying.
8518 uint64_t TSize = Info.Ctx.getTypeSizeInChars(T).getQuantity();
// Reject byte counts that are not a whole multiple of sizeof(T); the
// division also converts N from bytes to elements for the byte forms.
8521 llvm::APInt OrigN = N;
8522 llvm::APInt::udivrem(OrigN, TSize, N, Remainder);
8524 Info.FFDiag(E, diag::note_constexpr_memcpy_unsupported)
8525 << Move << WChar << 0 << T << OrigN.toString(10, /*Signed*/false)
8531 // Check that the copying will remain within the arrays, just so that we
8532 // can give a more meaningful diagnostic. This implicitly also checks that
8533 // N fits into 64 bits.
8534 uint64_t RemainingSrcSize = Src.Designator.validIndexAdjustments().second;
8535 uint64_t RemainingDestSize = Dest.Designator.validIndexAdjustments().second;
8536 if (N.ugt(RemainingSrcSize) || N.ugt(RemainingDestSize)) {
8537 Info.FFDiag(E, diag::note_constexpr_memcpy_unsupported)
8538 << Move << WChar << (N.ugt(RemainingSrcSize) ? 1 : 2) << T
8539 << N.toString(10, /*Signed*/false);
8542 uint64_t NElems = N.getZExtValue();
8543 uint64_t NBytes = NElems * TSize;
8545 // Check for overlap.
8547 if (HasSameBase(Src, Dest)) {
8548 uint64_t SrcOffset = Src.getLValueOffset().getQuantity();
8549 uint64_t DestOffset = Dest.getLValueOffset().getQuantity();
8550 if (DestOffset >= SrcOffset && DestOffset - SrcOffset < NBytes) {
8551 // Dest is inside the source region.
8553 Info.FFDiag(E, diag::note_constexpr_memcpy_overlap) << WChar;
8556 // For memmove and friends, copy backwards.
8557 if (!HandleLValueArrayAdjustment(Info, E, Src, T, NElems - 1) ||
8558 !HandleLValueArrayAdjustment(Info, E, Dest, T, NElems - 1))
8561 } else if (!Move && SrcOffset >= DestOffset &&
8562 SrcOffset - DestOffset < NBytes) {
8563 // Src is inside the destination region for memcpy: invalid.
8564 Info.FFDiag(E, diag::note_constexpr_memcpy_overlap) << WChar;
// Element-wise copy loop: load from Src, store to Dest, then advance
// both pointers by Direction (+1 forwards, -1 when copying backwards).
8571 // FIXME: Set WantObjectRepresentation to true if we're copying a
8573 if (!handleLValueToRValueConversion(Info, E, T, Src, Val) ||
8574 !handleAssignment(Info, E, Dest, T, Val))
8576 // Do not iterate past the last element; if we're copying backwards, that
8577 // might take us off the start of the array.
8580 if (!HandleLValueArrayAdjustment(Info, E, Src, T, Direction) ||
8581 !HandleLValueArrayAdjustment(Info, E, Dest, T, Direction))
// Any builtin not handled above is treated as an ordinary call.
8590 return visitNonBuiltinCallExpr(E);
// Forward declarations used by VisitCXXNewExpr below: initialize an array
// allocated by array-new from a braced init list / from a constructor call,
// when the allocated bound differs from the initializer's own bound.
8593 static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
8594 APValue &Result, const InitListExpr *ILE,
8595 QualType AllocType);
8596 static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
8598 const CXXConstructExpr *CCE,
8599 QualType AllocType);
// Evaluate a new-expression during constant evaluation (C++20 constexpr
// new). Handles std::construct_at-style placement new inside std functions,
// replaceable global allocation functions, (std::nothrow) placement lists,
// array bounds checking per [expr.new]p9, and the actual heap allocation
// plus initialization.
// NOTE(review): several source lines are elided from this excerpt.
8601 bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
// Before C++20, 'new' is never a constant expression; emit a note.
8602 if (!Info.getLangOpts().CPlusPlus2a)
8603 Info.CCEDiag(E, diag::note_constexpr_new);
8605 // We cannot speculatively evaluate a delete expression.
8606 if (Info.SpeculativeEvaluationDepth)
8609 FunctionDecl *OperatorNew = E->getOperatorNew();
8611 bool IsNothrow = false;
8612 bool IsPlacement = false;
// Placement new with a single pointer arg, called from within a std::
// function (e.g. std::construct_at): reuse the given storage.
8613 if (OperatorNew->isReservedGlobalPlacementOperator() &&
8614 Info.CurrentCall->isStdFunction() && !E->isArray()) {
8615 // FIXME Support array placement new.
8616 assert(E->getNumPlacementArgs() == 1);
8617 if (!EvaluatePointer(E->getPlacementArg(0), Result, Info))
8619 if (Result.Designator.Invalid)
8622 } else if (!OperatorNew->isReplaceableGlobalAllocationFunction()) {
8623 Info.FFDiag(E, diag::note_constexpr_new_non_replaceable)
8624 << isa<CXXMethodDecl>(OperatorNew) << OperatorNew;
8626 } else if (E->getNumPlacementArgs()) {
8627 // The only new-placement list we support is of the form (std::nothrow).
8629 // FIXME: There is no restriction on this, but it's not clear that any
8630 // other form makes any sense. We get here for cases such as:
8632 // new (std::align_val_t{N}) X(int)
8634 // (which should presumably be valid only if N is a multiple of
8635 // alignof(int), and in any case can't be deallocated unless N is
8636 // alignof(X) and X has new-extended alignment).
8637 if (E->getNumPlacementArgs() != 1 ||
8638 !E->getPlacementArg(0)->getType()->isNothrowT())
8639 return Error(E, diag::note_constexpr_new_placement);
8642 if (!EvaluateLValue(E->getPlacementArg(0), Nothrow, Info))
8647 const Expr *Init = E->getInitializer();
8648 const InitListExpr *ResizedArrayILE = nullptr;
8649 const CXXConstructExpr *ResizedArrayCCE = nullptr;
8651 QualType AllocType = E->getAllocatedType();
// Array new: evaluate and validate the array bound.
8652 if (Optional<const Expr*> ArraySize = E->getArraySize()) {
8653 const Expr *Stripped = *ArraySize;
// Strip no-op / integral-cast wrappers so diagnostics see the real bound.
8654 for (; auto *ICE = dyn_cast<ImplicitCastExpr>(Stripped);
8655 Stripped = ICE->getSubExpr())
8656 if (ICE->getCastKind() != CK_NoOp &&
8657 ICE->getCastKind() != CK_IntegralCast)
8660 llvm::APSInt ArrayBound;
8661 if (!EvaluateInteger(Stripped, ArrayBound, Info))
8664 // C++ [expr.new]p9:
8665 // The expression is erroneous if:
8666 // -- [...] its value before converting to size_t [or] applying the
8667 // second standard conversion sequence is less than zero
8668 if (ArrayBound.isSigned() && ArrayBound.isNegative()) {
// In nothrow mode the erroneous allocation yields a null pointer
// rather than failing the evaluation (elided condition above).
8670 return ZeroInitialization(E);
8672 Info.FFDiag(*ArraySize, diag::note_constexpr_new_negative)
8673 << ArrayBound << (*ArraySize)->getSourceRange();
8677 // -- its value is such that the size of the allocated object would
8678 // exceed the implementation-defined limit
8679 if (ConstantArrayType::getNumAddressingBits(Info.Ctx, AllocType,
8681 ConstantArrayType::getMaxSizeBits(Info.Ctx)) {
8683 return ZeroInitialization(E);
8685 Info.FFDiag(*ArraySize, diag::note_constexpr_new_too_large)
8686 << ArrayBound << (*ArraySize)->getSourceRange();
8690 // -- the new-initializer is a braced-init-list and the number of
8691 // array elements for which initializers are provided [...]
8692 // exceeds the number of elements to initialize
8693 if (Init && !isa<CXXConstructExpr>(Init)) {
8694 auto *CAT = Info.Ctx.getAsConstantArrayType(Init->getType());
8695 assert(CAT && "unexpected type for array initializer");
// Compare initializer bound vs. allocated bound at a common width.
8698 std::max(CAT->getSize().getBitWidth(), ArrayBound.getBitWidth());
8699 llvm::APInt InitBound = CAT->getSize().zextOrSelf(Bits);
8700 llvm::APInt AllocBound = ArrayBound.zextOrSelf(Bits);
8701 if (InitBound.ugt(AllocBound)) {
8703 return ZeroInitialization(E);
8705 Info.FFDiag(*ArraySize, diag::note_constexpr_new_too_small)
8706 << AllocBound.toString(10, /*Signed=*/false)
8707 << InitBound.toString(10, /*Signed=*/false)
8708 << (*ArraySize)->getSourceRange();
8712 // If the sizes differ, we must have an initializer list, and we need
8713 // special handling for this case when we initialize.
8714 if (InitBound != AllocBound)
8715 ResizedArrayILE = cast<InitListExpr>(Init);
8717 ResizedArrayCCE = cast<CXXConstructExpr>(Init);
// Record the concrete allocated array type for the initialization step.
8720 AllocType = Info.Ctx.getConstantArrayType(AllocType, ArrayBound, nullptr,
8721 ArrayType::Normal, 0);
8723 assert(!AllocType->isArrayType() &&
8724 "array allocation with non-array new");
// Placement-new path: locate the existing subobject being reused and
// check its type matches the allocated type ([basic.life]p8-adjacent).
8729 AccessKinds AK = AK_Construct;
8730 struct FindObjectHandler {
8734 const AccessKinds AccessKind;
8737 typedef bool result_type;
8738 bool failed() { return false; }
8739 bool found(APValue &Subobj, QualType SubobjType) {
8740 // FIXME: Reject the cases where [basic.life]p8 would not permit the
8741 // old name of the object to be used to name the new object.
8742 if (!Info.Ctx.hasSameUnqualifiedType(SubobjType, AllocType)) {
8743 Info.FFDiag(E, diag::note_constexpr_placement_new_wrong_type) <<
8744 SubobjType << AllocType;
// Scalar subobjects (complex element halves) cannot host placement new.
8750 bool found(APSInt &Value, QualType SubobjType) {
8751 Info.FFDiag(E, diag::note_constexpr_construct_complex_elem);
8754 bool found(APFloat &Value, QualType SubobjType) {
8755 Info.FFDiag(E, diag::note_constexpr_construct_complex_elem);
8758 } Handler = {Info, E, AllocType, AK, nullptr};
8760 CompleteObject Obj = findCompleteObject(Info, E, AK, Result, AllocType);
8761 if (!Obj || !findSubobject(Info, E, Obj, Result.Designator, Handler))
8764 Val = Handler.Value;
// (Quoting [basic.life]:)
8767 // The lifetime of an object o of type T ends when [...] the storage
8768 // which the object occupies is [...] reused by an object that is not
8769 // nested within o (6.6.2).
8772 // Perform the allocation and obtain a pointer to the resulting object.
8773 Val = Info.createHeapAlloc(E, AllocType, Result);
// Initialize the allocated storage via the appropriate strategy.
8778 if (ResizedArrayILE) {
8779 if (!EvaluateArrayNewInitList(Info, Result, *Val, ResizedArrayILE,
8782 } else if (ResizedArrayCCE) {
8783 if (!EvaluateArrayNewConstructExpr(Info, Result, *Val, ResizedArrayCCE,
8787 if (!EvaluateInPlace(*Val, Info, Result, Init))
8790 *Val = getDefaultInitValue(AllocType);
8793 // Array new returns a pointer to the first element, not a pointer to the
8795 if (auto *AT = AllocType->getAsArrayTypeUnsafe())
8796 Result.addArray(Info, E, cast<ConstantArrayType>(AT));
8800 //===----------------------------------------------------------------------===//
8801 // Member Pointer Evaluation
8802 //===----------------------------------------------------------------------===//
// Evaluator for expressions of pointer-to-member type; produces a MemberPtr
// (declaration plus derived/base adjustment path) into Result.
// NOTE(review): access specifiers and some member lines are elided here.
8805 class MemberPointerExprEvaluator
8806 : public ExprEvaluatorBase<MemberPointerExprEvaluator> {
// Record D (possibly null) as the pointed-to member.
8809 bool Success(const ValueDecl *D) {
8810 Result = MemberPtr(D);
8815 MemberPointerExprEvaluator(EvalInfo &Info, MemberPtr &Result)
8816 : ExprEvaluatorBaseTy(Info), Result(Result) {}
8818 bool Success(const APValue &V, const Expr *E) {
// A zero-initialized member pointer is the null member pointer.
8822 bool ZeroInitialization(const Expr *E) {
8823 return Success((const ValueDecl*)nullptr);
8826 bool VisitCastExpr(const CastExpr *E);
8827 bool VisitUnaryAddrOf(const UnaryOperator *E);
8829 } // end anonymous namespace
// Entry point: evaluate an rvalue of member-pointer type into Result.
8831 static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
8833 assert(E->isRValue() && E->getType()->isMemberPointerType());
8834 return MemberPointerExprEvaluator(Info, Result).Visit(E);
// Evaluate casts between member-pointer types: null conversions, and
// base<->derived member pointer adjustments along the cast's CXXBaseSpecifier
// path. Other cast kinds go to the generic handler.
8837 bool MemberPointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
8838 switch (E->getCastKind()) {
8840 return ExprEvaluatorBaseTy::VisitCastExpr(E);
8842 case CK_NullToMemberPointer:
8843 VisitIgnoredValue(E->getSubExpr());
8844 return ZeroInitialization(E);
8846 case CK_BaseToDerivedMemberPointer: {
8847 if (!Visit(E->getSubExpr()))
8849 if (E->path_empty())
8851 // Base-to-derived member pointer casts store the path in derived-to-base
8852 // order, so iterate backwards. The CXXBaseSpecifier also provides us with
8853 // the wrong end of the derived->base arc, so stagger the path by one class.
8854 typedef std::reverse_iterator<CastExpr::path_const_iterator> ReverseIter;
8855 for (ReverseIter PathI(E->path_end() - 1), PathE(E->path_begin());
8856 PathI != PathE; ++PathI) {
8857 assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
8858 const CXXRecordDecl *Derived = (*PathI)->getType()->getAsCXXRecordDecl();
8859 if (!Result.castToDerived(Derived))
// Final step of the staggered walk: cast to the destination class.
8862 const Type *FinalTy = E->getType()->castAs<MemberPointerType>()->getClass();
8863 if (!Result.castToDerived(FinalTy->getAsCXXRecordDecl()))
8868 case CK_DerivedToBaseMemberPointer:
8869 if (!Visit(E->getSubExpr()))
// Derived-to-base paths are stored in walk order; apply each step.
8871 for (CastExpr::path_const_iterator PathI = E->path_begin(),
8872 PathE = E->path_end(); PathI != PathE; ++PathI) {
8873 assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
8874 const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
8875 if (!Result.castToBase(Base))
// Evaluate '&Class::member'. Sema guarantees the operand is a DeclRefExpr
// naming the member, so the cast below is safe.
8882 bool MemberPointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
8883 // C++11 [expr.unary.op]p3 has very strict rules on how the address of a
8884 // member can be formed.
8885 return Success(cast<DeclRefExpr>(E->getSubExpr())->getDecl());
8888 //===----------------------------------------------------------------------===//
8889 // Record Evaluation
8890 //===----------------------------------------------------------------------===//
// Evaluator for expressions of class/struct/union type. 'This' is the lvalue
// of the object being initialized; the computed value goes into Result.
// NOTE(review): access specifiers, some member lines and the closing '};'
// are elided from this excerpt.
8893 class RecordExprEvaluator
8894 : public ExprEvaluatorBase<RecordExprEvaluator> {
8899 RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result)
8900 : ExprEvaluatorBaseTy(info), This(This), Result(Result) {}
8902 bool Success(const APValue &V, const Expr *E) {
// Zero-initialize using the expression's own type.
8906 bool ZeroInitialization(const Expr *E) {
8907 return ZeroInitialization(E, E->getType());
8909 bool ZeroInitialization(const Expr *E, QualType T);
8911 bool VisitCallExpr(const CallExpr *E) {
8912 return handleCallExpr(E, Result, &This);
8914 bool VisitCastExpr(const CastExpr *E);
8915 bool VisitInitListExpr(const InitListExpr *E);
8916 bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
8917 return VisitCXXConstructExpr(E, E->getType());
8919 bool VisitLambdaExpr(const LambdaExpr *E);
8920 bool VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
8921 bool VisitCXXConstructExpr(const CXXConstructExpr *E, QualType T);
8922 bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E);
8923 bool VisitBinCmp(const BinaryOperator *E);
8927 /// Perform zero-initialization on an object of non-union class type.
8928 /// C++11 [dcl.init]p5:
8929 ///  To zero-initialize an object or reference of type T means:
8931 ///    -- if T is a (possibly cv-qualified) non-union class type,
8932 ///       each non-static data member and each base-class subobject is
8933 ///       zero-initialized
/// Recurses into base classes, then value-initializes each non-reference
/// field via an ImplicitValueInitExpr evaluated in place.
8934 static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
8935 const RecordDecl *RD,
8936 const LValue &This, APValue &Result) {
8937 assert(!RD->isUnion() && "Expected non-union class type");
8938 const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
// Shape the result: one slot per base (C++ only) and one per field.
8939 Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0,
8940 std::distance(RD->field_begin(), RD->field_end()));
8942 if (RD->isInvalidDecl()) return false;
8943 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
// Zero-initialize each base-class subobject recursively.
8947 for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
8948 End = CD->bases_end(); I != End; ++I, ++Index) {
8949 const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
8950 LValue Subobject = This;
8951 if (!HandleLValueDirectBase(Info, E, Subobject, CD, Base, &Layout))
8953 if (!HandleClassZeroInitialization(Info, E, Base, Subobject,
8954 Result.getStructBase(Index)))
8959 for (const auto *I : RD->fields()) {
8960 // -- if T is a reference type, no initialization is performed.
8961 if (I->getType()->isReferenceType())
8964 LValue Subobject = This;
8965 if (!HandleLValueMember(Info, E, Subobject, I, &Layout))
// Value-initialize the field in place within the result aggregate.
8968 ImplicitValueInitExpr VIE(I->getType());
8969 if (!EvaluateInPlace(
8970 Result.getStructField(I->getFieldIndex()), Info, Subobject, &VIE))
// Zero-initialize a record of type T. Unions zero-initialize only their
// first named non-static member; classes with virtual bases are rejected;
// everything else goes through HandleClassZeroInitialization.
8977 bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) {
8978 const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
8979 if (RD->isInvalidDecl()) return false;
8980 if (RD->isUnion()) {
8981 // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
8982 // object's first non-static named data member is zero-initialized
8983 RecordDecl::field_iterator I = RD->field_begin();
// A union with no fields is represented by a null active member.
8984 if (I == RD->field_end()) {
8985 Result = APValue((const FieldDecl*)nullptr);
8989 LValue Subobject = This;
8990 if (!HandleLValueMember(Info, E, Subobject, *I))
8992 Result = APValue(*I);
8993 ImplicitValueInitExpr VIE(I->getType());
8994 return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, &VIE);
// Virtual bases are not supported during constant evaluation.
8997 if (isa<CXXRecordDecl>(RD) && cast<CXXRecordDecl>(RD)->getNumVBases()) {
8998 Info.FFDiag(E, diag::note_constexpr_virtual_base) << RD;
9002 return HandleClassZeroInitialization(Info, E, RD, This, Result);
// Evaluate casts yielding a record value. Constructor conversions just
// evaluate the operand; derived-to-base rvalue conversions evaluate the
// derived object and slice off the relevant base subobject by walking the
// cast's base path through the APValue structure.
// NOTE(review): the tail of this function (copying *Value into Result and
// the closing braces) is elided from this excerpt.
9005 bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
9006 switch (E->getCastKind()) {
9008 return ExprEvaluatorBaseTy::VisitCastExpr(E);
9010 case CK_ConstructorConversion:
9011 return Visit(E->getSubExpr());
9013 case CK_DerivedToBase:
9014 case CK_UncheckedDerivedToBase: {
9015 APValue DerivedObject;
9016 if (!Evaluate(DerivedObject, Info, E->getSubExpr()))
9018 if (!DerivedObject.isStruct())
9019 return Error(E->getSubExpr());
9021 // Derived-to-base rvalue conversion: just slice off the derived part.
9022 APValue *Value = &DerivedObject;
9023 const CXXRecordDecl *RD = E->getSubExpr()->getType()->getAsCXXRecordDecl();
9024 for (CastExpr::path_const_iterator PathI = E->path_begin(),
9025 PathE = E->path_end(); PathI != PathE; ++PathI) {
9026 assert(!(*PathI)->isVirtual() && "record rvalue with virtual base");
9027 const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
9028 Value = &Value->getStructBase(getBaseIndex(RD, Base));
// Evaluate an initializer list for a record type (aggregate initialization).
// Handles unions (single active member) and structs/classes (bases in
// declaration order, then fields, value-initializing trailing members).
// NOTE(review): some lines (error returns, loop closings) are elided in this
// excerpt.
9037 bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
// A transparent init list is a no-op wrapper around its single initializer.
9038 if (E->isTransparent())
9039 return Visit(E->getInit(0));
9041 const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
9042 if (RD->isInvalidDecl()) return false;
9043 const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
9044 auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
// Register the object as "under construction" for the duration of the
// evaluation, so accesses to already-initialized parts are permitted.
9046 EvalInfo::EvaluatingConstructorRAII EvalObj(
9048 ObjectUnderConstruction{This.getLValueBase(), This.Designator.Entries},
9049 CXXRD && CXXRD->getNumBases());
9051 if (RD->isUnion()) {
// Unions: only the single designated member becomes active.
9052 const FieldDecl *Field = E->getInitializedFieldInUnion();
9053 Result = APValue(Field);
9057 // If the initializer list for a union does not contain any elements, the
9058 // first element of the union is value-initialized.
9059 // FIXME: The element should be initialized from an initializer list.
9060 // Is this difference ever observable for initializer lists which
9062 ImplicitValueInitExpr VIE(Field->getType());
9063 const Expr *InitExpr = E->getNumInits() ? E->getInit(0) : &VIE;
9065 LValue Subobject = This;
9066 if (!HandleLValueMember(Info, InitExpr, Subobject, Field, &Layout))
9069 // Temporarily override This, in case there's a CXXDefaultInitExpr in here.
9070 ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
9071 isa<CXXDefaultInitExpr>(InitExpr));
9073 return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, InitExpr);
// Non-union: start from an uninitialized struct value unless a prior
// zero-initialization already populated Result.
9076 if (!Result.hasValue())
9077 Result = APValue(APValue::UninitStruct(), CXXRD ? CXXRD->getNumBases() : 0,
9078 std::distance(RD->field_begin(), RD->field_end()));
9079 unsigned ElementNo = 0;
9080 bool Success = true;
9082 // Initialize base classes.
9083 if (CXXRD && CXXRD->getNumBases()) {
9084 for (const auto &Base : CXXRD->bases()) {
9085 assert(ElementNo < E->getNumInits() && "missing init for base class");
9086 const Expr *Init = E->getInit(ElementNo);
9088 LValue Subobject = This;
9089 if (!HandleLValueBase(Info, Init, Subobject, CXXRD, &Base))
9092 APValue &FieldVal = Result.getStructBase(ElementNo);
9093 if (!EvaluateInPlace(FieldVal, Info, Subobject, Init)) {
// Keep evaluating after failure only when diagnostics are wanted.
9094 if (!Info.noteFailure())
9101 EvalObj.finishedConstructingBases();
9104 // Initialize members.
9105 for (const auto *Field : RD->fields()) {
9106 // Anonymous bit-fields are not considered members of the class for
9107 // purposes of aggregate initialization.
9108 if (Field->isUnnamedBitfield())
9111 LValue Subobject = This;
9113 bool HaveInit = ElementNo < E->getNumInits();
9115 // FIXME: Diagnostics here should point to the end of the initializer
9116 // list, not the start.
9117 if (!HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E,
9118 Subobject, Field, &Layout))
9121 // Perform an implicit value-initialization for members beyond the end of
9122 // the initializer list.
9123 ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType());
9124 const Expr *Init = HaveInit ? E->getInit(ElementNo++) : &VIE;
9126 // Temporarily override This, in case there's a CXXDefaultInitExpr in here.
9127 ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
9128 isa<CXXDefaultInitExpr>(Init));
9130 APValue &FieldVal = Result.getStructField(Field->getFieldIndex());
// Bit-field stores must be truncated to the declared bit width.
9131 if (!EvaluateInPlace(FieldVal, Info, Subobject, Init) ||
9132 (Field->isBitField() && !truncateBitfieldValue(Info, Init,
9133 FieldVal, Field))) {
9134 if (!Info.noteFailure())
// Evaluate a constructor call producing this record value. Fast-paths
// trivial default constructors, elides elidable copy/move construction,
// and otherwise dispatches to HandleConstructorCall.
// NOTE(review): a second parameter (the type, `T`) and some early-return
// lines are elided in this excerpt.
9143 bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
9145 // Note that E's type is not necessarily the type of our class here; we might
9146 // be initializing an array element instead.
9147 const CXXConstructorDecl *FD = E->getConstructor();
9148 if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl()) return false;
9150 bool ZeroInit = E->requiresZeroInitialization();
9151 if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
9152 // If we've already performed zero-initialization, we're already done.
9153 if (Result.hasValue())
// Trivial default construction reduces to zero/default initialization.
9157 return ZeroInitialization(E, T);
9159 Result = getDefaultInitValue(T);
9163 const FunctionDecl *Definition = nullptr;
9164 auto Body = FD->getBody(Definition);
// Constexpr-callability check also emits the appropriate diagnostics.
9166 if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition, Body))
9169 // Avoid materializing a temporary for an elidable copy/move constructor.
9170 if (E->isElidable() && !ZeroInit)
9171 if (const MaterializeTemporaryExpr *ME
9172 = dyn_cast<MaterializeTemporaryExpr>(E->getArg(0)))
9173 return Visit(ME->getSubExpr());
// Zero-initialize first when required (e.g. `T()` with a non-trivial ctor).
9175 if (ZeroInit && !ZeroInitialization(E, T))
9178 auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
9179 return HandleConstructorCall(E, This, Args,
9180 cast<CXXConstructorDecl>(Definition), Info,
// Evaluate an inheriting-constructor initialization: the inherited
// constructor is invoked with the arguments of the current (enclosing) call
// frame rather than arguments of its own.
9184 bool RecordExprEvaluator::VisitCXXInheritedCtorInitExpr(
9185 const CXXInheritedCtorInitExpr *E) {
// No call frame: we are only checking for a potential constant expression.
9186 if (!Info.CurrentCall) {
9187 assert(Info.checkingPotentialConstantExpression());
9191 const CXXConstructorDecl *FD = E->getConstructor();
9192 if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl())
9195 const FunctionDecl *Definition = nullptr;
9196 auto Body = FD->getBody(Definition);
9198 if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition, Body))
// Forward the current frame's arguments to the inherited constructor.
9201 return HandleConstructorCall(E, This, Info.CurrentCall->Arguments,
9202 cast<CXXConstructorDecl>(Definition), Info,
// Build the value of a std::initializer_list<T> object: a pointer to the
// first element of the backing array plus either a past-the-end pointer or
// a length, depending on the library's field layout.
// NOTE(review): error-return lines after several checks are elided here.
9206 bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
9207 const CXXStdInitializerListExpr *E) {
9208 const ConstantArrayType *ArrayType =
9209 Info.Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
// Evaluate the materialized backing array as an lvalue.
9212 if (!EvaluateLValue(E->getSubExpr(), Array, Info))
9215 // Get a pointer to the first element of the array.
9216 Array.addArray(Info, E, ArrayType);
9218 // FIXME: Perform the checks on the field types in SemaInit.
9219 RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
9220 RecordDecl::field_iterator Field = Record->field_begin();
9221 if (Field == Record->field_end())
// First field must be a pointer to the array's element type.
9225 if (!Field->getType()->isPointerType() ||
9226 !Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
9227 ArrayType->getElementType()))
9230 // FIXME: What if the initializer_list type has base classes, etc?
9231 Result = APValue(APValue::UninitStruct(), 0, 2);
9232 Array.moveInto(Result.getStructField(0));
9234 if (++Field == Record->field_end())
// Second field is either an end pointer ...
9237 if (Field->getType()->isPointerType() &&
9238 Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
9239 ArrayType->getElementType())) {
// Advance the pointer past the last element to form the end pointer.
9241 if (!HandleLValueArrayAdjustment(Info, E, Array,
9242 ArrayType->getElementType(),
9243 ArrayType->getSize().getZExtValue()))
9245 Array.moveInto(Result.getStructField(1));
// ... or a size_t length.
9246 } else if (Info.Ctx.hasSameType(Field->getType(), Info.Ctx.getSizeType()))
9248 Result.getStructField(1) = APValue(APSInt(ArrayType->getSize()));
// Any extra field means this is not a layout we understand.
9252 if (++Field != Record->field_end())
// Evaluate a lambda expression by constructing its closure object: one
// struct field per capture, each initialized from the corresponding
// capture initializer.
9258 bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
9259 const CXXRecordDecl *ClosureClass = E->getLambdaClass();
9260 if (ClosureClass->isInvalidDecl())
9263 const size_t NumFields =
9264 std::distance(ClosureClass->field_begin(), ClosureClass->field_end());
9266 assert(NumFields == (size_t)std::distance(E->capture_init_begin(),
9267 E->capture_init_end()) &&
9268 "The number of lambda capture initializers should equal the number of "
9269 "fields within the closure type");
// Closure types have no bases; one field per capture.
9271 Result = APValue(APValue::UninitStruct(), /*NumBases*/0, NumFields);
9272 // Iterate through all the lambda's closure object's fields and initialize
9274 auto *CaptureInitIt = E->capture_init_begin();
9275 const LambdaCapture *CaptureIt = ClosureClass->captures_begin();
9276 bool Success = true;
9277 for (const auto *Field : ClosureClass->fields()) {
9278 assert(CaptureInitIt != E->capture_init_end());
9279 // Get the initializer for this field
9280 Expr *const CurFieldInit = *CaptureInitIt++;
9282 // If there is no initializer, either this is a VLA or an error has
9287 APValue &FieldVal = Result.getStructField(Field->getFieldIndex());
9288 if (!EvaluateInPlace(FieldVal, Info, This, CurFieldInit)) {
// Only keep going after a failed capture when diagnostics are wanted.
9289 if (!Info.keepEvaluatingAfterFailure())
// Entry point: evaluate a record-typed rvalue expression into Result,
// treating This as the object being initialized.
9298 static bool EvaluateRecord(const Expr *E, const LValue &This,
9299 APValue &Result, EvalInfo &Info) {
9300 assert(E->isRValue() && E->getType()->isRecordType() &&
9301 "can't evaluate expression as a record rvalue");
9302 return RecordExprEvaluator(Info, This, Result).Visit(E);
9305 //===----------------------------------------------------------------------===//
9306 // Temporary Evaluation
9308 // Temporaries are represented in the AST as rvalues, but generally behave like
9309 // lvalues. The full-object of which the temporary is a subobject is implicitly
9310 // materialized so that a reference can bind to it.
9311 //===----------------------------------------------------------------------===//
// Evaluates an rvalue expression by materializing it into a temporary and
// yielding an lvalue designating that temporary.
9313 class TemporaryExprEvaluator
9314 : public LValueExprEvaluatorBase<TemporaryExprEvaluator> {
9316 TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) :
9317 LValueExprEvaluatorBaseTy(Info, Result, false) {}
9319 /// Visit an expression which constructs the value of this temporary.
9320 bool VisitConstructExpr(const Expr *E) {
// Create storage for the temporary in the current frame, then evaluate
// the construction expression directly into that storage.
9322 Info.CurrentCall->createTemporary(E, E->getType(), false, Result);
9323 return EvaluateInPlace(Value, Info, Result, E);
9326 bool VisitCastExpr(const CastExpr *E) {
9327 switch (E->getCastKind()) {
9329 return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
9331 case CK_ConstructorConversion:
9332 return VisitConstructExpr(E->getSubExpr());
// The following expression kinds all construct the temporary's value.
9335 bool VisitInitListExpr(const InitListExpr *E) {
9336 return VisitConstructExpr(E);
9338 bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
9339 return VisitConstructExpr(E);
9341 bool VisitCallExpr(const CallExpr *E) {
9342 return VisitConstructExpr(E);
9344 bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E) {
9345 return VisitConstructExpr(E);
9347 bool VisitLambdaExpr(const LambdaExpr *E) {
9348 return VisitConstructExpr(E);
9351 } // end anonymous namespace
9353 /// Evaluate an expression of record type as a temporary.
// Result receives an lvalue designating the materialized temporary.
9354 static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) {
9355 assert(E->isRValue() && E->getType()->isRecordType());
9356 return TemporaryExprEvaluator(Info, Result).Visit(E);
9359 //===----------------------------------------------------------------------===//
9360 // Vector Evaluation
9361 //===----------------------------------------------------------------------===//
// Evaluates vector-typed rvalues into an APValue holding one APValue per
// vector element.
9364 class VectorExprEvaluator
9365 : public ExprEvaluatorBase<VectorExprEvaluator> {
9369 VectorExprEvaluator(EvalInfo &info, APValue &Result)
9370 : ExprEvaluatorBaseTy(info), Result(Result) {}
// Success overload taking per-element values; element count must match
// the vector type's arity.
9372 bool Success(ArrayRef<APValue> V, const Expr *E) {
9373 assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements());
9374 // FIXME: remove this APValue copy.
9375 Result = APValue(V.data(), V.size());
// Success overload taking an already-formed vector APValue.
9378 bool Success(const APValue &V, const Expr *E) {
9379 assert(V.isVector());
9383 bool ZeroInitialization(const Expr *E);
// __real__ v is just v for a vector operand.
9385 bool VisitUnaryReal(const UnaryOperator *E)
9386 { return Visit(E->getSubExpr()); }
9387 bool VisitCastExpr(const CastExpr* E);
9388 bool VisitInitListExpr(const InitListExpr *E);
9389 bool VisitUnaryImag(const UnaryOperator *E);
9390 // FIXME: Missing: unary -, unary ~, binary add/sub/mul/div,
9391 // binary comparisons, binary and/or/xor,
9392 // conditional operator (for GNU conditional select),
9393 // shufflevector, ExtVectorElementExpr
9395 } // end anonymous namespace
// Entry point: evaluate a vector-typed rvalue expression into Result.
9397 static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
9398 assert(E->isRValue() && E->getType()->isVectorType() &&"not a vector rvalue");
9399 return VectorExprEvaluator(Info, Result).Visit(E);
// Evaluate casts yielding vectors: splat of a scalar, and bitcast from an
// integer-sized value (elements are extracted honoring target endianness).
// NOTE(review): the CK_BitCast case label and some error-return lines are
// elided in this excerpt.
9402 bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) {
9403 const VectorType *VTy = E->getType()->castAs<VectorType>();
9404 unsigned NElts = VTy->getNumElements();
9406 const Expr *SE = E->getSubExpr();
9407 QualType SETy = SE->getType();
9409 switch (E->getCastKind()) {
9410 case CK_VectorSplat: {
// Evaluate the scalar once, then replicate it into every lane.
9411 APValue Val = APValue();
9412 if (SETy->isIntegerType()) {
9414 if (!EvaluateInteger(SE, IntResult, Info))
9416 Val = APValue(std::move(IntResult));
9417 } else if (SETy->isRealFloatingType()) {
9418 APFloat FloatResult(0.0);
9419 if (!EvaluateFloat(SE, FloatResult, Info))
9421 Val = APValue(std::move(FloatResult));
9426 // Splat and create vector APValue.
9427 SmallVector<APValue, 4> Elts(NElts, Val);
9428 return Success(Elts, E);
9431 // Evaluate the operand into an APInt we can extract from.
9432 llvm::APInt SValInt;
9433 if (!EvalAndBitcastToAPInt(Info, SE, SValInt))
9435 // Extract the elements
9436 QualType EltTy = VTy->getElementType();
9437 unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
// Element order within the integer depends on target byte order.
9438 bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
9439 SmallVector<APValue, 4> Elts;
9440 if (EltTy->isRealFloatingType()) {
9441 const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(EltTy);
// x87 long double occupies more storage than its significant bits;
// only the semantically meaningful bits are extracted.
9442 unsigned FloatEltSize = EltSize;
9443 if (&Sem == &APFloat::x87DoubleExtended())
9445 for (unsigned i = 0; i < NElts; i++) {
// Rotate the wanted lane into the low bits, then truncate.
9448 Elt = SValInt.rotl(i*EltSize+FloatEltSize).trunc(FloatEltSize);
9450 Elt = SValInt.rotr(i*EltSize).trunc(FloatEltSize);
9451 Elts.push_back(APValue(APFloat(Sem, Elt)));
9453 } else if (EltTy->isIntegerType()) {
9454 for (unsigned i = 0; i < NElts; i++) {
9457 Elt = SValInt.rotl(i*EltSize+EltSize).zextOrTrunc(EltSize);
9459 Elt = SValInt.rotr(i*EltSize).zextOrTrunc(EltSize);
9460 Elts.push_back(APValue(APSInt(Elt, EltTy->isSignedIntegerType())));
9465 return Success(Elts, E);
// All other cast kinds use the generic handling.
9468 return ExprEvaluatorBaseTy::VisitCastExpr(E);
// Evaluate a vector initializer list. Initializers may themselves be
// vectors (flattened in order); missing trailing elements are
// zero-initialized for GCC compatibility.
9473 VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
9474 const VectorType *VT = E->getType()->castAs<VectorType>();
9475 unsigned NumInits = E->getNumInits();
9476 unsigned NumElements = VT->getNumElements();
9478 QualType EltTy = VT->getElementType();
9479 SmallVector<APValue, 4> Elements;
9481 // The number of initializers can be less than the number of
9482 // vector elements. For OpenCL, this can be due to nested vector
9483 // initialization. For GCC compatibility, missing trailing elements
9484 // should be initialized with zeroes.
9485 unsigned CountInits = 0, CountElts = 0;
9486 while (CountElts < NumElements) {
9487 // Handle nested vector initialization.
9488 if (CountInits < NumInits
9489 && E->getInit(CountInits)->getType()->isVectorType()) {
9491 if (!EvaluateVector(E->getInit(CountInits), v, Info))
// Flatten the nested vector's elements into the result.
9493 unsigned vlen = v.getVectorLength();
9494 for (unsigned j = 0; j < vlen; j++)
9495 Elements.push_back(v.getVectorElt(j));
9497 } else if (EltTy->isIntegerType()) {
9498 llvm::APSInt sInt(32);
9499 if (CountInits < NumInits) {
9500 if (!EvaluateInteger(E->getInit(CountInits), sInt, Info))
9502 } else // trailing integer zero.
9503 sInt = Info.Ctx.MakeIntValue(0, EltTy);
9504 Elements.push_back(APValue(sInt));
// Otherwise the element type is floating-point.
9507 llvm::APFloat f(0.0);
9508 if (CountInits < NumInits) {
9509 if (!EvaluateFloat(E->getInit(CountInits), f, Info))
9511 } else // trailing float zero.
9512 f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy));
9513 Elements.push_back(APValue(f));
9518 return Success(Elements, E);
// Zero-initialize a vector: build one zero element of the element type and
// replicate it across all lanes.
9522 VectorExprEvaluator::ZeroInitialization(const Expr *E) {
9523 const auto *VT = E->getType()->castAs<VectorType>();
9524 QualType EltTy = VT->getElementType();
9525 APValue ZeroElement;
9526 if (EltTy->isIntegerType())
9527 ZeroElement = APValue(Info.Ctx.MakeIntValue(0, EltTy));
// Otherwise a floating-point zero in the element's semantics.
9530 APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy)));
9532 SmallVector<APValue, 4> Elements(VT->getNumElements(), ZeroElement);
9533 return Success(Elements, E);
// __imag__ v for a (non-complex) vector is a zero vector; the operand is
// still evaluated for side-effect tracking.
9536 bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
9537 VisitIgnoredValue(E->getSubExpr());
9538 return ZeroInitialization(E);
9541 //===----------------------------------------------------------------------===//
// Array Evaluation
9543 //===----------------------------------------------------------------------===//
// Evaluates array-typed rvalues in place within This (the array object
// being initialized).
9546 class ArrayExprEvaluator
9547 : public ExprEvaluatorBase<ArrayExprEvaluator> {
9552 ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result)
9553 : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}
9555 bool Success(const APValue &V, const Expr *E) {
9556 assert(V.isArray() && "expected array");
// Zero-initialize the array using a shared "filler" element rather than
// materializing every element individually.
9561 bool ZeroInitialization(const Expr *E) {
9562 const ConstantArrayType *CAT =
9563 Info.Ctx.getAsConstantArrayType(E->getType());
9567 Result = APValue(APValue::UninitArray(), 0,
9568 CAT->getSize().getZExtValue());
// Zero-size arrays have no filler slot.
9569 if (!Result.hasArrayFiller()) return true;
9571 // Zero-initialize all elements.
9572 LValue Subobject = This;
9573 Subobject.addArray(Info, E, CAT);
9574 ImplicitValueInitExpr VIE(CAT->getElementType());
9575 return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
9578 bool VisitCallExpr(const CallExpr *E) {
9579 return handleCallExpr(E, Result, &This);
9581 bool VisitInitListExpr(const InitListExpr *E,
9582 QualType AllocType = QualType());
9583 bool VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E);
9584 bool VisitCXXConstructExpr(const CXXConstructExpr *E);
9585 bool VisitCXXConstructExpr(const CXXConstructExpr *E,
9586 const LValue &Subobject,
9587 APValue *Value, QualType Type);
9588 bool VisitStringLiteral(const StringLiteral *E,
9589 QualType AllocType = QualType()) {
9590 expandStringLiteral(Info, E, Result, AllocType);
9594 } // end anonymous namespace
// Entry point: evaluate an array-typed rvalue expression into Result.
9596 static bool EvaluateArray(const Expr *E, const LValue &This,
9597 APValue &Result, EvalInfo &Info) {
9598 assert(E->isRValue() && E->getType()->isArrayType() && "not an array rvalue");
9599 return ArrayExprEvaluator(Info, This, Result).Visit(E);
// Evaluate the init-list initializer of an array allocated by new[].
// AllocType is the allocated array type (may differ from the list's type).
9602 static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
9603 APValue &Result, const InitListExpr *ILE,
9604 QualType AllocType) {
9605 assert(ILE->isRValue() && ILE->getType()->isArrayType() &&
9606 "not an array rvalue");
9607 return ArrayExprEvaluator(Info, This, Result)
9608 .VisitInitListExpr(ILE, AllocType);
// Evaluate the constructor initializer of an array allocated by new[].
9611 static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
9613 const CXXConstructExpr *CCE,
9614 QualType AllocType) {
9615 assert(CCE->isRValue() && CCE->getType()->isArrayType() &&
9616 "not an array rvalue");
9617 return ArrayExprEvaluator(Info, This, Result)
9618 .VisitCXXConstructExpr(CCE, This, &Result, AllocType);
9621 // Return true iff the given array filler may depend on the element index.
// Conservative: anything other than value-initialization (and init lists
// built solely from such fillers) is assumed index-dependent, forcing
// per-element evaluation.
9622 static bool MaybeElementDependentArrayFiller(const Expr *FillerExpr) {
9623 // For now, just whitelist non-class value-initialization and initialization
9624 // lists comprised of them.
9625 if (isa<ImplicitValueInitExpr>(FillerExpr))
9627 if (const InitListExpr *ILE = dyn_cast<InitListExpr>(FillerExpr)) {
// Recurse into each sub-initializer of the list.
9628 for (unsigned I = 0, E = ILE->getNumInits(); I != E; ++I) {
9629 if (MaybeElementDependentArrayFiller(ILE->getInit(I)))
// Evaluate an array initializer list: explicit initializers in order, then
// a (possibly splatted) filler for the remaining elements.
9637 bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E,
9638 QualType AllocType) {
// Use the allocated type when initializing new[] storage.
9639 const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(
9640 AllocType.isNull() ? E->getType() : AllocType);
9644 // C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...]
9645 // an appropriately-typed string literal enclosed in braces.
9646 if (E->isStringLiteralInit()) {
9647 auto *SL = dyn_cast<StringLiteral>(E->getInit(0)->IgnoreParens());
9648 // FIXME: Support ObjCEncodeExpr here once we support it in
9649 // ArrayExprEvaluator generally.
9652 return VisitStringLiteral(SL, AllocType);
9655 bool Success = true;
9657 assert((!Result.isArray() || Result.getArrayInitializedElts() == 0) &&
9658 "zero-initialized array shouldn't have any initialized elts");
// Preserve a filler left behind by prior zero-initialization.
9660 if (Result.isArray() && Result.hasArrayFiller())
9661 Filler = Result.getArrayFiller();
9663 unsigned NumEltsToInit = E->getNumInits();
9664 unsigned NumElts = CAT->getSize().getZExtValue();
9665 const Expr *FillerExpr = E->hasArrayFiller() ? E->getArrayFiller() : nullptr;
9667 // If the initializer might depend on the array index, run it for each
9669 if (NumEltsToInit != NumElts && MaybeElementDependentArrayFiller(FillerExpr))
9670 NumEltsToInit = NumElts;
9672 LLVM_DEBUG(llvm::dbgs() << "The number of elements to initialize: "
9673 << NumEltsToInit << ".\n");
9675 Result = APValue(APValue::UninitArray(), NumEltsToInit, NumElts);
9677 // If the array was previously zero-initialized, preserve the
9678 // zero-initialized values.
9679 if (Filler.hasValue()) {
9680 for (unsigned I = 0, E = Result.getArrayInitializedElts(); I != E; ++I)
9681 Result.getArrayInitializedElt(I) = Filler;
9682 if (Result.hasArrayFiller())
9683 Result.getArrayFiller() = Filler;
// Evaluate each explicit (or per-index filler) element in place, stepping
// the designator forward one element at a time.
9686 LValue Subobject = This;
9687 Subobject.addArray(Info, E, CAT);
9688 for (unsigned Index = 0; Index != NumEltsToInit; ++Index) {
9690 Index < E->getNumInits() ? E->getInit(Index) : FillerExpr;
9691 if (!EvaluateInPlace(Result.getArrayInitializedElt(Index),
9692 Info, Subobject, Init) ||
9693 !HandleLValueArrayAdjustment(Info, Init, Subobject,
9694 CAT->getElementType(), 1)) {
9695 if (!Info.noteFailure())
// A full array (no filler slot) is done at this point.
9701 if (!Result.hasArrayFiller())
9704 // If we get here, we have a trivial filler, which we can just evaluate
9705 // once and splat over the rest of the array elements.
9706 assert(FillerExpr && "no array filler for incomplete init list");
9707 return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject,
9708 FillerExpr) && Success;
// Evaluate an ArrayInitLoopExpr: run the element initializer once per
// index, with the loop index exposed via EvalInfo::ArrayInitLoopIndex.
9711 bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
// Evaluate the common (shared) subexpression once, into a temporary, so
// each iteration can reuse it via its OpaqueValueExpr.
9713 if (E->getCommonExpr() &&
9714 !Evaluate(Info.CurrentCall->createTemporary(
9716 getStorageType(Info.Ctx, E->getCommonExpr()), false,
9718 Info, E->getCommonExpr()->getSourceExpr()))
9721 auto *CAT = cast<ConstantArrayType>(E->getType()->castAsArrayTypeUnsafe());
9723 uint64_t Elements = CAT->getSize().getZExtValue();
9724 Result = APValue(APValue::UninitArray(), Elements, Elements);
9726 LValue Subobject = This;
9727 Subobject.addArray(Info, E, CAT);
9729 bool Success = true;
9730 for (EvalInfo::ArrayInitLoopIndex Index(Info); Index != Elements; ++Index) {
9731 if (!EvaluateInPlace(Result.getArrayInitializedElt(Index),
9732 Info, Subobject, E->getSubExpr()) ||
9733 !HandleLValueArrayAdjustment(Info, E, Subobject,
9734 CAT->getElementType(), 1)) {
9735 if (!Info.noteFailure())
// Forwarder: construct the whole array object held in This/Result.
9744 bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
9745 return VisitCXXConstructExpr(E, This, &Result, E->getType());
// Worker: recursively constructs Value of the given Type. For array types
// it constructs each element; for the record element type it delegates to
// RecordExprEvaluator.
9748 bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
9749 const LValue &Subobject,
9752 bool HadZeroInit = Value->hasValue();
9754 if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(Type)) {
9755 unsigned N = CAT->getSize().getZExtValue();
9757 // Preserve the array filler if we had prior zero-initialization.
9759 HadZeroInit && Value->hasArrayFiller() ? Value->getArrayFiller()
9762 *Value = APValue(APValue::UninitArray(), N, N);
// Seed every element with the preserved filler before construction.
9765 for (unsigned I = 0; I != N; ++I)
9766 Value->getArrayInitializedElt(I) = Filler;
9768 // Initialize the elements.
9769 LValue ArrayElt = Subobject;
9770 ArrayElt.addArray(Info, E, CAT);
9771 for (unsigned I = 0; I != N; ++I)
9772 if (!VisitCXXConstructExpr(E, ArrayElt, &Value->getArrayInitializedElt(I),
9773 CAT->getElementType()) ||
9774 !HandleLValueArrayAdjustment(Info, E, ArrayElt,
9775 CAT->getElementType(), 1))
// Base case: a non-record scalar needs no constructor handling here.
9781 if (!Type->isRecordType())
9784 return RecordExprEvaluator(Info, Subobject, *Value)
9785 .VisitCXXConstructExpr(E, Type);
9788 //===----------------------------------------------------------------------===//
9789 // Integer Evaluation
9791 // As a GNU extension, we support casting pointers to sufficiently-wide integer
9792 // types and back in constant folding. Integer values are thus represented
9793 // either as an integer-valued APValue, or as an lvalue-valued APValue.
9794 //===----------------------------------------------------------------------===//
// Evaluates integral and enumeration-typed rvalues. As a GNU extension the
// result may also be an lvalue (pointer cast to integer); see the file
// comment above this class.
9797 class IntExprEvaluator
9798 : public ExprEvaluatorBase<IntExprEvaluator> {
9801 IntExprEvaluator(EvalInfo &info, APValue &result)
9802 : ExprEvaluatorBaseTy(info), Result(result) {}
// Success overloads verify that the produced value has exactly the
// signedness and bit-width of the expression's type.
9804 bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) {
9805 assert(E->getType()->isIntegralOrEnumerationType() &&
9806 "Invalid evaluation result.");
9807 assert(SI.isSigned() == E->getType()->isSignedIntegerOrEnumerationType() &&
9808 "Invalid evaluation result.");
9809 assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
9810 "Invalid evaluation result.");
9811 Result = APValue(SI);
9814 bool Success(const llvm::APSInt &SI, const Expr *E) {
9815 return Success(SI, E, Result);
// APInt variant: signedness is taken from the expression's type.
9818 bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) {
9819 assert(E->getType()->isIntegralOrEnumerationType() &&
9820 "Invalid evaluation result.");
9821 assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
9822 "Invalid evaluation result.");
9823 Result = APValue(APSInt(I));
9824 Result.getInt().setIsUnsigned(
9825 E->getType()->isUnsignedIntegerOrEnumerationType());
9828 bool Success(const llvm::APInt &I, const Expr *E) {
9829 return Success(I, E, Result);
// uint64_t variant: value is converted to the expression's type.
9832 bool Success(uint64_t Value, const Expr *E, APValue &Result) {
9833 assert(E->getType()->isIntegralOrEnumerationType() &&
9834 "Invalid evaluation result.");
9835 Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
9838 bool Success(uint64_t Value, const Expr *E) {
9839 return Success(Value, E, Result);
9842 bool Success(CharUnits Size, const Expr *E) {
9843 return Success(Size.getQuantity(), E);
// APValue variant: lvalues/address-label-diffs pass through unchanged.
9846 bool Success(const APValue &V, const Expr *E) {
9847 if (V.isLValue() || V.isAddrLabelDiff() || V.isIndeterminate()) {
9851 return Success(V.getInt(), E);
9854 bool ZeroInitialization(const Expr *E) { return Success(0, E); }
9856 //===--------------------------------------------------------------------===//
9858 //===--------------------------------------------------------------------===//
9860 bool VisitConstantExpr(const ConstantExpr *E);
9862 bool VisitIntegerLiteral(const IntegerLiteral *E) {
9863 return Success(E->getValue(), E);
9865 bool VisitCharacterLiteral(const CharacterLiteral *E) {
9866 return Success(E->getValue(), E);
9869 bool CheckReferencedDecl(const Expr *E, const Decl *D);
// DeclRef/Member: try the fast integral-constant path first, then fall
// back to the generic lvalue-based evaluation.
9870 bool VisitDeclRefExpr(const DeclRefExpr *E) {
9871 if (CheckReferencedDecl(E, E->getDecl()))
9874 return ExprEvaluatorBaseTy::VisitDeclRefExpr(E);
9876 bool VisitMemberExpr(const MemberExpr *E) {
9877 if (CheckReferencedDecl(E, E->getMemberDecl())) {
// Still evaluate the base for side effects / diagnostics.
9878 VisitIgnoredBaseExpression(E->getBase());
9882 return ExprEvaluatorBaseTy::VisitMemberExpr(E);
9885 bool VisitCallExpr(const CallExpr *E);
9886 bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp);
9887 bool VisitBinaryOperator(const BinaryOperator *E);
9888 bool VisitOffsetOfExpr(const OffsetOfExpr *E);
9889 bool VisitUnaryOperator(const UnaryOperator *E);
9891 bool VisitCastExpr(const CastExpr* E);
9892 bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
9894 bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
9895 return Success(E->getValue(), E);
9898 bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
9899 return Success(E->getValue(), E);
// The current index of an enclosing ArrayInitLoopExpr, if any.
9902 bool VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E) {
9903 if (Info.ArrayInitIndex == uint64_t(-1)) {
9904 // We were asked to evaluate this subexpression independent of the
9905 // enclosing ArrayInitLoopExpr. We can't do that.
9909 return Success(Info.ArrayInitIndex, E);
9912 // Note, GNU defines __null as an integer, not a pointer.
9913 bool VisitGNUNullExpr(const GNUNullExpr *E) {
9914 return ZeroInitialization(E);
// Type traits and similar queries fold to their precomputed values.
9917 bool VisitTypeTraitExpr(const TypeTraitExpr *E) {
9918 return Success(E->getValue(), E);
9921 bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
9922 return Success(E->getValue(), E);
9925 bool VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
9926 return Success(E->getValue(), E);
9929 bool VisitUnaryReal(const UnaryOperator *E);
9930 bool VisitUnaryImag(const UnaryOperator *E);
9932 bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
9933 bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
9934 bool VisitSourceLocExpr(const SourceLocExpr *E);
9935 bool VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E);
9936 bool VisitRequiresExpr(const RequiresExpr *E);
9937 // FIXME: Missing: array subscript of vector, member of vector
// Evaluates fixed-point-typed rvalues (Embedded C _Accum/_Fract types)
// into an APFixedPoint-holding APValue.
9940 class FixedPointExprEvaluator
9941 : public ExprEvaluatorBase<FixedPointExprEvaluator> {
9945 FixedPointExprEvaluator(EvalInfo &info, APValue &result)
9946 : ExprEvaluatorBaseTy(info), Result(result) {}
// Raw-integer and uint64_t values are wrapped in the fixed-point
// semantics of the expression's type.
9948 bool Success(const llvm::APInt &I, const Expr *E) {
9950 APFixedPoint(I, Info.Ctx.getFixedPointSemantics(E->getType())), E);
9953 bool Success(uint64_t Value, const Expr *E) {
9955 APFixedPoint(Value, Info.Ctx.getFixedPointSemantics(E->getType())), E);
9958 bool Success(const APValue &V, const Expr *E) {
9959 return Success(V.getFixedPoint(), E);
// Canonical overload: verifies the width matches the expression's type.
9962 bool Success(const APFixedPoint &V, const Expr *E) {
9963 assert(E->getType()->isFixedPointType() && "Invalid evaluation result.");
9964 assert(V.getWidth() == Info.Ctx.getIntWidth(E->getType()) &&
9965 "Invalid evaluation result.");
9966 Result = APValue(V);
9970 //===--------------------------------------------------------------------===//
9972 //===--------------------------------------------------------------------===//
9974 bool VisitFixedPointLiteral(const FixedPointLiteral *E) {
9975 return Success(E->getValue(), E);
9978 bool VisitCastExpr(const CastExpr *E);
9979 bool VisitUnaryOperator(const UnaryOperator *E);
9980 bool VisitBinaryOperator(const BinaryOperator *E);
9982 } // end anonymous namespace
9984 /// EvaluateIntegerOrLValue - Evaluate an rvalue integral-typed expression, and
9985 /// produce either the integer value or a pointer.
9987 /// GCC has a heinous extension which folds casts between pointer types and
9988 /// pointer-sized integral types. We support this by allowing the evaluation of
9989 /// an integer rvalue to produce a pointer (represented as an lvalue) instead.
9990 /// Some simple arithmetic on such values is supported (they are treated much
9992 static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
9994 assert(E->isRValue() && E->getType()->isIntegralOrEnumerationType());
9995 return IntExprEvaluator(Info, Result).Visit(E);
// Evaluate an integral expression, requiring a genuine integer result:
// lvalue results (pointer-cast-to-integer extension) are diagnosed.
9998 static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) {
10000 if (!EvaluateIntegerOrLValue(E, Val, Info))
10002 if (!Val.isInt()) {
10003 // FIXME: It would be better to produce the diagnostic for casting
10004 // a pointer to an integer.
10005 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
10008 Result = Val.getInt();
// __builtin_LINE/__builtin_COLUMN etc.: evaluate against the default-arg
// scope of the current call so the location reflects the call site.
10012 bool IntExprEvaluator::VisitSourceLocExpr(const SourceLocExpr *E) {
10013 APValue Evaluated = E->EvaluateInContext(
10014 Info.Ctx, Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr());
10015 return Success(Evaluated, E);
// Evaluate a fixed-point-typed expression into Result; fails for any
// other type or a non-fixed-point evaluation result.
10018 static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result,
10020 if (E->getType()->isFixedPointType()) {
10022 if (!FixedPointExprEvaluator(Info, Val).Visit(E))
10024 if (!Val.isFixedPoint())
10027 Result = Val.getFixedPoint();
// Evaluate an expression that may be either integer or fixed-point typed;
// integers are converted into the matching fixed-point semantics.
10033 static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
10035 if (E->getType()->isIntegerType()) {
10036 auto FXSema = Info.Ctx.getFixedPointSemantics(E->getType());
10038 if (!EvaluateInteger(E, Val, Info))
10040 Result = APFixedPoint(Val, FXSema);
10042 } else if (E->getType()->isFixedPointType()) {
10043 return EvaluateFixedPoint(E, Result, Info);
10048 /// Check whether the given declaration can be directly converted to an integral
10049 /// rvalue. If not, no diagnostic is produced; there are other things we can
// Fast path for enumerators: their value is a known integer constant, but
// it may need sign/width adjustment to match the expression's type.
10051 bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
10052 // Enums are integer constant exprs.
10053 if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) {
10054 // Check for signedness/width mismatches between E type and ECD value.
10055 bool SameSign = (ECD->getInitVal().isSigned()
10056 == E->getType()->isSignedIntegerOrEnumerationType());
10057 bool SameWidth = (ECD->getInitVal().getBitWidth()
10058 == Info.Ctx.getIntWidth(E->getType()));
10059 if (SameSign && SameWidth)
10060 return Success(ECD->getInitVal(), E);
10062 // Get rid of mismatch (otherwise Success assertions will fail)
10063 // by computing a new value matching the type of E.
10064 llvm::APSInt Val = ECD->getInitVal();
10066 Val.setIsSigned(!ECD->getInitVal().isSigned());
10068 Val = Val.extOrTrunc(Info.Ctx.getIntWidth(E->getType()));
10069 return Success(Val, E);
10075 /// Values returned by __builtin_classify_type, chosen to match the values
10076 /// produced by GCC's builtin.
/// These numeric values are observable by user code, so they are effectively
/// ABI: do not renumber.
10077 enum class GCCTypeClass {
10081 // GCC reserves 2 for character types, but instead classifies them as
10086 // GCC reserves 6 for references, but appears to never use it (because
10087 // expressions never have reference type, presumably).
10088 PointerToDataMember = 7,
10091 // GCC reserves 10 for functions, but does not use it since GCC version 6 due
10092 // to decay to pointer. (Prior to version 6 it was only used in C++ mode).
10093 // GCC claims to reserve 11 for pointers to member functions, but *actually*
10094 // uses 12 for that purpose, same as for a class or struct. Maybe it
10095 // internally implements a pointer to member as a struct? Who knows.
10096 PointerToMemberFunction = 12, // Not a bug, see above.
10097 ClassOrStruct = 12,
10099 // GCC reserves 14 for arrays, but does not use it since GCC version 6 due to
10100 // decay to pointer. (Prior to version 6 it was only used in C++ mode).
10101 // GCC reserves 15 for strings, but actually uses 5 (pointer) for string
10105 /// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
/// GCC does, mapping the canonical type of T to a GCCTypeClass value.
/// Dependent types must have been rejected by the caller.
10107 static GCCTypeClass
10108 EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
10109 assert(!T->isDependentType() && "unexpected dependent type");
10111 QualType CanTy = T.getCanonicalType();
10112 const BuiltinType *BT = dyn_cast<BuiltinType>(CanTy);
10114 switch (CanTy->getTypeClass()) {
// Non-canonical and dependent type nodes cannot appear in a canonical type;
// list them via the .inc x-macros so the switch stays exhaustive.
10115 #define TYPE(ID, BASE)
10116 #define DEPENDENT_TYPE(ID, BASE) case Type::ID:
10117 #define NON_CANONICAL_TYPE(ID, BASE) case Type::ID:
10118 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(ID, BASE) case Type::ID:
10119 #include "clang/AST/TypeNodes.inc"
10121 case Type::DeducedTemplateSpecialization:
10122 llvm_unreachable("unexpected non-canonical or dependent type");
10124 case Type::Builtin:
10125 switch (BT->getKind()) {
10126 #define BUILTIN_TYPE(ID, SINGLETON_ID)
10127 #define SIGNED_TYPE(ID, SINGLETON_ID) \
10128 case BuiltinType::ID: return GCCTypeClass::Integer;
10129 #define FLOATING_TYPE(ID, SINGLETON_ID) \
10130 case BuiltinType::ID: return GCCTypeClass::RealFloat;
10131 #define PLACEHOLDER_TYPE(ID, SINGLETON_ID) \
10132 case BuiltinType::ID: break;
10133 #include "clang/AST/BuiltinTypes.def"
10134 case BuiltinType::Void:
10135 return GCCTypeClass::Void;
10137 case BuiltinType::Bool:
10138 return GCCTypeClass::Bool;
// All unsigned integer and character flavors classify as plain Integer,
// matching GCC (which does not use its reserved "char" class).
10140 case BuiltinType::Char_U:
10141 case BuiltinType::UChar:
10142 case BuiltinType::WChar_U:
10143 case BuiltinType::Char8:
10144 case BuiltinType::Char16:
10145 case BuiltinType::Char32:
10146 case BuiltinType::UShort:
10147 case BuiltinType::UInt:
10148 case BuiltinType::ULong:
10149 case BuiltinType::ULongLong:
10150 case BuiltinType::UInt128:
10151 return GCCTypeClass::Integer;
// Fixed-point (_Accum/_Fract) types have no GCC classification; fall back
// to None.
10153 case BuiltinType::UShortAccum:
10154 case BuiltinType::UAccum:
10155 case BuiltinType::ULongAccum:
10156 case BuiltinType::UShortFract:
10157 case BuiltinType::UFract:
10158 case BuiltinType::ULongFract:
10159 case BuiltinType::SatUShortAccum:
10160 case BuiltinType::SatUAccum:
10161 case BuiltinType::SatULongAccum:
10162 case BuiltinType::SatUShortFract:
10163 case BuiltinType::SatUFract:
10164 case BuiltinType::SatULongFract:
10165 return GCCTypeClass::None;
10167 case BuiltinType::NullPtr:
10169 case BuiltinType::ObjCId:
10170 case BuiltinType::ObjCClass:
10171 case BuiltinType::ObjCSel:
10172 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
10173 case BuiltinType::Id:
10174 #include "clang/Basic/OpenCLImageTypes.def"
10175 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
10176 case BuiltinType::Id:
10177 #include "clang/Basic/OpenCLExtensionTypes.def"
10178 case BuiltinType::OCLSampler:
10179 case BuiltinType::OCLEvent:
10180 case BuiltinType::OCLClkEvent:
10181 case BuiltinType::OCLQueue:
10182 case BuiltinType::OCLReserveID:
10183 #define SVE_TYPE(Name, Id, SingletonId) \
10184 case BuiltinType::Id:
10185 #include "clang/Basic/AArch64SVEACLETypes.def"
10186 return GCCTypeClass::None;
10188 case BuiltinType::Dependent:
10189 llvm_unreachable("unexpected dependent type");
10191 llvm_unreachable("unexpected placeholder type");
10194 return LangOpts.CPlusPlus ? GCCTypeClass::Enum : GCCTypeClass::Integer;
// Function and array types classify as Pointer because GCC (>= 6)
// classifies them after decay.
10196 case Type::Pointer:
10197 case Type::ConstantArray:
10198 case Type::VariableArray:
10199 case Type::IncompleteArray:
10200 case Type::FunctionNoProto:
10201 case Type::FunctionProto:
10202 return GCCTypeClass::Pointer;
10204 case Type::MemberPointer:
10205 return CanTy->isMemberDataPointerType()
10206 ? GCCTypeClass::PointerToDataMember
10207 : GCCTypeClass::PointerToMemberFunction;
10209 case Type::Complex:
10210 return GCCTypeClass::Complex;
10213 return CanTy->isUnionType() ? GCCTypeClass::Union
10214 : GCCTypeClass::ClassOrStruct;
10217 // GCC classifies _Atomic T the same as T.
10218 return EvaluateBuiltinClassifyType(
10219 CanTy->castAs<AtomicType>()->getValueType(), LangOpts);
10221 case Type::BlockPointer:
10223 case Type::ExtVector:
10224 case Type::ObjCObject:
10225 case Type::ObjCInterface:
10226 case Type::ObjCObjectPointer:
10228 // GCC classifies vectors as None. We follow its lead and classify all
10229 // other types that don't fit into the regular classification the same way.
10230 return GCCTypeClass::None;
10232 case Type::LValueReference:
10233 case Type::RValueReference:
10234 llvm_unreachable("invalid type for expression");
10237 llvm_unreachable("unexpected type class");
10240 /// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
/// GCC does, given the call expression itself: classify the type of the first
/// argument, or None when no argument was supplied.
10242 static GCCTypeClass
10243 EvaluateBuiltinClassifyType(const CallExpr *E, const LangOptions &LangOpts) {
10244 // If no argument was supplied, default to None. This isn't
10245 // ideal, however it is what gcc does.
10246 if (E->getNumArgs() == 0)
10247 return GCCTypeClass::None;
10249 // FIXME: Bizarrely, GCC treats a call with more than one argument as not
10250 // being an ICE, but still folds it to a constant using the type of the first
// argument, so extra arguments are simply ignored here.
10252 return EvaluateBuiltinClassifyType(E->getArg(0)->getType(), LangOpts);
10255 /// EvaluateBuiltinConstantPForLValue - Determine the result of
10256 /// __builtin_constant_p when applied to the given pointer.
10258 /// A pointer is only "constant" if it is null (or a pointer cast to integer)
10259 /// or it points to the first character of a string literal.
10260 static bool EvaluateBuiltinConstantPForLValue(const APValue &LV) {
10261 APValue::LValueBase Base = LV.getLValueBase();
10262 if (Base.isNull()) {
10263 // A null base is acceptable.
10265 } else if (const Expr *E = Base.dyn_cast<const Expr *>()) {
10266 if (!isa<StringLiteral>(E))
// A string-literal base only counts when the pointer is to its first
// character (offset zero).
10268 return LV.getLValueOffset().isZero();
10269 } else if (Base.is<TypeInfoLValue>()) {
10270 // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to
10271 // evaluate to true.
10274 // Any other base is not constant enough for GCC.
10279 /// EvaluateBuiltinConstantP - Evaluate __builtin_constant_p as similarly to
10280 /// GCC as we can manage.
/// Returns true iff the operand folds to a value GCC would call "constant".
10281 static bool EvaluateBuiltinConstantP(EvalInfo &Info, const Expr *Arg) {
10282 // This evaluation is not permitted to have side-effects, so evaluate it in
10283 // a speculative evaluation context.
10284 SpeculativeEvaluationRAII SpeculativeEval(Info);
10286 // Constant-folding is always enabled for the operand of __builtin_constant_p
10287 // (even when the enclosing evaluation context otherwise requires a strict
10288 // language-specific constant expression).
10289 FoldConstant Fold(Info, true);
10291 QualType ArgType = Arg->getType();
10293 // __builtin_constant_p always has one operand. The rules which gcc follows
10294 // are not precisely documented, but are as follows:
10296 // - If the operand is of integral, floating, complex or enumeration type,
10297 // and can be folded to a known value of that type, it returns 1.
10298 // - If the operand can be folded to a pointer to the first character
10299 // of a string literal (or such a pointer cast to an integral type)
10300 // or to a null pointer or an integer cast to a pointer, it returns 1.
10302 // Otherwise, it returns 0.
10304 // FIXME: GCC also intends to return 1 for literals of aggregate types, but
10305 // its support for this did not work prior to GCC 9 and is not yet well
// understood, so aggregates are not handled here.
10307 if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() ||
10308 ArgType->isAnyComplexType() || ArgType->isPointerType() ||
10309 ArgType->isNullPtrType()) {
10311 if (!::EvaluateAsRValue(Info, Arg, V)) {
// Keep the folding diagnostics alive so the caller can explain why the
// operand was not constant.
10312 Fold.keepDiagnostics();
10316 // For a pointer (possibly cast to integer), there are special rules.
10317 if (V.getKind() == APValue::LValue)
10318 return EvaluateBuiltinConstantPForLValue(V);
10320 // Otherwise, any constant value is good enough.
10321 return V.hasValue();
10324 // Anything else isn't considered to be sufficiently constant.
10328 /// Retrieves the "underlying object type" of the given expression,
10329 /// as used by __builtin_object_size.
/// Returns a null QualType when the base kind carries no usable type
/// (the fall-through at the end of the if-chain).
10330 static QualType getObjectType(APValue::LValueBase B) {
10331 if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
10332 if (const VarDecl *VD = dyn_cast<VarDecl>(D))
10333 return VD->getType();
10334 } else if (const Expr *E = B.dyn_cast<const Expr*>()) {
10335 if (isa<CompoundLiteralExpr>(E))
10336 return E->getType();
10337 } else if (B.is<TypeInfoLValue>()) {
10338 return B.getTypeInfoType();
10339 } else if (B.is<DynamicAllocLValue>()) {
10340 return B.getDynamicAllocType();
10346 /// A more selective version of E->IgnoreParenCasts for
10347 /// tryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only
10348 /// to change the type of E.
10349 /// Ex. For E = `(short*)((char*)(&foo))`, returns `&foo`
10351 /// Always returns an RValue with a pointer representation.
10352 static const Expr *ignorePointerCastsAndParens(const Expr *E) {
10353 assert(E->isRValue() && E->getType()->hasPointerRepresentation());
10355 auto *NoParens = E->IgnoreParens();
10356 auto *Cast = dyn_cast<CastExpr>(NoParens);
10357 if (Cast == nullptr)
10360 // We only conservatively allow a few kinds of casts, because this code is
10361 // inherently a simple solution that seeks to support the common case.
10362 auto CastKind = Cast->getCastKind();
10363 if (CastKind != CK_NoOp && CastKind != CK_BitCast &&
10364 CastKind != CK_AddressSpaceConversion)
10367 auto *SubExpr = Cast->getSubExpr();
// Stop peeling once the subexpression no longer looks like a pointer rvalue,
// so the function's postcondition (pointer-representation rvalue) holds.
10368 if (!SubExpr->getType()->hasPointerRepresentation() || !SubExpr->isRValue())
// Recurse to strip arbitrarily deep cast/paren nests.
10370 return ignorePointerCastsAndParens(SubExpr);
10373 /// Checks to see if the given LValue's Designator is at the end of the LValue's
10374 /// record layout. e.g.
10375 /// struct { struct { int a, b; } fst, snd; } obj;
10378 /// obj.fst.a // no
10379 /// obj.fst.b // no
10380 /// obj.snd.a // no
10381 /// obj.snd.b // yes
10383 /// Please note: this function is specialized for how __builtin_object_size
10384 /// views "objects".
10386 /// If this encounters an invalid RecordDecl or otherwise cannot determine the
10387 /// correct result, it will always return true.
10388 static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) {
10389 assert(!LVal.Designator.Invalid);
// Helper: is FD the last field of its record (or is the record invalid /
// a union, in which case the result is treated as "at the end")?
10391 auto IsLastOrInvalidFieldDecl = [&Ctx](const FieldDecl *FD, bool &Invalid) {
10392 const RecordDecl *Parent = FD->getParent();
10393 Invalid = Parent->isInvalidDecl();
10394 if (Invalid || Parent->isUnion())
10396 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(Parent);
10397 return FD->getFieldIndex() + 1 == Layout.getFieldCount();
// First check fields referenced directly by a MemberExpr base (the part of
// the designator that is hidden in the base expression).
10400 auto &Base = LVal.getLValueBase();
10401 if (auto *ME = dyn_cast_or_null<MemberExpr>(Base.dyn_cast<const Expr *>())) {
10402 if (auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
10404 if (!IsLastOrInvalidFieldDecl(FD, Invalid))
10406 } else if (auto *IFD = dyn_cast<IndirectFieldDecl>(ME->getMemberDecl())) {
// An indirect field (anonymous struct/union member) must be "last" at
// every level of its chain.
10407 for (auto *FD : IFD->chain()) {
10409 if (!IsLastOrInvalidFieldDecl(cast<FieldDecl>(FD), Invalid))
10416 QualType BaseType = getType(Base);
10417 if (LVal.Designator.FirstEntryIsAnUnsizedArray) {
10418 // If we don't know the array bound, conservatively assume we're looking at
10419 // the final array element.
10421 if (BaseType->isIncompleteArrayType())
10422 BaseType = Ctx.getAsArrayType(BaseType)->getElementType();
10424 BaseType = BaseType->castAs<PointerType>()->getPointeeType();
// Walk the remaining designator entries; each must denote the last
// element/field at its level for the whole designator to be "at the end".
10427 for (unsigned E = LVal.Designator.Entries.size(); I != E; ++I) {
10428 const auto &Entry = LVal.Designator.Entries[I];
10429 if (BaseType->isArrayType()) {
10430 // Because __builtin_object_size treats arrays as objects, we can ignore
10431 // the index iff this is the last array in the Designator.
10434 const auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType));
10435 uint64_t Index = Entry.getAsArrayIndex();
10436 if (Index + 1 != CAT->getSize())
10438 BaseType = CAT->getElementType();
10439 } else if (BaseType->isAnyComplexType()) {
10440 const auto *CT = BaseType->castAs<ComplexType>();
10441 uint64_t Index = Entry.getAsArrayIndex();
10444 BaseType = CT->getElementType();
10445 } else if (auto *FD = getAsField(Entry)) {
10447 if (!IsLastOrInvalidFieldDecl(FD, Invalid))
10449 BaseType = FD->getType();
10451 assert(getAsBaseClass(Entry) && "Expecting cast to a base class");
10458 /// Tests to see if the LValue has a user-specified designator (that isn't
10459 /// necessarily valid). Note that this always returns 'true' if the LValue has
10460 /// an unsized array as its first designator entry, because there's currently no
10461 /// way to tell if the user typed *foo or foo[0].
10462 static bool refersToCompleteObject(const LValue &LVal) {
10463 if (LVal.Designator.Invalid)
10466 if (!LVal.Designator.Entries.empty())
10467 return LVal.Designator.isMostDerivedAnUnsizedArray();
10469 if (!LVal.InvalidBase)
10472 // If `E` is a MemberExpr, then the first part of the designator is hiding in
// the base expression, so an empty Entries list does not mean "whole object".
10474 const auto *E = LVal.Base.dyn_cast<const Expr *>();
10475 return !E || !isa<MemberExpr>(E);
10478 /// Attempts to detect a user writing into a piece of memory that's impossible
10479 /// to figure out the size of by just using types.
/// Used by determineEndOffset to decide when a type-based size would be a lie
/// (e.g. the classic trailing char[1] flexible-array idiom).
10480 static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) {
10481 const SubobjectDesignator &Designator = LVal.Designator;
10483 // - Users can only write off of the end when we have an invalid base. Invalid
10484 // bases imply we don't know where the memory came from.
10485 // - We used to be a bit more aggressive here; we'd only be conservative if
10486 // the array at the end was flexible, or if it had 0 or 1 elements. This
10487 // broke some common standard library extensions (PR30346), but was
10488 // otherwise seemingly fine. It may be useful to reintroduce this behavior
10489 // with some sort of whitelist. OTOH, it seems that GCC is always
10490 // conservative with the last element in structs (if it's an array), so our
10491 // current behavior is more compatible than a whitelisting approach would
// be.
10493 return LVal.InvalidBase &&
10494 Designator.Entries.size() == Designator.MostDerivedPathLength &&
10495 Designator.MostDerivedIsArrayElement &&
10496 isDesignatorAtObjectEnd(Ctx, LVal);
10499 /// Converts the given APInt to CharUnits, assuming the APInt is unsigned.
10500 /// Fails if the conversion would cause loss of precision.
10501 static bool convertUnsignedAPIntToCharUnits(const llvm::APInt &Int,
10502 CharUnits &Result) {
// Reject values that do not fit in CharUnits' signed quantity type; the
// unsigned comparison is safe because CharUnitsMax is non-negative.
10503 auto CharUnitsMax = std::numeric_limits<CharUnits::QuantityType>::max();
10504 if (Int.ugt(CharUnitsMax))
10506 Result = CharUnits::fromQuantity(Int.getZExtValue());
10510 /// Helper for tryEvaluateBuiltinObjectSize -- Given an LValue, this will
10511 /// determine how many bytes exist from the beginning of the object to either
10512 /// the end of the current subobject, or the end of the object itself, depending
10513 /// on what the LValue looks like + the value of Type.
/// `Type` is the __builtin_object_size mode (0-3): bit 0 selects subobject vs
/// whole-object, bit 1 selects lower vs upper bound.
10515 /// If this returns false, the value of Result is undefined.
10516 static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
10517 unsigned Type, const LValue &LVal,
10518 CharUnits &EndOffset) {
10519 bool DetermineForCompleteObject = refersToCompleteObject(LVal);
// Sizeof wrapper that refuses null/incomplete/function types up front.
10521 auto CheckedHandleSizeof = [&](QualType Ty, CharUnits &Result) {
10522 if (Ty.isNull() || Ty->isIncompleteType() || Ty->isFunctionType())
10524 return HandleSizeof(Info, ExprLoc, Ty, Result);
10527 // We want to evaluate the size of the entire object. This is a valid fallback
10528 // for when Type=1 and the designator is invalid, because we're asked for an
// upper bound anyway.
10530 if (!(Type & 1) || LVal.Designator.Invalid || DetermineForCompleteObject) {
10531 // Type=3 wants a lower bound, so we can't fall back to this.
10532 if (Type == 3 && !DetermineForCompleteObject)
// If the base is an alloc_size call, that call tells us the exact
// allocation size, which beats any type-based answer.
10535 llvm::APInt APEndOffset;
10536 if (isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
10537 getBytesReturnedByAllocSizeCall(Info.Ctx, LVal, APEndOffset))
10538 return convertUnsignedAPIntToCharUnits(APEndOffset, EndOffset);
10540 if (LVal.InvalidBase)
10543 QualType BaseTy = getObjectType(LVal.getLValueBase());
10544 return CheckedHandleSizeof(BaseTy, EndOffset);
10547 // We want to evaluate the size of a subobject.
10548 const SubobjectDesignator &Designator = LVal.Designator;
10550 // The following is a moderately common idiom in C:
10552 // struct Foo { int a; char c[1]; };
10553 // struct Foo *F = (struct Foo *)malloc(sizeof(struct Foo) + strlen(Bar));
10554 // strcpy(&F->c[0], Bar);
10556 // In order to not break too much legacy code, we need to support it.
10557 if (isUserWritingOffTheEnd(Info.Ctx, LVal)) {
10558 // If we can resolve this to an alloc_size call, we can hand that back,
10559 // because we know for certain how many bytes there are to write to.
10560 llvm::APInt APEndOffset;
10561 if (isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
10562 getBytesReturnedByAllocSizeCall(Info.Ctx, LVal, APEndOffset))
10563 return convertUnsignedAPIntToCharUnits(APEndOffset, EndOffset);
10565 // If we cannot determine the size of the initial allocation, then we can't
10566 // given an accurate upper-bound. However, we are still able to give
10567 // conservative lower-bounds for Type=3.
10572 CharUnits BytesPerElem;
10573 if (!CheckedHandleSizeof(Designator.MostDerivedType, BytesPerElem))
10576 // According to the GCC documentation, we want the size of the subobject
10577 // denoted by the pointer. But that's not quite right -- what we actually
10578 // want is the size of the immediately-enclosing array, if there is one.
10579 int64_t ElemsRemaining;
10580 if (Designator.MostDerivedIsArrayElement &&
10581 Designator.Entries.size() == Designator.MostDerivedPathLength) {
10582 uint64_t ArraySize = Designator.getMostDerivedArraySize();
10583 uint64_t ArrayIndex = Designator.Entries.back().getAsArrayIndex();
// Clamp at zero so a one-past-the-end (or out-of-range) index does not
// wrap the unsigned subtraction.
10584 ElemsRemaining = ArraySize <= ArrayIndex ? 0 : ArraySize - ArrayIndex;
10586 ElemsRemaining = Designator.isOnePastTheEnd() ? 0 : 1;
10589 EndOffset = LVal.getLValueOffset() + BytesPerElem * ElemsRemaining;
10593 /// Tries to evaluate the __builtin_object_size for @p E. If successful,
10594 /// returns true and stores the result in @p Size.
10596 /// If @p WasError is non-null, this will report whether the failure to evaluate
10597 /// is to be treated as an Error in IntExprEvaluator.
10598 static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
10599 EvalInfo &Info, uint64_t &Size) {
10600 // Determine the denoted object.
10603 // The operand of __builtin_object_size is never evaluated for side-effects.
10604 // If there are any, but we can determine the pointed-to object anyway, then
10605 // ignore the side-effects.
10606 SpeculativeEvaluationRAII SpeculativeEval(Info);
10607 IgnoreSideEffectsRAII Fold(Info);
10609 if (E->isGLValue()) {
10610 // It's possible for us to be given GLValues if we're called via
10611 // Expr::tryEvaluateObjectSize.
10613 if (!EvaluateAsRValue(Info, E, RVal))
10615 LVal.setFrom(Info.Ctx, RVal);
// Allow invalid bases here: determineEndOffset knows how to produce
// conservative answers for them.
10616 } else if (!EvaluatePointer(ignorePointerCastsAndParens(E), LVal, Info,
10617 /*InvalidBaseOK=*/true))
10621 // If we point to before the start of the object, there are no accessible
// bytes at all.
10623 if (LVal.getLValueOffset().isNegative()) {
10628 CharUnits EndOffset;
10629 if (!determineEndOffset(Info, E->getExprLoc(), Type, LVal, EndOffset))
10632 // If we've fallen outside of the end offset, just pretend there's nothing to
10633 // write to/read from.
10634 if (EndOffset <= LVal.getLValueOffset())
// Size is the distance from the pointed-at offset to the computed end.
10637 Size = (EndOffset - LVal.getLValueOffset()).getQuantity();
// A ConstantExpr wrapper may already carry a cached APValue result from a
// prior evaluation; reuse it rather than re-evaluating. Evaluation inside a
// ConstantExpr is always a constant context.
10641 bool IntExprEvaluator::VisitConstantExpr(const ConstantExpr *E) {
10642 llvm::SaveAndRestore<bool> InConstantContext(Info.InConstantContext, true);
10643 if (E->getResultAPValueKind() != APValue::None)
10644 return Success(E->getAPValueResult(), E);
10645 return ExprEvaluatorBaseTy::VisitConstantExpr(E);
// Dispatch builtin calls to the dedicated builtin evaluator; everything else
// goes through the generic call-evaluation path.
10648 bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
10649 if (unsigned BuiltinOp = E->getBuiltinCallee())
10650 return VisitBuiltinCallExpr(E, BuiltinOp);
10652 return ExprEvaluatorBaseTy::VisitCallExpr(E);
// Evaluate the two arguments of __builtin_is_aligned/__builtin_align_{up,down}:
// Val receives the value being aligned (an LValue for pointer sources, an int
// otherwise) and Alignment receives the validated power-of-two alignment.
10655 static bool getBuiltinAlignArguments(const CallExpr *E, EvalInfo &Info,
10656 APValue &Val, APSInt &Alignment) {
10657 QualType SrcTy = E->getArg(0)->getType();
10658 if (!getAlignmentArgument(E->getArg(1), SrcTy, Info, Alignment))
10660 // Even though we are evaluating integer expressions we could get a pointer
10661 // argument for the __builtin_is_aligned() case.
10662 if (SrcTy->isPointerType()) {
10664 if (!EvaluatePointer(E->getArg(0), Ptr, Info))
10667 } else if (!SrcTy->isIntegralOrEnumerationType()) {
10668 Info.FFDiag(E->getArg(0));
10672 if (!EvaluateInteger(E->getArg(0), SrcInt, Info))
// The alignment argument was converted relative to SrcTy, so the widths
// must line up for the callers' bitwise math to be well-defined.
10674 assert(SrcInt.getBitWidth() >= Alignment.getBitWidth() &&
10675 "Bit widths must be the same");
10676 Val = APValue(SrcInt);
10678 assert(Val.hasValue());
10682 bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
10683 unsigned BuiltinOp) {
10684 switch (unsigned BuiltinOp = E->getBuiltinCallee()) {
10686 return ExprEvaluatorBaseTy::VisitCallExpr(E);
10688 case Builtin::BI__builtin_dynamic_object_size:
10689 case Builtin::BI__builtin_object_size: {
10690 // The type was checked when we built the expression.
10692 E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
10693 assert(Type <= 3 && "unexpected type");
10696 if (tryEvaluateBuiltinObjectSize(E->getArg(0), Type, Info, Size))
10697 return Success(Size, E);
10699 if (E->getArg(0)->HasSideEffects(Info.Ctx))
10700 return Success((Type & 2) ? 0 : -1, E);
10702 // Expression had no side effects, but we couldn't statically determine the
10703 // size of the referenced object.
10704 switch (Info.EvalMode) {
10705 case EvalInfo::EM_ConstantExpression:
10706 case EvalInfo::EM_ConstantFold:
10707 case EvalInfo::EM_IgnoreSideEffects:
10708 // Leave it to IR generation.
10710 case EvalInfo::EM_ConstantExpressionUnevaluated:
10711 // Reduce it to a constant now.
10712 return Success((Type & 2) ? 0 : -1, E);
10715 llvm_unreachable("unexpected EvalMode");
10718 case Builtin::BI__builtin_os_log_format_buffer_size: {
10719 analyze_os_log::OSLogBufferLayout Layout;
10720 analyze_os_log::computeOSLogBufferLayout(Info.Ctx, E, Layout);
10721 return Success(Layout.size().getQuantity(), E);
10724 case Builtin::BI__builtin_is_aligned: {
10727 if (!getBuiltinAlignArguments(E, Info, Src, Alignment))
10729 if (Src.isLValue()) {
10730 // If we evaluated a pointer, check the minimum known alignment.
10732 Ptr.setFrom(Info.Ctx, Src);
10733 CharUnits BaseAlignment = getBaseAlignment(Info, Ptr);
10734 CharUnits PtrAlign = BaseAlignment.alignmentAtOffset(Ptr.Offset);
10735 // We can return true if the known alignment at the computed offset is
10736 // greater than the requested alignment.
10737 assert(PtrAlign.isPowerOfTwo());
10738 assert(Alignment.isPowerOf2());
10739 if (PtrAlign.getQuantity() >= Alignment)
10740 return Success(1, E);
10741 // If the alignment is not known to be sufficient, some cases could still
10742 // be aligned at run time. However, if the requested alignment is less or
10743 // equal to the base alignment and the offset is not aligned, we know that
10744 // the run-time value can never be aligned.
10745 if (BaseAlignment.getQuantity() >= Alignment &&
10746 PtrAlign.getQuantity() < Alignment)
10747 return Success(0, E);
10748 // Otherwise we can't infer whether the value is sufficiently aligned.
10749 // TODO: __builtin_is_aligned(__builtin_align_{down,up{(expr, N), N)
10750 // in cases where we can't fully evaluate the pointer.
10751 Info.FFDiag(E->getArg(0), diag::note_constexpr_alignment_compute)
10755 assert(Src.isInt());
10756 return Success((Src.getInt() & (Alignment - 1)) == 0 ? 1 : 0, E);
10758 case Builtin::BI__builtin_align_up: {
10761 if (!getBuiltinAlignArguments(E, Info, Src, Alignment))
10765 APSInt AlignedVal =
10766 APSInt((Src.getInt() + (Alignment - 1)) & ~(Alignment - 1),
10767 Src.getInt().isUnsigned());
10768 assert(AlignedVal.getBitWidth() == Src.getInt().getBitWidth());
10769 return Success(AlignedVal, E);
10771 case Builtin::BI__builtin_align_down: {
10774 if (!getBuiltinAlignArguments(E, Info, Src, Alignment))
10778 APSInt AlignedVal =
10779 APSInt(Src.getInt() & ~(Alignment - 1), Src.getInt().isUnsigned());
10780 assert(AlignedVal.getBitWidth() == Src.getInt().getBitWidth());
10781 return Success(AlignedVal, E);
10784 case Builtin::BI__builtin_bswap16:
10785 case Builtin::BI__builtin_bswap32:
10786 case Builtin::BI__builtin_bswap64: {
10788 if (!EvaluateInteger(E->getArg(0), Val, Info))
10791 return Success(Val.byteSwap(), E);
10794 case Builtin::BI__builtin_classify_type:
10795 return Success((int)EvaluateBuiltinClassifyType(E, Info.getLangOpts()), E);
10797 case Builtin::BI__builtin_clrsb:
10798 case Builtin::BI__builtin_clrsbl:
10799 case Builtin::BI__builtin_clrsbll: {
10801 if (!EvaluateInteger(E->getArg(0), Val, Info))
10804 return Success(Val.getBitWidth() - Val.getMinSignedBits(), E);
10807 case Builtin::BI__builtin_clz:
10808 case Builtin::BI__builtin_clzl:
10809 case Builtin::BI__builtin_clzll:
10810 case Builtin::BI__builtin_clzs: {
10812 if (!EvaluateInteger(E->getArg(0), Val, Info))
10817 return Success(Val.countLeadingZeros(), E);
10820 case Builtin::BI__builtin_constant_p: {
10821 const Expr *Arg = E->getArg(0);
10822 if (EvaluateBuiltinConstantP(Info, Arg))
10823 return Success(true, E);
10824 if (Info.InConstantContext || Arg->HasSideEffects(Info.Ctx)) {
10825 // Outside a constant context, eagerly evaluate to false in the presence
10826 // of side-effects in order to avoid -Wunsequenced false-positives in
10827 // a branch on __builtin_constant_p(expr).
10828 return Success(false, E);
10830 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
10834 case Builtin::BI__builtin_is_constant_evaluated: {
10835 const auto *Callee = Info.CurrentCall->getCallee();
10836 if (Info.InConstantContext && !Info.CheckingPotentialConstantExpression &&
10837 (Info.CallStackDepth == 1 ||
10838 (Info.CallStackDepth == 2 && Callee->isInStdNamespace() &&
10839 Callee->getIdentifier() &&
10840 Callee->getIdentifier()->isStr("is_constant_evaluated")))) {
10841 // FIXME: Find a better way to avoid duplicated diagnostics.
10842 if (Info.EvalStatus.Diag)
10843 Info.report((Info.CallStackDepth == 1) ? E->getExprLoc()
10844 : Info.CurrentCall->CallLoc,
10845 diag::warn_is_constant_evaluated_always_true_constexpr)
10846 << (Info.CallStackDepth == 1 ? "__builtin_is_constant_evaluated"
10847 : "std::is_constant_evaluated");
10850 return Success(Info.InConstantContext, E);
10853 case Builtin::BI__builtin_ctz:
10854 case Builtin::BI__builtin_ctzl:
10855 case Builtin::BI__builtin_ctzll:
10856 case Builtin::BI__builtin_ctzs: {
10858 if (!EvaluateInteger(E->getArg(0), Val, Info))
10863 return Success(Val.countTrailingZeros(), E);
10866 case Builtin::BI__builtin_eh_return_data_regno: {
10867 int Operand = E->getArg(0)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
10868 Operand = Info.Ctx.getTargetInfo().getEHDataRegisterNumber(Operand);
10869 return Success(Operand, E);
10872 case Builtin::BI__builtin_expect:
10873 return Visit(E->getArg(0));
10875 case Builtin::BI__builtin_ffs:
10876 case Builtin::BI__builtin_ffsl:
10877 case Builtin::BI__builtin_ffsll: {
10879 if (!EvaluateInteger(E->getArg(0), Val, Info))
10882 unsigned N = Val.countTrailingZeros();
10883 return Success(N == Val.getBitWidth() ? 0 : N + 1, E);
10886 case Builtin::BI__builtin_fpclassify: {
10888 if (!EvaluateFloat(E->getArg(5), Val, Info))
10891 switch (Val.getCategory()) {
10892 case APFloat::fcNaN: Arg = 0; break;
10893 case APFloat::fcInfinity: Arg = 1; break;
10894 case APFloat::fcNormal: Arg = Val.isDenormal() ? 3 : 2; break;
10895 case APFloat::fcZero: Arg = 4; break;
10897 return Visit(E->getArg(Arg));
10900 case Builtin::BI__builtin_isinf_sign: {
10902 return EvaluateFloat(E->getArg(0), Val, Info) &&
10903 Success(Val.isInfinity() ? (Val.isNegative() ? -1 : 1) : 0, E);
10906 case Builtin::BI__builtin_isinf: {
10908 return EvaluateFloat(E->getArg(0), Val, Info) &&
10909 Success(Val.isInfinity() ? 1 : 0, E);
10912 case Builtin::BI__builtin_isfinite: {
10914 return EvaluateFloat(E->getArg(0), Val, Info) &&
10915 Success(Val.isFinite() ? 1 : 0, E);
10918 case Builtin::BI__builtin_isnan: {
10920 return EvaluateFloat(E->getArg(0), Val, Info) &&
10921 Success(Val.isNaN() ? 1 : 0, E);
10924 case Builtin::BI__builtin_isnormal: {
10926 return EvaluateFloat(E->getArg(0), Val, Info) &&
10927 Success(Val.isNormal() ? 1 : 0, E);
10930 case Builtin::BI__builtin_parity:
10931 case Builtin::BI__builtin_parityl:
10932 case Builtin::BI__builtin_parityll: {
10934 if (!EvaluateInteger(E->getArg(0), Val, Info))
10937 return Success(Val.countPopulation() % 2, E);
10940 case Builtin::BI__builtin_popcount:
10941 case Builtin::BI__builtin_popcountl:
10942 case Builtin::BI__builtin_popcountll: {
10944 if (!EvaluateInteger(E->getArg(0), Val, Info))
10947 return Success(Val.countPopulation(), E);
10950 case Builtin::BIstrlen:
10951 case Builtin::BIwcslen:
10952 // A call to strlen is not a constant expression.
10953 if (Info.getLangOpts().CPlusPlus11)
10954 Info.CCEDiag(E, diag::note_constexpr_invalid_function)
10955 << /*isConstexpr*/0 << /*isConstructor*/0
10956 << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
10958 Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
10960 case Builtin::BI__builtin_strlen:
10961 case Builtin::BI__builtin_wcslen: {
10962 // As an extension, we support __builtin_strlen() as a constant expression,
10963 // and support folding strlen() to a constant.
10965 if (!EvaluatePointer(E->getArg(0), String, Info))
10968 QualType CharTy = E->getArg(0)->getType()->getPointeeType();
10970 // Fast path: if it's a string literal, search the string value.
10971 if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>(
10972 String.getLValueBase().dyn_cast<const Expr *>())) {
10973 // The string literal may have embedded null characters. Find the first
10974 // one and truncate there.
10975 StringRef Str = S->getBytes();
10976 int64_t Off = String.Offset.getQuantity();
10977 if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() &&
10978 S->getCharByteWidth() == 1 &&
10979 // FIXME: Add fast-path for wchar_t too.
10980 Info.Ctx.hasSameUnqualifiedType(CharTy, Info.Ctx.CharTy)) {
10981 Str = Str.substr(Off);
10983 StringRef::size_type Pos = Str.find(0);
10984 if (Pos != StringRef::npos)
10985 Str = Str.substr(0, Pos);
10987 return Success(Str.size(), E);
10990 // Fall through to slow path to issue appropriate diagnostic.
10993 // Slow path: scan the bytes of the string looking for the terminating 0.
10994 for (uint64_t Strlen = 0; /**/; ++Strlen) {
10996 if (!handleLValueToRValueConversion(Info, E, CharTy, String, Char) ||
10999 if (!Char.getInt())
11000 return Success(Strlen, E);
11001 if (!HandleLValueArrayAdjustment(Info, E, String, CharTy, 1))
11006 case Builtin::BIstrcmp:
11007 case Builtin::BIwcscmp:
11008 case Builtin::BIstrncmp:
11009 case Builtin::BIwcsncmp:
11010 case Builtin::BImemcmp:
11011 case Builtin::BIbcmp:
11012 case Builtin::BIwmemcmp:
11013 // A call to strlen is not a constant expression.
11014 if (Info.getLangOpts().CPlusPlus11)
11015 Info.CCEDiag(E, diag::note_constexpr_invalid_function)
11016 << /*isConstexpr*/0 << /*isConstructor*/0
11017 << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
11019 Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
11021 case Builtin::BI__builtin_strcmp:
11022 case Builtin::BI__builtin_wcscmp:
11023 case Builtin::BI__builtin_strncmp:
11024 case Builtin::BI__builtin_wcsncmp:
11025 case Builtin::BI__builtin_memcmp:
11026 case Builtin::BI__builtin_bcmp:
11027 case Builtin::BI__builtin_wmemcmp: {
11028 LValue String1, String2;
11029 if (!EvaluatePointer(E->getArg(0), String1, Info) ||
11030 !EvaluatePointer(E->getArg(1), String2, Info))
11033 uint64_t MaxLength = uint64_t(-1);
11034 if (BuiltinOp != Builtin::BIstrcmp &&
11035 BuiltinOp != Builtin::BIwcscmp &&
11036 BuiltinOp != Builtin::BI__builtin_strcmp &&
11037 BuiltinOp != Builtin::BI__builtin_wcscmp) {
11039 if (!EvaluateInteger(E->getArg(2), N, Info))
11041 MaxLength = N.getExtValue();
11044 // Empty substrings compare equal by definition.
11045 if (MaxLength == 0u)
11046 return Success(0, E);
11048 if (!String1.checkNullPointerForFoldAccess(Info, E, AK_Read) ||
11049 !String2.checkNullPointerForFoldAccess(Info, E, AK_Read) ||
11050 String1.Designator.Invalid || String2.Designator.Invalid)
11053 QualType CharTy1 = String1.Designator.getType(Info.Ctx);
11054 QualType CharTy2 = String2.Designator.getType(Info.Ctx);
11056 bool IsRawByte = BuiltinOp == Builtin::BImemcmp ||
11057 BuiltinOp == Builtin::BIbcmp ||
11058 BuiltinOp == Builtin::BI__builtin_memcmp ||
11059 BuiltinOp == Builtin::BI__builtin_bcmp;
11061 assert(IsRawByte ||
11062 (Info.Ctx.hasSameUnqualifiedType(
11063 CharTy1, E->getArg(0)->getType()->getPointeeType()) &&
11064 Info.Ctx.hasSameUnqualifiedType(CharTy1, CharTy2)));
11066 const auto &ReadCurElems = [&](APValue &Char1, APValue &Char2) {
11067 return handleLValueToRValueConversion(Info, E, CharTy1, String1, Char1) &&
11068 handleLValueToRValueConversion(Info, E, CharTy2, String2, Char2) &&
11069 Char1.isInt() && Char2.isInt();
11071 const auto &AdvanceElems = [&] {
11072 return HandleLValueArrayAdjustment(Info, E, String1, CharTy1, 1) &&
11073 HandleLValueArrayAdjustment(Info, E, String2, CharTy2, 1);
11077 uint64_t BytesRemaining = MaxLength;
11078 // Pointers to const void may point to objects of incomplete type.
11079 if (CharTy1->isIncompleteType()) {
11080 Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy1;
11083 if (CharTy2->isIncompleteType()) {
11084 Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy2;
11087 uint64_t CharTy1Width{Info.Ctx.getTypeSize(CharTy1)};
11088 CharUnits CharTy1Size = Info.Ctx.toCharUnitsFromBits(CharTy1Width);
11089 // Give up on comparing between elements with disparate widths.
11090 if (CharTy1Size != Info.Ctx.getTypeSizeInChars(CharTy2))
11092 uint64_t BytesPerElement = CharTy1Size.getQuantity();
11093 assert(BytesRemaining && "BytesRemaining should not be zero: the "
11094 "following loop considers at least one element");
11096 APValue Char1, Char2;
11097 if (!ReadCurElems(Char1, Char2))
11099 // We have compatible in-memory widths, but a possible type and
11100 // (for `bool`) internal representation mismatch.
11101 // Assuming two's complement representation, including 0 for `false` and
11102 // 1 for `true`, we can check an appropriate number of elements for
11103 // equality even if they are not byte-sized.
11104 APSInt Char1InMem = Char1.getInt().extOrTrunc(CharTy1Width);
11105 APSInt Char2InMem = Char2.getInt().extOrTrunc(CharTy1Width);
11106 if (Char1InMem.ne(Char2InMem)) {
11107 // If the elements are byte-sized, then we can produce a three-way
11108 // comparison result in a straightforward manner.
11109 if (BytesPerElement == 1u) {
11110 // memcmp always compares unsigned chars.
11111 return Success(Char1InMem.ult(Char2InMem) ? -1 : 1, E);
11113 // The result is byte-order sensitive, and we have multibyte elements.
11114 // FIXME: We can compare the remaining bytes in the correct order.
11117 if (!AdvanceElems())
11119 if (BytesRemaining <= BytesPerElement)
11121 BytesRemaining -= BytesPerElement;
11123 // Enough elements are equal to account for the memcmp limit.
11124 return Success(0, E);
11128 (BuiltinOp != Builtin::BImemcmp && BuiltinOp != Builtin::BIbcmp &&
11129 BuiltinOp != Builtin::BIwmemcmp &&
11130 BuiltinOp != Builtin::BI__builtin_memcmp &&
11131 BuiltinOp != Builtin::BI__builtin_bcmp &&
11132 BuiltinOp != Builtin::BI__builtin_wmemcmp);
11133 bool IsWide = BuiltinOp == Builtin::BIwcscmp ||
11134 BuiltinOp == Builtin::BIwcsncmp ||
11135 BuiltinOp == Builtin::BIwmemcmp ||
11136 BuiltinOp == Builtin::BI__builtin_wcscmp ||
11137 BuiltinOp == Builtin::BI__builtin_wcsncmp ||
11138 BuiltinOp == Builtin::BI__builtin_wmemcmp;
11140 for (; MaxLength; --MaxLength) {
11141 APValue Char1, Char2;
11142 if (!ReadCurElems(Char1, Char2))
11144 if (Char1.getInt() != Char2.getInt()) {
11145 if (IsWide) // wmemcmp compares with wchar_t signedness.
11146 return Success(Char1.getInt() < Char2.getInt() ? -1 : 1, E);
11147 // memcmp always compares unsigned chars.
11148 return Success(Char1.getInt().ult(Char2.getInt()) ? -1 : 1, E);
11150 if (StopAtNull && !Char1.getInt())
11151 return Success(0, E);
11152 assert(!(StopAtNull && !Char2.getInt()));
11153 if (!AdvanceElems())
11156 // We hit the strncmp / memcmp limit.
11157 return Success(0, E);
11160 case Builtin::BI__atomic_always_lock_free:
11161 case Builtin::BI__atomic_is_lock_free:
11162 case Builtin::BI__c11_atomic_is_lock_free: {
11164 if (!EvaluateInteger(E->getArg(0), SizeVal, Info))
11167 // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
11168 // of two less than the maximum inline atomic width, we know it is
11169 // lock-free. If the size isn't a power of two, or greater than the
11170 // maximum alignment where we promote atomics, we know it is not lock-free
11171 // (at least not in the sense of atomic_is_lock_free). Otherwise,
11172 // the answer can only be determined at runtime; for example, 16-byte
11173 // atomics have lock-free implementations on some, but not all,
11174 // x86-64 processors.
11176 // Check power-of-two.
11177 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
11178 if (Size.isPowerOfTwo()) {
11179 // Check against inlining width.
11180 unsigned InlineWidthBits =
11181 Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
11182 if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) {
11183 if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
11184 Size == CharUnits::One() ||
11185 E->getArg(1)->isNullPointerConstant(Info.Ctx,
11186 Expr::NPC_NeverValueDependent))
11187 // OK, we will inline appropriately-aligned operations of this size,
11188 // and _Atomic(T) is appropriately-aligned.
11189 return Success(1, E);
11191 QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()->
11192 castAs<PointerType>()->getPointeeType();
11193 if (!PointeeType->isIncompleteType() &&
11194 Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) {
11195 // OK, we will inline operations on this object.
11196 return Success(1, E);
11201 // Avoid emiting call for runtime decision on PowerPC 32-bit
11202 // The lock free possibilities on this platform are covered by the lines
11203 // above and we know in advance other cases require lock
11204 if (Info.Ctx.getTargetInfo().getTriple().getArch() == llvm::Triple::ppc) {
11205 return Success(0, E);
11208 return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
11209 Success(0, E) : Error(E);
11211 case Builtin::BIomp_is_initial_device:
11212 // We can decide statically which value the runtime would return if called.
11213 return Success(Info.getLangOpts().OpenMPIsDevice ? 0 : 1, E);
11214 case Builtin::BI__builtin_add_overflow:
11215 case Builtin::BI__builtin_sub_overflow:
11216 case Builtin::BI__builtin_mul_overflow:
11217 case Builtin::BI__builtin_sadd_overflow:
11218 case Builtin::BI__builtin_uadd_overflow:
11219 case Builtin::BI__builtin_uaddl_overflow:
11220 case Builtin::BI__builtin_uaddll_overflow:
11221 case Builtin::BI__builtin_usub_overflow:
11222 case Builtin::BI__builtin_usubl_overflow:
11223 case Builtin::BI__builtin_usubll_overflow:
11224 case Builtin::BI__builtin_umul_overflow:
11225 case Builtin::BI__builtin_umull_overflow:
11226 case Builtin::BI__builtin_umulll_overflow:
11227 case Builtin::BI__builtin_saddl_overflow:
11228 case Builtin::BI__builtin_saddll_overflow:
11229 case Builtin::BI__builtin_ssub_overflow:
11230 case Builtin::BI__builtin_ssubl_overflow:
11231 case Builtin::BI__builtin_ssubll_overflow:
11232 case Builtin::BI__builtin_smul_overflow:
11233 case Builtin::BI__builtin_smull_overflow:
11234 case Builtin::BI__builtin_smulll_overflow: {
11235 LValue ResultLValue;
11238 QualType ResultType = E->getArg(2)->getType()->getPointeeType();
11239 if (!EvaluateInteger(E->getArg(0), LHS, Info) ||
11240 !EvaluateInteger(E->getArg(1), RHS, Info) ||
11241 !EvaluatePointer(E->getArg(2), ResultLValue, Info))
11245 bool DidOverflow = false;
11247 // If the types don't have to match, enlarge all 3 to the largest of them.
11248 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
11249 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
11250 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
11251 bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
11252 ResultType->isSignedIntegerOrEnumerationType();
11253 bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
11254 ResultType->isSignedIntegerOrEnumerationType();
11255 uint64_t LHSSize = LHS.getBitWidth();
11256 uint64_t RHSSize = RHS.getBitWidth();
11257 uint64_t ResultSize = Info.Ctx.getTypeSize(ResultType);
11258 uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
11260 // Add an additional bit if the signedness isn't uniformly agreed to. We
11261 // could do this ONLY if there is a signed and an unsigned that both have
11262 // MaxBits, but the code to check that is pretty nasty. The issue will be
11263 // caught in the shrink-to-result later anyway.
11264 if (IsSigned && !AllSigned)
11267 LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
11268 RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
11269 Result = APSInt(MaxBits, !IsSigned);
11272 // Find largest int.
11273 switch (BuiltinOp) {
11275 llvm_unreachable("Invalid value for BuiltinOp");
11276 case Builtin::BI__builtin_add_overflow:
11277 case Builtin::BI__builtin_sadd_overflow:
11278 case Builtin::BI__builtin_saddl_overflow:
11279 case Builtin::BI__builtin_saddll_overflow:
11280 case Builtin::BI__builtin_uadd_overflow:
11281 case Builtin::BI__builtin_uaddl_overflow:
11282 case Builtin::BI__builtin_uaddll_overflow:
11283 Result = LHS.isSigned() ? LHS.sadd_ov(RHS, DidOverflow)
11284 : LHS.uadd_ov(RHS, DidOverflow);
11286 case Builtin::BI__builtin_sub_overflow:
11287 case Builtin::BI__builtin_ssub_overflow:
11288 case Builtin::BI__builtin_ssubl_overflow:
11289 case Builtin::BI__builtin_ssubll_overflow:
11290 case Builtin::BI__builtin_usub_overflow:
11291 case Builtin::BI__builtin_usubl_overflow:
11292 case Builtin::BI__builtin_usubll_overflow:
11293 Result = LHS.isSigned() ? LHS.ssub_ov(RHS, DidOverflow)
11294 : LHS.usub_ov(RHS, DidOverflow);
11296 case Builtin::BI__builtin_mul_overflow:
11297 case Builtin::BI__builtin_smul_overflow:
11298 case Builtin::BI__builtin_smull_overflow:
11299 case Builtin::BI__builtin_smulll_overflow:
11300 case Builtin::BI__builtin_umul_overflow:
11301 case Builtin::BI__builtin_umull_overflow:
11302 case Builtin::BI__builtin_umulll_overflow:
11303 Result = LHS.isSigned() ? LHS.smul_ov(RHS, DidOverflow)
11304 : LHS.umul_ov(RHS, DidOverflow);
11308 // In the case where multiple sizes are allowed, truncate and see if
11309 // the values are the same.
11310 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
11311 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
11312 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
11313 // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
11314 // since it will give us the behavior of a TruncOrSelf in the case where
11315 // its parameter <= its size. We previously set Result to be at least the
11316 // type-size of the result, so getTypeSize(ResultType) <= Result.BitWidth
11317 // will work exactly like TruncOrSelf.
11318 APSInt Temp = Result.extOrTrunc(Info.Ctx.getTypeSize(ResultType));
11319 Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
11321 if (!APSInt::isSameValue(Temp, Result))
11322 DidOverflow = true;
11326 APValue APV{Result};
11327 if (!handleAssignment(Info, E, ResultLValue, ResultType, APV))
11329 return Success(DidOverflow, E);
11334 /// Determine whether this is a pointer past the end of the complete
11335 /// object referred to by the lvalue.
11336 static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx,
11337 const LValue &LV) {
11338 // A null pointer can be viewed as being "past the end" but we don't
11339 // choose to look at it that way here.
11340 if (!LV.getLValueBase())
11343 // If the designator is valid and refers to a subobject, we're not pointing
11345 if (!LV.getLValueDesignator().Invalid &&
11346 !LV.getLValueDesignator().isOnePastTheEnd())
11349 // A pointer to an incomplete type might be past-the-end if the type's size is
11350 // zero. We cannot tell because the type is incomplete.
11351 QualType Ty = getType(LV.getLValueBase());
11352 if (Ty->isIncompleteType())
11355 // We're a past-the-end pointer if we point to the byte after the object,
11356 // no matter what our type or path is.
11357 auto Size = Ctx.getTypeSizeInChars(Ty);
11358 return LV.getLValueOffset() == Size;
11363 /// Data recursive integer evaluator of certain binary operators.
11365 /// We use a data recursive algorithm for binary operators so that we are able
11366 /// to handle extreme cases of chained binary operators without causing stack
11368 class DataRecursiveIntBinOpEvaluator {
// Outcome of evaluating one subexpression: the produced APValue plus a
// flag recording whether evaluation failed.
11369 struct EvalResult {
11373 EvalResult() : Failed(false) { }
// Transfer RHS's value and failure flag into this result, resetting the
// source's flag so it can be reused.
11375 void swap(EvalResult &RHS) {
11377 Failed = RHS.Failed;
11378 RHS.Failed = false;
// A unit of pending work on the explicit queue: the expression to visit
// and, once the LHS of a binary operator has been evaluated, its result.
11384 EvalResult LHSResult; // meaningful only for binary operator expression.
11385 enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind;
11388 Job(Job &&) = default;
// Enter speculative evaluation so diagnostics from this arm are
// suppressed (used when the other arm may determine the result).
11390 void startSpeculativeEval(EvalInfo &Info) {
11391 SpecEvalRAII = SpeculativeEvaluationRAII(Info);
11395 SpeculativeEvaluationRAII SpecEvalRAII;
// Explicit work queue used in place of the machine call stack.
11398 SmallVector<Job, 16> Queue;
11400 IntExprEvaluator &IntEval;
11402 APValue &FinalResult;
11405 DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result)
11406 : IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { }
11408 /// True if \param E is a binary operator that we are going to handle
11409 /// data recursively.
11410 /// We handle binary operators that are comma, logical, or that have operands
11411 /// with integral or enumeration type.
11412 static bool shouldEnqueue(const BinaryOperator *E) {
11413 return E->getOpcode() == BO_Comma || E->isLogicalOp() ||
11414 (E->isRValue() && E->getType()->isIntegralOrEnumerationType() &&
11415 E->getLHS()->getType()->isIntegralOrEnumerationType() &&
11416 E->getRHS()->getType()->isIntegralOrEnumerationType());
// Drive the work queue to completion and move the final value into
// FinalResult; returns false if evaluation failed.
11419 bool Traverse(const BinaryOperator *E) {
11421 EvalResult PrevResult;
11422 while (!Queue.empty())
11423 process(PrevResult);
11425 if (PrevResult.Failed) return false;
11427 FinalResult.swap(PrevResult.Val);
// Success/error helpers simply forward to the wrapped IntExprEvaluator
// so diagnostics and result construction stay consistent with it.
11432 bool Success(uint64_t Value, const Expr *E, APValue &Result) {
11433 return IntEval.Success(Value, E, Result);
11435 bool Success(const APSInt &Value, const Expr *E, APValue &Result) {
11436 return IntEval.Success(Value, E, Result);
11438 bool Error(const Expr *E) {
11439 return IntEval.Error(E);
11441 bool Error(const Expr *E, diag::kind D) {
11442 return IntEval.Error(E, D);
11445 OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
11446 return Info.CCEDiag(E, D);
11449 // Returns true if visiting the RHS is necessary, false otherwise.
11450 bool VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
11451 bool &SuppressRHSDiags);
11453 bool VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
11454 const BinaryOperator *E, APValue &Result);
// Evaluate a leaf (non-enqueued) expression directly.
11456 void EvaluateExpr(const Expr *E, EvalResult &Result) {
11457 Result.Failed = !Evaluate(Result.Val, Info, E);
11459 Result.Val = APValue();
11462 void process(EvalResult &Result);
// Push an expression onto the work queue (with parens stripped) as a
// not-yet-classified job.
11464 void enqueue(const Expr *E) {
11465 E = E->IgnoreParens();
11466 Queue.resize(Queue.size()+1);
11467 Queue.back().E = E;
11468 Queue.back().Kind = Job::AnyExprKind;
11474 bool DataRecursiveIntBinOpEvaluator::
11475 VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
11476 bool &SuppressRHSDiags) {
// Decide what to do after only the LHS of E has been evaluated: comma
// discards the LHS, logical operators may short-circuit. Returns true if
// the RHS still needs to be visited.
11477 if (E->getOpcode() == BO_Comma) {
11478 // Ignore LHS but note if we could not evaluate it.
11479 if (LHSResult.Failed)
11480 return Info.noteSideEffect();
11484 if (E->isLogicalOp()) {
11486 if (!LHSResult.Failed && HandleConversionToBool(LHSResult.Val, LHSAsBool)) {
11487 // We were able to evaluate the LHS, see if we can get away with not
11488 // evaluating the RHS: 0 && X -> 0, 1 || X -> 1
11489 if (LHSAsBool == (E->getOpcode() == BO_LOr)) {
11490 Success(LHSAsBool, E, LHSResult.Val);
11491 return false; // Ignore RHS
11494 LHSResult.Failed = true;
11496 // Since we weren't able to evaluate the left hand side, it
11497 // might have had side effects.
11498 if (!Info.noteSideEffect())
11501 // We can't evaluate the LHS; however, sometimes the result
11502 // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
11503 // Don't ignore RHS and suppress diagnostics from this arm.
11504 SuppressRHSDiags = true;
// For the remaining (arithmetic) operators both operands must be
// integral or enumeration typed.
11510 assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
11511 E->getRHS()->getType()->isIntegralOrEnumerationType());
11513 if (LHSResult.Failed && !Info.noteFailure())
11514 return false; // Ignore RHS;
11519 static void addOrSubLValueAsInteger(APValue &LVal, const APSInt &Index,
11521 // Compute the new offset in the appropriate width, wrapping at 64 bits.
11522 // FIXME: When compiling for a 32-bit target, we should use 32-bit
11524 assert(!LVal.hasLValuePath() && "have designator for integer lvalue");
11525 CharUnits &Offset = LVal.getLValueOffset();
11526 uint64_t Offset64 = Offset.getQuantity();
11527 uint64_t Index64 = Index.extOrTrunc(64).getZExtValue();
11528 Offset = CharUnits::fromQuantity(IsSub ? Offset64 - Index64
11529 : Offset64 + Index64);
11532 bool DataRecursiveIntBinOpEvaluator::
11533 VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
11534 const BinaryOperator *E, APValue &Result) {
// Combine the fully-evaluated LHS and RHS results into the value of the
// binary operator expression E.
11535 if (E->getOpcode() == BO_Comma) {
11536 if (RHSResult.Failed)
11538 Result = RHSResult.Val;
11542 if (E->isLogicalOp()) {
11543 bool lhsResult, rhsResult;
11544 bool LHSIsOK = HandleConversionToBool(LHSResult.Val, lhsResult);
11545 bool RHSIsOK = HandleConversionToBool(RHSResult.Val, rhsResult);
11549 if (E->getOpcode() == BO_LOr)
11550 return Success(lhsResult || rhsResult, E, Result);
11552 return Success(lhsResult && rhsResult, E, Result);
11556 // We can't evaluate the LHS; however, sometimes the result
11557 // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
11558 if (rhsResult == (E->getOpcode() == BO_LOr))
11559 return Success(rhsResult, E, Result);
// Arithmetic operators: both operand types must be integral.
11566 assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
11567 E->getRHS()->getType()->isIntegralOrEnumerationType());
11569 if (LHSResult.Failed || RHSResult.Failed)
11572 const APValue &LHSVal = LHSResult.Val;
11573 const APValue &RHSVal = RHSResult.Val;
11575 // Handle cases like (unsigned long)&a + 4.
11576 if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) {
11578 addOrSubLValueAsInteger(Result, RHSVal.getInt(), E->getOpcode() == BO_Sub);
11582 // Handle cases like 4 + (unsigned long)&a
11583 if (E->getOpcode() == BO_Add &&
11584 RHSVal.isLValue() && LHSVal.isInt()) {
11586 addOrSubLValueAsInteger(Result, LHSVal.getInt(), /*IsSub*/false);
11590 if (E->getOpcode() == BO_Sub && LHSVal.isLValue() && RHSVal.isLValue()) {
11591 // Handle (intptr_t)&&A - (intptr_t)&&B.
11592 if (!LHSVal.getLValueOffset().isZero() ||
11593 !RHSVal.getLValueOffset().isZero())
11595 const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr*>();
11596 const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr*>();
11597 if (!LHSExpr || !RHSExpr)
11599 const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
11600 const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
11601 if (!LHSAddrExpr || !RHSAddrExpr)
11603 // Make sure both labels come from the same function.
11604 if (LHSAddrExpr->getLabel()->getDeclContext() !=
11605 RHSAddrExpr->getLabel()->getDeclContext())
// Represent the label-address difference symbolically in the APValue.
11607 Result = APValue(LHSAddrExpr, RHSAddrExpr);
11611 // All the remaining cases expect both operands to be an integer
11612 if (!LHSVal.isInt() || !RHSVal.isInt())
11615 // Set up the width and signedness manually, in case it can't be deduced
11616 // from the operation we're performing.
11617 // FIXME: Don't do this in the cases where we can deduce it.
11618 APSInt Value(Info.Ctx.getIntWidth(E->getType()),
11619 E->getType()->isUnsignedIntegerOrEnumerationType());
11620 if (!handleIntIntBinOp(Info, E, LHSVal.getInt(), E->getOpcode(),
11621 RHSVal.getInt(), Value))
11623 return Success(Value, E, Result);
11626 void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
// One step of the work-queue state machine: inspect the job on top of
// the queue and either expand it (enqueue an operand) or fold evaluated
// operand results into Result.
11627 Job &job = Queue.back();
11629 switch (job.Kind) {
11630 case Job::AnyExprKind: {
11631 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(job.E)) {
11632 if (shouldEnqueue(Bop)) {
// Expand: revisit this job (as BinOpKind) after its LHS is evaluated.
11633 job.Kind = Job::BinOpKind;
11634 enqueue(Bop->getLHS());
// Not handled data-recursively: evaluate the expression directly.
11639 EvaluateExpr(job.E, Result);
11644 case Job::BinOpKind: {
11645 const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
11646 bool SuppressRHSDiags = false;
11647 if (!VisitBinOpLHSOnly(Result, Bop, SuppressRHSDiags)) {
// RHS is still needed: stash the LHS result in the job, optionally
// suppressing diagnostics from the speculative RHS arm.
11651 if (SuppressRHSDiags)
11652 job.startSpeculativeEval(Info);
11653 job.LHSResult.swap(Result);
11654 job.Kind = Job::BinOpVisitedLHSKind;
11655 enqueue(Bop->getRHS());
11659 case Job::BinOpVisitedLHSKind: {
11660 const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
// Both operands evaluated: combine them into the final value.
11663 Result.Failed = !VisitBinOp(job.LHSResult, RHS, Bop, Result.Val);
11669 llvm_unreachable("Invalid Job::Kind!");
11673 /// Used when we determine that we should fail, but can keep evaluating prior to
11674 /// noting that we had a failure.
11675 class DelayedNoteFailureRAII {
11680 DelayedNoteFailureRAII(EvalInfo &Info, bool NoteFailure = true)
11681 : Info(Info), NoteFailure(NoteFailure) {}
// On scope exit, record the failure with the evaluation state; asserts
// that evaluation was in a mode allowed to continue past the failure.
11682 ~DelayedNoteFailureRAII() {
11684 bool ContinueAfterFailure = Info.noteFailure();
11685 (void)ContinueAfterFailure;
11686 assert(ContinueAfterFailure &&
11687 "Shouldn't have kept evaluating on failure.");
/// Outcome of a comparison between two evaluated operands. The evaluator
/// below consumes values named Less, Greater, Equal, Unequal and Unordered
/// (the latter for unordered floating-point comparisons).
11692 enum class CmpResult {
11701 template <class SuccessCB, class AfterCB>
11703 EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
11704 SuccessCB &&Success, AfterCB &&DoAfter) {
11705 assert(E->isComparisonOp() && "expected comparison operator");
11706 assert((E->getOpcode() == BO_Cmp ||
11707 E->getType()->isIntegralOrEnumerationType()) &&
11708 "unsupported binary expression evaluation");
11709 auto Error = [&](const Expr *E) {
11710 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
11714 bool IsRelational = E->isRelationalOp() || E->getOpcode() == BO_Cmp;
11715 bool IsEquality = E->isEqualityOp();
11717 QualType LHSTy = E->getLHS()->getType();
11718 QualType RHSTy = E->getRHS()->getType();
11720 if (LHSTy->isIntegralOrEnumerationType() &&
11721 RHSTy->isIntegralOrEnumerationType()) {
11723 bool LHSOK = EvaluateInteger(E->getLHS(), LHS, Info);
11724 if (!LHSOK && !Info.noteFailure())
11726 if (!EvaluateInteger(E->getRHS(), RHS, Info) || !LHSOK)
11729 return Success(CmpResult::Less, E);
11731 return Success(CmpResult::Greater, E);
11732 return Success(CmpResult::Equal, E);
11735 if (LHSTy->isFixedPointType() || RHSTy->isFixedPointType()) {
11736 APFixedPoint LHSFX(Info.Ctx.getFixedPointSemantics(LHSTy));
11737 APFixedPoint RHSFX(Info.Ctx.getFixedPointSemantics(RHSTy));
11739 bool LHSOK = EvaluateFixedPointOrInteger(E->getLHS(), LHSFX, Info);
11740 if (!LHSOK && !Info.noteFailure())
11742 if (!EvaluateFixedPointOrInteger(E->getRHS(), RHSFX, Info) || !LHSOK)
11745 return Success(CmpResult::Less, E);
11747 return Success(CmpResult::Greater, E);
11748 return Success(CmpResult::Equal, E);
11751 if (LHSTy->isAnyComplexType() || RHSTy->isAnyComplexType()) {
11752 ComplexValue LHS, RHS;
11754 if (E->isAssignmentOp()) {
11756 EvaluateLValue(E->getLHS(), LV, Info);
11758 } else if (LHSTy->isRealFloatingType()) {
11759 LHSOK = EvaluateFloat(E->getLHS(), LHS.FloatReal, Info);
11761 LHS.makeComplexFloat();
11762 LHS.FloatImag = APFloat(LHS.FloatReal.getSemantics());
11765 LHSOK = EvaluateComplex(E->getLHS(), LHS, Info);
11767 if (!LHSOK && !Info.noteFailure())
11770 if (E->getRHS()->getType()->isRealFloatingType()) {
11771 if (!EvaluateFloat(E->getRHS(), RHS.FloatReal, Info) || !LHSOK)
11773 RHS.makeComplexFloat();
11774 RHS.FloatImag = APFloat(RHS.FloatReal.getSemantics());
11775 } else if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
11778 if (LHS.isComplexFloat()) {
11779 APFloat::cmpResult CR_r =
11780 LHS.getComplexFloatReal().compare(RHS.getComplexFloatReal());
11781 APFloat::cmpResult CR_i =
11782 LHS.getComplexFloatImag().compare(RHS.getComplexFloatImag());
11783 bool IsEqual = CR_r == APFloat::cmpEqual && CR_i == APFloat::cmpEqual;
11784 return Success(IsEqual ? CmpResult::Equal : CmpResult::Unequal, E);
11786 assert(IsEquality && "invalid complex comparison");
11787 bool IsEqual = LHS.getComplexIntReal() == RHS.getComplexIntReal() &&
11788 LHS.getComplexIntImag() == RHS.getComplexIntImag();
11789 return Success(IsEqual ? CmpResult::Equal : CmpResult::Unequal, E);
11793 if (LHSTy->isRealFloatingType() &&
11794 RHSTy->isRealFloatingType()) {
11795 APFloat RHS(0.0), LHS(0.0);
11797 bool LHSOK = EvaluateFloat(E->getRHS(), RHS, Info);
11798 if (!LHSOK && !Info.noteFailure())
11801 if (!EvaluateFloat(E->getLHS(), LHS, Info) || !LHSOK)
11804 assert(E->isComparisonOp() && "Invalid binary operator!");
11805 auto GetCmpRes = [&]() {
11806 switch (LHS.compare(RHS)) {
11807 case APFloat::cmpEqual:
11808 return CmpResult::Equal;
11809 case APFloat::cmpLessThan:
11810 return CmpResult::Less;
11811 case APFloat::cmpGreaterThan:
11812 return CmpResult::Greater;
11813 case APFloat::cmpUnordered:
11814 return CmpResult::Unordered;
11816 llvm_unreachable("Unrecognised APFloat::cmpResult enum");
11818 return Success(GetCmpRes(), E);
11821 if (LHSTy->isPointerType() && RHSTy->isPointerType()) {
11822 LValue LHSValue, RHSValue;
11824 bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info);
11825 if (!LHSOK && !Info.noteFailure())
11828 if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK)
11831 // Reject differing bases from the normal codepath; we special-case
11832 // comparisons to null.
11833 if (!HasSameBase(LHSValue, RHSValue)) {
11834 // Inequalities and subtractions between unrelated pointers have
11835 // unspecified or undefined behavior.
11837 Info.FFDiag(E, diag::note_constexpr_pointer_comparison_unspecified);
11840 // A constant address may compare equal to the address of a symbol.
11841 // The one exception is that address of an object cannot compare equal
11842 // to a null pointer constant.
11843 if ((!LHSValue.Base && !LHSValue.Offset.isZero()) ||
11844 (!RHSValue.Base && !RHSValue.Offset.isZero()))
11846 // It's implementation-defined whether distinct literals will have
11847 // distinct addresses. In clang, the result of such a comparison is
11848 // unspecified, so it is not a constant expression. However, we do know
11849 // that the address of a literal will be non-null.
11850 if ((IsLiteralLValue(LHSValue) || IsLiteralLValue(RHSValue)) &&
11851 LHSValue.Base && RHSValue.Base)
11853 // We can't tell whether weak symbols will end up pointing to the same
11855 if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue))
11857 // We can't compare the address of the start of one object with the
11858 // past-the-end address of another object, per C++ DR1652.
11859 if ((LHSValue.Base && LHSValue.Offset.isZero() &&
11860 isOnePastTheEndOfCompleteObject(Info.Ctx, RHSValue)) ||
11861 (RHSValue.Base && RHSValue.Offset.isZero() &&
11862 isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue)))
11864 // We can't tell whether an object is at the same address as another
11865 // zero sized object.
11866 if ((RHSValue.Base && isZeroSized(LHSValue)) ||
11867 (LHSValue.Base && isZeroSized(RHSValue)))
11869 return Success(CmpResult::Unequal, E);
11872 const CharUnits &LHSOffset = LHSValue.getLValueOffset();
11873 const CharUnits &RHSOffset = RHSValue.getLValueOffset();
11875 SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
11876 SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
11878 // C++11 [expr.rel]p3:
11879 // Pointers to void (after pointer conversions) can be compared, with a
11880 // result defined as follows: If both pointers represent the same
11881 // address or are both the null pointer value, the result is true if the
11882 // operator is <= or >= and false otherwise; otherwise the result is
11884 // We interpret this as applying to pointers to *cv* void.
11885 if (LHSTy->isVoidPointerType() && LHSOffset != RHSOffset && IsRelational)
11886 Info.CCEDiag(E, diag::note_constexpr_void_comparison);
11888 // C++11 [expr.rel]p2:
11889 // - If two pointers point to non-static data members of the same object,
11890 // or to subobjects or array elements fo such members, recursively, the
11891 // pointer to the later declared member compares greater provided the
11892 // two members have the same access control and provided their class is
11895 // - Otherwise pointer comparisons are unspecified.
11896 if (!LHSDesignator.Invalid && !RHSDesignator.Invalid && IsRelational) {
11897 bool WasArrayIndex;
11898 unsigned Mismatch = FindDesignatorMismatch(
11899 getType(LHSValue.Base), LHSDesignator, RHSDesignator, WasArrayIndex);
11900 // At the point where the designators diverge, the comparison has a
11901 // specified value if:
11902 // - we are comparing array indices
11903 // - we are comparing fields of a union, or fields with the same access
11904 // Otherwise, the result is unspecified and thus the comparison is not a
11905 // constant expression.
11906 if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() &&
11907 Mismatch < RHSDesignator.Entries.size()) {
11908 const FieldDecl *LF = getAsField(LHSDesignator.Entries[Mismatch]);
11909 const FieldDecl *RF = getAsField(RHSDesignator.Entries[Mismatch]);
11911 Info.CCEDiag(E, diag::note_constexpr_pointer_comparison_base_classes);
11913 Info.CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
11914 << getAsBaseClass(LHSDesignator.Entries[Mismatch])
11915 << RF->getParent() << RF;
11917 Info.CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
11918 << getAsBaseClass(RHSDesignator.Entries[Mismatch])
11919 << LF->getParent() << LF;
11920 else if (!LF->getParent()->isUnion() &&
11921 LF->getAccess() != RF->getAccess())
11923 diag::note_constexpr_pointer_comparison_differing_access)
11924 << LF << LF->getAccess() << RF << RF->getAccess()
11925 << LF->getParent();
11929 // The comparison here must be unsigned, and performed with the same
11930 // width as the pointer.
11931 unsigned PtrSize = Info.Ctx.getTypeSize(LHSTy);
11932 uint64_t CompareLHS = LHSOffset.getQuantity();
11933 uint64_t CompareRHS = RHSOffset.getQuantity();
11934 assert(PtrSize <= 64 && "Unexpected pointer width");
11935 uint64_t Mask = ~0ULL >> (64 - PtrSize);
11936 CompareLHS &= Mask;
11937 CompareRHS &= Mask;
11939 // If there is a base and this is a relational operator, we can only
11940 // compare pointers within the object in question; otherwise, the result
11941 // depends on where the object is located in memory.
11942 if (!LHSValue.Base.isNull() && IsRelational) {
11943 QualType BaseTy = getType(LHSValue.Base);
11944 if (BaseTy->isIncompleteType())
11946 CharUnits Size = Info.Ctx.getTypeSizeInChars(BaseTy);
11947 uint64_t OffsetLimit = Size.getQuantity();
11948 if (CompareLHS > OffsetLimit || CompareRHS > OffsetLimit)
11952 if (CompareLHS < CompareRHS)
11953 return Success(CmpResult::Less, E);
11954 if (CompareLHS > CompareRHS)
11955 return Success(CmpResult::Greater, E);
11956 return Success(CmpResult::Equal, E);
11959 if (LHSTy->isMemberPointerType()) {
11960 assert(IsEquality && "unexpected member pointer operation");
11961 assert(RHSTy->isMemberPointerType() && "invalid comparison");
11963 MemberPtr LHSValue, RHSValue;
11965 bool LHSOK = EvaluateMemberPointer(E->getLHS(), LHSValue, Info);
11966 if (!LHSOK && !Info.noteFailure())
11969 if (!EvaluateMemberPointer(E->getRHS(), RHSValue, Info) || !LHSOK)
11972 // C++11 [expr.eq]p2:
11973 // If both operands are null, they compare equal. Otherwise if only one is
11974 // null, they compare unequal.
11975 if (!LHSValue.getDecl() || !RHSValue.getDecl()) {
11976 bool Equal = !LHSValue.getDecl() && !RHSValue.getDecl();
11977 return Success(Equal ? CmpResult::Equal : CmpResult::Unequal, E);
11980 // Otherwise if either is a pointer to a virtual member function, the
11981 // result is unspecified.
11982 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(LHSValue.getDecl()))
11983 if (MD->isVirtual())
11984 Info.CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
11985 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(RHSValue.getDecl()))
11986 if (MD->isVirtual())
11987 Info.CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
11989 // Otherwise they compare equal if and only if they would refer to the
11990 // same member of the same most derived object or the same subobject if
11991 // they were dereferenced with a hypothetical object of the associated
11993 bool Equal = LHSValue == RHSValue;
11994 return Success(Equal ? CmpResult::Equal : CmpResult::Unequal, E);
11997 if (LHSTy->isNullPtrType()) {
11998 assert(E->isComparisonOp() && "unexpected nullptr operation");
11999 assert(RHSTy->isNullPtrType() && "missing pointer conversion");
12000 // C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t
12001 // are compared, the result is true of the operator is <=, >= or ==, and
12002 // false otherwise.
12003 return Success(CmpResult::Equal, E);
// Evaluate a builtin three-way comparison (operator<=>) whose result is of a
// comparison-category record type (std::strong_ordering etc.). The shared
// EvaluateComparisonBinaryOperator helper computes a CmpResult; OnSuccess then
// maps it to a ComparisonCategoryResult and materializes the corresponding
// comparison-category constant as the record value.
12009 bool RecordExprEvaluator::VisitBinCmp(const BinaryOperator *E) {
12010 if (!CheckLiteralType(Info, E))
// On-success continuation: translate the raw CmpResult into the comparison
// category's result kind.
12013 auto OnSuccess = [&](CmpResult CR, const BinaryOperator *E) {
12014 ComparisonCategoryResult CCR;
// Unequal is only produced for equality operators, never for <=>.
12016 case CmpResult::Unequal:
12017 llvm_unreachable("should never produce Unequal for three-way comparison");
12018 case CmpResult::Less:
12019 CCR = ComparisonCategoryResult::Less;
12021 case CmpResult::Equal:
12022 CCR = ComparisonCategoryResult::Equal;
12024 case CmpResult::Greater:
12025 CCR = ComparisonCategoryResult::Greater;
12027 case CmpResult::Unordered:
12028 CCR = ComparisonCategoryResult::Unordered;
12031 // Evaluation succeeded. Lookup the information for the comparison category
12032 // type and fetch the VarDecl for the result.
12033 const ComparisonCategoryInfo &CmpInfo =
12034 Info.Ctx.CompCategories.getInfoForType(E->getType());
// makeWeakResult maps e.g. Equal to Equivalent for weaker categories.
12035 const VarDecl *VD = CmpInfo.getValueInfo(CmpInfo.makeWeakResult(CCR))->VD;
12036 // Check and evaluate the result as a constant expression.
// Load the value of the comparison-category constant VD as the result.
12039 if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result))
12041 return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result);
// The second lambda is the fallback path when the operands cannot be
// evaluated as a comparison here (e.g. for the base-class handling).
12043 return EvaluateComparisonBinaryOperator(Info, E, OnSuccess, [&]() {
12044 return ExprEvaluatorBaseTy::VisitBinCmp(E);
// Evaluate a binary operator producing an integral value. Integral-only
// operand pairs are dispatched to the data-recursive evaluator (to avoid deep
// native recursion); comparisons are evaluated as three-way comparisons and
// translated; pointer subtraction is handled inline below.
12048 bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
12049 // We don't call noteFailure immediately because the assignment happens after
12050 // we evaluate LHS and RHS.
12051 if (!Info.keepEvaluatingAfterFailure() && E->isAssignmentOp())
12054 DelayedNoteFailureRAII MaybeNoteFailureLater(Info, E->isAssignmentOp());
12055 if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E))
12056 return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E);
12058 assert((!E->getLHS()->getType()->isIntegralOrEnumerationType() ||
12059 !E->getRHS()->getType()->isIntegralOrEnumerationType()) &&
12060 "DataRecursiveIntBinOpEvaluator should have handled integral types");
12062 if (E->isComparisonOp()) {
12063 // Evaluate builtin binary comparisons by evaluating them as three-way
12064 // comparisons and then translating the result.
12065 auto OnSuccess = [&](CmpResult CR, const BinaryOperator *E) {
12066 assert((CR != CmpResult::Unequal || E->isEqualityOp()) &&
12067 "should only produce Unequal for equality comparisons");
12068 bool IsEqual = CR == CmpResult::Equal,
12069 IsLess = CR == CmpResult::Less,
12070 IsGreater = CR == CmpResult::Greater;
12071 auto Op = E->getOpcode();
12074 llvm_unreachable("unsupported binary operator");
// == yields IsEqual; != yields its negation (IsEqual == (Op == BO_EQ)).
12077 return Success(IsEqual == (Op == BO_EQ), E);
12079 return Success(IsLess, E);
12081 return Success(IsGreater, E);
12083 return Success(IsEqual || IsLess, E);
12085 return Success(IsEqual || IsGreater, E);
12088 return EvaluateComparisonBinaryOperator(Info, E, OnSuccess, [&]() {
12089 return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
12093 QualType LHSTy = E->getLHS()->getType();
12094 QualType RHSTy = E->getRHS()->getType();
// Pointer difference (ptr - ptr) yields an integer; evaluate both pointers.
12096 if (LHSTy->isPointerType() && RHSTy->isPointerType() &&
12097 E->getOpcode() == BO_Sub) {
12098 LValue LHSValue, RHSValue;
12100 bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info);
// Keep evaluating the RHS for diagnostics even if the LHS failed.
12101 if (!LHSOK && !Info.noteFailure())
12104 if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK)
12107 // Reject differing bases from the normal codepath; we special-case
12108 // comparisons to null.
12109 if (!HasSameBase(LHSValue, RHSValue)) {
12110 // Handle &&A - &&B.
// GNU address-of-label difference: both operands must be AddrLabelExprs
// with zero offsets; the result is a symbolic label-difference APValue.
12111 if (!LHSValue.Offset.isZero() || !RHSValue.Offset.isZero())
12113 const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr *>();
12114 const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr *>();
12115 if (!LHSExpr || !RHSExpr)
12117 const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
12118 const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
12119 if (!LHSAddrExpr || !RHSAddrExpr)
12121 // Make sure both labels come from the same function.
12122 if (LHSAddrExpr->getLabel()->getDeclContext() !=
12123 RHSAddrExpr->getLabel()->getDeclContext())
12125 return Success(APValue(LHSAddrExpr, RHSAddrExpr), E);
12127 const CharUnits &LHSOffset = LHSValue.getLValueOffset();
12128 const CharUnits &RHSOffset = RHSValue.getLValueOffset();
12130 SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
12131 SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
12133 // C++11 [expr.add]p6:
12134 // Unless both pointers point to elements of the same array object, or
12135 // one past the last element of the array object, the behavior is
// ... undefined; diagnose as not a core constant expression.
12137 if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
12138 !AreElementsOfSameArray(getType(LHSValue.Base), LHSDesignator,
12140 Info.CCEDiag(E, diag::note_constexpr_pointer_subtraction_not_same_array);
12142 QualType Type = E->getLHS()->getType();
12143 QualType ElementType = Type->castAs<PointerType>()->getPointeeType();
12145 CharUnits ElementSize;
12146 if (!HandleSizeof(Info, E->getExprLoc(), ElementType, ElementSize))
12149 // As an extension, a type may have zero size (empty struct or union in
12150 // C, array of zero length). Pointer subtraction in such cases has
12151 // undefined behavior, so is not constant.
12152 if (ElementSize.isZero()) {
12153 Info.FFDiag(E, diag::note_constexpr_pointer_subtraction_zero_size)
12158 // FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime,
12159 // and produce incorrect results when it overflows. Such behavior
12160 // appears to be non-conforming, but is common, so perhaps we should
12161 // assume the standard intended for such cases to be undefined behavior
12162 // and check for them.
12164 // Compute (LHSOffset - RHSOffset) / Size carefully, checking for
12165 // overflow in the final conversion to ptrdiff_t.
// 65 bits is wide enough to hold any difference of two signed 64-bit
// offsets without overflow.
12166 APSInt LHS(llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false);
12167 APSInt RHS(llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false);
12168 APSInt ElemSize(llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true),
12170 APSInt TrueResult = (LHS - RHS) / ElemSize;
12171 APSInt Result = TrueResult.trunc(Info.Ctx.getIntWidth(E->getType()));
// If truncation to the result type changed the value, the subtraction
// overflowed ptrdiff_t; diagnose unless overflow is being tolerated.
12173 if (Result.extend(65) != TrueResult &&
12174 !HandleOverflow(Info, E, TrueResult, E->getType()))
12176 return Success(Result, E);
// Anything else falls back to the generic binary-operator handling.
12179 return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
12182 /// VisitUnaryExprOrTypeTraitExpr - Evaluate a sizeof, alignof or vec_step with
12183 /// a result as the expression's type.
12184 bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
12185 const UnaryExprOrTypeTraitExpr *E) {
12186 switch(E->getKind()) {
// alignof/__alignof: the trait may be applied to a type or an expression.
12187 case UETT_PreferredAlignOf:
12188 case UETT_AlignOf: {
12189 if (E->isArgumentType())
12190 return Success(GetAlignOfType(Info, E->getArgumentType(), E->getKind()),
12193 return Success(GetAlignOfExpr(Info, E->getArgumentExpr(), E->getKind()),
// OpenCL vec_step: number of elements in a vector type (non-vectors are 1).
12197 case UETT_VecStep: {
12198 QualType Ty = E->getTypeOfArgument();
12200 if (Ty->isVectorType()) {
12201 unsigned n = Ty->castAs<VectorType>()->getNumElements();
12203 // The vec_step built-in functions that take a 3-component
12204 // vector return 4. (OpenCL 1.1 spec 6.11.12)
12208 return Success(n, E);
12210 return Success(1, E);
12213 case UETT_SizeOf: {
12214 QualType SrcTy = E->getTypeOfArgument();
12215 // C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
12216 // the result is the size of the referenced type."
12217 if (const ReferenceType *Ref = SrcTy->getAs<ReferenceType>())
12218 SrcTy = Ref->getPointeeType();
// HandleSizeof fails (and diagnoses) for incomplete/function types etc.
12221 if (!HandleSizeof(Info, E->getExprLoc(), SrcTy, Sizeof))
12223 return Success(Sizeof, E);
// __builtin_omp_required_simd_align: target-specified SIMD alignment of the
// argument type, converted from bits to char units.
12225 case UETT_OpenMPRequiredSimdAlign:
12226 assert(E->isArgumentType());
12228 Info.Ctx.toCharUnitsFromBits(
12229 Info.Ctx.getOpenMPDefaultSimdAlign(E->getArgumentType()))
12234 llvm_unreachable("unknown expr/type trait");
// Evaluate __builtin_offsetof: walk the component list (array subscripts,
// field designators, base-class hops), accumulating the byte offset in
// `Result` while `CurrentType` tracks the type being indexed into.
12237 bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
12239 unsigned n = OOE->getNumComponents();
12242 QualType CurrentType = OOE->getTypeSourceInfo()->getType();
12243 for (unsigned i = 0; i != n; ++i) {
12244 OffsetOfNode ON = OOE->getComponent(i);
12245 switch (ON.getKind()) {
// Array subscript: offset += index * sizeof(element).
12246 case OffsetOfNode::Array: {
12247 const Expr *Idx = OOE->getIndexExpr(ON.getArrayExprIndex());
12249 if (!EvaluateInteger(Idx, IdxResult, Info))
12251 const ArrayType *AT = Info.Ctx.getAsArrayType(CurrentType);
12254 CurrentType = AT->getElementType();
12255 CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(CurrentType);
12256 Result += IdxResult.getSExtValue() * ElementSize;
// Field designator: add the field's offset from the record layout.
12260 case OffsetOfNode::Field: {
12261 FieldDecl *MemberDecl = ON.getField();
12262 const RecordType *RT = CurrentType->getAs<RecordType>();
12265 RecordDecl *RD = RT->getDecl();
// Bail out on invalid records; their layout is not meaningful.
12266 if (RD->isInvalidDecl()) return false;
12267 const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
12268 unsigned i = MemberDecl->getFieldIndex();
12269 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
12270 Result += Info.Ctx.toCharUnitsFromBits(RL.getFieldOffset(i));
12271 CurrentType = MemberDecl->getType().getNonReferenceType();
// Identifier components only occur in dependent code, which should have
// been rejected before constant evaluation.
12275 case OffsetOfNode::Identifier:
12276 llvm_unreachable("dependent __builtin_offsetof");
// Base-class hop: add the (non-virtual) base class offset.
12278 case OffsetOfNode::Base: {
12279 CXXBaseSpecifier *BaseSpec = ON.getBase();
// Virtual bases have no compile-time-constant offset here.
12280 if (BaseSpec->isVirtual())
12283 // Find the layout of the class whose base we are looking into.
12284 const RecordType *RT = CurrentType->getAs<RecordType>();
12287 RecordDecl *RD = RT->getDecl();
12288 if (RD->isInvalidDecl()) return false;
12289 const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
12291 // Find the base class itself.
12292 CurrentType = BaseSpec->getType();
12293 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
12297 // Add the offset to the base.
12298 Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
12303 return Success(Result, OOE);
// Evaluate a unary operator with integral result: +, -, ~, !, and the GNU
// __extension__ marker. Address/deref and inc/dec are not constant here.
12306 bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
12307 switch (E->getOpcode()) {
12309 // Address, indirect, pre/post inc/dec, etc are not valid constant exprs.
12313 // FIXME: Should extension allow i-c-e extension expressions in its scope?
12314 // If so, we could clear the diagnostic ID.
12315 return Visit(E->getSubExpr());
12317 // The result is just the value.
12318 return Visit(E->getSubExpr());
// Unary minus: negating the minimum signed value overflows; diagnose it
// using a one-bit-wider value to represent the true result.
12320 if (!Visit(E->getSubExpr()))
12322 if (!Result.isInt()) return Error(E);
12323 const APSInt &Value = Result.getInt();
12324 if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow() &&
12325 !HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
12328 return Success(-Value, E);
// Bitwise complement.
12331 if (!Visit(E->getSubExpr()))
12333 if (!Result.isInt()) return Error(E);
12334 return Success(~Result.getInt(), E);
// Logical not: evaluate operand as a boolean condition and invert.
12338 if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info))
12340 return Success(!bres, E);
12345 /// HandleCast - This is used to evaluate implicit or explicit casts where the
12346 /// result type is integer.
// Evaluate an implicit or explicit cast whose result type is integral.
// Cast kinds that cannot yield an integer are unreachable; boolean-producing
// casts, fixed-point/float/pointer-to-integer conversions, and plain integral
// casts are each handled below.
12347 bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
12348 const Expr *SubExpr = E->getSubExpr();
12349 QualType DestType = E->getType();
12350 QualType SrcType = SubExpr->getType();
12352 switch (E->getCastKind()) {
// None of these cast kinds can produce an integral rvalue.
12353 case CK_BaseToDerived:
12354 case CK_DerivedToBase:
12355 case CK_UncheckedDerivedToBase:
12358 case CK_ArrayToPointerDecay:
12359 case CK_FunctionToPointerDecay:
12360 case CK_NullToPointer:
12361 case CK_NullToMemberPointer:
12362 case CK_BaseToDerivedMemberPointer:
12363 case CK_DerivedToBaseMemberPointer:
12364 case CK_ReinterpretMemberPointer:
12365 case CK_ConstructorConversion:
12366 case CK_IntegralToPointer:
12368 case CK_VectorSplat:
12369 case CK_IntegralToFloating:
12370 case CK_FloatingCast:
12371 case CK_CPointerToObjCPointerCast:
12372 case CK_BlockPointerToObjCPointerCast:
12373 case CK_AnyPointerToBlockPointerCast:
12374 case CK_ObjCObjectLValueCast:
12375 case CK_FloatingRealToComplex:
12376 case CK_FloatingComplexToReal:
12377 case CK_FloatingComplexCast:
12378 case CK_FloatingComplexToIntegralComplex:
12379 case CK_IntegralRealToComplex:
12380 case CK_IntegralComplexCast:
12381 case CK_IntegralComplexToFloatingComplex:
12382 case CK_BuiltinFnToFnPtr:
12383 case CK_ZeroToOCLOpaqueType:
12384 case CK_NonAtomicToAtomic:
12385 case CK_AddressSpaceConversion:
12386 case CK_IntToOCLSampler:
12387 case CK_FixedPointCast:
12388 case CK_IntegralToFixedPoint:
12389 llvm_unreachable("invalid cast kind for integral value");
// These are not constant-evaluable as integers; fall through to the error
// path (elided lines — presumably Error(E)) or the generic handler.
12393 case CK_LValueBitCast:
12394 case CK_ARCProduceObject:
12395 case CK_ARCConsumeObject:
12396 case CK_ARCReclaimReturnedObject:
12397 case CK_ARCExtendBlockObject:
12398 case CK_CopyAndAutoreleaseBlockObject:
12401 case CK_UserDefinedConversion:
12402 case CK_LValueToRValue:
12403 case CK_AtomicToNonAtomic:
12405 case CK_LValueToRValueBitCast:
12406 return ExprEvaluatorBaseTy::VisitCastExpr(E);
// Casts to bool: evaluate the operand as a boolean condition, then widen.
12408 case CK_MemberPointerToBoolean:
12409 case CK_PointerToBoolean:
12410 case CK_IntegralToBoolean:
12411 case CK_FloatingToBoolean:
12412 case CK_BooleanToSignedIntegral:
12413 case CK_FloatingComplexToBoolean:
12414 case CK_IntegralComplexToBoolean: {
12416 if (!EvaluateAsBooleanCondition(SubExpr, BoolResult, Info))
12418 uint64_t IntResult = BoolResult;
// BooleanToSignedIntegral maps true to -1 (all bits set), not 1.
12419 if (BoolResult && E->getCastKind() == CK_BooleanToSignedIntegral)
12420 IntResult = (uint64_t)-1;
12421 return Success(IntResult, E);
// Fixed-point to integer: convert and diagnose overflow.
12424 case CK_FixedPointToIntegral: {
12425 APFixedPoint Src(Info.Ctx.getFixedPointSemantics(SrcType));
12426 if (!EvaluateFixedPoint(SubExpr, Src, Info))
12429 llvm::APSInt Result = Src.convertToInt(
12430 Info.Ctx.getIntWidth(DestType),
12431 DestType->isSignedIntegerOrEnumerationType(), &Overflowed);
12432 if (Overflowed && !HandleOverflow(Info, E, Result, DestType))
12434 return Success(Result, E);
12437 case CK_FixedPointToBoolean: {
12438 // Unsigned padding does not affect this.
12440 if (!Evaluate(Val, Info, SubExpr))
12442 return Success(Val.getFixedPoint().getBoolValue(), E);
12445 case CK_IntegralCast: {
12446 if (!Visit(SubExpr))
12449 if (!Result.isInt()) {
12450 // Allow casts of address-of-label differences if they are no-ops
12451 // or narrowing. (The narrowing case isn't actually guaranteed to
12452 // be constant-evaluatable except in some narrow cases which are hard
12453 // to detect here. We let it through on the assumption the user knows
12454 // what they are doing.)
12455 if (Result.isAddrLabelDiff())
12456 return Info.Ctx.getTypeSize(DestType) <= Info.Ctx.getTypeSize(SrcType);
12457 // Only allow casts of lvalues if they are lossless.
12458 return Info.Ctx.getTypeSize(DestType) == Info.Ctx.getTypeSize(SrcType);
12461 return Success(HandleIntToIntCast(Info, E, DestType, SrcType,
12462 Result.getInt()), E);
// Pointer-to-integer: not a core constant expression (CCEDiag), but can be
// folded when the pointer's value is known.
12465 case CK_PointerToIntegral: {
12466 CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
12469 if (!EvaluatePointer(SubExpr, LV, Info))
12472 if (LV.getLValueBase()) {
12473 // Only allow based lvalue casts if they are lossless.
12474 // FIXME: Allow a larger integer size than the pointer size, and allow
12475 // narrowing back down to pointer width in subsequent integral casts.
12476 // FIXME: Check integer type's active bits, not its type size.
12477 if (Info.Ctx.getTypeSize(DestType) != Info.Ctx.getTypeSize(SrcType))
// Once a pointer has been cast to an integer, its designator (subobject
// path) is no longer meaningful.
12480 LV.Designator.setInvalid();
12481 LV.moveInto(Result);
// Null-pointer case: extract the integral representation of the pointer.
12488 if (!V.toIntegralConstant(AsInt, SrcType, Info.Ctx))
12489 llvm_unreachable("Can't cast this!");
12491 return Success(HandleIntToIntCast(Info, E, DestType, SrcType, AsInt), E);
12494 case CK_IntegralComplexToReal: {
12496 if (!EvaluateComplex(SubExpr, C, Info))
12498 return Success(C.getComplexIntReal(), E);
12501 case CK_FloatingToIntegral: {
12503 if (!EvaluateFloat(SubExpr, F, Info))
12507 if (!HandleFloatToIntCast(Info, E, SrcType, F, DestType, Value))
12509 return Success(Value, E);
12513 llvm_unreachable("unknown cast resulting in integral value");
// __real__ on an integer: for complex operands, evaluate the complex value
// and take its real component; for real operands, __real__ is the identity.
12516 bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
12517 if (E->getSubExpr()->getType()->isAnyComplexType()) {
12519 if (!EvaluateComplex(E->getSubExpr(), LV, Info))
12521 if (!LV.isComplexInt())
12523 return Success(LV.getComplexIntReal(), E);
12526 return Visit(E->getSubExpr());
// __imag__ on an integer: for complex-integer operands, take the imaginary
// component; for real operands the imaginary part is 0, but the operand must
// still be evaluated for (unevaluated) side-effect tracking.
12529 bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
12530 if (E->getSubExpr()->getType()->isComplexIntegerType()) {
12532 if (!EvaluateComplex(E->getSubExpr(), LV, Info))
12534 if (!LV.isComplexInt())
12536 return Success(LV.getComplexIntImag(), E);
12539 VisitIgnoredValue(E->getSubExpr());
12540 return Success(0, E);
// sizeof...(pack): the pack length is already known on the AST node.
12543 bool IntExprEvaluator::VisitSizeOfPackExpr(const SizeOfPackExpr *E) {
12544 return Success(E->getPackLength(), E);
// noexcept(expr): the boolean answer was computed by Sema; just return it.
12547 bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
12548 return Success(E->getValue(), E);
// A concept-id evaluates to whether its constraints are satisfied (cached on
// the AST node).
12551 bool IntExprEvaluator::VisitConceptSpecializationExpr(
12552 const ConceptSpecializationExpr *E) {
12553 return Success(E->isSatisfied(), E);
// A requires-expression evaluates to its precomputed satisfaction result.
12556 bool IntExprEvaluator::VisitRequiresExpr(const RequiresExpr *E) {
12557 return Success(E->isSatisfied(), E);
// Evaluate unary operators on fixed-point operands: unary plus is the
// identity, negation checks for overflow (e.g. negating the most negative
// value), and logical not evaluates the operand as a boolean.
12560 bool FixedPointExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
12561 switch (E->getOpcode()) {
12563 // Invalid unary operators
12566 // The result is just the value.
12567 return Visit(E->getSubExpr());
12569 if (!Visit(E->getSubExpr())) return false;
12570 if (!Result.isFixedPoint())
// negate() reports saturation/overflow through the out-parameter.
12573 APFixedPoint Negated = Result.getFixedPoint().negate(&Overflowed);
12574 if (Overflowed && !HandleOverflow(Info, E, Negated, E->getType()))
12576 return Success(Negated, E);
12580 if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info))
12582 return Success(!bres, E);
// Evaluate casts whose destination is a fixed-point type: fixed-point to
// fixed-point conversions and integer to fixed-point conversions, each with
// overflow diagnosis; lvalue loads defer to the generic handler.
12587 bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
12588 const Expr *SubExpr = E->getSubExpr();
12589 QualType DestType = E->getType();
12590 assert(DestType->isFixedPointType() &&
12591 "Expected destination type to be a fixed point type");
12592 auto DestFXSema = Info.Ctx.getFixedPointSemantics(DestType);
12594 switch (E->getCastKind()) {
12595 case CK_FixedPointCast: {
12596 APFixedPoint Src(Info.Ctx.getFixedPointSemantics(SubExpr->getType()));
12597 if (!EvaluateFixedPoint(SubExpr, Src, Info))
// convert() reports value change (overflow/saturation) via Overflowed.
12600 APFixedPoint Result = Src.convert(DestFXSema, &Overflowed);
12601 if (Overflowed && !HandleOverflow(Info, E, Result, DestType))
12603 return Success(Result, E);
12605 case CK_IntegralToFixedPoint: {
12607 if (!EvaluateInteger(SubExpr, Src, Info))
12611 APFixedPoint IntResult = APFixedPoint::getFromIntValue(
12612 Src, Info.Ctx.getFixedPointSemantics(DestType), &Overflowed);
12614 if (Overflowed && !HandleOverflow(Info, E, IntResult, DestType))
12617 return Success(IntResult, E);
12620 case CK_LValueToRValue:
12621 return ExprEvaluatorBaseTy::VisitCastExpr(E);
// Evaluate binary operators on fixed-point operands. Operands may be mixed
// fixed-point/integer (EvaluateFixedPointOrInteger); the addition path checks
// both the arithmetic and the conversion to the result semantics for
// overflow. (Only BO_Add is visible in this listing.)
12627 bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
12628 const Expr *LHS = E->getLHS();
12629 const Expr *RHS = E->getRHS();
12630 FixedPointSemantics ResultFXSema =
12631 Info.Ctx.getFixedPointSemantics(E->getType());
12633 APFixedPoint LHSFX(Info.Ctx.getFixedPointSemantics(LHS->getType()));
12634 if (!EvaluateFixedPointOrInteger(LHS, LHSFX, Info))
12636 APFixedPoint RHSFX(Info.Ctx.getFixedPointSemantics(RHS->getType()));
12637 if (!EvaluateFixedPointOrInteger(RHS, RHSFX, Info))
12640 switch (E->getOpcode()) {
// Add in the common semantics, then convert to the result type; either step
// can overflow independently.
12642 bool AddOverflow, ConversionOverflow;
12643 APFixedPoint Result = LHSFX.add(RHSFX, &AddOverflow)
12644 .convert(ResultFXSema, &ConversionOverflow);
12645 if ((AddOverflow || ConversionOverflow) &&
12646 !HandleOverflow(Info, E, Result, E->getType()))
12648 return Success(Result, E);
12653 llvm_unreachable("Should've exited before this");
12656 //===----------------------------------------------------------------------===//
12657 // Float Evaluation
12658 //===----------------------------------------------------------------------===//
// Evaluator for expressions of real floating-point type; the computed value
// is written into the caller-provided APFloat `Result`.
12661 class FloatExprEvaluator
12662 : public ExprEvaluatorBase<FloatExprEvaluator> {
12665 FloatExprEvaluator(EvalInfo &info, APFloat &result)
12666 : ExprEvaluatorBaseTy(info), Result(result) {}
// Unpack an already-computed APValue into the float result.
12668 bool Success(const APValue &V, const Expr *e) {
12669 Result = V.getFloat();
// Zero-initialization produces positive zero in the expression's semantics.
12673 bool ZeroInitialization(const Expr *E) {
12674 Result = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(E->getType()));
12678 bool VisitCallExpr(const CallExpr *E);
12680 bool VisitUnaryOperator(const UnaryOperator *E);
12681 bool VisitBinaryOperator(const BinaryOperator *E);
12682 bool VisitFloatingLiteral(const FloatingLiteral *E);
12683 bool VisitCastExpr(const CastExpr *E);
12685 bool VisitUnaryReal(const UnaryOperator *E);
12686 bool VisitUnaryImag(const UnaryOperator *E);
12688 // FIXME: Missing: array subscript of vector, member of vector
12690 } // end anonymous namespace
// Entry point: evaluate a real floating-point rvalue expression into Result.
12692 static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
12693 assert(E->isRValue() && E->getType()->isRealFloatingType());
12694 return FloatExprEvaluator(Info, Result).Visit(E);
// Build the NaN value for __builtin_nan/__builtin_nans from the string-literal
// argument (interpreted as the significand "payload"). Returns false if the
// argument is not a string literal or fails to parse as an integer.
12697 static bool TryEvaluateBuiltinNaN(const ASTContext &Context,
12701 llvm::APFloat &Result) {
12702 const StringLiteral *S = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
12703 if (!S) return false;
12705 const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(ResultTy);
12709 // Treat empty strings as if they were zero.
12710 if (S->getString().empty())
12711 fill = llvm::APInt(32, 0);
12712 else if (S->getString().getAsInteger(0, fill))
// IEEE 754-2008 targets use the standard quiet/signaling NaN encodings.
12715 if (Context.getTargetInfo().isNan2008()) {
12717 Result = llvm::APFloat::getSNaN(Sem, false, &fill);
12719 Result = llvm::APFloat::getQNaN(Sem, false, &fill);
12721 // Prior to IEEE 754-2008, architectures were allowed to choose whether
12722 // the first bit of their significand was set for qNaN or sNaN. MIPS chose
12723 // a different encoding to what became a standard in 2008, and for pre-
12724 // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
12725 // sNaN. This is now known as "legacy NaN" encoding.
// Legacy encoding: swap quiet/signaling relative to the 2008 encoding.
12727 Result = llvm::APFloat::getQNaN(Sem, false, &fill);
12729 Result = llvm::APFloat::getSNaN(Sem, false, &fill);
// Evaluate float-returning builtin calls: huge_val/inf families, nan/nans,
// fabs and copysign. Non-builtin calls fall back to the generic handler.
12735 bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
12736 switch (E->getBuiltinCallee()) {
12738 return ExprEvaluatorBaseTy::VisitCallExpr(E);
// __builtin_huge_val* and __builtin_inf* both produce +infinity in the
// semantics of the call's result type.
12740 case Builtin::BI__builtin_huge_val:
12741 case Builtin::BI__builtin_huge_valf:
12742 case Builtin::BI__builtin_huge_vall:
12743 case Builtin::BI__builtin_huge_valf128:
12744 case Builtin::BI__builtin_inf:
12745 case Builtin::BI__builtin_inff:
12746 case Builtin::BI__builtin_infl:
12747 case Builtin::BI__builtin_inff128: {
12748 const llvm::fltSemantics &Sem =
12749 Info.Ctx.getFloatTypeSemantics(E->getType());
12750 Result = llvm::APFloat::getInf(Sem);
// Signaling NaN with payload taken from the string argument.
12754 case Builtin::BI__builtin_nans:
12755 case Builtin::BI__builtin_nansf:
12756 case Builtin::BI__builtin_nansl:
12757 case Builtin::BI__builtin_nansf128:
12758 if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
12763 case Builtin::BI__builtin_nan:
12764 case Builtin::BI__builtin_nanf:
12765 case Builtin::BI__builtin_nanl:
12766 case Builtin::BI__builtin_nanf128:
12767 // If this is __builtin_nan() turn this into a nan, otherwise we
12768 // can't constant fold it.
12769 if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
// fabs: clear the sign bit if negative (sign-only operation, exact).
12774 case Builtin::BI__builtin_fabs:
12775 case Builtin::BI__builtin_fabsf:
12776 case Builtin::BI__builtin_fabsl:
12777 case Builtin::BI__builtin_fabsf128:
12778 if (!EvaluateFloat(E->getArg(0), Result, Info))
12781 if (Result.isNegative())
12782 Result.changeSign();
12785 // FIXME: Builtin::BI__builtin_powi
12786 // FIXME: Builtin::BI__builtin_powif
12787 // FIXME: Builtin::BI__builtin_powil
// copysign: take the magnitude of arg0 and the sign of arg1.
12789 case Builtin::BI__builtin_copysign:
12790 case Builtin::BI__builtin_copysignf:
12791 case Builtin::BI__builtin_copysignl:
12792 case Builtin::BI__builtin_copysignf128: {
12794 if (!EvaluateFloat(E->getArg(0), Result, Info) ||
12795 !EvaluateFloat(E->getArg(1), RHS, Info))
12797 Result.copySign(RHS);
// __real__ on a float: take the real component of a complex operand, or the
// operand itself if it is already a real type.
12803 bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
12804 if (E->getSubExpr()->getType()->isAnyComplexType()) {
12806 if (!EvaluateComplex(E->getSubExpr(), CV, Info))
12808 Result = CV.FloatReal;
12812 return Visit(E->getSubExpr());
// __imag__ on a float: the imaginary component of a complex operand, or zero
// for a real operand (which is still evaluated for side-effect tracking).
12815 bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
12816 if (E->getSubExpr()->getType()->isAnyComplexType()) {
12818 if (!EvaluateComplex(E->getSubExpr(), CV, Info))
12820 Result = CV.FloatImag;
12824 VisitIgnoredValue(E->getSubExpr());
12825 const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(E->getType());
12826 Result = llvm::APFloat::getZero(Sem);
// Unary operators on floats: plus is the identity; minus flips the sign bit
// (exact, never overflows for IEEE floats). Anything else is an error.
12830 bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
12831 switch (E->getOpcode()) {
12832 default: return Error(E);
12834 return EvaluateFloat(E->getSubExpr(), Result, Info);
12836 if (!EvaluateFloat(E->getSubExpr(), Result, Info))
12838 Result.changeSign();
// Binary arithmetic on floats: evaluate LHS into Result (continuing after a
// failure only to collect diagnostics), then RHS, then apply the operation.
// Pointer-to-member, assignment and comma defer to the generic handler.
12843 bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
12844 if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
12845 return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
12848 bool LHSOK = EvaluateFloat(E->getLHS(), Result, Info);
12849 if (!LHSOK && !Info.noteFailure())
12851 return EvaluateFloat(E->getRHS(), RHS, Info) && LHSOK &&
12852 handleFloatFloatBinOp(Info, E, Result, E->getOpcode(), RHS);
// A floating literal evaluates to its stored APFloat value.
12855 bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) {
12856 Result = E->getValue();
// Casts producing a real floating value: int-to-float, float-to-float, and
// extracting the real part of a floating complex value; everything else
// defers to the generic cast handling.
12860 bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
12861 const Expr* SubExpr = E->getSubExpr();
12863 switch (E->getCastKind()) {
12865 return ExprEvaluatorBaseTy::VisitCastExpr(E);
12867 case CK_IntegralToFloating: {
12869 return EvaluateInteger(SubExpr, IntResult, Info) &&
12870 HandleIntToFloatCast(Info, E, SubExpr->getType(), IntResult,
12871 E->getType(), Result);
12874 case CK_FloatingCast: {
12875 if (!Visit(SubExpr))
12877 return HandleFloatToFloatCast(Info, E, SubExpr->getType(), E->getType(),
12881 case CK_FloatingComplexToReal: {
12883 if (!EvaluateComplex(SubExpr, V, Info))
12885 Result = V.getComplexFloatReal();
12891 //===----------------------------------------------------------------------===//
12892 // Complex Evaluation (for float and integer)
12893 //===----------------------------------------------------------------------===//
// Evaluator for expressions of complex type (both _Complex float and
// _Complex int); the value is written into the caller-provided ComplexValue.
12896 class ComplexExprEvaluator
12897 : public ExprEvaluatorBase<ComplexExprEvaluator> {
12898 ComplexValue &Result;
12901 ComplexExprEvaluator(EvalInfo &info, ComplexValue &Result)
12902 : ExprEvaluatorBaseTy(info), Result(Result) {}
12904 bool Success(const APValue &V, const Expr *e) {
12909 bool ZeroInitialization(const Expr *E);
12911 //===--------------------------------------------------------------------===//
// Visitor Methods
12913 //===--------------------------------------------------------------------===//
12915 bool VisitImaginaryLiteral(const ImaginaryLiteral *E);
12916 bool VisitCastExpr(const CastExpr *E);
12917 bool VisitBinaryOperator(const BinaryOperator *E);
12918 bool VisitUnaryOperator(const UnaryOperator *E);
12919 bool VisitInitListExpr(const InitListExpr *E);
12921 } // end anonymous namespace
// Entry point: evaluate a complex-typed rvalue expression into Result.
12923 static bool EvaluateComplex(const Expr *E, ComplexValue &Result,
12925 assert(E->isRValue() && E->getType()->isAnyComplexType());
12926 return ComplexExprEvaluator(Info, Result).Visit(E);
// Zero-initialize a complex value: both components become zero in the
// element type's representation (float zero or an APSInt zero of the right
// width/signedness).
12929 bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) {
12930 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
12931 if (ElemTy->isRealFloatingType()) {
12932 Result.makeComplexFloat();
12933 APFloat Zero = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy));
12934 Result.FloatReal = Zero;
12935 Result.FloatImag = Zero;
12937 Result.makeComplexInt();
12938 APSInt Zero = Info.Ctx.MakeIntValue(0, ElemTy);
12939 Result.IntReal = Zero;
12940 Result.IntImag = Zero;
// An imaginary literal (e.g. 2.0i) evaluates to a complex value whose
// imaginary part is the operand's value and whose real part is zero, matching
// the operand's element type (float or integer).
12945 bool ComplexExprEvaluator::VisitImaginaryLiteral(const ImaginaryLiteral *E) {
12946 const Expr* SubExpr = E->getSubExpr();
12948 if (SubExpr->getType()->isRealFloatingType()) {
12949 Result.makeComplexFloat();
12950 APFloat &Imag = Result.FloatImag;
12951 if (!EvaluateFloat(SubExpr, Imag, Info))
// Real part is a default-constructed (zero) APFloat in the same semantics.
12954 Result.FloatReal = APFloat(Imag.getSemantics());
12957 assert(SubExpr->getType()->isIntegerType() &&
12958 "Unexpected imaginary literal.");
12960 Result.makeComplexInt();
12961 APSInt &Imag = Result.IntImag;
12962 if (!EvaluateInteger(SubExpr, Imag, Info))
// Zero real part with the same bit width and signedness as the imag part.
12965 Result.IntReal = APSInt(Imag.getBitWidth(), !Imag.isSigned());
// Evaluate a cast whose result is of complex type. Dispatches on the cast
// kind: kinds that can never yield a complex value are unreachable, generic
// kinds defer to the base evaluator, and the real<->complex / element-type
// conversions are handled explicitly below. NOTE(review): non-contiguous
// embedded line numbers indicate some return/brace lines are elided here.
12970 bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
12972 switch (E->getCastKind()) {
// These cast kinds cannot produce a value of complex type.
12974 case CK_BaseToDerived:
12975 case CK_DerivedToBase:
12976 case CK_UncheckedDerivedToBase:
12979 case CK_ArrayToPointerDecay:
12980 case CK_FunctionToPointerDecay:
12981 case CK_NullToPointer:
12982 case CK_NullToMemberPointer:
12983 case CK_BaseToDerivedMemberPointer:
12984 case CK_DerivedToBaseMemberPointer:
12985 case CK_MemberPointerToBoolean:
12986 case CK_ReinterpretMemberPointer:
12987 case CK_ConstructorConversion:
12988 case CK_IntegralToPointer:
12989 case CK_PointerToIntegral:
12990 case CK_PointerToBoolean:
12992 case CK_VectorSplat:
12993 case CK_IntegralCast:
12994 case CK_BooleanToSignedIntegral:
12995 case CK_IntegralToBoolean:
12996 case CK_IntegralToFloating:
12997 case CK_FloatingToIntegral:
12998 case CK_FloatingToBoolean:
12999 case CK_FloatingCast:
13000 case CK_CPointerToObjCPointerCast:
13001 case CK_BlockPointerToObjCPointerCast:
13002 case CK_AnyPointerToBlockPointerCast:
13003 case CK_ObjCObjectLValueCast:
13004 case CK_FloatingComplexToReal:
13005 case CK_FloatingComplexToBoolean:
13006 case CK_IntegralComplexToReal:
13007 case CK_IntegralComplexToBoolean:
13008 case CK_ARCProduceObject:
13009 case CK_ARCConsumeObject:
13010 case CK_ARCReclaimReturnedObject:
13011 case CK_ARCExtendBlockObject:
13012 case CK_CopyAndAutoreleaseBlockObject:
13013 case CK_BuiltinFnToFnPtr:
13014 case CK_ZeroToOCLOpaqueType:
13015 case CK_NonAtomicToAtomic:
13016 case CK_AddressSpaceConversion:
13017 case CK_IntToOCLSampler:
13018 case CK_FixedPointCast:
13019 case CK_FixedPointToBoolean:
13020 case CK_FixedPointToIntegral:
13021 case CK_IntegralToFixedPoint:
13022 llvm_unreachable("invalid cast kind for complex value");
// Value-preserving kinds handled generically by the base class.
13024 case CK_LValueToRValue:
13025 case CK_AtomicToNonAtomic:
13027 case CK_LValueToRValueBitCast:
13028 return ExprEvaluatorBaseTy::VisitCastExpr(E);
13031 case CK_LValueBitCast:
13032 case CK_UserDefinedConversion:
// real -> _Complex float: evaluated operand is the real part; imaginary
// part is a zero in the same semantics.
13035 case CK_FloatingRealToComplex: {
13036 APFloat &Real = Result.FloatReal;
13037 if (!EvaluateFloat(E->getSubExpr(), Real, Info))
13040 Result.makeComplexFloat();
13041 Result.FloatImag = APFloat(Real.getSemantics());
// _Complex float -> _Complex float of a different element type: convert
// both components independently.
13045 case CK_FloatingComplexCast: {
13046 if (!Visit(E->getSubExpr()))
13049 QualType To = E->getType()->castAs<ComplexType>()->getElementType();
13051 = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
13053 return HandleFloatToFloatCast(Info, E, From, To, Result.FloatReal) &&
13054 HandleFloatToFloatCast(Info, E, From, To, Result.FloatImag);
// _Complex float -> _Complex int: float-to-int conversion per component.
13057 case CK_FloatingComplexToIntegralComplex: {
13058 if (!Visit(E->getSubExpr()))
13061 QualType To = E->getType()->castAs<ComplexType>()->getElementType();
13063 = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
13064 Result.makeComplexInt();
13065 return HandleFloatToIntCast(Info, E, From, Result.FloatReal,
13066 To, Result.IntReal) &&
13067 HandleFloatToIntCast(Info, E, From, Result.FloatImag,
13068 To, Result.IntImag);
// int -> _Complex int: evaluated operand is the real part; imaginary part
// is a zero of the same width/signedness (second APSInt arg is isUnsigned).
13071 case CK_IntegralRealToComplex: {
13072 APSInt &Real = Result.IntReal;
13073 if (!EvaluateInteger(E->getSubExpr(), Real, Info))
13076 Result.makeComplexInt();
13077 Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned());
// _Complex int -> _Complex int of a different element type.
13081 case CK_IntegralComplexCast: {
13082 if (!Visit(E->getSubExpr()))
13085 QualType To = E->getType()->castAs<ComplexType>()->getElementType();
13087 = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
13089 Result.IntReal = HandleIntToIntCast(Info, E, To, From, Result.IntReal);
13090 Result.IntImag = HandleIntToIntCast(Info, E, To, From, Result.IntImag);
// _Complex int -> _Complex float: int-to-float conversion per component.
13094 case CK_IntegralComplexToFloatingComplex: {
13095 if (!Visit(E->getSubExpr()))
13098 QualType To = E->getType()->castAs<ComplexType>()->getElementType();
13100 = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
13101 Result.makeComplexFloat();
13102 return HandleIntToFloatCast(Info, E, From, Result.IntReal,
13103 To, Result.FloatReal) &&
13104 HandleIntToFloatCast(Info, E, From, Result.IntImag,
13105 To, Result.FloatImag);
13109 llvm_unreachable("unknown cast resulting in complex value");
// Evaluate a binary operator producing a complex value (+, -, *, /).
// Multiplication and division of _Complex float follow the C11 Annex G
// special-value rules (NaN/infinity recovery); _Complex int uses the
// textbook formulas. Operands that are real at the type level (mixed
// real/complex arithmetic) are tracked so the evaluation can be simplified.
// NOTE(review): the embedded original line numbers jump in places, so some
// statements (early returns, case labels, closing braces) are elided from
// this extract; the visible logic order is preserved byte-for-byte below.
13112 bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
// Pointer-to-member, assignment and comma operators are not complex
// arithmetic; let the generic evaluator handle them.
13113 if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
13114 return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
13116 // Track whether the LHS or RHS is real at the type system level. When this is
13117 // the case we can simplify our evaluation strategy.
13118 bool LHSReal = false, RHSReal = false;
// Evaluate the LHS into Result (promoting a real operand to a complex
// value whose imaginary part is a zero in matching semantics).
13121 if (E->getLHS()->getType()->isRealFloatingType()) {
13123 APFloat &Real = Result.FloatReal;
13124 LHSOK = EvaluateFloat(E->getLHS(), Real, Info);
13126 Result.makeComplexFloat();
13127 Result.FloatImag = APFloat(Real.getSemantics());
13130 LHSOK = Visit(E->getLHS());
// Keep evaluating the RHS for diagnostics even if the LHS failed, when
// the evaluation mode asks for failure notes.
13132 if (!LHSOK && !Info.noteFailure())
// Evaluate the RHS, with the same real-to-complex promotion.
13136 if (E->getRHS()->getType()->isRealFloatingType()) {
13138 APFloat &Real = RHS.FloatReal;
13139 if (!EvaluateFloat(E->getRHS(), Real, Info) || !LHSOK)
13141 RHS.makeComplexFloat();
13142 RHS.FloatImag = APFloat(Real.getSemantics());
13143 } else if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
// Sema would have folded a real-op-real expression to a real type, so at
// most one side can be real here.
13146 assert(!(LHSReal && RHSReal) &&
13147 "Cannot have both operands of a complex operation be real.");
13148 switch (E->getOpcode()) {
13149 default: return Error(E);
// Addition: component-wise; a real operand contributes only to the real
// component (its imaginary part is copied/added from the other side).
13151 if (Result.isComplexFloat()) {
13152 Result.getComplexFloatReal().add(RHS.getComplexFloatReal(),
13153 APFloat::rmNearestTiesToEven);
13155 Result.getComplexFloatImag() = RHS.getComplexFloatImag();
13157 Result.getComplexFloatImag().add(RHS.getComplexFloatImag(),
13158 APFloat::rmNearestTiesToEven);
13160 Result.getComplexIntReal() += RHS.getComplexIntReal();
13161 Result.getComplexIntImag() += RHS.getComplexIntImag();
// Subtraction: component-wise; LHS-real case negates the RHS imaginary.
13165 if (Result.isComplexFloat()) {
13166 Result.getComplexFloatReal().subtract(RHS.getComplexFloatReal(),
13167 APFloat::rmNearestTiesToEven);
13169 Result.getComplexFloatImag() = RHS.getComplexFloatImag();
13170 Result.getComplexFloatImag().changeSign();
13171 } else if (!RHSReal) {
13172 Result.getComplexFloatImag().subtract(RHS.getComplexFloatImag(),
13173 APFloat::rmNearestTiesToEven);
13176 Result.getComplexIntReal() -= RHS.getComplexIntReal();
13177 Result.getComplexIntImag() -= RHS.getComplexIntImag();
13181 if (Result.isComplexFloat()) {
13182 // This is an implementation of complex multiplication according to the
13183 // constraints laid out in C11 Annex G. The implementation uses the
13184 // following naming scheme:
13185 // (a + ib) * (c + id)
13186 ComplexValue LHS = Result;
13187 APFloat &A = LHS.getComplexFloatReal();
13188 APFloat &B = LHS.getComplexFloatImag();
13189 APFloat &C = RHS.getComplexFloatReal();
13190 APFloat &D = RHS.getComplexFloatImag();
13191 APFloat &ResR = Result.getComplexFloatReal();
13192 APFloat &ResI = Result.getComplexFloatImag();
13194 assert(!RHSReal && "Cannot have two real operands for a complex op!");
13197 } else if (RHSReal) {
13201 // In the fully general case, we need to handle NaNs and infinities
// Naive products; if both result components come out NaN the Annex G
// recovery below reconstructs an infinite result where appropriate.
13203 APFloat AC = A * C;
13204 APFloat BD = B * D;
13205 APFloat AD = A * D;
13206 APFloat BC = B * C;
13209 if (ResR.isNaN() && ResI.isNaN()) {
13210 bool Recalc = false;
// "Box" each operand: infinities become +/-1, other values keep only
// their sign (copySign onto a zero/NaN of the same semantics), then
// the products are recomputed scaled by infinity.
13211 if (A.isInfinity() || B.isInfinity()) {
13212 A = APFloat::copySign(
13213 APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A);
13214 B = APFloat::copySign(
13215 APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B);
13217 C = APFloat::copySign(APFloat(C.getSemantics()), C);
13219 D = APFloat::copySign(APFloat(D.getSemantics()), D);
13222 if (C.isInfinity() || D.isInfinity()) {
13223 C = APFloat::copySign(
13224 APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C);
13225 D = APFloat::copySign(
13226 APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), D);
13228 A = APFloat::copySign(APFloat(A.getSemantics()), A);
13230 B = APFloat::copySign(APFloat(B.getSemantics()), B);
// Recover when an intermediate product overflowed to infinity even
// though no operand was infinite.
13233 if (!Recalc && (AC.isInfinity() || BD.isInfinity() ||
13234 AD.isInfinity() || BC.isInfinity())) {
13236 A = APFloat::copySign(APFloat(A.getSemantics()), A);
13238 B = APFloat::copySign(APFloat(B.getSemantics()), B);
13240 C = APFloat::copySign(APFloat(C.getSemantics()), C);
13242 D = APFloat::copySign(APFloat(D.getSemantics()), D);
13246 ResR = APFloat::getInf(A.getSemantics()) * (A * C - B * D);
13247 ResI = APFloat::getInf(A.getSemantics()) * (A * D + B * C);
// Integral complex multiplication: (ac - bd) + i(ad + bc). The copy of
// Result into LHS is required because Result is overwritten in place.
13252 ComplexValue LHS = Result;
13253 Result.getComplexIntReal() =
13254 (LHS.getComplexIntReal() * RHS.getComplexIntReal() -
13255 LHS.getComplexIntImag() * RHS.getComplexIntImag());
13256 Result.getComplexIntImag() =
13257 (LHS.getComplexIntReal() * RHS.getComplexIntImag() +
13258 LHS.getComplexIntImag() * RHS.getComplexIntReal());
13262 if (Result.isComplexFloat()) {
13263 // This is an implementation of complex division according to the
13264 // constraints laid out in C11 Annex G. The implementation uses the
13265 // following naming scheme:
13266 // (a + ib) / (c + id)
13267 ComplexValue LHS = Result;
13268 APFloat &A = LHS.getComplexFloatReal();
13269 APFloat &B = LHS.getComplexFloatImag();
13270 APFloat &C = RHS.getComplexFloatReal();
13271 APFloat &D = RHS.getComplexFloatImag();
13272 APFloat &ResR = Result.getComplexFloatReal();
13273 APFloat &ResI = Result.getComplexFloatImag();
13279 // No real optimizations we can do here, stub out with zero.
13280 B = APFloat::getZero(A.getSemantics());
// Smith's scaled algorithm: scale the denominator by 2^-DenomLogB to
// avoid overflow/underflow in C*C + D*D, then undo the scaling.
13283 APFloat MaxCD = maxnum(abs(C), abs(D));
13284 if (MaxCD.isFinite()) {
13285 DenomLogB = ilogb(MaxCD);
13286 C = scalbn(C, -DenomLogB, APFloat::rmNearestTiesToEven);
13287 D = scalbn(D, -DenomLogB, APFloat::rmNearestTiesToEven);
13289 APFloat Denom = C * C + D * D;
13290 ResR = scalbn((A * C + B * D) / Denom, -DenomLogB,
13291 APFloat::rmNearestTiesToEven);
13292 ResI = scalbn((B * C - A * D) / Denom, -DenomLogB,
13293 APFloat::rmNearestTiesToEven);
// Annex G recovery when both components came out NaN: finite/zero ->
// signed infinity; infinite/finite -> infinity; finite/infinite -> zero.
13294 if (ResR.isNaN() && ResI.isNaN()) {
13295 if (Denom.isPosZero() && (!A.isNaN() || !B.isNaN())) {
13296 ResR = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * A;
13297 ResI = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * B;
13298 } else if ((A.isInfinity() || B.isInfinity()) && C.isFinite() &&
13300 A = APFloat::copySign(
13301 APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A);
13302 B = APFloat::copySign(
13303 APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B);
13304 ResR = APFloat::getInf(ResR.getSemantics()) * (A * C + B * D);
13305 ResI = APFloat::getInf(ResI.getSemantics()) * (B * C - A * D);
13306 } else if (MaxCD.isInfinity() && A.isFinite() && B.isFinite()) {
13307 C = APFloat::copySign(
13308 APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C);
13309 D = APFloat::copySign(
13310 APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), D);
13311 ResR = APFloat::getZero(ResR.getSemantics()) * (A * C + B * D);
13312 ResI = APFloat::getZero(ResI.getSemantics()) * (B * C - A * D);
// Integral complex division: diagnose division by zero, then use the
// conjugate formula ((ac + bd) + i(bc - ad)) / (c^2 + d^2).
13317 if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0)
13318 return Error(E, diag::note_expr_divide_by_zero);
13320 ComplexValue LHS = Result;
13321 APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() +
13322 RHS.getComplexIntImag() * RHS.getComplexIntImag();
13323 Result.getComplexIntReal() =
13324 (LHS.getComplexIntReal() * RHS.getComplexIntReal() +
13325 LHS.getComplexIntImag() * RHS.getComplexIntImag()) / Den;
13326 Result.getComplexIntImag() =
13327 (LHS.getComplexIntImag() * RHS.getComplexIntReal() -
13328 LHS.getComplexIntReal() * RHS.getComplexIntImag()) / Den;
// Evaluate a unary operator on a complex operand. Visible cases: negation
// flips the sign of both components; conjugation (~, GNU extension) flips
// only the imaginary component. NOTE(review): the case labels themselves are
// among the lines elided from this extract.
13336 bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
13337 // Get the operand value into 'Result'.
13338 if (!Visit(E->getSubExpr()))
13341 switch (E->getOpcode()) {
13347 // The result is always just the subexpr.
// Negation: -(a + ib) == (-a) + i(-b).
13350 if (Result.isComplexFloat()) {
13351 Result.getComplexFloatReal().changeSign();
13352 Result.getComplexFloatImag().changeSign();
13355 Result.getComplexIntReal() = -Result.getComplexIntReal();
13356 Result.getComplexIntImag() = -Result.getComplexIntImag();
// Conjugation: ~(a + ib) == a + i(-b).
13360 if (Result.isComplexFloat())
13361 Result.getComplexFloatImag().changeSign();
13363 Result.getComplexIntImag() = -Result.getComplexIntImag();
// Evaluate an initializer list for a complex type: a two-element list
// supplies {real, imag} directly; anything else falls back to the generic
// init-list handling in the base evaluator.
13368 bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
13369 if (E->getNumInits() == 2) {
// isComplexType() is true for floating complex; otherwise this is an
// integral complex initializer.
13370 if (E->getType()->isComplexType()) {
13371 Result.makeComplexFloat();
13372 if (!EvaluateFloat(E->getInit(0), Result.FloatReal, Info))
13374 if (!EvaluateFloat(E->getInit(1), Result.FloatImag, Info))
13377 Result.makeComplexInt();
13378 if (!EvaluateInteger(E->getInit(0), Result.IntReal, Info))
13380 if (!EvaluateInteger(E->getInit(1), Result.IntImag, Info))
13385 return ExprEvaluatorBaseTy::VisitInitListExpr(E);
13388 //===----------------------------------------------------------------------===//
13389 // Atomic expression evaluation, essentially just handling the NonAtomicToAtomic
13390 // implicit conversion.
13391 //===----------------------------------------------------------------------===//
// Evaluator for rvalues of _Atomic type. Mostly strips/forwards through the
// NonAtomicToAtomic conversion; when 'This' is non-null the _Atomic-wrapped
// subobject is initialized in place (needed for class/array payloads).
// NOTE(review): the surrounding anonymous-namespace braces and some member
// lines are elided from this extract.
13394 class AtomicExprEvaluator :
13395 public ExprEvaluatorBase<AtomicExprEvaluator> {
// Optional lvalue denoting the object being initialized (may be null).
13396 const LValue *This;
13399 AtomicExprEvaluator(EvalInfo &Info, const LValue *This, APValue &Result)
13400 : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}
13402 bool Success(const APValue &V, const Expr *E) {
// Zero-initialize by evaluating an implicit value-init of the underlying
// (non-atomic) value type.
13407 bool ZeroInitialization(const Expr *E) {
13408 ImplicitValueInitExpr VIE(
13409 E->getType()->castAs<AtomicType>()->getValueType());
13410 // For atomic-qualified class (and array) types in C++, initialize the
13411 // _Atomic-wrapped subobject directly, in-place.
13412 return This ? EvaluateInPlace(Result, Info, *This, &VIE)
13413 : Evaluate(Result, Info, &VIE);
13416 bool VisitCastExpr(const CastExpr *E) {
13417 switch (E->getCastKind()) {
13419 return ExprEvaluatorBaseTy::VisitCastExpr(E);
// The only atomic-specific cast: evaluate the non-atomic operand as the
// atomic object's value.
13420 case CK_NonAtomicToAtomic:
13421 return This ? EvaluateInPlace(Result, Info, *This, E->getSubExpr())
13422 : Evaluate(Result, Info, E->getSubExpr())
13426 } // end anonymous namespace
// Entry point for evaluating an rvalue of _Atomic type; 'This' (may be null)
// selects in-place initialization of the atomic subobject.
13428 static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
13430 assert(E->isRValue() && E->getType()->isAtomicType());
13431 return AtomicExprEvaluator(Info, This, Result).Visit(E);
13434 //===----------------------------------------------------------------------===//
13435 // Void expression evaluation, primarily for a cast to void on the LHS of a
13437 //===----------------------------------------------------------------------===//
// Evaluator for expressions of void type (e.g. a cast-to-void LHS of a comma
// operator, __builtin_assume, operator delete calls, delete-expressions).
// There is no value to produce, so Success/ZeroInitialization trivially
// succeed. NOTE(review): enclosing anonymous-namespace braces and some case
// labels are elided from this extract.
13440 class VoidExprEvaluator
13441 : public ExprEvaluatorBase<VoidExprEvaluator> {
13443 VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {}
13445 bool Success(const APValue &V, const Expr *e) { return true; }
13447 bool ZeroInitialization(const Expr *E) { return true; }
13449 bool VisitCastExpr(const CastExpr *E) {
13450 switch (E->getCastKind()) {
13452 return ExprEvaluatorBaseTy::VisitCastExpr(E);
// Cast to void: evaluate the operand solely for side-effect tracking.
13454 VisitIgnoredValue(E->getSubExpr());
13459 bool VisitCallExpr(const CallExpr *E) {
13460 switch (E->getBuiltinCallee()) {
13461 case Builtin::BI__assume:
13462 case Builtin::BI__builtin_assume:
13463 // The argument is not evaluated!
13466 case Builtin::BI__builtin_operator_delete:
13467 return HandleOperatorDeleteCall(Info, E);
13473 return ExprEvaluatorBaseTy::VisitCallExpr(E);
13476 bool VisitCXXDeleteExpr(const CXXDeleteExpr *E);
13478 } // end anonymous namespace
// Constant-evaluate a delete-expression (C++2a constexpr dynamic allocation):
// checks the operator delete is a replaceable global allocation function,
// evaluates the pointer, permits deleting null, validates the delete kind
// against the allocation, runs the destructor, and erases the heap
// allocation record. NOTE(review): several early-return lines are elided
// from this extract (the embedded line numbers are non-contiguous).
13480 bool VoidExprEvaluator::VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
13481 // We cannot speculatively evaluate a delete expression.
13482 if (Info.SpeculativeEvaluationDepth)
13485 FunctionDecl *OperatorDelete = E->getOperatorDelete();
// Only the replaceable global operator delete is usable in constant
// evaluation; class-specific/placement forms are diagnosed.
13486 if (!OperatorDelete->isReplaceableGlobalAllocationFunction()) {
13487 Info.FFDiag(E, diag::note_constexpr_new_non_replaceable)
13488 << isa<CXXMethodDecl>(OperatorDelete) << OperatorDelete;
13492 const Expr *Arg = E->getArgument();
13495 if (!EvaluatePointer(Arg, Pointer, Info))
13497 if (Pointer.Designator.Invalid)
13500 // Deleting a null pointer has no effect.
13501 if (Pointer.isNullPointer()) {
13502 // This is the only case where we need to produce an extension warning:
13503 // the only other way we can succeed is if we find a dynamic allocation,
13504 // and we will have warned when we allocated it in that case.
13505 if (!Info.getLangOpts().CPlusPlus2a)
13506 Info.CCEDiag(E, diag::note_constexpr_new)
// Check delete vs delete[] matches how the storage was allocated.
13510 Optional<DynAlloc *> Alloc = CheckDeleteKind(
13511 Info, E, Pointer, E->isArrayForm() ? DynAlloc::ArrayNew : DynAlloc::New);
13514 QualType AllocType = Pointer.Base.getDynamicAllocType();
13516 // For the non-array case, the designator must be empty if the static type
13517 // does not have a virtual destructor.
13518 if (!E->isArrayForm() && Pointer.Designator.Entries.size() != 0 &&
13519 !hasVirtualDestructor(Arg->getType()->getPointeeType())) {
13520 Info.FFDiag(E, diag::note_constexpr_delete_base_nonvirt_dtor)
13521 << Arg->getType()->getPointeeType() << AllocType;
13525 // For a class type with a virtual destructor, the selected operator delete
13526 // is the one looked up when building the destructor.
13527 if (!E->isArrayForm() && !E->isGlobalDelete()) {
13528 const FunctionDecl *VirtualDelete = getVirtualOperatorDelete(AllocType);
13529 if (VirtualDelete &&
13530 !VirtualDelete->isReplaceableGlobalAllocationFunction()) {
13531 Info.FFDiag(E, diag::note_constexpr_new_non_replaceable)
13532 << isa<CXXMethodDecl>(VirtualDelete) << VirtualDelete;
// Run the destructor on (a copy of) the allocation's value.
13537 if (!HandleDestruction(Info, E->getExprLoc(), Pointer.getLValueBase(),
13538 (*Alloc)->Value, AllocType))
13541 if (!Info.HeapAllocs.erase(Pointer.Base.dyn_cast<DynamicAllocLValue>())) {
13542 // The element was already erased. This means the destructor call also
13543 // deleted the object.
13544 // FIXME: This probably results in undefined behavior before we get this
13545 // far, and should be diagnosed elsewhere first.
13546 Info.FFDiag(E, diag::note_constexpr_double_delete);
// Entry point for evaluating an rvalue of void type (side effects only).
13553 static bool EvaluateVoid(const Expr *E, EvalInfo &Info) {
13554 assert(E->isRValue() && E->getType()->isVoidType());
13555 return VoidExprEvaluator(Info).Visit(E);
13558 //===----------------------------------------------------------------------===//
13559 // Top level Expr::EvaluateAsRValue method.
13560 //===----------------------------------------------------------------------===//
// Central dispatch: evaluate E into Result, choosing the specialized
// evaluator by the expression's type (glvalue/function, vector, integer,
// pointer, float, complex, fixed-point, member pointer, array, record, void,
// atomic). Non-literal types are diagnosed. NOTE(review): some declarations
// and return statements are elided from this extract.
13562 static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
13563 // In C, function designators are not lvalues, but we evaluate them as if they
13565 QualType T = E->getType();
13566 if (E->isGLValue() || T->isFunctionType()) {
13568 if (!EvaluateLValue(E, LV, Info))
13570 LV.moveInto(Result);
13571 } else if (T->isVectorType()) {
13572 if (!EvaluateVector(E, Result, Info))
13574 } else if (T->isIntegralOrEnumerationType()) {
13575 if (!IntExprEvaluator(Info, Result).Visit(E))
13577 } else if (T->hasPointerRepresentation()) {
13579 if (!EvaluatePointer(E, LV, Info))
13581 LV.moveInto(Result);
13582 } else if (T->isRealFloatingType()) {
13583 llvm::APFloat F(0.0);
13584 if (!EvaluateFloat(E, F, Info))
13586 Result = APValue(F);
13587 } else if (T->isAnyComplexType()) {
13589 if (!EvaluateComplex(E, C, Info))
13591 C.moveInto(Result);
13592 } else if (T->isFixedPointType()) {
13593 if (!FixedPointExprEvaluator(Info, Result).Visit(E)) return false;
13594 } else if (T->isMemberPointerType()) {
13596 if (!EvaluateMemberPointer(E, P, Info))
13598 P.moveInto(Result);
// Arrays and records are evaluated into a temporary slot in the current
// call frame so subobjects can be referenced during evaluation.
13600 } else if (T->isArrayType()) {
13603 Info.CurrentCall->createTemporary(E, T, false, LV);
13604 if (!EvaluateArray(E, LV, Value, Info))
13607 } else if (T->isRecordType()) {
13609 APValue &Value = Info.CurrentCall->createTemporary(E, T, false, LV);
13610 if (!EvaluateRecord(E, LV, Value, Info))
// void expressions are a C++14 (relaxed constexpr) feature; diagnose as
// an extension otherwise.
13613 } else if (T->isVoidType()) {
13614 if (!Info.getLangOpts().CPlusPlus11)
13615 Info.CCEDiag(E, diag::note_constexpr_nonliteral)
13617 if (!EvaluateVoid(E, Info))
13619 } else if (T->isAtomicType()) {
13620 QualType Unqual = T.getAtomicUnqualifiedType();
13621 if (Unqual->isArrayType() || Unqual->isRecordType()) {
13623 APValue &Value = Info.CurrentCall->createTemporary(E, Unqual, false, LV);
13624 if (!EvaluateAtomic(E, &LV, Value, Info))
13627 if (!EvaluateAtomic(E, nullptr, Result, Info))
13630 } else if (Info.getLangOpts().CPlusPlus11) {
13631 Info.FFDiag(E, diag::note_constexpr_nonliteral) << E->getType();
13634 Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
13641 /// EvaluateInPlace - Evaluate an expression in-place in an APValue. In some
13642 /// cases, the in-place evaluation is essential, since later initializers for
13643 /// an object can indirectly refer to subobjects which were initialized earlier.
13644 static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
13645 const Expr *E, bool AllowNonLiteralTypes) {
13646 assert(!E->isValueDependent());
// Optionally enforce the literal-type requirement before evaluating.
13648 if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E, &This))
13651 if (E->isRValue()) {
13652 // Evaluate arrays and record types in-place, so that later initializers can
13653 // refer to earlier-initialized members of the object.
13654 QualType T = E->getType();
13655 if (T->isArrayType())
13656 return EvaluateArray(E, This, Result, Info);
13657 else if (T->isRecordType())
13658 return EvaluateRecord(E, This, Result, Info);
13659 else if (T->isAtomicType()) {
// Atomic class/array payloads also need in-place initialization.
13660 QualType Unqual = T.getAtomicUnqualifiedType();
13661 if (Unqual->isArrayType() || Unqual->isRecordType())
13662 return EvaluateAtomic(E, &This, Result, Info);
13666 // For any other type, in-place evaluation is unimportant.
13667 return Evaluate(Result, Info, E);
13670 /// EvaluateAsRValue - Try to evaluate this expression, performing an implicit
13671 /// lvalue-to-rvalue cast if it is an lvalue.
13672 static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
// Optionally delegate to the experimental bytecode interpreter.
13673 if (Info.EnableNewConstInterp) {
13674 if (!Info.Ctx.getInterpContext().evaluateAsRValue(Info, E, Result))
13677 if (E->getType().isNull())
13680 if (!CheckLiteralType(Info, E))
13683 if (!::Evaluate(Result, Info, E))
// Glvalue results are materialized via lvalue-to-rvalue conversion.
13686 if (E->isGLValue()) {
13688 LV.setFrom(Info.Ctx, Result);
13689 if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result))
13694 // Check this core constant expression is a constant expression.
13695 return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result) &&
13696 CheckMemoryLeaks(Info);
// Fast path for trivially-constant expressions (integer literals, null-typed
// expressions, and — outside C++11 — large aggregate rvalues that are too
// expensive to fold). Sets IsConst and returns true when the fast path
// decided the answer. NOTE(review): several return statements are elided
// from this extract.
13699 static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result,
13700 const ASTContext &Ctx, bool &IsConst) {
13701 // Fast-path evaluations of integer literals, since we sometimes see files
13702 // containing vast quantities of these.
13703 if (const IntegerLiteral *L = dyn_cast<IntegerLiteral>(Exp)) {
13704 Result.Val = APValue(APSInt(L->getValue(),
13705 L->getType()->isUnsignedIntegerType()));
13710 // This case should be rare, but we need to check it before we check on
13712 if (Exp->getType().isNull()) {
13717 // FIXME: Evaluating values of large array and record types can cause
13718 // performance problems. Only do so in C++11 for now.
13719 if (Exp->isRValue() && (Exp->getType()->isArrayType() ||
13720 Exp->getType()->isRecordType()) &&
13721 !Ctx.getLangOpts().CPlusPlus11) {
// Returns true if the evaluation status carries a side effect (or undefined
// behavior) that the caller's SideEffectsKind does not permit. Relies on the
// ordering of the SideEffectsKind enumerators.
13728 static bool hasUnacceptableSideEffect(Expr::EvalStatus &Result,
13729 Expr::SideEffectsKind SEK) {
13730 return (SEK < Expr::SE_AllowSideEffects && Result.HasSideEffects) ||
13731 (SEK < Expr::SE_AllowUndefinedBehavior && Result.HasUndefinedBehavior);
// Helper combining the literal fast path with full rvalue evaluation.
13734 static bool EvaluateAsRValue(const Expr *E, Expr::EvalResult &Result,
13735 const ASTContext &Ctx, EvalInfo &Info) {
13737 if (FastEvaluateAsRValue(E, Result, Ctx, IsConst))
13740 return EvaluateAsRValue(Info, E, Result.Val);
// Evaluate E as an integer rvalue; fails for non-integral types, non-integer
// results, or disallowed side effects.
13743 static bool EvaluateAsInt(const Expr *E, Expr::EvalResult &ExprResult,
13744 const ASTContext &Ctx,
13745 Expr::SideEffectsKind AllowSideEffects,
13747 if (!E->getType()->isIntegralOrEnumerationType())
13750 if (!::EvaluateAsRValue(E, ExprResult, Ctx, Info) ||
13751 !ExprResult.Val.isInt() ||
13752 hasUnacceptableSideEffect(ExprResult, AllowSideEffects))
// Evaluate E as a fixed-point rvalue; mirrors EvaluateAsInt for the
// Embedded-C fixed-point types.
13758 static bool EvaluateAsFixedPoint(const Expr *E, Expr::EvalResult &ExprResult,
13759 const ASTContext &Ctx,
13760 Expr::SideEffectsKind AllowSideEffects,
13762 if (!E->getType()->isFixedPointType())
13765 if (!::EvaluateAsRValue(E, ExprResult, Ctx, Info))
13768 if (!ExprResult.Val.isFixedPoint() ||
13769 hasUnacceptableSideEffect(ExprResult, AllowSideEffects))
13775 /// EvaluateAsRValue - Return true if this is a constant which we can fold using
13776 /// any crazy technique (that has nothing to do with language standards) that
13777 /// we want to. If this function returns true, it returns the folded constant
13778 /// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion
13779 /// will be applied to the result.
13780 bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx,
13781 bool InConstantContext) const {
13782 assert(!isValueDependent() &&
13783 "Expression evaluator can't be called on a dependent expression.");
// Folding mode: side effects are tolerated so we can fold as much as
// possible (EM_IgnoreSideEffects).
13784 EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
13785 Info.InConstantContext = InConstantContext;
13786 return ::EvaluateAsRValue(this, Result, Ctx, Info);
// Fold this expression and convert the folded value to bool (pointer,
// integer, float and complex conversions handled by HandleConversionToBool).
13789 bool Expr::EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx,
13790 bool InConstantContext) const {
13791 assert(!isValueDependent() &&
13792 "Expression evaluator can't be called on a dependent expression.");
13793 EvalResult Scratch;
13794 return EvaluateAsRValue(Scratch, Ctx, InConstantContext) &&
13795 HandleConversionToBool(Scratch.Val, Result);
// Public wrapper: fold this expression to an integer, honoring the caller's
// side-effect tolerance.
13798 bool Expr::EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx,
13799 SideEffectsKind AllowSideEffects,
13800 bool InConstantContext) const {
13801 assert(!isValueDependent() &&
13802 "Expression evaluator can't be called on a dependent expression.");
13803 EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
13804 Info.InConstantContext = InConstantContext;
13805 return ::EvaluateAsInt(this, Result, Ctx, AllowSideEffects, Info);
// Public wrapper: fold this expression to a fixed-point value, honoring the
// caller's side-effect tolerance.
13808 bool Expr::EvaluateAsFixedPoint(EvalResult &Result, const ASTContext &Ctx,
13809 SideEffectsKind AllowSideEffects,
13810 bool InConstantContext) const {
13811 assert(!isValueDependent() &&
13812 "Expression evaluator can't be called on a dependent expression.");
13813 EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
13814 Info.InConstantContext = InConstantContext;
13815 return ::EvaluateAsFixedPoint(this, Result, Ctx, AllowSideEffects, Info);
// Fold this expression to a floating-point value; fails for non-floating
// types, non-float results, or disallowed side effects.
13818 bool Expr::EvaluateAsFloat(APFloat &Result, const ASTContext &Ctx,
13819 SideEffectsKind AllowSideEffects,
13820 bool InConstantContext) const {
13821 assert(!isValueDependent() &&
13822 "Expression evaluator can't be called on a dependent expression.");
13824 if (!getType()->isRealFloatingType())
13827 EvalResult ExprResult;
13828 if (!EvaluateAsRValue(ExprResult, Ctx, InConstantContext) ||
13829 !ExprResult.Val.isFloat() ||
13830 hasUnacceptableSideEffect(ExprResult, AllowSideEffects))
13833 Result = ExprResult.Val.getFloat();
// Fold this glvalue expression to an lvalue usable at code-gen time: the
// evaluation must be side-effect free, cleanups must be discardable, and the
// resulting lvalue must pass the constant-expression check against a
// reference to this type.
13837 bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx,
13838 bool InConstantContext) const {
13839 assert(!isValueDependent() &&
13840 "Expression evaluator can't be called on a dependent expression.");
13842 EvalInfo Info(Ctx, Result, EvalInfo::EM_ConstantFold);
13843 Info.InConstantContext = InConstantContext;
13845 CheckedTemporaries CheckedTemps;
13846 if (!EvaluateLValue(this, LV, Info) || !Info.discardCleanups() ||
13847 Result.HasSideEffects ||
13848 !CheckLValueConstantExpression(Info, getExprLoc(),
13849 Ctx.getLValueReferenceType(getType()), LV,
13850 Expr::EvaluateForCodeGen, CheckedTemps))
13853 LV.moveInto(Result.Val);
// Evaluate this expression under the strict constant-expression rules (as
// used for ConstantExpr nodes); rejects side effects and leaked cleanups,
// then validates the value for the given usage.
13857 bool Expr::EvaluateAsConstantExpr(EvalResult &Result, ConstExprUsage Usage,
13858 const ASTContext &Ctx) const {
13859 assert(!isValueDependent() &&
13860 "Expression evaluator can't be called on a dependent expression.");
13862 EvalInfo::EvaluationMode EM = EvalInfo::EM_ConstantExpression;
13863 EvalInfo Info(Ctx, Result, EM);
13864 Info.InConstantContext = true;
13866 if (!::Evaluate(Result.Val, Info, this) || Result.HasSideEffects)
// A pending cleanup here would mean a missing full-expression marker in
// the AST, which is a programming error, not an evaluation failure.
13869 if (!Info.discardCleanups())
13870 llvm_unreachable("Unhandled cleanup; missing full expression marker?");
13872 return CheckConstantExpression(Info, getExprLoc(), getStorageType(Ctx, this),
13873 Result.Val, Usage) &&
13874 CheckMemoryLeaks(Info);
// Evaluate this expression as the initializer of variable VD, producing the
// variable's value and, on failure, notes explaining why it is not constant.
// Constexpr variables are evaluated in strict mode; others in fold mode.
// NOTE(review): some declarations/returns are elided from this extract.
13877 bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
13879 SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
13880 assert(!isValueDependent() &&
13881 "Expression evaluator can't be called on a dependent expression.");
13883 // FIXME: Evaluating initializers for large array and record types can cause
13884 // performance problems. Only do so in C++11 for now.
13885 if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
13886 !Ctx.getLangOpts().CPlusPlus11)
13889 Expr::EvalStatus EStatus;
13890 EStatus.Diag = &Notes;
13892 EvalInfo Info(Ctx, EStatus, VD->isConstexpr()
13893 ? EvalInfo::EM_ConstantExpression
13894 : EvalInfo::EM_ConstantFold);
13895 Info.setEvaluatingDecl(VD, Value);
13896 Info.InConstantContext = true;
13898 SourceLocation DeclLoc = VD->getLocation();
13899 QualType DeclTy = VD->getType();
// Optional path through the experimental bytecode interpreter.
13901 if (Info.EnableNewConstInterp) {
13902 auto &InterpCtx = const_cast<ASTContext &>(Ctx).getInterpContext();
13903 if (!InterpCtx.evaluateAsInitializer(Info, VD, Value))
13909 // C++11 [basic.start.init]p2:
13910 // Variables with static storage duration or thread storage duration shall
13911 // be zero-initialized before any other initialization takes place.
13912 // This behavior is not present in C.
13913 if (Ctx.getLangOpts().CPlusPlus && !VD->hasLocalStorage() &&
13914 !DeclTy->isReferenceType()) {
13915 ImplicitValueInitExpr VIE(DeclTy);
13916 if (!EvaluateInPlace(Value, Info, LVal, &VIE,
13917 /*AllowNonLiteralTypes=*/true))
// The actual initializer is evaluated in place over the zero-initialized
// value so later members can reference earlier ones.
13921 if (!EvaluateInPlace(Value, Info, LVal, this,
13922 /*AllowNonLiteralTypes=*/true) ||
13923 EStatus.HasSideEffects)
13926 // At this point, any lifetime-extended temporaries are completely
13928 Info.performLifetimeExtension();
13930 if (!Info.discardCleanups())
13931 llvm_unreachable("Unhandled cleanup; missing full expression marker?");
13933 return CheckConstantExpression(Info, DeclLoc, DeclTy, Value) &&
13934 CheckMemoryLeaks(Info);
// Check whether this constant-initialized variable also has constant
// destruction (C++2a constexpr destructors): run the destructor over a copy
// of the evaluated value in strict mode and record the outcome in the
// EvaluatedStmt. NOTE(review): some lines are elided from this extract.
13937 bool VarDecl::evaluateDestruction(
13938 SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
13939 assert(getEvaluatedValue() && !getEvaluatedValue()->isAbsent() &&
13940 "cannot evaluate destruction of non-constant-initialized variable");
13942 Expr::EvalStatus EStatus;
13943 EStatus.Diag = &Notes;
13945 // Make a copy of the value for the destructor to mutate.
13946 APValue DestroyedValue = *getEvaluatedValue();
13948 EvalInfo Info(getASTContext(), EStatus, EvalInfo::EM_ConstantExpression);
13949 Info.setEvaluatingDecl(this, DestroyedValue,
13950 EvalInfo::EvaluatingDeclKind::Dtor);
13951 Info.InConstantContext = true;
13953 SourceLocation DeclLoc = getLocation();
13954 QualType DeclTy = getType();
13959 // FIXME: Consider storing whether this variable has constant destruction in
13960 // the EvaluatedStmt so that CodeGen can query it.
13961 if (!HandleDestruction(Info, DeclLoc, LVal.Base, DestroyedValue, DeclTy) ||
13962 EStatus.HasSideEffects)
13965 if (!Info.discardCleanups())
13966 llvm_unreachable("Unhandled cleanup; missing full expression marker?");
13968 ensureEvaluatedStmt()->HasConstantDestruction = true;
13972 /// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
13973 /// constant folded, but discard the result.
13974 bool Expr::isEvaluatable(const ASTContext &Ctx, SideEffectsKind SEK) const {
13975 assert(!isValueDependent() &&
13976 "Expression evaluator can't be called on a dependent expression.");
13979 return EvaluateAsRValue(Result, Ctx, /* in constant context */ true) &&
13980 !hasUnacceptableSideEffect(Result, SEK);
// Evaluate an expression the caller already knows to be an integer constant;
// asserts (rather than reports) on failure. Optional Diag collects notes.
13983 APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx,
13984 SmallVectorImpl<PartialDiagnosticAt> *Diag) const {
13985 assert(!isValueDependent() &&
13986 "Expression evaluator can't be called on a dependent expression.");
13988 EvalResult EVResult;
13989 EVResult.Diag = Diag;
13990 EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects);
13991 Info.InConstantContext = true;
13993 bool Result = ::EvaluateAsRValue(this, EVResult, Ctx, Info);
// Result is only consumed by the asserts; in release builds a failure here
// would be a caller bug.
13995 assert(Result && "Could not evaluate expression");
13996 assert(EVResult.Val.isInt() && "Expression did not evaluate to integer");
13998 return EVResult.Val.getInt();
// Variant of EvaluateKnownConstInt that additionally diagnoses undefined
// behavior (e.g. signed overflow) during evaluation via
// CheckingForUndefinedBehavior. Note: this calls the EvalInfo-based
// ::EvaluateAsRValue directly, bypassing the integer-literal fast path.
14001 APSInt Expr::EvaluateKnownConstIntCheckOverflow(
14002 const ASTContext &Ctx, SmallVectorImpl<PartialDiagnosticAt> *Diag) const {
14003 assert(!isValueDependent() &&
14004 "Expression evaluator can't be called on a dependent expression.");
14006 EvalResult EVResult;
14007 EVResult.Diag = Diag;
14008 EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects);
14009 Info.InConstantContext = true;
14010 Info.CheckingForUndefinedBehavior = true;
14012 bool Result = ::EvaluateAsRValue(Info, this, EVResult.Val);
14014 assert(Result && "Could not evaluate expression");
14015 assert(EVResult.Val.isInt() && "Expression did not evaluate to integer");
14017 return EVResult.Val.getInt();
// Evaluate this expression purely for the side effect of emitting
// undefined-behavior (e.g. overflow) diagnostics; the computed value and the
// success/failure of the evaluation are both discarded.
14020 void Expr::EvaluateForOverflow(const ASTContext &Ctx) const {
14021 assert(!isValueDependent() &&
14022 "Expression evaluator can't be called on a dependent expression.");
14025 EvalResult EVResult;
// Only fall back to the full evaluator when the fast path cannot fold it.
14026 if (!FastEvaluateAsRValue(this, EVResult, Ctx, IsConst)) {
14027 EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects);
14028 Info.CheckingForUndefinedBehavior = true;
// Result deliberately ignored — we only care about emitted diagnostics.
14029 (void)::EvaluateAsRValue(Info, this, EVResult.Val);
// Whether the evaluated lvalue's base designates a global (as determined by
// IsGlobalLValue). Precondition: the stored value must be an lvalue.
14033 bool Expr::EvalResult::isGlobalLValue() const {
14034 assert(Val.isLValue());
14035 return IsGlobalLValue(Val.getLValueBase());
14039 /// isIntegerConstantExpr - this recursive routine will test if an expression is
14040 /// an integer constant expression.
14042 /// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero, comma, etc.
14045 // CheckICE - This function does the fundamental ICE checking: the returned
14046 // ICEDiag contains an ICEKind indicating whether the expression is an ICE,
14047 // and a (possibly null) SourceLocation indicating the location of the problem.
14049 // Note that to reduce code duplication, this helper does no evaluation
14050 // itself; the caller checks whether the expression is evaluatable, and
14051 // in the rare cases where CheckICE actually cares about the evaluated
14052 // value, it calls into Evaluate.
// ICEKind enumerators (the enum's severity ordering is relied upon by
// Worst() below: a numerically larger Kind is a "worse" classification).
14057 /// This expression is an ICE.
14059 /// This expression is not an ICE, but if it isn't evaluated, it's
14060 /// a legal subexpression for an ICE. This return value is used to handle
14061 /// the comma operator in C99 mode, and non-constant subexpressions.
14062 IK_ICEIfUnevaluated,
14063 /// This expression is not an ICE, and is not a legal subexpression for one.
// Location of the (sub)expression responsible for the classification.
14069 SourceLocation Loc;
14071 ICEDiag(ICEKind IK, SourceLocation l) : Kind(IK), Loc(l) {}
// Convenience: "this is an ICE, nothing to report".
14076 static ICEDiag NoDiag() { return ICEDiag(IK_ICE, SourceLocation()); }
// Combine two sub-results, keeping the more severe classification.
14078 static ICEDiag Worst(ICEDiag A, ICEDiag B) { return A.Kind >= B.Kind ? A : B; }
// Fully evaluate E in a constant-expression context and classify it for ICE
// purposes: it counts as an ICE only if evaluation succeeds, produces an
// integer, and involves no side effects.
14080 static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) {
14081 Expr::EvalResult EVResult;
14082 Expr::EvalStatus Status;
14083 EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpression);
14085 Info.InConstantContext = true;
14086 if (!::EvaluateAsRValue(E, EVResult, Ctx, Info) || EVResult.HasSideEffects ||
14087 !EVResult.Val.isInt())
14088 return ICEDiag(IK_NotICE, E->getBeginLoc());
// CheckICE - Classify E as an ICE / conditionally-an-ICE / not-an-ICE by
// structural recursion over the AST. Per the note above, this helper avoids
// evaluation itself except in the rare cases where the classification depends
// on an actual value (Div/Rem RHS, short-circuit conditions, etc.).
14093 static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
14094 assert(!E->isValueDependent() && "Should not see value dependent exprs!");
// An ICE must have integral or enumeration type.
14095 if (!E->getType()->isIntegralOrEnumerationType())
14096 return ICEDiag(IK_NotICE, E->getBeginLoc());
14098 switch (E->getStmtClass()) {
// Macro trick: expand StmtNodes.inc so that every non-expression statement
// class becomes a case label here (EXPR nodes are suppressed and handled
// explicitly below); all of them are never ICEs.
14099 #define ABSTRACT_STMT(Node)
14100 #define STMT(Node, Base) case Expr::Node##Class:
14101 #define EXPR(Node, Base)
14102 #include "clang/AST/StmtNodes.inc"
// Expression forms that can never appear in an integer constant expression.
14103 case Expr::PredefinedExprClass:
14104 case Expr::FloatingLiteralClass:
14105 case Expr::ImaginaryLiteralClass:
14106 case Expr::StringLiteralClass:
14107 case Expr::ArraySubscriptExprClass:
14108 case Expr::OMPArraySectionExprClass:
14109 case Expr::MemberExprClass:
14110 case Expr::CompoundAssignOperatorClass:
14111 case Expr::CompoundLiteralExprClass:
14112 case Expr::ExtVectorElementExprClass:
14113 case Expr::DesignatedInitExprClass:
14114 case Expr::ArrayInitLoopExprClass:
14115 case Expr::ArrayInitIndexExprClass:
14116 case Expr::NoInitExprClass:
14117 case Expr::DesignatedInitUpdateExprClass:
14118 case Expr::ImplicitValueInitExprClass:
14119 case Expr::ParenListExprClass:
14120 case Expr::VAArgExprClass:
14121 case Expr::AddrLabelExprClass:
14122 case Expr::StmtExprClass:
14123 case Expr::CXXMemberCallExprClass:
14124 case Expr::CUDAKernelCallExprClass:
14125 case Expr::CXXDynamicCastExprClass:
14126 case Expr::CXXTypeidExprClass:
14127 case Expr::CXXUuidofExprClass:
14128 case Expr::MSPropertyRefExprClass:
14129 case Expr::MSPropertySubscriptExprClass:
14130 case Expr::CXXNullPtrLiteralExprClass:
14131 case Expr::UserDefinedLiteralClass:
14132 case Expr::CXXThisExprClass:
14133 case Expr::CXXThrowExprClass:
14134 case Expr::CXXNewExprClass:
14135 case Expr::CXXDeleteExprClass:
14136 case Expr::CXXPseudoDestructorExprClass:
14137 case Expr::UnresolvedLookupExprClass:
14138 case Expr::TypoExprClass:
14139 case Expr::DependentScopeDeclRefExprClass:
14140 case Expr::CXXConstructExprClass:
14141 case Expr::CXXInheritedCtorInitExprClass:
14142 case Expr::CXXStdInitializerListExprClass:
14143 case Expr::CXXBindTemporaryExprClass:
14144 case Expr::ExprWithCleanupsClass:
14145 case Expr::CXXTemporaryObjectExprClass:
14146 case Expr::CXXUnresolvedConstructExprClass:
14147 case Expr::CXXDependentScopeMemberExprClass:
14148 case Expr::UnresolvedMemberExprClass:
14149 case Expr::ObjCStringLiteralClass:
14150 case Expr::ObjCBoxedExprClass:
14151 case Expr::ObjCArrayLiteralClass:
14152 case Expr::ObjCDictionaryLiteralClass:
14153 case Expr::ObjCEncodeExprClass:
14154 case Expr::ObjCMessageExprClass:
14155 case Expr::ObjCSelectorExprClass:
14156 case Expr::ObjCProtocolExprClass:
14157 case Expr::ObjCIvarRefExprClass:
14158 case Expr::ObjCPropertyRefExprClass:
14159 case Expr::ObjCSubscriptRefExprClass:
14160 case Expr::ObjCIsaExprClass:
14161 case Expr::ObjCAvailabilityCheckExprClass:
14162 case Expr::ShuffleVectorExprClass:
14163 case Expr::ConvertVectorExprClass:
14164 case Expr::BlockExprClass:
14165 case Expr::NoStmtClass:
14166 case Expr::OpaqueValueExprClass:
14167 case Expr::PackExpansionExprClass:
14168 case Expr::SubstNonTypeTemplateParmPackExprClass:
14169 case Expr::FunctionParmPackExprClass:
14170 case Expr::AsTypeExprClass:
14171 case Expr::ObjCIndirectCopyRestoreExprClass:
14172 case Expr::MaterializeTemporaryExprClass:
14173 case Expr::PseudoObjectExprClass:
14174 case Expr::AtomicExprClass:
14175 case Expr::LambdaExprClass:
14176 case Expr::CXXFoldExprClass:
14177 case Expr::CoawaitExprClass:
14178 case Expr::DependentCoawaitExprClass:
14179 case Expr::CoyieldExprClass:
14180 return ICEDiag(IK_NotICE, E->getBeginLoc());
14182 case Expr::InitListExprClass: {
14183 // C++03 [dcl.init]p13: If T is a scalar type, then a declaration of the
14184 // form "T x = { a };" is equivalent to "T x = a;".
14185 // Unless we're initializing a reference, T is a scalar as it is known to be
14186 // of integral or enumeration type.
14188 if (cast<InitListExpr>(E)->getNumInits() == 1)
14189 return CheckICE(cast<InitListExpr>(E)->getInit(0), Ctx)
14190 return ICEDiag(IK_NotICE, E->getBeginLoc());
14193 case Expr::SizeOfPackExprClass:
14194 case Expr::GNUNullExprClass:
14195 case Expr::SourceLocExprClass:
// Transparent wrappers: classify the underlying expression instead.
14198 case Expr::SubstNonTypeTemplateParmExprClass:
14200 CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx);
14202 case Expr::ConstantExprClass:
14203 return CheckICE(cast<ConstantExpr>(E)->getSubExpr(), Ctx);
14205 case Expr::ParenExprClass:
14206 return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx);
14207 case Expr::GenericSelectionExprClass:
14208 return CheckICE(cast<GenericSelectionExpr>(E)->getResultExpr(), Ctx);
// Literal and trait expressions of integral type are always ICEs.
14209 case Expr::IntegerLiteralClass:
14210 case Expr::FixedPointLiteralClass:
14211 case Expr::CharacterLiteralClass:
14212 case Expr::ObjCBoolLiteralExprClass:
14213 case Expr::CXXBoolLiteralExprClass:
14214 case Expr::CXXScalarValueInitExprClass:
14215 case Expr::TypeTraitExprClass:
14216 case Expr::ConceptSpecializationExprClass:
14217 case Expr::RequiresExprClass:
14218 case Expr::ArrayTypeTraitExprClass:
14219 case Expr::ExpressionTraitExprClass:
14220 case Expr::CXXNoexceptExprClass:
14222 case Expr::CallExprClass:
14223 case Expr::CXXOperatorCallExprClass: {
14224 // C99 6.6/3 allows function calls within unevaluated subexpressions of
14225 // constant expressions, but they can never be ICEs because an ICE cannot
14226 // contain an operand of (pointer to) function type.
14227 const CallExpr *CE = cast<CallExpr>(E);
// Builtins (e.g. __builtin_constant_p) may still fold; defer to evaluation.
14228 if (CE->getBuiltinCallee())
14229 return CheckEvalInICE(E, Ctx);
14230 return ICEDiag(IK_NotICE, E->getBeginLoc());
14232 case Expr::CXXRewrittenBinaryOperatorClass:
14233 return CheckICE(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
14235 case Expr::DeclRefExprClass: {
// Enumerators are always ICEs.
14236 if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
14238 const ValueDecl *D = cast<DeclRefExpr>(E)->getDecl();
14239 if (Ctx.getLangOpts().CPlusPlus &&
14240 D && IsConstNonVolatile(D->getType())) {
14241 // Parameter variables are never constants. Without this check,
14242 // getAnyInitializer() can find a default argument, which leads
14244 if (isa<ParmVarDecl>(D))
14245 return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
14248 // A variable of non-volatile const-qualified integral or enumeration
14249 // type initialized by an ICE can be used in ICEs.
14250 if (const VarDecl *Dcl = dyn_cast<VarDecl>(D)) {
14251 if (!Dcl->getType()->isIntegralOrEnumerationType())
14252 return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
14255 // Look for a declaration of this variable that has an initializer, and
14256 // check whether it is an ICE.
14257 if (Dcl->getAnyInitializer(VD) && VD->checkInitIsICE())
14260 return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
14263 return ICEDiag(IK_NotICE, E->getBeginLoc());
14265 case Expr::UnaryOperatorClass: {
14266 const UnaryOperator *Exp = cast<UnaryOperator>(E);
14267 switch (Exp->getOpcode()) {
14275 // C99 6.6/3 allows increment and decrement within unevaluated
14276 // subexpressions of constant expressions, but they can never be ICEs
14277 // because an ICE cannot contain an lvalue operand.
14278 return ICEDiag(IK_NotICE, E->getBeginLoc());
// Remaining unary operators: the operand's classification carries over.
14286 return CheckICE(Exp->getSubExpr(), Ctx);
14288 llvm_unreachable("invalid unary operator class");
14290 case Expr::OffsetOfExprClass: {
14291 // Note that per C99, offsetof must be an ICE. And AFAIK, using
14292 // EvaluateAsRValue matches the proposed gcc behavior for cases like
14293 // "offsetof(struct s{int x[4];}, x[1.0])". This doesn't affect
14294 // compliance: we should warn earlier for offsetof expressions with
14295 // array subscripts that aren't ICEs, and if the array subscripts
14296 // are ICEs, the value of the offsetof must be an integer constant.
14297 return CheckEvalInICE(E, Ctx);
14299 case Expr::UnaryExprOrTypeTraitExprClass: {
14300 const UnaryExprOrTypeTraitExpr *Exp = cast<UnaryExprOrTypeTraitExpr>(E);
// sizeof applied to a VLA is not a constant.
14301 if ((Exp->getKind() == UETT_SizeOf) &&
14302 Exp->getTypeOfArgument()->isVariableArrayType())
14303 return ICEDiag(IK_NotICE, E->getBeginLoc());
14306 case Expr::BinaryOperatorClass: {
14307 const BinaryOperator *Exp = cast<BinaryOperator>(E);
14308 switch (Exp->getOpcode()) {
14322 // C99 6.6/3 allows assignments within unevaluated subexpressions of
14323 // constant expressions, but they can never be ICEs because an ICE cannot
14324 // contain an lvalue operand.
14325 return ICEDiag(IK_NotICE, E->getBeginLoc());
14345 ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
14346 ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
14347 if (Exp->getOpcode() == BO_Div ||
14348 Exp->getOpcode() == BO_Rem) {
14349 // EvaluateAsRValue gives an error for undefined Div/Rem, so make sure
14350 // we don't evaluate one.
14351 if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE) {
14352 llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx);
14354 return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
// INT_MIN / -1 (and %) overflows; treat as ICE only if unevaluated.
14355 if (REval.isSigned() && REval.isAllOnesValue()) {
14356 llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx);
14357 if (LEval.isMinSignedValue())
14358 return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
14362 if (Exp->getOpcode() == BO_Comma) {
14363 if (Ctx.getLangOpts().C99) {
14364 // C99 6.6p3 introduces a strange edge case: comma can be in an ICE
14365 // if it isn't evaluated.
14366 if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE)
14367 return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
14369 // In both C89 and C++, commas in ICEs are illegal.
14370 return ICEDiag(IK_NotICE, E->getBeginLoc());
14373 return Worst(LHSResult, RHSResult);
14377 ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
14378 ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
14379 if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICEIfUnevaluated) {
14380 // Rare case where the RHS has a comma "side-effect"; we need
14381 // to actually check the condition to see whether the side
14382 // with the comma is evaluated.
14383 if ((Exp->getOpcode() == BO_LAnd) !=
14384 (Exp->getLHS()->EvaluateKnownConstInt(Ctx) == 0))
14389 return Worst(LHSResult, RHSResult);
14392 llvm_unreachable("invalid binary operator kind");
14394 case Expr::ImplicitCastExprClass:
14395 case Expr::CStyleCastExprClass:
14396 case Expr::CXXFunctionalCastExprClass:
14397 case Expr::CXXStaticCastExprClass:
14398 case Expr::CXXReinterpretCastExprClass:
14399 case Expr::CXXConstCastExprClass:
14400 case Expr::ObjCBridgedCastExprClass: {
14401 const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
14402 if (isa<ExplicitCastExpr>(E)) {
14403 if (const FloatingLiteral *FL
14404 = dyn_cast<FloatingLiteral>(SubExpr->IgnoreParenImpCasts())) {
14405 unsigned DestWidth = Ctx.getIntWidth(E->getType());
14406 bool DestSigned = E->getType()->isSignedIntegerOrEnumerationType();
14407 APSInt IgnoredVal(DestWidth, !DestSigned);
14409 // If the value does not fit in the destination type, the behavior is
14410 // undefined, so we are not required to treat it as a constant
14412 if (FL->getValue().convertToInteger(IgnoredVal,
14413 llvm::APFloat::rmTowardZero,
14414 &Ignored) & APFloat::opInvalidOp)
14415 return ICEDiag(IK_NotICE, E->getBeginLoc());
// Only these cast kinds preserve ICE-ness; anything else is not an ICE.
14419 switch (cast<CastExpr>(E)->getCastKind()) {
14420 case CK_LValueToRValue:
14421 case CK_AtomicToNonAtomic:
14422 case CK_NonAtomicToAtomic:
14424 case CK_IntegralToBoolean:
14425 case CK_IntegralCast:
14426 return CheckICE(SubExpr, Ctx);
14428 return ICEDiag(IK_NotICE, E->getBeginLoc());
14431 case Expr::BinaryConditionalOperatorClass: {
14432 const BinaryConditionalOperator *Exp = cast<BinaryConditionalOperator>(E);
14433 ICEDiag CommonResult = CheckICE(Exp->getCommon(), Ctx);
14434 if (CommonResult.Kind == IK_NotICE) return CommonResult;
14435 ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
14436 if (FalseResult.Kind == IK_NotICE) return FalseResult;
14437 if (CommonResult.Kind == IK_ICEIfUnevaluated) return CommonResult;
// The false arm is only evaluated when the condition is zero.
14438 if (FalseResult.Kind == IK_ICEIfUnevaluated &&
14439 Exp->getCommon()->EvaluateKnownConstInt(Ctx) != 0) return NoDiag();
14440 return FalseResult;
14442 case Expr::ConditionalOperatorClass: {
14443 const ConditionalOperator *Exp = cast<ConditionalOperator>(E);
14444 // If the condition (ignoring parens) is a __builtin_constant_p call,
14445 // then only the true side is actually considered in an integer constant
14446 // expression, and it is fully evaluated. This is an important GNU
14447 // extension. See GCC PR38377 for discussion.
14448 if (const CallExpr *CallCE
14449 = dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
14450 if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
14451 return CheckEvalInICE(E, Ctx);
14452 ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
14453 if (CondResult.Kind == IK_NotICE)
14456 ICEDiag TrueResult = CheckICE(Exp->getTrueExpr(), Ctx);
14457 ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
14459 if (TrueResult.Kind == IK_NotICE)
14461 if (FalseResult.Kind == IK_NotICE)
14462 return FalseResult;
14463 if (CondResult.Kind == IK_ICEIfUnevaluated)
14465 if (TrueResult.Kind == IK_ICE && FalseResult.Kind == IK_ICE)
14467 // Rare case where the diagnostics depend on which side is evaluated
14468 // Note that if we get here, CondResult is 0, and at least one of
14469 // TrueResult and FalseResult is non-zero.
14470 if (Exp->getCond()->EvaluateKnownConstInt(Ctx) == 0)
14471 return FalseResult;
// Default-argument/init wrappers classify as their wrapped expression.
14474 case Expr::CXXDefaultArgExprClass:
14475 return CheckICE(cast<CXXDefaultArgExpr>(E)->getExpr(), Ctx);
14476 case Expr::CXXDefaultInitExprClass:
14477 return CheckICE(cast<CXXDefaultInitExpr>(E)->getExpr(), Ctx);
14478 case Expr::ChooseExprClass: {
14479 return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(), Ctx);
14481 case Expr::BuiltinBitCastExprClass: {
// A bit_cast that is not constexpr-eligible cannot be an ICE.
14482 if (!checkBitCastConstexprEligibility(nullptr, Ctx, cast<CastExpr>(E)))
14483 return ICEDiag(IK_NotICE, E->getBeginLoc());
14484 return CheckICE(cast<CastExpr>(E)->getSubExpr(), Ctx);
14488 llvm_unreachable("Invalid StmtClass!");
14491 /// Evaluate an expression as a C++11 integral constant expression.
// On failure, the problem location is reported through \p Loc (if non-null);
// on success, the evaluated integer is stored through \p Value (if non-null).
14492 static bool EvaluateCPlusPlus11IntegralConstantExpr(const ASTContext &Ctx,
14494 llvm::APSInt *Value,
14495 SourceLocation *Loc) {
// Must at least be of integral or unscoped-enumeration type.
14496 if (!E->getType()->isIntegralOrUnscopedEnumerationType()) {
14497 if (Loc) *Loc = E->getExprLoc();
// Delegate the actual constant-expression check to isCXX11ConstantExpr.
14502 if (!E->isCXX11ConstantExpr(Ctx, &Result, Loc))
14505 if (!Result.isInt()) {
14506 if (Loc) *Loc = E->getExprLoc();
14510 if (Value) *Value = Result.getInt();
// Query-only ICE check: reports a failure location via \p Loc but does not
// produce the evaluated value. C++11 and later use the evaluator; earlier
// modes use the syntactic CheckICE classification.
14514 bool Expr::isIntegerConstantExpr(const ASTContext &Ctx,
14515 SourceLocation *Loc) const {
14516 assert(!isValueDependent() &&
14517 "Expression evaluator can't be called on a dependent expression.");
14519 if (Ctx.getLangOpts().CPlusPlus11)
14520 return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, nullptr, Loc);
14522 ICEDiag D = CheckICE(this, Ctx);
14523 if (D.Kind != IK_ICE) {
14524 if (Loc) *Loc = D.Loc;
// ICE check that also yields the evaluated value in \p Value.
14530 bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, const ASTContext &Ctx,
14531 SourceLocation *Loc, bool isEvaluated) const {
14532 assert(!isValueDependent() &&
14533 "Expression evaluator can't be called on a dependent expression.");
14535 if (Ctx.getLangOpts().CPlusPlus11)
14536 return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc);
// First establish ICE-ness syntactically; then fold to get the value.
14538 if (!isIntegerConstantExpr(Ctx, Loc))
14541 // The only possible side-effects here are due to UB discovered in the
14542 // evaluation (for instance, INT_MAX + 1). In such a case, we are still
14543 // required to treat the expression as an ICE, so we produce the folded
14545 EvalResult ExprResult;
14546 Expr::EvalStatus Status;
14547 EvalInfo Info(Ctx, Status, EvalInfo::EM_IgnoreSideEffects);
14548 Info.InConstantContext = true;
// A verified ICE must always be evaluatable; failure here is a bug.
14550 if (!::EvaluateAsInt(this, ExprResult, Ctx, SE_AllowSideEffects, Info))
14551 llvm_unreachable("ICE cannot be evaluated!");
14553 Value = ExprResult.Val.getInt();
// Strict C++98-style ICE check: purely the syntactic CheckICE classification,
// with no fallback to the C++11 evaluator.
14557 bool Expr::isCXX98IntegralConstantExpr(const ASTContext &Ctx) const {
14558 assert(!isValueDependent() &&
14559 "Expression evaluator can't be called on a dependent expression.");
14561 return CheckICE(this, Ctx).Kind == IK_ICE;
// Check whether this expression is a C++11 constant expression; optionally
// returns the evaluated value via \p Result and a failure location via \p Loc.
14564 bool Expr::isCXX11ConstantExpr(const ASTContext &Ctx, APValue *Result,
14565 SourceLocation *Loc) const {
14566 assert(!isValueDependent() &&
14567 "Expression evaluator can't be called on a dependent expression.");
14569 // We support this checking in C++98 mode in order to diagnose compatibility
14571 assert(Ctx.getLangOpts().CPlusPlus);
14573 // Build evaluation settings.
14574 Expr::EvalStatus Status;
// Collect diagnostics locally: any note produced means "not constant".
14575 SmallVector<PartialDiagnosticAt, 8> Diags;
14576 Status.Diag = &Diags;
14577 EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpression);
14581 ::EvaluateAsRValue(Info, this, Result ? *Result : Scratch) &&
14582 // FIXME: We don't produce a diagnostic for this, but the callers that
14583 // call us on arbitrary full-expressions should generally not care.
14584 Info.discardCleanups() && !Status.HasSideEffects;
// Prefer the first diagnostic's location as the reported failure point.
14586 if (!Diags.empty()) {
14587 IsConstExpr = false;
14588 if (Loc) *Loc = Diags[0].first;
14589 } else if (!IsConstExpr) {
14590 // FIXME: This shouldn't happen.
14591 if (Loc) *Loc = getExprLoc();
14594 return IsConstExpr;
// Evaluate this expression as if it appeared in a call to \p Callee with the
// given arguments (and optional `this` object), by fabricating a call stack
// frame. Used e.g. for enable_if-style condition checking. Failed argument
// evaluations are discarded rather than aborting the whole check.
14597 bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
14598 const FunctionDecl *Callee,
14599 ArrayRef<const Expr*> Args,
14600 const Expr *This) const {
14601 assert(!isValueDependent() &&
14602 "Expression evaluator can't be called on a dependent expression.");
14604 Expr::EvalStatus Status;
14605 EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpressionUnevaluated);
14606 Info.InConstantContext = true;
14609 const LValue *ThisPtr = nullptr;
// `this` may only be supplied for non-static member functions.
14612 auto *MD = dyn_cast<CXXMethodDecl>(Callee);
14613 assert(MD && "Don't provide `this` for non-methods.");
14614 assert(!MD->isStatic() && "Don't provide `this` for static methods.");
14616 if (!This->isValueDependent() &&
14617 EvaluateObjectArgument(Info, This, ThisVal) &&
14618 !Info.EvalStatus.HasSideEffects)
14619 ThisPtr = &ThisVal;
14621 // Ignore any side-effects from a failed evaluation. This is safe because
14622 // they can't interfere with any other argument evaluation.
14623 Info.EvalStatus.HasSideEffects = false;
14626 ArgVector ArgValues(Args.size());
14627 for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
14629 if ((*I)->isValueDependent() ||
14630 !Evaluate(ArgValues[I - Args.begin()], Info, *I) ||
14631 Info.EvalStatus.HasSideEffects)
14632 // If evaluation fails, throw away the argument entirely.
14633 ArgValues[I - Args.begin()] = APValue();
14635 // Ignore any side-effects from a failed evaluation. This is safe because
14636 // they can't interfere with any other argument evaluation.
14637 Info.EvalStatus.HasSideEffects = false;
14640 // Parameter cleanups happen in the caller and are not part of this
14642 Info.discardCleanups();
14643 Info.EvalStatus.HasSideEffects = false;
14645 // Build fake call to Callee.
14646 CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr,
14648 // FIXME: Missing ExprWithCleanups in enable_if conditions?
14649 FullExpressionRAII Scope(Info);
// Success requires a clean evaluation, scope teardown, and no side effects.
14650 return Evaluate(Value, Info, this) && Scope.destroy() &&
14651 !Info.EvalStatus.HasSideEffects;
// Check whether \p FD could plausibly be usable in a constant expression for
// some argument values: evaluate its body with a fabricated `this` object and
// no arguments, and report success iff no diagnostics were produced.
14654 bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
14656 PartialDiagnosticAt> &Diags) {
14657 // FIXME: It would be useful to check constexpr function templates, but at the
14658 // moment the constant expression evaluator cannot cope with the non-rigorous
14659 // ASTs which we build for dependent expressions.
14660 if (FD->isDependentContext())
14663 Expr::EvalStatus Status;
14664 Status.Diag = &Diags;
14666 EvalInfo Info(FD->getASTContext(), Status, EvalInfo::EM_ConstantExpression);
14667 Info.InConstantContext = true;
14668 Info.CheckingPotentialConstantExpression = true;
14670 // The constexpr VM attempts to compile all methods to bytecode here.
14671 if (Info.EnableNewConstInterp) {
14672 Info.Ctx.getInterpContext().isPotentialConstantExpr(Info, FD);
14673 return Diags.empty();
14676 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
14677 const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : nullptr;
14679 // Fabricate an arbitrary expression on the stack and pretend that it
14680 // is a temporary being used as the 'this' pointer.
14682 ImplicitValueInitExpr VIE(RD ? Info.Ctx.getRecordType(RD) : Info.Ctx.IntTy);
14683 This.set({&VIE, Info.CurrentCall->Index});
// Empty argument list: potential-constant checking supplies no arguments.
14685 ArrayRef<const Expr*> Args;
14688 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
14689 // Evaluate the call as a constant initializer, to allow the construction
14690 // of objects of non-literal types.
14691 Info.setEvaluatingDecl(This.getLValueBase(), Scratch);
14692 HandleConstructorCall(&VIE, This, Args, CD, Info, Scratch);
14694 SourceLocation Loc = FD->getLocation();
// Instance methods get the fabricated `this`; free functions get nullptr.
14695 HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? &This : nullptr,
14696 Args, FD->getBody(), Info, Scratch, nullptr);
// "Potentially constant" == the dry run produced no diagnostics.
14699 return Diags.empty();
// Check whether \p E — an expression appearing in an unevaluated context of
// \p FD (e.g. a noexcept or enable_if condition) — could be a constant
// expression for some argument values. Succeeds iff no diagnostics result.
14702 bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
14703 const FunctionDecl *FD,
14705 PartialDiagnosticAt> &Diags) {
14706 assert(!E->isValueDependent() &&
14707 "Expression evaluator can't be called on a dependent expression.");
14709 Expr::EvalStatus Status;
14710 Status.Diag = &Diags;
14712 EvalInfo Info(FD->getASTContext(), Status,
14713 EvalInfo::EM_ConstantExpressionUnevaluated);
14714 Info.InConstantContext = true;
14715 Info.CheckingPotentialConstantExpression = true;
14717 // Fabricate a call stack frame to give the arguments a plausible cover story.
14718 ArrayRef<const Expr*> Args;
14719 ArgVector ArgValues(0);
14720 bool Success = EvaluateArgs(Args, ArgValues, Info, FD);
14723 "Failed to set up arguments for potential constant evaluation");
14724 CallStackFrame Frame(Info, SourceLocation(), FD, nullptr, ArgValues.data());
// The evaluation result is discarded; only emitted diagnostics matter.
14726 APValue ResultScratch;
14727 Evaluate(ResultScratch, Info, E);
14728 return Diags.empty();
14731 bool Expr::tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx,
14732 unsigned Type) const {
14733 if (!getType()->isPointerType())
14736 Expr::EvalStatus Status;
14737 EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
14738 return tryEvaluateBuiltinObjectSize(this, Type, Info, Result);