1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements extra semantic analysis beyond what is enforced
10 // by the C type system.
12 //===----------------------------------------------------------------------===//
14 #include "clang/AST/APValue.h"
15 #include "clang/AST/ASTContext.h"
16 #include "clang/AST/Attr.h"
17 #include "clang/AST/AttrIterator.h"
18 #include "clang/AST/CharUnits.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/DeclBase.h"
21 #include "clang/AST/DeclCXX.h"
22 #include "clang/AST/DeclObjC.h"
23 #include "clang/AST/DeclarationName.h"
24 #include "clang/AST/EvaluatedExprVisitor.h"
25 #include "clang/AST/Expr.h"
26 #include "clang/AST/ExprCXX.h"
27 #include "clang/AST/ExprObjC.h"
28 #include "clang/AST/ExprOpenMP.h"
29 #include "clang/AST/FormatString.h"
30 #include "clang/AST/NSAPI.h"
31 #include "clang/AST/NonTrivialTypeVisitor.h"
32 #include "clang/AST/OperationKinds.h"
33 #include "clang/AST/Stmt.h"
34 #include "clang/AST/TemplateBase.h"
35 #include "clang/AST/Type.h"
36 #include "clang/AST/TypeLoc.h"
37 #include "clang/AST/UnresolvedSet.h"
38 #include "clang/Basic/AddressSpaces.h"
39 #include "clang/Basic/CharInfo.h"
40 #include "clang/Basic/Diagnostic.h"
41 #include "clang/Basic/IdentifierTable.h"
42 #include "clang/Basic/LLVM.h"
43 #include "clang/Basic/LangOptions.h"
44 #include "clang/Basic/OpenCLOptions.h"
45 #include "clang/Basic/OperatorKinds.h"
46 #include "clang/Basic/PartialDiagnostic.h"
47 #include "clang/Basic/SourceLocation.h"
48 #include "clang/Basic/SourceManager.h"
49 #include "clang/Basic/Specifiers.h"
50 #include "clang/Basic/SyncScope.h"
51 #include "clang/Basic/TargetBuiltins.h"
52 #include "clang/Basic/TargetCXXABI.h"
53 #include "clang/Basic/TargetInfo.h"
54 #include "clang/Basic/TypeTraits.h"
55 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
56 #include "clang/Sema/Initialization.h"
57 #include "clang/Sema/Lookup.h"
58 #include "clang/Sema/Ownership.h"
59 #include "clang/Sema/Scope.h"
60 #include "clang/Sema/ScopeInfo.h"
61 #include "clang/Sema/Sema.h"
62 #include "clang/Sema/SemaInternal.h"
63 #include "llvm/ADT/APFloat.h"
64 #include "llvm/ADT/APInt.h"
65 #include "llvm/ADT/APSInt.h"
66 #include "llvm/ADT/ArrayRef.h"
67 #include "llvm/ADT/DenseMap.h"
68 #include "llvm/ADT/FoldingSet.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallBitVector.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallString.h"
75 #include "llvm/ADT/SmallVector.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/StringSwitch.h"
78 #include "llvm/ADT/Triple.h"
79 #include "llvm/Support/AtomicOrdering.h"
80 #include "llvm/Support/Casting.h"
81 #include "llvm/Support/Compiler.h"
82 #include "llvm/Support/ConvertUTF.h"
83 #include "llvm/Support/ErrorHandling.h"
84 #include "llvm/Support/Format.h"
85 #include "llvm/Support/Locale.h"
86 #include "llvm/Support/MathExtras.h"
87 #include "llvm/Support/SaveAndRestore.h"
88 #include "llvm/Support/raw_ostream.h"
99 using namespace clang;
100 using namespace sema;
102 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
103 unsigned ByteNo) const {
104 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
105 Context.getTargetInfo());
108 /// Checks that a call expression's argument count is the desired number.
109 /// This is useful when doing custom type-checking. Returns true on error.
110 static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
111 unsigned argCount = call->getNumArgs();
112 if (argCount == desiredArgCount) return false;
114 if (argCount < desiredArgCount)
115 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
116 << 0 /*function call*/ << desiredArgCount << argCount
117 << call->getSourceRange();
119 // Highlight all the excess arguments.
120 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
121 call->getArg(argCount - 1)->getEndLoc());
123 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
124 << 0 /*function call*/ << desiredArgCount << argCount
125 << call->getArg(1)->getSourceRange();
128 /// Check that the first argument to __builtin_annotation is an integer
129 /// and the second argument is a non-wide string literal.
130 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
131 if (checkArgCount(S, TheCall, 2))
134 // First argument should be an integer.
135 Expr *ValArg = TheCall->getArg(0);
136 QualType Ty = ValArg->getType();
137 if (!Ty->isIntegerType()) {
138 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
139 << ValArg->getSourceRange();
143 // Second argument should be a constant string.
144 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
145 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
146 if (!Literal || !Literal->isAscii()) {
147 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
148 << StrArg->getSourceRange();
152 TheCall->setType(Ty);
156 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
157 // We need at least one argument.
158 if (TheCall->getNumArgs() < 1) {
159 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
160 << 0 << 1 << TheCall->getNumArgs()
161 << TheCall->getCallee()->getSourceRange();
165 // All arguments should be wide string literals.
166 for (Expr *Arg : TheCall->arguments()) {
167 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
168 if (!Literal || !Literal->isWide()) {
169 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
170 << Arg->getSourceRange();
178 /// Check that the argument to __builtin_addressof is a glvalue, and set the
179 /// result type to the corresponding pointer type.
180 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
181 if (checkArgCount(S, TheCall, 1))
184 ExprResult Arg(TheCall->getArg(0));
185 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
186 if (ResultType.isNull())
189 TheCall->setArg(0, Arg.get());
190 TheCall->setType(ResultType);
194 /// Check the number of arguments, and set the result type to
195 /// the argument type.
196 static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
197 if (checkArgCount(S, TheCall, 1))
200 TheCall->setType(TheCall->getArg(0)->getType());
204 static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
205 if (checkArgCount(S, TheCall, 3))
208 // First two arguments should be integers.
209 for (unsigned I = 0; I < 2; ++I) {
210 ExprResult Arg = TheCall->getArg(I);
211 QualType Ty = Arg.get()->getType();
212 if (!Ty->isIntegerType()) {
213 S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
214 << Ty << Arg.get()->getSourceRange();
217 InitializedEntity Entity = InitializedEntity::InitializeParameter(
218 S.getASTContext(), Ty, /*consume*/ false);
219 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
222 TheCall->setArg(I, Arg.get());
225 // Third argument should be a pointer to a non-const integer.
226 // IRGen correctly handles volatile, restrict, and address spaces, and
227 // the other qualifiers aren't possible.
229 ExprResult Arg = TheCall->getArg(2);
230 QualType Ty = Arg.get()->getType();
231 const auto *PtrTy = Ty->getAs<PointerType>();
232 if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
233 !PtrTy->getPointeeType().isConstQualified())) {
234 S.Diag(Arg.get()->getBeginLoc(),
235 diag::err_overflow_builtin_must_be_ptr_int)
236 << Ty << Arg.get()->getSourceRange();
239 InitializedEntity Entity = InitializedEntity::InitializeParameter(
240 S.getASTContext(), Ty, /*consume*/ false);
241 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
244 TheCall->setArg(2, Arg.get());
249 static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
250 if (checkArgCount(S, BuiltinCall, 2))
253 SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
254 Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
255 Expr *Call = BuiltinCall->getArg(0);
256 Expr *Chain = BuiltinCall->getArg(1);
258 if (Call->getStmtClass() != Stmt::CallExprClass) {
259 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
260 << Call->getSourceRange();
264 auto CE = cast<CallExpr>(Call);
265 if (CE->getCallee()->getType()->isBlockPointerType()) {
266 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
267 << Call->getSourceRange();
271 const Decl *TargetDecl = CE->getCalleeDecl();
272 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
273 if (FD->getBuiltinID()) {
274 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
275 << Call->getSourceRange();
279 if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
280 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
281 << Call->getSourceRange();
285 ExprResult ChainResult = S.UsualUnaryConversions(Chain);
286 if (ChainResult.isInvalid())
288 if (!ChainResult.get()->getType()->isPointerType()) {
289 S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
290 << Chain->getSourceRange();
294 QualType ReturnTy = CE->getCallReturnType(S.Context);
295 QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
296 QualType BuiltinTy = S.Context.getFunctionType(
297 ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
298 QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);
301 S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();
303 BuiltinCall->setType(CE->getType());
304 BuiltinCall->setValueKind(CE->getValueKind());
305 BuiltinCall->setObjectKind(CE->getObjectKind());
306 BuiltinCall->setCallee(Builtin);
307 BuiltinCall->setArg(1, ChainResult.get());
312 /// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
313 /// __builtin_*_chk function, then use the object size argument specified in the
314 /// source. Otherwise, infer the object size using __builtin_object_size.
315 void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
317 // FIXME: There are some more useful checks we could be doing here:
318 // - Analyze the format string of sprintf to see how much of buffer is used.
319 // - Evaluate strlen of strcpy arguments, use as object size.
321 if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
322 isConstantEvaluated())
325 unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
330 bool IsChkVariant = false;
331 unsigned SizeIndex, ObjectIndex;
335 case Builtin::BI__builtin___memcpy_chk:
336 case Builtin::BI__builtin___memmove_chk:
337 case Builtin::BI__builtin___memset_chk:
338 case Builtin::BI__builtin___strlcat_chk:
339 case Builtin::BI__builtin___strlcpy_chk:
340 case Builtin::BI__builtin___strncat_chk:
341 case Builtin::BI__builtin___strncpy_chk:
342 case Builtin::BI__builtin___stpncpy_chk:
343 case Builtin::BI__builtin___memccpy_chk: {
344 DiagID = diag::warn_builtin_chk_overflow;
346 SizeIndex = TheCall->getNumArgs() - 2;
347 ObjectIndex = TheCall->getNumArgs() - 1;
351 case Builtin::BI__builtin___snprintf_chk:
352 case Builtin::BI__builtin___vsnprintf_chk: {
353 DiagID = diag::warn_builtin_chk_overflow;
360 case Builtin::BIstrncat:
361 case Builtin::BI__builtin_strncat:
362 case Builtin::BIstrncpy:
363 case Builtin::BI__builtin_strncpy:
364 case Builtin::BIstpncpy:
365 case Builtin::BI__builtin_stpncpy: {
366 // Whether these functions overflow depends on the runtime strlen of the
367 // string, not just the buffer size, so emitting the "always overflow"
368 // diagnostic isn't quite right. We should still diagnose passing a buffer
369 // size larger than the destination buffer though; this is a runtime abort
370 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
371 DiagID = diag::warn_fortify_source_size_mismatch;
372 SizeIndex = TheCall->getNumArgs() - 1;
377 case Builtin::BImemcpy:
378 case Builtin::BI__builtin_memcpy:
379 case Builtin::BImemmove:
380 case Builtin::BI__builtin_memmove:
381 case Builtin::BImemset:
382 case Builtin::BI__builtin_memset: {
383 DiagID = diag::warn_fortify_source_overflow;
384 SizeIndex = TheCall->getNumArgs() - 1;
388 case Builtin::BIsnprintf:
389 case Builtin::BI__builtin_snprintf:
390 case Builtin::BIvsnprintf:
391 case Builtin::BI__builtin_vsnprintf: {
392 DiagID = diag::warn_fortify_source_size_mismatch;
399 llvm::APSInt ObjectSize;
400 // For __builtin___*_chk, the object size is explicitly provided by the caller
401 // (usually using __builtin_object_size). Use that value to check this call.
403 Expr::EvalResult Result;
404 Expr *SizeArg = TheCall->getArg(ObjectIndex);
405 if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
407 ObjectSize = Result.Val.getInt();
409 // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
411 // If the parameter has a pass_object_size attribute, then we should use its
412 // (potentially) more strict checking mode. Otherwise, conservatively assume
415 if (const auto *POS =
416 FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
417 BOSType = POS->getType();
419 Expr *ObjArg = TheCall->getArg(ObjectIndex);
421 if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
423 // Get the object size in the target's size_t width.
424 const TargetInfo &TI = getASTContext().getTargetInfo();
425 unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
426 ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
429 // Evaluate the number of bytes of the object that this call will use.
430 Expr::EvalResult Result;
431 Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
432 if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
434 llvm::APSInt UsedSize = Result.Val.getInt();
436 if (UsedSize.ule(ObjectSize))
439 StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
440 // Skim off the details of whichever builtin was called to produce a better
441 // diagnostic, as it's unlikley that the user wrote the __builtin explicitly.
443 FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
444 FunctionName = FunctionName.drop_back(std::strlen("_chk"));
445 } else if (FunctionName.startswith("__builtin_")) {
446 FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
449 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
451 << FunctionName << ObjectSize.toString(/*Radix=*/10)
452 << UsedSize.toString(/*Radix=*/10));
455 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
456 Scope::ScopeFlags NeededScopeFlags,
458 // Scopes aren't available during instantiation. Fortunately, builtin
459 // functions cannot be template args so they cannot be formed through template
460 // instantiation. Therefore checking once during the parse is sufficient.
461 if (SemaRef.inTemplateInstantiation())
464 Scope *S = SemaRef.getCurScope();
465 while (S && !S->isSEHExceptScope())
467 if (!S || !(S->getFlags() & NeededScopeFlags)) {
468 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
469 SemaRef.Diag(TheCall->getExprLoc(), DiagID)
470 << DRE->getDecl()->getIdentifier();
477 static inline bool isBlockPointer(Expr *Arg) {
478 return Arg->getType()->isBlockPointerType();
481 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
482 /// void*, which is a requirement of device side enqueue.
483 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
484 const BlockPointerType *BPT =
485 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
486 ArrayRef<QualType> Params =
487 BPT->getPointeeType()->getAs<FunctionProtoType>()->getParamTypes();
488 unsigned ArgCounter = 0;
489 bool IllegalParams = false;
490 // Iterate through the block parameters until either one is found that is not
491 // a local void*, or the block is valid.
492 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
493 I != E; ++I, ++ArgCounter) {
494 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
495 (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
496 LangAS::opencl_local) {
497 // Get the location of the error. If a block literal has been passed
498 // (BlockExpr) then we can point straight to the offending argument,
499 // else we just point to the variable reference.
500 SourceLocation ErrorLoc;
501 if (isa<BlockExpr>(BlockArg)) {
502 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
503 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
504 } else if (isa<DeclRefExpr>(BlockArg)) {
505 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
508 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
509 IllegalParams = true;
513 return IllegalParams;
516 static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
517 if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
518 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
519 << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
525 static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
526 if (checkArgCount(S, TheCall, 2))
529 if (checkOpenCLSubgroupExt(S, TheCall))
532 // First argument is an ndrange_t type.
533 Expr *NDRangeArg = TheCall->getArg(0);
534 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
535 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
536 << TheCall->getDirectCallee() << "'ndrange_t'";
540 Expr *BlockArg = TheCall->getArg(1);
541 if (!isBlockPointer(BlockArg)) {
542 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
543 << TheCall->getDirectCallee() << "block";
546 return checkOpenCLBlockArgs(S, BlockArg);
549 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
550 /// get_kernel_work_group_size
551 /// and get_kernel_preferred_work_group_size_multiple builtin functions.
552 static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
553 if (checkArgCount(S, TheCall, 1))
556 Expr *BlockArg = TheCall->getArg(0);
557 if (!isBlockPointer(BlockArg)) {
558 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
559 << TheCall->getDirectCallee() << "block";
562 return checkOpenCLBlockArgs(S, BlockArg);
565 /// Diagnose integer type and any valid implicit conversion to it.
566 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
567 const QualType &IntType);
569 static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
570 unsigned Start, unsigned End) {
571 bool IllegalParams = false;
572 for (unsigned I = Start; I <= End; ++I)
573 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
574 S.Context.getSizeType());
575 return IllegalParams;
578 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
579 /// 'local void*' parameter of passed block.
580 static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
582 unsigned NumNonVarArgs) {
583 const BlockPointerType *BPT =
584 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
585 unsigned NumBlockParams =
586 BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams();
587 unsigned TotalNumArgs = TheCall->getNumArgs();
589 // For each argument passed to the block, a corresponding uint needs to
590 // be passed to describe the size of the local memory.
591 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
592 S.Diag(TheCall->getBeginLoc(),
593 diag::err_opencl_enqueue_kernel_local_size_args);
597 // Check that the sizes of the local memory are specified by integers.
598 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
602 /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
603 /// overload formats specified in Table 6.13.17.1.
604 /// int enqueue_kernel(queue_t queue,
605 /// kernel_enqueue_flags_t flags,
606 /// const ndrange_t ndrange,
607 /// void (^block)(void))
608 /// int enqueue_kernel(queue_t queue,
609 /// kernel_enqueue_flags_t flags,
610 /// const ndrange_t ndrange,
611 /// uint num_events_in_wait_list,
612 /// clk_event_t *event_wait_list,
613 /// clk_event_t *event_ret,
614 /// void (^block)(void))
615 /// int enqueue_kernel(queue_t queue,
616 /// kernel_enqueue_flags_t flags,
617 /// const ndrange_t ndrange,
618 /// void (^block)(local void*, ...),
620 /// int enqueue_kernel(queue_t queue,
621 /// kernel_enqueue_flags_t flags,
622 /// const ndrange_t ndrange,
623 /// uint num_events_in_wait_list,
624 /// clk_event_t *event_wait_list,
625 /// clk_event_t *event_ret,
626 /// void (^block)(local void*, ...),
628 static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
629 unsigned NumArgs = TheCall->getNumArgs();
632 S.Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_few_args);
636 Expr *Arg0 = TheCall->getArg(0);
637 Expr *Arg1 = TheCall->getArg(1);
638 Expr *Arg2 = TheCall->getArg(2);
639 Expr *Arg3 = TheCall->getArg(3);
641 // First argument always needs to be a queue_t type.
642 if (!Arg0->getType()->isQueueT()) {
643 S.Diag(TheCall->getArg(0)->getBeginLoc(),
644 diag::err_opencl_builtin_expected_type)
645 << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
649 // Second argument always needs to be a kernel_enqueue_flags_t enum value.
650 if (!Arg1->getType()->isIntegerType()) {
651 S.Diag(TheCall->getArg(1)->getBeginLoc(),
652 diag::err_opencl_builtin_expected_type)
653 << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
657 // Third argument is always an ndrange_t type.
658 if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
659 S.Diag(TheCall->getArg(2)->getBeginLoc(),
660 diag::err_opencl_builtin_expected_type)
661 << TheCall->getDirectCallee() << "'ndrange_t'";
665 // With four arguments, there is only one form that the function could be
666 // called in: no events and no variable arguments.
668 // check that the last argument is the right block type.
669 if (!isBlockPointer(Arg3)) {
670 S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
671 << TheCall->getDirectCallee() << "block";
674 // we have a block type, check the prototype
675 const BlockPointerType *BPT =
676 cast<BlockPointerType>(Arg3->getType().getCanonicalType());
677 if (BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams() > 0) {
678 S.Diag(Arg3->getBeginLoc(),
679 diag::err_opencl_enqueue_kernel_blocks_no_args);
684 // we can have block + varargs.
685 if (isBlockPointer(Arg3))
686 return (checkOpenCLBlockArgs(S, Arg3) ||
687 checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
688 // last two cases with either exactly 7 args or 7 args and varargs.
690 // check common block argument.
691 Expr *Arg6 = TheCall->getArg(6);
692 if (!isBlockPointer(Arg6)) {
693 S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
694 << TheCall->getDirectCallee() << "block";
697 if (checkOpenCLBlockArgs(S, Arg6))
700 // Forth argument has to be any integer type.
701 if (!Arg3->getType()->isIntegerType()) {
702 S.Diag(TheCall->getArg(3)->getBeginLoc(),
703 diag::err_opencl_builtin_expected_type)
704 << TheCall->getDirectCallee() << "integer";
707 // check remaining common arguments.
708 Expr *Arg4 = TheCall->getArg(4);
709 Expr *Arg5 = TheCall->getArg(5);
711 // Fifth argument is always passed as a pointer to clk_event_t.
712 if (!Arg4->isNullPointerConstant(S.Context,
713 Expr::NPC_ValueDependentIsNotNull) &&
714 !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
715 S.Diag(TheCall->getArg(4)->getBeginLoc(),
716 diag::err_opencl_builtin_expected_type)
717 << TheCall->getDirectCallee()
718 << S.Context.getPointerType(S.Context.OCLClkEventTy);
722 // Sixth argument is always passed as a pointer to clk_event_t.
723 if (!Arg5->isNullPointerConstant(S.Context,
724 Expr::NPC_ValueDependentIsNotNull) &&
725 !(Arg5->getType()->isPointerType() &&
726 Arg5->getType()->getPointeeType()->isClkEventT())) {
727 S.Diag(TheCall->getArg(5)->getBeginLoc(),
728 diag::err_opencl_builtin_expected_type)
729 << TheCall->getDirectCallee()
730 << S.Context.getPointerType(S.Context.OCLClkEventTy);
737 return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
740 // None of the specific case has been detected, give generic error
741 S.Diag(TheCall->getBeginLoc(),
742 diag::err_opencl_enqueue_kernel_incorrect_args);
746 /// Returns OpenCL access qual.
747 static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
748 return D->getAttr<OpenCLAccessAttr>();
751 /// Returns true if pipe element type is different from the pointer.
752 static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
753 const Expr *Arg0 = Call->getArg(0);
754 // First argument type should always be pipe.
755 if (!Arg0->getType()->isPipeType()) {
756 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
757 << Call->getDirectCallee() << Arg0->getSourceRange();
760 OpenCLAccessAttr *AccessQual =
761 getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
762 // Validates the access qualifier is compatible with the call.
763 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
764 // read_only and write_only, and assumed to be read_only if no qualifier is
766 switch (Call->getDirectCallee()->getBuiltinID()) {
767 case Builtin::BIread_pipe:
768 case Builtin::BIreserve_read_pipe:
769 case Builtin::BIcommit_read_pipe:
770 case Builtin::BIwork_group_reserve_read_pipe:
771 case Builtin::BIsub_group_reserve_read_pipe:
772 case Builtin::BIwork_group_commit_read_pipe:
773 case Builtin::BIsub_group_commit_read_pipe:
774 if (!(!AccessQual || AccessQual->isReadOnly())) {
775 S.Diag(Arg0->getBeginLoc(),
776 diag::err_opencl_builtin_pipe_invalid_access_modifier)
777 << "read_only" << Arg0->getSourceRange();
781 case Builtin::BIwrite_pipe:
782 case Builtin::BIreserve_write_pipe:
783 case Builtin::BIcommit_write_pipe:
784 case Builtin::BIwork_group_reserve_write_pipe:
785 case Builtin::BIsub_group_reserve_write_pipe:
786 case Builtin::BIwork_group_commit_write_pipe:
787 case Builtin::BIsub_group_commit_write_pipe:
788 if (!(AccessQual && AccessQual->isWriteOnly())) {
789 S.Diag(Arg0->getBeginLoc(),
790 diag::err_opencl_builtin_pipe_invalid_access_modifier)
791 << "write_only" << Arg0->getSourceRange();
801 /// Returns true if pipe element type is different from the pointer.
802 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
803 const Expr *Arg0 = Call->getArg(0);
804 const Expr *ArgIdx = Call->getArg(Idx);
805 const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
806 const QualType EltTy = PipeTy->getElementType();
807 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
808 // The Idx argument should be a pointer and the type of the pointer and
809 // the type of pipe element should also be the same.
811 !S.Context.hasSameType(
812 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
813 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
814 << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
815 << ArgIdx->getType() << ArgIdx->getSourceRange();
821 // Performs semantic analysis for the read/write_pipe call.
822 // \param S Reference to the semantic analyzer.
823 // \param Call A pointer to the builtin call.
824 // \return True if a semantic error has been found, false otherwise.
825 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
826 // OpenCL v2.0 s6.13.16.2 - The built-in read/write
827 // functions have two forms.
828 switch (Call->getNumArgs()) {
830 if (checkOpenCLPipeArg(S, Call))
832 // The call with 2 arguments should be
833 // read/write_pipe(pipe T, T*).
834 // Check packet type T.
835 if (checkOpenCLPipePacketType(S, Call, 1))
840 if (checkOpenCLPipeArg(S, Call))
842 // The call with 4 arguments should be
843 // read/write_pipe(pipe T, reserve_id_t, uint, T*).
844 // Check reserve_id_t.
845 if (!Call->getArg(1)->getType()->isReserveIDT()) {
846 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
847 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
848 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
853 const Expr *Arg2 = Call->getArg(2);
854 if (!Arg2->getType()->isIntegerType() &&
855 !Arg2->getType()->isUnsignedIntegerType()) {
856 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
857 << Call->getDirectCallee() << S.Context.UnsignedIntTy
858 << Arg2->getType() << Arg2->getSourceRange();
862 // Check packet type T.
863 if (checkOpenCLPipePacketType(S, Call, 3))
867 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
868 << Call->getDirectCallee() << Call->getSourceRange();
875 // Performs a semantic analysis on the {work_group_/sub_group_
876 // /_}reserve_{read/write}_pipe
877 // \param S Reference to the semantic analyzer.
878 // \param Call The call to the builtin function to be analyzed.
879 // \return True if a semantic error was found, false otherwise.
880 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
881 if (checkArgCount(S, Call, 2))
884 if (checkOpenCLPipeArg(S, Call))
887 // Check the reserve size.
888 if (!Call->getArg(1)->getType()->isIntegerType() &&
889 !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
890 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
891 << Call->getDirectCallee() << S.Context.UnsignedIntTy
892 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
896 // Since return type of reserve_read/write_pipe built-in function is
897 // reserve_id_t, which is not defined in the builtin def file , we used int
898 // as return type and need to override the return type of these functions.
899 Call->setType(S.Context.OCLReserveIDTy);
904 // Performs a semantic analysis on {work_group_/sub_group_
905 // /_}commit_{read/write}_pipe
906 // \param S Reference to the semantic analyzer.
907 // \param Call The call to the builtin function to be analyzed.
908 // \return True if a semantic error was found, false otherwise.
909 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
910 if (checkArgCount(S, Call, 2))
913 if (checkOpenCLPipeArg(S, Call))
916 // Check reserve_id_t.
917 if (!Call->getArg(1)->getType()->isReserveIDT()) {
918 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
919 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
920 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
927 // Performs a semantic analysis on the call to built-in Pipe
929 // \param S Reference to the semantic analyzer.
930 // \param Call The call to the builtin function to be analyzed.
931 // \return True if a semantic error was found, false otherwise.
932 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
933 if (checkArgCount(S, Call, 1))
936 if (!Call->getArg(0)->getType()->isPipeType()) {
937 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
938 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
945 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
946 // Performs semantic analysis for the to_global/local/private call.
947 // \param S Reference to the semantic analyzer.
948 // \param BuiltinID ID of the builtin function.
949 // \param Call A pointer to the builtin call.
950 // \return True if a semantic error has been found, false otherwise.
951 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
953 if (Call->getNumArgs() != 1) {
954 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num)
955 << Call->getDirectCallee() << Call->getSourceRange();
959 auto RT = Call->getArg(0)->getType();
960 if (!RT->isPointerType() || RT->getPointeeType()
961 .getAddressSpace() == LangAS::opencl_constant) {
962 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
963 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
967 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
968 S.Diag(Call->getArg(0)->getBeginLoc(),
969 diag::warn_opencl_generic_address_space_arg)
970 << Call->getDirectCallee()->getNameInfo().getAsString()
971 << Call->getArg(0)->getSourceRange();
974 RT = RT->getPointeeType();
975 auto Qual = RT.getQualifiers();
977 case Builtin::BIto_global:
978 Qual.setAddressSpace(LangAS::opencl_global);
980 case Builtin::BIto_local:
981 Qual.setAddressSpace(LangAS::opencl_local);
983 case Builtin::BIto_private:
984 Qual.setAddressSpace(LangAS::opencl_private);
987 llvm_unreachable("Invalid builtin function");
989 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
990 RT.getUnqualifiedType(), Qual)));
995 static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
996 if (checkArgCount(S, TheCall, 1))
999 // Compute __builtin_launder's parameter type from the argument.
1000 // The parameter type is:
1001 // * The type of the argument if it's not an array or function type,
1003 // * The decayed argument type.
1004 QualType ParamTy = [&]() {
1005 QualType ArgTy = TheCall->getArg(0)->getType();
1006 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
1007 return S.Context.getPointerType(Ty->getElementType());
1008 if (ArgTy->isFunctionType()) {
1009 return S.Context.getPointerType(ArgTy);
1014 TheCall->setType(ParamTy);
1016 auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
1017 if (!ParamTy->isPointerType())
1019 if (ParamTy->isFunctionPointerType())
1021 if (ParamTy->isVoidPointerType())
1023 return llvm::Optional<unsigned>{};
1025 if (DiagSelect.hasValue()) {
1026 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
1027 << DiagSelect.getValue() << TheCall->getSourceRange();
1031 // We either have an incomplete class type, or we have a class template
1032 // whose instantiation has not been forced. Example:
1034 // template <class T> struct Foo { T value; };
1035 // Foo<int> *p = nullptr;
1036 // auto *d = __builtin_launder(p);
1037 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
1038 diag::err_incomplete_type))
1041 assert(ParamTy->getPointeeType()->isObjectType() &&
1042 "Unhandled non-object pointer case");
1044 InitializedEntity Entity =
1045 InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
1047 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
1048 if (Arg.isInvalid())
1050 TheCall->setArg(0, Arg.get());
1055 // Emit an error and return true if the current architecture is not in the list
1056 // of supported architectures.
1058 CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
1059 ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
1060 llvm::Triple::ArchType CurArch =
1061 S.getASTContext().getTargetInfo().getTriple().getArch();
1062 if (llvm::is_contained(SupportedArchs, CurArch))
1064 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
1065 << TheCall->getSourceRange();
1070 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
1071 CallExpr *TheCall) {
1072 ExprResult TheCallResult(TheCall);
1074 // Find out if any arguments are required to be integer constant expressions.
1075 unsigned ICEArguments = 0;
1076 ASTContext::GetBuiltinTypeError Error;
1077 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
1078 if (Error != ASTContext::GE_None)
1079 ICEArguments = 0; // Don't diagnose previously diagnosed errors.
1081 // If any arguments are required to be ICE's, check and diagnose.
1082 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
1083 // Skip arguments not required to be ICE's.
1084 if ((ICEArguments & (1 << ArgNo)) == 0) continue;
1086 llvm::APSInt Result;
1087 if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
1089 ICEArguments &= ~(1 << ArgNo);
1092 switch (BuiltinID) {
1093 case Builtin::BI__builtin___CFStringMakeConstantString:
1094 assert(TheCall->getNumArgs() == 1 &&
1095 "Wrong # arguments to builtin CFStringMakeConstantString");
1096 if (CheckObjCString(TheCall->getArg(0)))
1099 case Builtin::BI__builtin_ms_va_start:
1100 case Builtin::BI__builtin_stdarg_start:
1101 case Builtin::BI__builtin_va_start:
1102 if (SemaBuiltinVAStart(BuiltinID, TheCall))
1105 case Builtin::BI__va_start: {
1106 switch (Context.getTargetInfo().getTriple().getArch()) {
1107 case llvm::Triple::aarch64:
1108 case llvm::Triple::arm:
1109 case llvm::Triple::thumb:
1110 if (SemaBuiltinVAStartARMMicrosoft(TheCall))
1114 if (SemaBuiltinVAStart(BuiltinID, TheCall))
1121 // The acquire, release, and no fence variants are ARM and AArch64 only.
1122 case Builtin::BI_interlockedbittestandset_acq:
1123 case Builtin::BI_interlockedbittestandset_rel:
1124 case Builtin::BI_interlockedbittestandset_nf:
1125 case Builtin::BI_interlockedbittestandreset_acq:
1126 case Builtin::BI_interlockedbittestandreset_rel:
1127 case Builtin::BI_interlockedbittestandreset_nf:
1128 if (CheckBuiltinTargetSupport(
1129 *this, BuiltinID, TheCall,
1130 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
1134 // The 64-bit bittest variants are x64, ARM, and AArch64 only.
1135 case Builtin::BI_bittest64:
1136 case Builtin::BI_bittestandcomplement64:
1137 case Builtin::BI_bittestandreset64:
1138 case Builtin::BI_bittestandset64:
1139 case Builtin::BI_interlockedbittestandreset64:
1140 case Builtin::BI_interlockedbittestandset64:
1141 if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
1142 {llvm::Triple::x86_64, llvm::Triple::arm,
1143 llvm::Triple::thumb, llvm::Triple::aarch64}))
1147 case Builtin::BI__builtin_isgreater:
1148 case Builtin::BI__builtin_isgreaterequal:
1149 case Builtin::BI__builtin_isless:
1150 case Builtin::BI__builtin_islessequal:
1151 case Builtin::BI__builtin_islessgreater:
1152 case Builtin::BI__builtin_isunordered:
1153 if (SemaBuiltinUnorderedCompare(TheCall))
1156 case Builtin::BI__builtin_fpclassify:
1157 if (SemaBuiltinFPClassification(TheCall, 6))
1160 case Builtin::BI__builtin_isfinite:
1161 case Builtin::BI__builtin_isinf:
1162 case Builtin::BI__builtin_isinf_sign:
1163 case Builtin::BI__builtin_isnan:
1164 case Builtin::BI__builtin_isnormal:
1165 case Builtin::BI__builtin_signbit:
1166 case Builtin::BI__builtin_signbitf:
1167 case Builtin::BI__builtin_signbitl:
1168 if (SemaBuiltinFPClassification(TheCall, 1))
1171 case Builtin::BI__builtin_shufflevector:
1172 return SemaBuiltinShuffleVector(TheCall);
1173 // TheCall will be freed by the smart pointer here, but that's fine, since
1174 // SemaBuiltinShuffleVector guts it, but then doesn't release it.
1175 case Builtin::BI__builtin_prefetch:
1176 if (SemaBuiltinPrefetch(TheCall))
1179 case Builtin::BI__builtin_alloca_with_align:
1180 if (SemaBuiltinAllocaWithAlign(TheCall))
1183 case Builtin::BI__assume:
1184 case Builtin::BI__builtin_assume:
1185 if (SemaBuiltinAssume(TheCall))
1188 case Builtin::BI__builtin_assume_aligned:
1189 if (SemaBuiltinAssumeAligned(TheCall))
1192 case Builtin::BI__builtin_dynamic_object_size:
1193 case Builtin::BI__builtin_object_size:
1194 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
1197 case Builtin::BI__builtin_longjmp:
1198 if (SemaBuiltinLongjmp(TheCall))
1201 case Builtin::BI__builtin_setjmp:
1202 if (SemaBuiltinSetjmp(TheCall))
1205 case Builtin::BI_setjmp:
1206 case Builtin::BI_setjmpex:
1207 if (checkArgCount(*this, TheCall, 1))
1210 case Builtin::BI__builtin_classify_type:
1211 if (checkArgCount(*this, TheCall, 1)) return true;
1212 TheCall->setType(Context.IntTy);
1214 case Builtin::BI__builtin_constant_p: {
1215 if (checkArgCount(*this, TheCall, 1)) return true;
1216 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
1217 if (Arg.isInvalid()) return true;
1218 TheCall->setArg(0, Arg.get());
1219 TheCall->setType(Context.IntTy);
1222 case Builtin::BI__builtin_launder:
1223 return SemaBuiltinLaunder(*this, TheCall);
1224 case Builtin::BI__sync_fetch_and_add:
1225 case Builtin::BI__sync_fetch_and_add_1:
1226 case Builtin::BI__sync_fetch_and_add_2:
1227 case Builtin::BI__sync_fetch_and_add_4:
1228 case Builtin::BI__sync_fetch_and_add_8:
1229 case Builtin::BI__sync_fetch_and_add_16:
1230 case Builtin::BI__sync_fetch_and_sub:
1231 case Builtin::BI__sync_fetch_and_sub_1:
1232 case Builtin::BI__sync_fetch_and_sub_2:
1233 case Builtin::BI__sync_fetch_and_sub_4:
1234 case Builtin::BI__sync_fetch_and_sub_8:
1235 case Builtin::BI__sync_fetch_and_sub_16:
1236 case Builtin::BI__sync_fetch_and_or:
1237 case Builtin::BI__sync_fetch_and_or_1:
1238 case Builtin::BI__sync_fetch_and_or_2:
1239 case Builtin::BI__sync_fetch_and_or_4:
1240 case Builtin::BI__sync_fetch_and_or_8:
1241 case Builtin::BI__sync_fetch_and_or_16:
1242 case Builtin::BI__sync_fetch_and_and:
1243 case Builtin::BI__sync_fetch_and_and_1:
1244 case Builtin::BI__sync_fetch_and_and_2:
1245 case Builtin::BI__sync_fetch_and_and_4:
1246 case Builtin::BI__sync_fetch_and_and_8:
1247 case Builtin::BI__sync_fetch_and_and_16:
1248 case Builtin::BI__sync_fetch_and_xor:
1249 case Builtin::BI__sync_fetch_and_xor_1:
1250 case Builtin::BI__sync_fetch_and_xor_2:
1251 case Builtin::BI__sync_fetch_and_xor_4:
1252 case Builtin::BI__sync_fetch_and_xor_8:
1253 case Builtin::BI__sync_fetch_and_xor_16:
1254 case Builtin::BI__sync_fetch_and_nand:
1255 case Builtin::BI__sync_fetch_and_nand_1:
1256 case Builtin::BI__sync_fetch_and_nand_2:
1257 case Builtin::BI__sync_fetch_and_nand_4:
1258 case Builtin::BI__sync_fetch_and_nand_8:
1259 case Builtin::BI__sync_fetch_and_nand_16:
1260 case Builtin::BI__sync_add_and_fetch:
1261 case Builtin::BI__sync_add_and_fetch_1:
1262 case Builtin::BI__sync_add_and_fetch_2:
1263 case Builtin::BI__sync_add_and_fetch_4:
1264 case Builtin::BI__sync_add_and_fetch_8:
1265 case Builtin::BI__sync_add_and_fetch_16:
1266 case Builtin::BI__sync_sub_and_fetch:
1267 case Builtin::BI__sync_sub_and_fetch_1:
1268 case Builtin::BI__sync_sub_and_fetch_2:
1269 case Builtin::BI__sync_sub_and_fetch_4:
1270 case Builtin::BI__sync_sub_and_fetch_8:
1271 case Builtin::BI__sync_sub_and_fetch_16:
1272 case Builtin::BI__sync_and_and_fetch:
1273 case Builtin::BI__sync_and_and_fetch_1:
1274 case Builtin::BI__sync_and_and_fetch_2:
1275 case Builtin::BI__sync_and_and_fetch_4:
1276 case Builtin::BI__sync_and_and_fetch_8:
1277 case Builtin::BI__sync_and_and_fetch_16:
1278 case Builtin::BI__sync_or_and_fetch:
1279 case Builtin::BI__sync_or_and_fetch_1:
1280 case Builtin::BI__sync_or_and_fetch_2:
1281 case Builtin::BI__sync_or_and_fetch_4:
1282 case Builtin::BI__sync_or_and_fetch_8:
1283 case Builtin::BI__sync_or_and_fetch_16:
1284 case Builtin::BI__sync_xor_and_fetch:
1285 case Builtin::BI__sync_xor_and_fetch_1:
1286 case Builtin::BI__sync_xor_and_fetch_2:
1287 case Builtin::BI__sync_xor_and_fetch_4:
1288 case Builtin::BI__sync_xor_and_fetch_8:
1289 case Builtin::BI__sync_xor_and_fetch_16:
1290 case Builtin::BI__sync_nand_and_fetch:
1291 case Builtin::BI__sync_nand_and_fetch_1:
1292 case Builtin::BI__sync_nand_and_fetch_2:
1293 case Builtin::BI__sync_nand_and_fetch_4:
1294 case Builtin::BI__sync_nand_and_fetch_8:
1295 case Builtin::BI__sync_nand_and_fetch_16:
1296 case Builtin::BI__sync_val_compare_and_swap:
1297 case Builtin::BI__sync_val_compare_and_swap_1:
1298 case Builtin::BI__sync_val_compare_and_swap_2:
1299 case Builtin::BI__sync_val_compare_and_swap_4:
1300 case Builtin::BI__sync_val_compare_and_swap_8:
1301 case Builtin::BI__sync_val_compare_and_swap_16:
1302 case Builtin::BI__sync_bool_compare_and_swap:
1303 case Builtin::BI__sync_bool_compare_and_swap_1:
1304 case Builtin::BI__sync_bool_compare_and_swap_2:
1305 case Builtin::BI__sync_bool_compare_and_swap_4:
1306 case Builtin::BI__sync_bool_compare_and_swap_8:
1307 case Builtin::BI__sync_bool_compare_and_swap_16:
1308 case Builtin::BI__sync_lock_test_and_set:
1309 case Builtin::BI__sync_lock_test_and_set_1:
1310 case Builtin::BI__sync_lock_test_and_set_2:
1311 case Builtin::BI__sync_lock_test_and_set_4:
1312 case Builtin::BI__sync_lock_test_and_set_8:
1313 case Builtin::BI__sync_lock_test_and_set_16:
1314 case Builtin::BI__sync_lock_release:
1315 case Builtin::BI__sync_lock_release_1:
1316 case Builtin::BI__sync_lock_release_2:
1317 case Builtin::BI__sync_lock_release_4:
1318 case Builtin::BI__sync_lock_release_8:
1319 case Builtin::BI__sync_lock_release_16:
1320 case Builtin::BI__sync_swap:
1321 case Builtin::BI__sync_swap_1:
1322 case Builtin::BI__sync_swap_2:
1323 case Builtin::BI__sync_swap_4:
1324 case Builtin::BI__sync_swap_8:
1325 case Builtin::BI__sync_swap_16:
1326 return SemaBuiltinAtomicOverloaded(TheCallResult);
1327 case Builtin::BI__sync_synchronize:
1328 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
1329 << TheCall->getCallee()->getSourceRange();
1331 case Builtin::BI__builtin_nontemporal_load:
1332 case Builtin::BI__builtin_nontemporal_store:
1333 return SemaBuiltinNontemporalOverloaded(TheCallResult);
1334 #define BUILTIN(ID, TYPE, ATTRS)
1335 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
1336 case Builtin::BI##ID: \
1337 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
1338 #include "clang/Basic/Builtins.def"
1339 case Builtin::BI__annotation:
1340 if (SemaBuiltinMSVCAnnotation(*this, TheCall))
1343 case Builtin::BI__builtin_annotation:
1344 if (SemaBuiltinAnnotation(*this, TheCall))
1347 case Builtin::BI__builtin_addressof:
1348 if (SemaBuiltinAddressof(*this, TheCall))
1351 case Builtin::BI__builtin_add_overflow:
1352 case Builtin::BI__builtin_sub_overflow:
1353 case Builtin::BI__builtin_mul_overflow:
1354 if (SemaBuiltinOverflow(*this, TheCall))
1357 case Builtin::BI__builtin_operator_new:
1358 case Builtin::BI__builtin_operator_delete: {
1359 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
1361 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
1362 if (Res.isInvalid())
1363 CorrectDelayedTyposInExpr(TheCallResult.get());
1366 case Builtin::BI__builtin_dump_struct: {
1367 // We first want to ensure we are called with 2 arguments
1368 if (checkArgCount(*this, TheCall, 2))
1370 // Ensure that the first argument is of type 'struct XX *'
1371 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
1372 const QualType PtrArgType = PtrArg->getType();
1373 if (!PtrArgType->isPointerType() ||
1374 !PtrArgType->getPointeeType()->isRecordType()) {
1375 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1376 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
1377 << "structure pointer";
1381 // Ensure that the second argument is of type 'FunctionType'
1382 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
1383 const QualType FnPtrArgType = FnPtrArg->getType();
1384 if (!FnPtrArgType->isPointerType()) {
1385 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1386 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
1387 << FnPtrArgType << "'int (*)(const char *, ...)'";
1391 const auto *FuncType =
1392 FnPtrArgType->getPointeeType()->getAs<FunctionType>();
1395 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1396 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
1397 << FnPtrArgType << "'int (*)(const char *, ...)'";
1401 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
1402 if (!FT->getNumParams()) {
1403 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1404 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1405 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1408 QualType PT = FT->getParamType(0);
1409 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
1410 !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
1411 !PT->getPointeeType().isConstQualified()) {
1412 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1413 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1414 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1419 TheCall->setType(Context.IntTy);
1422 case Builtin::BI__builtin_preserve_access_index:
1423 if (SemaBuiltinPreserveAI(*this, TheCall))
1426 case Builtin::BI__builtin_call_with_static_chain:
1427 if (SemaBuiltinCallWithStaticChain(*this, TheCall))
1430 case Builtin::BI__exception_code:
1431 case Builtin::BI_exception_code:
1432 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
1433 diag::err_seh___except_block))
1436 case Builtin::BI__exception_info:
1437 case Builtin::BI_exception_info:
1438 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
1439 diag::err_seh___except_filter))
1442 case Builtin::BI__GetExceptionInfo:
1443 if (checkArgCount(*this, TheCall, 1))
1446 if (CheckCXXThrowOperand(
1447 TheCall->getBeginLoc(),
1448 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
1452 TheCall->setType(Context.VoidPtrTy);
1454 // OpenCL v2.0, s6.13.16 - Pipe functions
1455 case Builtin::BIread_pipe:
1456 case Builtin::BIwrite_pipe:
1457 // Since those two functions are declared with var args, we need a semantic
1458 // check for the argument.
1459 if (SemaBuiltinRWPipe(*this, TheCall))
1462 case Builtin::BIreserve_read_pipe:
1463 case Builtin::BIreserve_write_pipe:
1464 case Builtin::BIwork_group_reserve_read_pipe:
1465 case Builtin::BIwork_group_reserve_write_pipe:
1466 if (SemaBuiltinReserveRWPipe(*this, TheCall))
1469 case Builtin::BIsub_group_reserve_read_pipe:
1470 case Builtin::BIsub_group_reserve_write_pipe:
1471 if (checkOpenCLSubgroupExt(*this, TheCall) ||
1472 SemaBuiltinReserveRWPipe(*this, TheCall))
1475 case Builtin::BIcommit_read_pipe:
1476 case Builtin::BIcommit_write_pipe:
1477 case Builtin::BIwork_group_commit_read_pipe:
1478 case Builtin::BIwork_group_commit_write_pipe:
1479 if (SemaBuiltinCommitRWPipe(*this, TheCall))
1482 case Builtin::BIsub_group_commit_read_pipe:
1483 case Builtin::BIsub_group_commit_write_pipe:
1484 if (checkOpenCLSubgroupExt(*this, TheCall) ||
1485 SemaBuiltinCommitRWPipe(*this, TheCall))
1488 case Builtin::BIget_pipe_num_packets:
1489 case Builtin::BIget_pipe_max_packets:
1490 if (SemaBuiltinPipePackets(*this, TheCall))
1493 case Builtin::BIto_global:
1494 case Builtin::BIto_local:
1495 case Builtin::BIto_private:
1496 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
1499 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
1500 case Builtin::BIenqueue_kernel:
1501 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
1504 case Builtin::BIget_kernel_work_group_size:
1505 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
1506 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
1509 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
1510 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
1511 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
1514 case Builtin::BI__builtin_os_log_format:
1515 case Builtin::BI__builtin_os_log_format_buffer_size:
1516 if (SemaBuiltinOSLogFormat(TheCall))
1521 // Since the target specific builtins for each arch overlap, only check those
1522 // of the arch we are compiling for.
1523 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
1524 switch (Context.getTargetInfo().getTriple().getArch()) {
1525 case llvm::Triple::arm:
1526 case llvm::Triple::armeb:
1527 case llvm::Triple::thumb:
1528 case llvm::Triple::thumbeb:
1529 if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
1532 case llvm::Triple::aarch64:
1533 case llvm::Triple::aarch64_be:
1534 if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
1537 case llvm::Triple::hexagon:
1538 if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall))
1541 case llvm::Triple::mips:
1542 case llvm::Triple::mipsel:
1543 case llvm::Triple::mips64:
1544 case llvm::Triple::mips64el:
1545 if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
1548 case llvm::Triple::systemz:
1549 if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
1552 case llvm::Triple::x86:
1553 case llvm::Triple::x86_64:
1554 if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
1557 case llvm::Triple::ppc:
1558 case llvm::Triple::ppc64:
1559 case llvm::Triple::ppc64le:
1560 if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
1568 return TheCallResult;
1571 // Get the valid immediate range for the specified NEON type code.
1572 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
1573 NeonTypeFlags Type(t);
1574 int IsQuad = ForceQuad ? true : Type.isQuad();
1575 switch (Type.getEltType()) {
1576 case NeonTypeFlags::Int8:
1577 case NeonTypeFlags::Poly8:
1578 return shift ? 7 : (8 << IsQuad) - 1;
1579 case NeonTypeFlags::Int16:
1580 case NeonTypeFlags::Poly16:
1581 return shift ? 15 : (4 << IsQuad) - 1;
1582 case NeonTypeFlags::Int32:
1583 return shift ? 31 : (2 << IsQuad) - 1;
1584 case NeonTypeFlags::Int64:
1585 case NeonTypeFlags::Poly64:
1586 return shift ? 63 : (1 << IsQuad) - 1;
1587 case NeonTypeFlags::Poly128:
1588 return shift ? 127 : (1 << IsQuad) - 1;
1589 case NeonTypeFlags::Float16:
1590 assert(!shift && "cannot shift float types!");
1591 return (4 << IsQuad) - 1;
1592 case NeonTypeFlags::Float32:
1593 assert(!shift && "cannot shift float types!");
1594 return (2 << IsQuad) - 1;
1595 case NeonTypeFlags::Float64:
1596 assert(!shift && "cannot shift float types!");
1597 return (1 << IsQuad) - 1;
1599 llvm_unreachable("Invalid NeonTypeFlag!");
1602 /// getNeonEltType - Return the QualType corresponding to the elements of
1603 /// the vector type specified by the NeonTypeFlags. This is used to check
1604 /// the pointer arguments for Neon load/store intrinsics.
1605 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
1606 bool IsPolyUnsigned, bool IsInt64Long) {
1607 switch (Flags.getEltType()) {
1608 case NeonTypeFlags::Int8:
1609 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
1610 case NeonTypeFlags::Int16:
1611 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
1612 case NeonTypeFlags::Int32:
1613 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
1614 case NeonTypeFlags::Int64:
1616 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
1618 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
1619 : Context.LongLongTy;
1620 case NeonTypeFlags::Poly8:
1621 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
1622 case NeonTypeFlags::Poly16:
1623 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
1624 case NeonTypeFlags::Poly64:
1626 return Context.UnsignedLongTy;
1628 return Context.UnsignedLongLongTy;
1629 case NeonTypeFlags::Poly128:
1631 case NeonTypeFlags::Float16:
1632 return Context.HalfTy;
1633 case NeonTypeFlags::Float32:
1634 return Context.FloatTy;
1635 case NeonTypeFlags::Float64:
1636 return Context.DoubleTy;
1638 llvm_unreachable("Invalid NeonTypeFlag!");
1641 bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
1642 llvm::APSInt Result;
1646 bool HasConstPtr = false;
1647 switch (BuiltinID) {
1648 #define GET_NEON_OVERLOAD_CHECK
1649 #include "clang/Basic/arm_neon.inc"
1650 #include "clang/Basic/arm_fp16.inc"
1651 #undef GET_NEON_OVERLOAD_CHECK
1654 // For NEON intrinsics which are overloaded on vector element type, validate
1655 // the immediate which specifies which variant to emit.
1656 unsigned ImmArg = TheCall->getNumArgs()-1;
1658 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
1661 TV = Result.getLimitedValue(64);
1662 if ((TV > 63) || (mask & (1ULL << TV)) == 0)
1663 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
1664 << TheCall->getArg(ImmArg)->getSourceRange();
1667 if (PtrArgNum >= 0) {
1668 // Check that pointer arguments have the specified type.
1669 Expr *Arg = TheCall->getArg(PtrArgNum);
1670 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
1671 Arg = ICE->getSubExpr();
1672 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
1673 QualType RHSTy = RHS.get()->getType();
1675 llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
1676 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
1677 Arch == llvm::Triple::aarch64_be;
1679 Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
1681 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
1683 EltTy = EltTy.withConst();
1684 QualType LHSTy = Context.getPointerType(EltTy);
1685 AssignConvertType ConvTy;
1686 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
1687 if (RHS.isInvalid())
1689 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
1690 RHS.get(), AA_Assigning))
1694 // For NEON intrinsics which take an immediate value as part of the
1695 // instruction, range check them here.
1696 unsigned i = 0, l = 0, u = 0;
1697 switch (BuiltinID) {
1700 #define GET_NEON_IMMEDIATE_CHECK
1701 #include "clang/Basic/arm_neon.inc"
1702 #include "clang/Basic/arm_fp16.inc"
1703 #undef GET_NEON_IMMEDIATE_CHECK
1706 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
1709 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
1710 unsigned MaxWidth) {
1711 assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
1712 BuiltinID == ARM::BI__builtin_arm_ldaex ||
1713 BuiltinID == ARM::BI__builtin_arm_strex ||
1714 BuiltinID == ARM::BI__builtin_arm_stlex ||
1715 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
1716 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
1717 BuiltinID == AArch64::BI__builtin_arm_strex ||
1718 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
1719 "unexpected ARM builtin");
1720 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
1721 BuiltinID == ARM::BI__builtin_arm_ldaex ||
1722 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
1723 BuiltinID == AArch64::BI__builtin_arm_ldaex;
1725 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
1727 // Ensure that we have the proper number of arguments.
1728 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
1731 // Inspect the pointer argument of the atomic builtin. This should always be
1732 // a pointer type, whose element is an integral scalar or pointer type.
1733 // Because it is a pointer type, we don't have to worry about any implicit
1735 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
1736 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
1737 if (PointerArgRes.isInvalid())
1739 PointerArg = PointerArgRes.get();
1741 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
1743 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
1744 << PointerArg->getType() << PointerArg->getSourceRange();
1748 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
1749 // task is to insert the appropriate casts into the AST. First work out just
1750 // what the appropriate type is.
1751 QualType ValType = pointerType->getPointeeType();
1752 QualType AddrType = ValType.getUnqualifiedType().withVolatile();
1754 AddrType.addConst();
1756 // Issue a warning if the cast is dodgy.
1757 CastKind CastNeeded = CK_NoOp;
1758 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
1759 CastNeeded = CK_BitCast;
1760 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
1761 << PointerArg->getType() << Context.getPointerType(AddrType)
1762 << AA_Passing << PointerArg->getSourceRange();
1765 // Finally, do the cast and replace the argument with the corrected version.
1766 AddrType = Context.getPointerType(AddrType);
1767 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
1768 if (PointerArgRes.isInvalid())
1770 PointerArg = PointerArgRes.get();
1772 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
1774 // In general, we allow ints, floats and pointers to be loaded and stored.
1775 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
1776 !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
1777 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
1778 << PointerArg->getType() << PointerArg->getSourceRange();
1782 // But ARM doesn't have instructions to deal with 128-bit versions.
1783 if (Context.getTypeSize(ValType) > MaxWidth) {
1784 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
1785 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
1786 << PointerArg->getType() << PointerArg->getSourceRange();
1790 switch (ValType.getObjCLifetime()) {
1791 case Qualifiers::OCL_None:
1792 case Qualifiers::OCL_ExplicitNone:
1796 case Qualifiers::OCL_Weak:
1797 case Qualifiers::OCL_Strong:
1798 case Qualifiers::OCL_Autoreleasing:
1799 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
1800 << ValType << PointerArg->getSourceRange();
1805 TheCall->setType(ValType);
1809 // Initialize the argument to be stored.
1810 ExprResult ValArg = TheCall->getArg(0);
1811 InitializedEntity Entity = InitializedEntity::InitializeParameter(
1812 Context, ValType, /*consume*/ false);
1813 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
1814 if (ValArg.isInvalid())
1816 TheCall->setArg(0, ValArg.get());
1818 // __builtin_arm_strex always returns an int. It's marked as such in the .def,
1819 // but the custom checker bypasses all default analysis.
1820 TheCall->setType(Context.IntTy);
1824 bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
1825 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
1826 BuiltinID == ARM::BI__builtin_arm_ldaex ||
1827 BuiltinID == ARM::BI__builtin_arm_strex ||
1828 BuiltinID == ARM::BI__builtin_arm_stlex) {
1829 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
1832 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
1833 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
1834 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
1837 if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
1838 BuiltinID == ARM::BI__builtin_arm_wsr64)
1839 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
1841 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
1842 BuiltinID == ARM::BI__builtin_arm_rsrp ||
1843 BuiltinID == ARM::BI__builtin_arm_wsr ||
1844 BuiltinID == ARM::BI__builtin_arm_wsrp)
1845 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
1847 if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
1850 // For intrinsics which take an immediate value as part of the instruction,
1851 // range check them here.
1852 // FIXME: VFP Intrinsics should error if VFP not present.
1853 switch (BuiltinID) {
1854 default: return false;
1855 case ARM::BI__builtin_arm_ssat:
1856 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
1857 case ARM::BI__builtin_arm_usat:
1858 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
1859 case ARM::BI__builtin_arm_ssat16:
1860 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
1861 case ARM::BI__builtin_arm_usat16:
1862 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
1863 case ARM::BI__builtin_arm_vcvtr_f:
1864 case ARM::BI__builtin_arm_vcvtr_d:
1865 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
1866 case ARM::BI__builtin_arm_dmb:
1867 case ARM::BI__builtin_arm_dsb:
1868 case ARM::BI__builtin_arm_isb:
1869 case ARM::BI__builtin_arm_dbg:
1870 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
1874 bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
1875 CallExpr *TheCall) {
1876 if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
1877 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
1878 BuiltinID == AArch64::BI__builtin_arm_strex ||
1879 BuiltinID == AArch64::BI__builtin_arm_stlex) {
1880 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
1883 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
1884 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
1885 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
1886 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
1887 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
1890 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
1891 BuiltinID == AArch64::BI__builtin_arm_wsr64)
1892 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
1894 // Memory Tagging Extensions (MTE) Intrinsics
1895 if (BuiltinID == AArch64::BI__builtin_arm_irg ||
1896 BuiltinID == AArch64::BI__builtin_arm_addg ||
1897 BuiltinID == AArch64::BI__builtin_arm_gmi ||
1898 BuiltinID == AArch64::BI__builtin_arm_ldg ||
1899 BuiltinID == AArch64::BI__builtin_arm_stg ||
1900 BuiltinID == AArch64::BI__builtin_arm_subp) {
1901 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
1904 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
1905 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
1906 BuiltinID == AArch64::BI__builtin_arm_wsr ||
1907 BuiltinID == AArch64::BI__builtin_arm_wsrp)
1908 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
1910 // Only check the valid encoding range. Any constant in this range would be
1911 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
1912 // an exception for incorrect registers. This matches MSVC behavior.
1913 if (BuiltinID == AArch64::BI_ReadStatusReg ||
1914 BuiltinID == AArch64::BI_WriteStatusReg)
1915 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
1917 if (BuiltinID == AArch64::BI__getReg)
1918 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
1920 if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
1923 // For intrinsics which take an immediate value as part of the instruction,
1924 // range check them here.
1925 unsigned i = 0, l = 0, u = 0;
1926 switch (BuiltinID) {
1927 default: return false;
1928 case AArch64::BI__builtin_arm_dmb:
1929 case AArch64::BI__builtin_arm_dsb:
1930 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
1933 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
1936 bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
1937 struct BuiltinAndString {
1942 static BuiltinAndString ValidCPU[] = {
1943 { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, "v65,v66" },
1944 { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, "v62,v65,v66" },
1945 { Hexagon::BI__builtin_HEXAGON_F2_dfadd, "v66" },
1946 { Hexagon::BI__builtin_HEXAGON_F2_dfsub, "v66" },
1947 { Hexagon::BI__builtin_HEXAGON_M2_mnaci, "v66" },
1948 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, "v62,v65,v66" },
1949 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, "v62,v65,v66" },
1950 { Hexagon::BI__builtin_HEXAGON_S2_mask, "v66" },
1951 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, "v60,v62,v65,v66" },
1952 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, "v60,v62,v65,v66" },
1953 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, "v60,v62,v65,v66" },
1954 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, "v60,v62,v65,v66" },
1955 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, "v60,v62,v65,v66" },
1956 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, "v60,v62,v65,v66" },
1957 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, "v60,v62,v65,v66" },
1958 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, "v60,v62,v65,v66" },
1959 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, "v60,v62,v65,v66" },
1960 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, "v60,v62,v65,v66" },
1961 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, "v60,v62,v65,v66" },
1962 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, "v60,v62,v65,v66" },
1963 { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, "v62,v65,v66" },
1964 { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, "v62,v65,v66" },
1965 { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, "v62,v65,v66" },
1968 static BuiltinAndString ValidHVX[] = {
1969 { Hexagon::BI__builtin_HEXAGON_V6_hi, "v60,v62,v65,v66" },
1970 { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, "v60,v62,v65,v66" },
1971 { Hexagon::BI__builtin_HEXAGON_V6_lo, "v60,v62,v65,v66" },
1972 { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, "v60,v62,v65,v66" },
1973 { Hexagon::BI__builtin_HEXAGON_V6_extractw, "v60,v62,v65,v66" },
1974 { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, "v60,v62,v65,v66" },
1975 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, "v62,v65,v66" },
1976 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, "v62,v65,v66" },
1977 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, "v62,v65,v66" },
1978 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, "v62,v65,v66" },
1979 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, "v60,v62,v65,v66" },
1980 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, "v60,v62,v65,v66" },
1981 { Hexagon::BI__builtin_HEXAGON_V6_pred_and, "v60,v62,v65,v66" },
1982 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, "v60,v62,v65,v66" },
1983 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, "v60,v62,v65,v66" },
1984 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, "v60,v62,v65,v66" },
1985 { Hexagon::BI__builtin_HEXAGON_V6_pred_not, "v60,v62,v65,v66" },
1986 { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, "v60,v62,v65,v66" },
1987 { Hexagon::BI__builtin_HEXAGON_V6_pred_or, "v60,v62,v65,v66" },
1988 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, "v60,v62,v65,v66" },
1989 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, "v60,v62,v65,v66" },
1990 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, "v60,v62,v65,v66" },
1991 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, "v60,v62,v65,v66" },
1992 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, "v60,v62,v65,v66" },
1993 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, "v62,v65,v66" },
1994 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, "v62,v65,v66" },
1995 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, "v60,v62,v65,v66" },
1996 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, "v60,v62,v65,v66" },
1997 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, "v62,v65,v66" },
1998 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, "v62,v65,v66" },
1999 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, "v62,v65,v66" },
2000 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, "v62,v65,v66" },
2001 { Hexagon::BI__builtin_HEXAGON_V6_vabsb, "v65,v66" },
2002 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, "v65,v66" },
2003 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, "v65,v66" },
2004 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, "v65,v66" },
2005 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, "v60,v62,v65,v66" },
2006 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, "v60,v62,v65,v66" },
2007 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, "v60,v62,v65,v66" },
2008 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, "v60,v62,v65,v66" },
2009 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, "v60,v62,v65,v66" },
2010 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, "v60,v62,v65,v66" },
2011 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, "v60,v62,v65,v66" },
2012 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, "v60,v62,v65,v66" },
2013 { Hexagon::BI__builtin_HEXAGON_V6_vabsh, "v60,v62,v65,v66" },
2014 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, "v60,v62,v65,v66" },
2015 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, "v60,v62,v65,v66" },
2016 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, "v60,v62,v65,v66" },
2017 { Hexagon::BI__builtin_HEXAGON_V6_vabsw, "v60,v62,v65,v66" },
2018 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, "v60,v62,v65,v66" },
2019 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, "v60,v62,v65,v66" },
2020 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, "v60,v62,v65,v66" },
2021 { Hexagon::BI__builtin_HEXAGON_V6_vaddb, "v60,v62,v65,v66" },
2022 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, "v60,v62,v65,v66" },
2023 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, "v60,v62,v65,v66" },
2024 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, "v60,v62,v65,v66" },
2025 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, "v62,v65,v66" },
2026 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, "v62,v65,v66" },
2027 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, "v62,v65,v66" },
2028 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, "v62,v65,v66" },
2029 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, "v62,v65,v66" },
2030 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, "v62,v65,v66" },
2031 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat, "v66" },
2032 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat_128B, "v66" },
2033 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, "v62,v65,v66" },
2034 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, "v62,v65,v66" },
2035 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, "v62,v65,v66" },
2036 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, "v62,v65,v66" },
2037 { Hexagon::BI__builtin_HEXAGON_V6_vaddh, "v60,v62,v65,v66" },
2038 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, "v60,v62,v65,v66" },
2039 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, "v60,v62,v65,v66" },
2040 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, "v60,v62,v65,v66" },
2041 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, "v60,v62,v65,v66" },
2042 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, "v60,v62,v65,v66" },
2043 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, "v60,v62,v65,v66" },
2044 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, "v60,v62,v65,v66" },
2045 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, "v60,v62,v65,v66" },
2046 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, "v60,v62,v65,v66" },
2047 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, "v62,v65,v66" },
2048 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, "v62,v65,v66" },
2049 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, "v60,v62,v65,v66" },
2050 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, "v60,v62,v65,v66" },
2051 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, "v62,v65,v66" },
2052 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, "v62,v65,v66" },
2053 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, "v60,v62,v65,v66" },
2054 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, "v60,v62,v65,v66" },
2055 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, "v60,v62,v65,v66" },
2056 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, "v60,v62,v65,v66" },
2057 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, "v62,v65,v66" },
2058 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, "v62,v65,v66" },
2059 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, "v60,v62,v65,v66" },
2060 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, "v60,v62,v65,v66" },
2061 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, "v60,v62,v65,v66" },
2062 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, "v60,v62,v65,v66" },
2063 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, "v60,v62,v65,v66" },
2064 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, "v60,v62,v65,v66" },
2065 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, "v62,v65,v66" },
2066 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, "v62,v65,v66" },
2067 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, "v62,v65,v66" },
2068 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, "v62,v65,v66" },
2069 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, "v62,v65,v66" },
2070 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, "v62,v65,v66" },
2071 { Hexagon::BI__builtin_HEXAGON_V6_vaddw, "v60,v62,v65,v66" },
2072 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, "v60,v62,v65,v66" },
2073 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, "v60,v62,v65,v66" },
2074 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, "v60,v62,v65,v66" },
2075 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, "v60,v62,v65,v66" },
2076 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, "v60,v62,v65,v66" },
2077 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, "v60,v62,v65,v66" },
2078 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, "v60,v62,v65,v66" },
2079 { Hexagon::BI__builtin_HEXAGON_V6_valignb, "v60,v62,v65,v66" },
2080 { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, "v60,v62,v65,v66" },
2081 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, "v60,v62,v65,v66" },
2082 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, "v60,v62,v65,v66" },
2083 { Hexagon::BI__builtin_HEXAGON_V6_vand, "v60,v62,v65,v66" },
2084 { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, "v60,v62,v65,v66" },
2085 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, "v62,v65,v66" },
2086 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, "v62,v65,v66" },
2087 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, "v62,v65,v66" },
2088 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, "v62,v65,v66" },
2089 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, "v60,v62,v65,v66" },
2090 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, "v60,v62,v65,v66" },
2091 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, "v60,v62,v65,v66" },
2092 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, "v60,v62,v65,v66" },
2093 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, "v62,v65,v66" },
2094 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, "v62,v65,v66" },
2095 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, "v62,v65,v66" },
2096 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, "v62,v65,v66" },
2097 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, "v60,v62,v65,v66" },
2098 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, "v60,v62,v65,v66" },
2099 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, "v60,v62,v65,v66" },
2100 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, "v60,v62,v65,v66" },
2101 { Hexagon::BI__builtin_HEXAGON_V6_vaslh, "v60,v62,v65,v66" },
2102 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, "v60,v62,v65,v66" },
2103 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, "v65,v66" },
2104 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, "v65,v66" },
2105 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, "v60,v62,v65,v66" },
2106 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, "v60,v62,v65,v66" },
2107 { Hexagon::BI__builtin_HEXAGON_V6_vaslw, "v60,v62,v65,v66" },
2108 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, "v60,v62,v65,v66" },
2109 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, "v60,v62,v65,v66" },
2110 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, "v60,v62,v65,v66" },
2111 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, "v60,v62,v65,v66" },
2112 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, "v60,v62,v65,v66" },
2113 { Hexagon::BI__builtin_HEXAGON_V6_vasrh, "v60,v62,v65,v66" },
2114 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, "v60,v62,v65,v66" },
2115 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, "v65,v66" },
2116 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, "v65,v66" },
2117 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, "v60,v62,v65,v66" },
2118 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, "v60,v62,v65,v66" },
2119 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, "v62,v65,v66" },
2120 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, "v62,v65,v66" },
2121 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, "v60,v62,v65,v66" },
2122 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, "v60,v62,v65,v66" },
2123 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, "v60,v62,v65,v66" },
2124 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, "v60,v62,v65,v66" },
2125 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, "v60,v62,v65,v66" },
2126 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, "v60,v62,v65,v66" },
2127 { Hexagon::BI__builtin_HEXAGON_V6_vasr_into, "v66" },
2128 { Hexagon::BI__builtin_HEXAGON_V6_vasr_into_128B, "v66" },
2129 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, "v65,v66" },
2130 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, "v65,v66" },
2131 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, "v65,v66" },
2132 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, "v65,v66" },
2133 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, "v62,v65,v66" },
2134 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "v62,v65,v66" },
2135 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, "v65,v66" },
2136 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, "v65,v66" },
2137 { Hexagon::BI__builtin_HEXAGON_V6_vasrw, "v60,v62,v65,v66" },
2138 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, "v60,v62,v65,v66" },
2139 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, "v60,v62,v65,v66" },
2140 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, "v60,v62,v65,v66" },
2141 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, "v60,v62,v65,v66" },
2142 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, "v60,v62,v65,v66" },
2143 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, "v60,v62,v65,v66" },
2144 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, "v60,v62,v65,v66" },
2145 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, "v60,v62,v65,v66" },
2146 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, "v60,v62,v65,v66" },
2147 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, "v62,v65,v66" },
2148 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "v62,v65,v66" },
2149 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, "v60,v62,v65,v66" },
2150 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, "v60,v62,v65,v66" },
2151 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, "v60,v62,v65,v66" },
2152 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, "v60,v62,v65,v66" },
2153 { Hexagon::BI__builtin_HEXAGON_V6_vassign, "v60,v62,v65,v66" },
2154 { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, "v60,v62,v65,v66" },
2155 { Hexagon::BI__builtin_HEXAGON_V6_vassignp, "v60,v62,v65,v66" },
2156 { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, "v60,v62,v65,v66" },
2157 { Hexagon::BI__builtin_HEXAGON_V6_vavgb, "v65,v66" },
2158 { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, "v65,v66" },
2159 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, "v65,v66" },
2160 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, "v65,v66" },
2161 { Hexagon::BI__builtin_HEXAGON_V6_vavgh, "v60,v62,v65,v66" },
2162 { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, "v60,v62,v65,v66" },
2163 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, "v60,v62,v65,v66" },
2164 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, "v60,v62,v65,v66" },
2165 { Hexagon::BI__builtin_HEXAGON_V6_vavgub, "v60,v62,v65,v66" },
2166 { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, "v60,v62,v65,v66" },
2167 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, "v60,v62,v65,v66" },
2168 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, "v60,v62,v65,v66" },
2169 { Hexagon::BI__builtin_HEXAGON_V6_vavguh, "v60,v62,v65,v66" },
2170 { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, "v60,v62,v65,v66" },
2171 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, "v60,v62,v65,v66" },
2172 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, "v60,v62,v65,v66" },
2173 { Hexagon::BI__builtin_HEXAGON_V6_vavguw, "v65,v66" },
2174 { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, "v65,v66" },
2175 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, "v65,v66" },
2176 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, "v65,v66" },
2177 { Hexagon::BI__builtin_HEXAGON_V6_vavgw, "v60,v62,v65,v66" },
2178 { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, "v60,v62,v65,v66" },
2179 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, "v60,v62,v65,v66" },
2180 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, "v60,v62,v65,v66" },
2181 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, "v60,v62,v65,v66" },
2182 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, "v60,v62,v65,v66" },
2183 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, "v60,v62,v65,v66" },
2184 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, "v60,v62,v65,v66" },
2185 { Hexagon::BI__builtin_HEXAGON_V6_vcombine, "v60,v62,v65,v66" },
2186 { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, "v60,v62,v65,v66" },
2187 { Hexagon::BI__builtin_HEXAGON_V6_vd0, "v60,v62,v65,v66" },
2188 { Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, "v60,v62,v65,v66" },
2189 { Hexagon::BI__builtin_HEXAGON_V6_vdd0, "v65,v66" },
2190 { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, "v65,v66" },
2191 { Hexagon::BI__builtin_HEXAGON_V6_vdealb, "v60,v62,v65,v66" },
2192 { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, "v60,v62,v65,v66" },
2193 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, "v60,v62,v65,v66" },
2194 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, "v60,v62,v65,v66" },
2195 { Hexagon::BI__builtin_HEXAGON_V6_vdealh, "v60,v62,v65,v66" },
2196 { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, "v60,v62,v65,v66" },
2197 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, "v60,v62,v65,v66" },
2198 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, "v60,v62,v65,v66" },
2199 { Hexagon::BI__builtin_HEXAGON_V6_vdelta, "v60,v62,v65,v66" },
2200 { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, "v60,v62,v65,v66" },
2201 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, "v60,v62,v65,v66" },
2202 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, "v60,v62,v65,v66" },
2203 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, "v60,v62,v65,v66" },
2204 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, "v60,v62,v65,v66" },
2205 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, "v60,v62,v65,v66" },
2206 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, "v60,v62,v65,v66" },
2207 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, "v60,v62,v65,v66" },
2208 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "v60,v62,v65,v66" },
2209 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, "v60,v62,v65,v66" },
2210 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, "v60,v62,v65,v66" },
2211 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, "v60,v62,v65,v66" },
2212 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "v60,v62,v65,v66" },
2213 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, "v60,v62,v65,v66" },
2214 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "v60,v62,v65,v66" },
2215 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "v60,v62,v65,v66" },
2216 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "v60,v62,v65,v66" },
2217 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, "v60,v62,v65,v66" },
2218 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, "v60,v62,v65,v66" },
2219 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, "v60,v62,v65,v66" },
2220 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "v60,v62,v65,v66" },
2221 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, "v60,v62,v65,v66" },
2222 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, "v60,v62,v65,v66" },
2223 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, "v60,v62,v65,v66" },
2224 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "v60,v62,v65,v66" },
2225 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, "v60,v62,v65,v66" },
2226 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "v60,v62,v65,v66" },
2227 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "v60,v62,v65,v66" },
2228 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "v60,v62,v65,v66" },
2229 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, "v60,v62,v65,v66" },
2230 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, "v60,v62,v65,v66" },
2231 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, "v60,v62,v65,v66" },
2232 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "v60,v62,v65,v66" },
2233 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, "v60,v62,v65,v66" },
2234 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, "v60,v62,v65,v66" },
2235 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, "v60,v62,v65,v66" },
2236 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "v60,v62,v65,v66" },
2237 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, "v60,v62,v65,v66" },
2238 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, "v60,v62,v65,v66" },
2239 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, "v60,v62,v65,v66" },
2240 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, "v60,v62,v65,v66" },
2241 { Hexagon::BI__builtin_HEXAGON_V6_veqb, "v60,v62,v65,v66" },
2242 { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, "v60,v62,v65,v66" },
2243 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, "v60,v62,v65,v66" },
2244 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, "v60,v62,v65,v66" },
2245 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, "v60,v62,v65,v66" },
2246 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, "v60,v62,v65,v66" },
2247 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, "v60,v62,v65,v66" },
2248 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, "v60,v62,v65,v66" },
2249 { Hexagon::BI__builtin_HEXAGON_V6_veqh, "v60,v62,v65,v66" },
2250 { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, "v60,v62,v65,v66" },
2251 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, "v60,v62,v65,v66" },
2252 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, "v60,v62,v65,v66" },
2253 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, "v60,v62,v65,v66" },
2254 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, "v60,v62,v65,v66" },
2255 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, "v60,v62,v65,v66" },
2256 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, "v60,v62,v65,v66" },
2257 { Hexagon::BI__builtin_HEXAGON_V6_veqw, "v60,v62,v65,v66" },
2258 { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, "v60,v62,v65,v66" },
2259 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, "v60,v62,v65,v66" },
2260 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, "v60,v62,v65,v66" },
2261 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, "v60,v62,v65,v66" },
2262 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, "v60,v62,v65,v66" },
2263 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, "v60,v62,v65,v66" },
2264 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, "v60,v62,v65,v66" },
2265 { Hexagon::BI__builtin_HEXAGON_V6_vgtb, "v60,v62,v65,v66" },
2266 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, "v60,v62,v65,v66" },
2267 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, "v60,v62,v65,v66" },
2268 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, "v60,v62,v65,v66" },
2269 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, "v60,v62,v65,v66" },
2270 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, "v60,v62,v65,v66" },
2271 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, "v60,v62,v65,v66" },
2272 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, "v60,v62,v65,v66" },
2273 { Hexagon::BI__builtin_HEXAGON_V6_vgth, "v60,v62,v65,v66" },
2274 { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, "v60,v62,v65,v66" },
2275 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, "v60,v62,v65,v66" },
2276 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, "v60,v62,v65,v66" },
2277 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, "v60,v62,v65,v66" },
2278 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, "v60,v62,v65,v66" },
2279 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, "v60,v62,v65,v66" },
2280 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, "v60,v62,v65,v66" },
2281 { Hexagon::BI__builtin_HEXAGON_V6_vgtub, "v60,v62,v65,v66" },
2282 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, "v60,v62,v65,v66" },
2283 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, "v60,v62,v65,v66" },
2284 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, "v60,v62,v65,v66" },
2285 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, "v60,v62,v65,v66" },
2286 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, "v60,v62,v65,v66" },
2287 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, "v60,v62,v65,v66" },
2288 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, "v60,v62,v65,v66" },
2289 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, "v60,v62,v65,v66" },
2290 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, "v60,v62,v65,v66" },
2291 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, "v60,v62,v65,v66" },
2292 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, "v60,v62,v65,v66" },
2293 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, "v60,v62,v65,v66" },
2294 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, "v60,v62,v65,v66" },
2295 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, "v60,v62,v65,v66" },
2296 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, "v60,v62,v65,v66" },
2297 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, "v60,v62,v65,v66" },
2298 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, "v60,v62,v65,v66" },
2299 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, "v60,v62,v65,v66" },
2300 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, "v60,v62,v65,v66" },
2301 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, "v60,v62,v65,v66" },
2302 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, "v60,v62,v65,v66" },
2303 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, "v60,v62,v65,v66" },
2304 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, "v60,v62,v65,v66" },
2305 { Hexagon::BI__builtin_HEXAGON_V6_vgtw, "v60,v62,v65,v66" },
2306 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, "v60,v62,v65,v66" },
2307 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, "v60,v62,v65,v66" },
2308 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, "v60,v62,v65,v66" },
2309 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, "v60,v62,v65,v66" },
2310 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, "v60,v62,v65,v66" },
2311 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, "v60,v62,v65,v66" },
2312 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, "v60,v62,v65,v66" },
2313 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, "v60,v62,v65,v66" },
2314 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, "v60,v62,v65,v66" },
2315 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, "v60,v62,v65,v66" },
2316 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, "v60,v62,v65,v66" },
2317 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, "v60,v62,v65,v66" },
2318 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, "v60,v62,v65,v66" },
2319 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, "v62,v65,v66" },
2320 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, "v62,v65,v66" },
2321 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, "v60,v62,v65,v66" },
2322 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, "v60,v62,v65,v66" },
2323 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, "v60,v62,v65,v66" },
2324 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, "v60,v62,v65,v66" },
2325 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, "v60,v62,v65,v66" },
2326 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, "v60,v62,v65,v66" },
2327 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, "v60,v62,v65,v66" },
2328 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, "v60,v62,v65,v66" },
2329 { Hexagon::BI__builtin_HEXAGON_V6_vlut4, "v65,v66" },
2330 { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, "v65,v66" },
2331 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, "v60,v62,v65,v66" },
2332 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, "v60,v62,v65,v66" },
2333 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, "v62,v65,v66" },
2334 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, "v62,v65,v66" },
2335 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, "v62,v65,v66" },
2336 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, "v62,v65,v66" },
2337 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, "v60,v62,v65,v66" },
2338 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "v60,v62,v65,v66" },
2339 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, "v62,v65,v66" },
2340 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "v62,v65,v66" },
2341 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, "v60,v62,v65,v66" },
2342 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, "v60,v62,v65,v66" },
2343 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, "v62,v65,v66" },
2344 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, "v62,v65,v66" },
2345 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, "v62,v65,v66" },
2346 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, "v62,v65,v66" },
2347 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, "v60,v62,v65,v66" },
2348 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "v60,v62,v65,v66" },
2349 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, "v62,v65,v66" },
2350 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "v62,v65,v66" },
2351 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, "v62,v65,v66" },
2352 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, "v62,v65,v66" },
2353 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, "v60,v62,v65,v66" },
2354 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, "v60,v62,v65,v66" },
2355 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, "v60,v62,v65,v66" },
2356 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, "v60,v62,v65,v66" },
2357 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, "v60,v62,v65,v66" },
2358 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, "v60,v62,v65,v66" },
2359 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, "v60,v62,v65,v66" },
2360 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, "v60,v62,v65,v66" },
2361 { Hexagon::BI__builtin_HEXAGON_V6_vminb, "v62,v65,v66" },
2362 { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, "v62,v65,v66" },
2363 { Hexagon::BI__builtin_HEXAGON_V6_vminh, "v60,v62,v65,v66" },
2364 { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, "v60,v62,v65,v66" },
2365 { Hexagon::BI__builtin_HEXAGON_V6_vminub, "v60,v62,v65,v66" },
2366 { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, "v60,v62,v65,v66" },
2367 { Hexagon::BI__builtin_HEXAGON_V6_vminuh, "v60,v62,v65,v66" },
2368 { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, "v60,v62,v65,v66" },
2369 { Hexagon::BI__builtin_HEXAGON_V6_vminw, "v60,v62,v65,v66" },
2370 { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, "v60,v62,v65,v66" },
2371 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, "v60,v62,v65,v66" },
2372 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, "v60,v62,v65,v66" },
2373 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, "v60,v62,v65,v66" },
2374 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, "v60,v62,v65,v66" },
2375 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, "v60,v62,v65,v66" },
2376 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, "v60,v62,v65,v66" },
2377 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, "v65,v66" },
2378 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, "v65,v66" },
2379 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, "v65,v66" },
2380 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, "v65,v66" },
2381 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, "v60,v62,v65,v66" },
2382 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, "v60,v62,v65,v66" },
2383 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, "v60,v62,v65,v66" },
2384 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, "v60,v62,v65,v66" },
2385 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, "v60,v62,v65,v66" },
2386 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, "v60,v62,v65,v66" },
2387 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, "v65,v66" },
2388 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, "v65,v66" },
2389 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, "v62,v65,v66" },
2390 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, "v62,v65,v66" },
2391 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, "v62,v65,v66" },
2392 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, "v62,v65,v66" },
2393 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, "v65,v66" },
2394 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, "v65,v66" },
2395 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, "v65,v66" },
2396 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "v65,v66" },
2397 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, "v60,v62,v65,v66" },
2398 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, "v60,v62,v65,v66" },
2399 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, "v60,v62,v65,v66" },
2400 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, "v60,v62,v65,v66" },
2401 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, "v60,v62,v65,v66" },
2402 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, "v60,v62,v65,v66" },
2403 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, "v60,v62,v65,v66" },
2404 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, "v60,v62,v65,v66" },
2405 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, "v60,v62,v65,v66" },
2406 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, "v60,v62,v65,v66" },
2407 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, "v60,v62,v65,v66" },
2408 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, "v60,v62,v65,v66" },
2409 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, "v60,v62,v65,v66" },
2410 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, "v60,v62,v65,v66" },
2411 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, "v62,v65,v66" },
2412 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, "v62,v65,v66" },
2413 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, "v60,v62,v65,v66" },
2414 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, "v60,v62,v65,v66" },
2415 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, "v65,v66" },
2416 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, "v65,v66" },
2417 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, "v60,v62,v65,v66" },
2418 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "v60,v62,v65,v66" },
2419 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, "v60,v62,v65,v66" },
2420 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, "v60,v62,v65,v66" },
2421 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, "v60,v62,v65,v66" },
2422 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, "v60,v62,v65,v66" },
2423 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, "v60,v62,v65,v66" },
2424 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, "v60,v62,v65,v66" },
2425 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, "v60,v62,v65,v66" },
2426 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, "v60,v62,v65,v66" },
2427 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, "v60,v62,v65,v66" },
2428 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, "v60,v62,v65,v66" },
2429 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, "v60,v62,v65,v66" },
2430 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, "v60,v62,v65,v66" },
2431 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, "v60,v62,v65,v66" },
2432 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, "v60,v62,v65,v66" },
2433 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, "v60,v62,v65,v66" },
2434 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, "v60,v62,v65,v66" },
2435 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, "v60,v62,v65,v66" },
2436 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "v60,v62,v65,v66" },
2437 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, "v60,v62,v65,v66" },
2438 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, "v60,v62,v65,v66" },
2439 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, "v60,v62,v65,v66" },
2440 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "v60,v62,v65,v66" },
2441 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, "v60,v62,v65,v66" },
2442 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, "v60,v62,v65,v66" },
2443 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, "v60,v62,v65,v66" },
2444 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, "v60,v62,v65,v66" },
2445 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, "v60,v62,v65,v66" },
2446 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, "v60,v62,v65,v66" },
2447 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, "v60,v62,v65,v66" },
2448 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, "v60,v62,v65,v66" },
2449 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, "v60,v62,v65,v66" },
2450 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, "v60,v62,v65,v66" },
2451 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, "v60,v62,v65,v66" },
2452 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, "v60,v62,v65,v66" },
2453 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, "v60,v62,v65,v66" },
2454 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "v60,v62,v65,v66" },
2455 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, "v60,v62,v65,v66" },
2456 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, "v60,v62,v65,v66" },
2457 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, "v60,v62,v65,v66" },
2458 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "v60,v62,v65,v66" },
2459 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, "v62,v65,v66" },
2460 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, "v62,v65,v66" },
2461 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, "v62,v65,v66" },
2462 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "v62,v65,v66" },
2463 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, "v60,v62,v65,v66" },
2464 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, "v60,v62,v65,v66" },
2465 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, "v62,v65,v66" },
2466 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "v62,v65,v66" },
2467 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, "v60,v62,v65,v66" },
2468 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "v60,v62,v65,v66" },
2469 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "v60,v62,v65,v66" },
2470 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "v60,v62,v65,v66" },
2471 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, "v60,v62,v65,v66" },
2472 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "v60,v62,v65,v66" },
2473 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, "v60,v62,v65,v66" },
2474 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, "v60,v62,v65,v66" },
2475 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, "v60,v62,v65,v66" },
2476 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, "v60,v62,v65,v66" },
2477 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, "v60,v62,v65,v66" },
2478 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, "v60,v62,v65,v66" },
2479 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, "v60,v62,v65,v66" },
2480 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, "v60,v62,v65,v66" },
2481 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, "v60,v62,v65,v66" },
2482 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, "v60,v62,v65,v66" },
2483 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, "v60,v62,v65,v66" },
2484 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, "v60,v62,v65,v66" },
2485 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, "v65,v66" },
2486 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, "v65,v66" },
2487 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, "v65,v66" },
2488 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "v65,v66" },
2489 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, "v60,v62,v65,v66" },
2490 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, "v60,v62,v65,v66" },
2491 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, "v60,v62,v65,v66" },
2492 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "v60,v62,v65,v66" },
2493 { Hexagon::BI__builtin_HEXAGON_V6_vmux, "v60,v62,v65,v66" },
2494 { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, "v60,v62,v65,v66" },
2495 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, "v65,v66" },
2496 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, "v65,v66" },
2497 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, "v60,v62,v65,v66" },
2498 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, "v60,v62,v65,v66" },
2499 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, "v60,v62,v65,v66" },
2500 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, "v60,v62,v65,v66" },
2501 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, "v60,v62,v65,v66" },
2502 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, "v60,v62,v65,v66" },
2503 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, "v60,v62,v65,v66" },
2504 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, "v60,v62,v65,v66" },
2505 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, "v60,v62,v65,v66" },
2506 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, "v60,v62,v65,v66" },
2507 { Hexagon::BI__builtin_HEXAGON_V6_vnot, "v60,v62,v65,v66" },
2508 { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, "v60,v62,v65,v66" },
2509 { Hexagon::BI__builtin_HEXAGON_V6_vor, "v60,v62,v65,v66" },
2510 { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, "v60,v62,v65,v66" },
2511 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, "v60,v62,v65,v66" },
2512 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, "v60,v62,v65,v66" },
2513 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, "v60,v62,v65,v66" },
2514 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, "v60,v62,v65,v66" },
2515 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, "v60,v62,v65,v66" },
2516 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, "v60,v62,v65,v66" },
2517 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, "v60,v62,v65,v66" },
2518 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, "v60,v62,v65,v66" },
2519 { Hexagon::BI__builtin_HEXAGON_V6_vpackob, "v60,v62,v65,v66" },
2520 { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, "v60,v62,v65,v66" },
2521 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, "v60,v62,v65,v66" },
2522 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, "v60,v62,v65,v66" },
2523 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, "v60,v62,v65,v66" },
2524 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, "v60,v62,v65,v66" },
2525 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, "v60,v62,v65,v66" },
2526 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, "v60,v62,v65,v66" },
2527 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, "v60,v62,v65,v66" },
2528 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, "v60,v62,v65,v66" },
2529 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, "v65,v66" },
2530 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, "v65,v66" },
2531 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, "v65,v66" },
2532 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, "v65,v66" },
2533 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, "v65,v66" },
2534 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, "v65,v66" },
2535 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, "v60,v62,v65,v66" },
2536 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, "v60,v62,v65,v66" },
2537 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, "v65" },
2538 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, "v65" },
2539 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, "v65" },
2540 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, "v65" },
2541 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, "v60,v62,v65,v66" },
2542 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, "v60,v62,v65,v66" },
2543 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, "v60,v62,v65,v66" },
2544 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, "v60,v62,v65,v66" },
2545 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, "v60,v62,v65,v66" },
2546 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, "v60,v62,v65,v66" },
2547 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, "v60,v62,v65,v66" },
2548 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "v60,v62,v65,v66" },
2549 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, "v60,v62,v65,v66" },
2550 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, "v60,v62,v65,v66" },
2551 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, "v60,v62,v65,v66" },
2552 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "v60,v62,v65,v66" },
2553 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, "v60,v62,v65,v66" },
2554 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, "v60,v62,v65,v66" },
2555 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, "v60,v62,v65,v66" },
2556 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, "v60,v62,v65,v66" },
2557 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, "v60,v62,v65,v66" },
2558 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, "v60,v62,v65,v66" },
2559 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, "v60,v62,v65,v66" },
2560 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, "v60,v62,v65,v66" },
2561 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, "v60,v62,v65,v66" },
2562 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, "v60,v62,v65,v66" },
2563 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, "v60,v62,v65,v66" },
2564 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "v60,v62,v65,v66" },
2565 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, "v65" },
2566 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, "v65" },
2567 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, "v65" },
2568 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, "v65" },
2569 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, "v60,v62,v65,v66" },
2570 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, "v60,v62,v65,v66" },
2571 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, "v60,v62,v65,v66" },
2572 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "v60,v62,v65,v66" },
2573 { Hexagon::BI__builtin_HEXAGON_V6_vror, "v60,v62,v65,v66" },
2574 { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, "v60,v62,v65,v66" },
2575 { Hexagon::BI__builtin_HEXAGON_V6_vrotr, "v66" },
2576 { Hexagon::BI__builtin_HEXAGON_V6_vrotr_128B, "v66" },
2577 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, "v60,v62,v65,v66" },
2578 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, "v60,v62,v65,v66" },
2579 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, "v60,v62,v65,v66" },
2580 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, "v60,v62,v65,v66" },
2581 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, "v62,v65,v66" },
2582 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, "v62,v65,v66" },
2583 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, "v62,v65,v66" },
2584 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, "v62,v65,v66" },
2585 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, "v60,v62,v65,v66" },
2586 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, "v60,v62,v65,v66" },
2587 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, "v60,v62,v65,v66" },
2588 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, "v60,v62,v65,v66" },
2589 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, "v60,v62,v65,v66" },
2590 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, "v60,v62,v65,v66" },
2591 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, "v60,v62,v65,v66" },
2592 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, "v60,v62,v65,v66" },
2593 { Hexagon::BI__builtin_HEXAGON_V6_vsatdw, "v66" },
2594 { Hexagon::BI__builtin_HEXAGON_V6_vsatdw_128B, "v66" },
2595 { Hexagon::BI__builtin_HEXAGON_V6_vsathub, "v60,v62,v65,v66" },
2596 { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, "v60,v62,v65,v66" },
2597 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, "v62,v65,v66" },
2598 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, "v62,v65,v66" },
2599 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, "v60,v62,v65,v66" },
2600 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, "v60,v62,v65,v66" },
2601 { Hexagon::BI__builtin_HEXAGON_V6_vsb, "v60,v62,v65,v66" },
2602 { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, "v60,v62,v65,v66" },
2603 { Hexagon::BI__builtin_HEXAGON_V6_vsh, "v60,v62,v65,v66" },
2604 { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, "v60,v62,v65,v66" },
2605 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, "v60,v62,v65,v66" },
2606 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, "v60,v62,v65,v66" },
2607 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, "v60,v62,v65,v66" },
2608 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, "v60,v62,v65,v66" },
2609 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, "v60,v62,v65,v66" },
2610 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, "v60,v62,v65,v66" },
2611 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, "v60,v62,v65,v66" },
2612 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, "v60,v62,v65,v66" },
2613 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, "v60,v62,v65,v66" },
2614 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, "v60,v62,v65,v66" },
2615 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, "v60,v62,v65,v66" },
2616 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, "v60,v62,v65,v66" },
2617 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, "v60,v62,v65,v66" },
2618 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, "v60,v62,v65,v66" },
2619 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, "v60,v62,v65,v66" },
2620 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, "v60,v62,v65,v66" },
2621 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, "v60,v62,v65,v66" },
2622 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, "v60,v62,v65,v66" },
2623 { Hexagon::BI__builtin_HEXAGON_V6_vsubb, "v60,v62,v65,v66" },
2624 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, "v60,v62,v65,v66" },
2625 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, "v60,v62,v65,v66" },
2626 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, "v60,v62,v65,v66" },
2627 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, "v62,v65,v66" },
2628 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, "v62,v65,v66" },
2629 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, "v62,v65,v66" },
2630 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, "v62,v65,v66" },
2631 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, "v62,v65,v66" },
2632 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, "v62,v65,v66" },
2633 { Hexagon::BI__builtin_HEXAGON_V6_vsubh, "v60,v62,v65,v66" },
2634 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, "v60,v62,v65,v66" },
2635 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, "v60,v62,v65,v66" },
2636 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, "v60,v62,v65,v66" },
2637 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, "v60,v62,v65,v66" },
2638 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, "v60,v62,v65,v66" },
2639 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, "v60,v62,v65,v66" },
2640 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, "v60,v62,v65,v66" },
2641 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, "v60,v62,v65,v66" },
2642 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, "v60,v62,v65,v66" },
2643 { Hexagon::BI__builtin_HEXAGON_V6_vsububh, "v60,v62,v65,v66" },
2644 { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, "v60,v62,v65,v66" },
2645 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, "v60,v62,v65,v66" },
2646 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, "v60,v62,v65,v66" },
2647 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, "v60,v62,v65,v66" },
2648 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, "v60,v62,v65,v66" },
2649 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, "v62,v65,v66" },
2650 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, "v62,v65,v66" },
2651 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, "v60,v62,v65,v66" },
2652 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, "v60,v62,v65,v66" },
2653 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, "v60,v62,v65,v66" },
2654 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "v60,v62,v65,v66" },
2655 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, "v60,v62,v65,v66" },
2656 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, "v60,v62,v65,v66" },
2657 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, "v62,v65,v66" },
2658 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, "v62,v65,v66" },
2659 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, "v62,v65,v66" },
2660 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "v62,v65,v66" },
2661 { Hexagon::BI__builtin_HEXAGON_V6_vsubw, "v60,v62,v65,v66" },
2662 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, "v60,v62,v65,v66" },
2663 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, "v60,v62,v65,v66" },
2664 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, "v60,v62,v65,v66" },
2665 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, "v60,v62,v65,v66" },
2666 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, "v60,v62,v65,v66" },
2667 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, "v60,v62,v65,v66" },
2668 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, "v60,v62,v65,v66" },
2669 { Hexagon::BI__builtin_HEXAGON_V6_vswap, "v60,v62,v65,v66" },
2670 { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, "v60,v62,v65,v66" },
2671 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, "v60,v62,v65,v66" },
2672 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, "v60,v62,v65,v66" },
2673 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, "v60,v62,v65,v66" },
2674 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, "v60,v62,v65,v66" },
2675 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, "v60,v62,v65,v66" },
2676 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, "v60,v62,v65,v66" },
2677 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, "v60,v62,v65,v66" },
2678 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, "v60,v62,v65,v66" },
2679 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, "v60,v62,v65,v66" },
2680 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, "v60,v62,v65,v66" },
2681 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, "v60,v62,v65,v66" },
2682 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "v60,v62,v65,v66" },
2683 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, "v60,v62,v65,v66" },
2684 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, "v60,v62,v65,v66" },
2685 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, "v60,v62,v65,v66" },
2686 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, "v60,v62,v65,v66" },
2687 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, "v60,v62,v65,v66" },
2688 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, "v60,v62,v65,v66" },
2689 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, "v60,v62,v65,v66" },
2690 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, "v60,v62,v65,v66" },
2691 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, "v60,v62,v65,v66" },
2692 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, "v60,v62,v65,v66" },
2693 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, "v60,v62,v65,v66" },
2694 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, "v60,v62,v65,v66" },
2695 { Hexagon::BI__builtin_HEXAGON_V6_vxor, "v60,v62,v65,v66" },
2696 { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, "v60,v62,v65,v66" },
2697 { Hexagon::BI__builtin_HEXAGON_V6_vzb, "v60,v62,v65,v66" },
2698 { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, "v60,v62,v65,v66" },
2699 { Hexagon::BI__builtin_HEXAGON_V6_vzh, "v60,v62,v65,v66" },
2700 { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, "v60,v62,v65,v66" },
2703 // Sort the tables on first execution so we can binary search them.
2704 auto SortCmp = [](const BuiltinAndString &LHS, const BuiltinAndString &RHS) {
2705 return LHS.BuiltinID < RHS.BuiltinID;
2707 static const bool SortOnce =
2708 (llvm::sort(ValidCPU, SortCmp),
2709 llvm::sort(ValidHVX, SortCmp), true);
2711 auto LowerBoundCmp = [](const BuiltinAndString &BI, unsigned BuiltinID) {
2712 return BI.BuiltinID < BuiltinID;
2715 const TargetInfo &TI = Context.getTargetInfo();
2717 const BuiltinAndString *FC =
2718 llvm::lower_bound(ValidCPU, BuiltinID, LowerBoundCmp);
2719 if (FC != std::end(ValidCPU) && FC->BuiltinID == BuiltinID) {
2720 const TargetOptions &Opts = TI.getTargetOpts();
2721 StringRef CPU = Opts.CPU;
2723 assert(CPU.startswith("hexagon") && "Unexpected CPU name");
2724 CPU.consume_front("hexagon");
2725 SmallVector<StringRef, 3> CPUs;
2726 StringRef(FC->Str).split(CPUs, ',');
2727 if (llvm::none_of(CPUs, [CPU](StringRef S) { return S == CPU; }))
2728 return Diag(TheCall->getBeginLoc(),
2729 diag::err_hexagon_builtin_unsupported_cpu);
2733 const BuiltinAndString *FH =
2734 llvm::lower_bound(ValidHVX, BuiltinID, LowerBoundCmp);
2735 if (FH != std::end(ValidHVX) && FH->BuiltinID == BuiltinID) {
2736 if (!TI.hasFeature("hvx"))
2737 return Diag(TheCall->getBeginLoc(),
2738 diag::err_hexagon_builtin_requires_hvx);
2740 SmallVector<StringRef, 3> HVXs;
2741 StringRef(FH->Str).split(HVXs, ',');
2742 bool IsValid = llvm::any_of(HVXs,
2743 [&TI] (StringRef V) {
2744 std::string F = "hvx" + V.str();
2745 return TI.hasFeature(F);
2748 return Diag(TheCall->getBeginLoc(),
2749 diag::err_hexagon_builtin_unsupported_hvx);
2755 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
2762 struct BuiltinInfo {
2767 static BuiltinInfo Infos[] = {
2768 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
2769 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
2770 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
2771 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 0 }} },
2772 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
2773 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
2774 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
2775 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
2776 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
2777 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
2778 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },
2780 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
2781 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
2782 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
2783 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
2784 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
2785 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
2786 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
2787 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
2788 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
2789 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
2790 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },
2792 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
2793 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
2794 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
2795 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
2796 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
2797 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
2798 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
2799 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
2800 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
2801 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
2802 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
2803 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
2804 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
2805 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
2806 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
2807 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
2808 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
2809 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
2810 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
2811 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
2812 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
2813 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
2814 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
2815 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
2816 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
2817 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
2818 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
2819 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
2820 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
2821 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
2822 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
2823 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
2824 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
2825 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
2826 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
2827 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
2828 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
2829 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
2830 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
2831 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
2832 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
2833 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
2834 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
2835 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
2836 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
2837 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
2838 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
2839 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
2840 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
2841 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
2842 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
2843 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
2844 {{ 1, false, 6, 0 }} },
2845 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
2846 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
2847 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
2848 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
2849 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
2850 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
2851 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
2852 {{ 1, false, 5, 0 }} },
2853 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
2854 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
2855 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
2856 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
2857 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
2858 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
2859 { 2, false, 5, 0 }} },
2860 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
2861 { 2, false, 6, 0 }} },
2862 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
2863 { 3, false, 5, 0 }} },
2864 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
2865 { 3, false, 6, 0 }} },
2866 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
2867 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
2868 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
2869 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
2870 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
2871 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
2872 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
2873 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
2874 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
2875 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
2876 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
2877 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
2878 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
2879 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
2880 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
2881 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
2882 {{ 2, false, 4, 0 },
2883 { 3, false, 5, 0 }} },
2884 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
2885 {{ 2, false, 4, 0 },
2886 { 3, false, 5, 0 }} },
2887 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
2888 {{ 2, false, 4, 0 },
2889 { 3, false, 5, 0 }} },
2890 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
2891 {{ 2, false, 4, 0 },
2892 { 3, false, 5, 0 }} },
2893 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
2894 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
2895 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
2896 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
2897 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
2898 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
2899 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
2900 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
2901 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} },
2902 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
2903 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 },
2904 { 2, false, 5, 0 }} },
2905 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 },
2906 { 2, false, 6, 0 }} },
2907 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
2908 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
2909 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
2910 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
2911 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
2912 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
2913 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
2914 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
2915 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
2916 {{ 1, false, 4, 0 }} },
2917 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
2918 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
2919 {{ 1, false, 4, 0 }} },
2920 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
2921 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
2922 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
2923 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
2924 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
2925 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
2926 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
2927 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
2928 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
2929 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
2930 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
2931 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
2932 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
2933 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
2934 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
2935 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
2936 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
2937 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
2938 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
2939 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
2940 {{ 3, false, 1, 0 }} },
2941 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
2942 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
2943 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
2944 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
2945 {{ 3, false, 1, 0 }} },
2946 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
2947 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
2948 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
2949 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
2950 {{ 3, false, 1, 0 }} },
2953 // Use a dynamically initialized static to sort the table exactly once on
2955 static const bool SortOnce =
2957 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
2958 return LHS.BuiltinID < RHS.BuiltinID;
2963 const BuiltinInfo *F = llvm::partition_point(
2964 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
2965 if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
2970 for (const ArgInfo &A : F->Infos) {
2971 // Ignore empty ArgInfo elements.
2972 if (A.BitWidth == 0)
2975 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
2976 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
2978 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
2980 unsigned M = 1 << A.Align;
2983 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
2984 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
2990 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
2991 CallExpr *TheCall) {
2992 return CheckHexagonBuiltinCpu(BuiltinID, TheCall) ||
2993 CheckHexagonBuiltinArgument(BuiltinID, TheCall);
2997 // CheckMipsBuiltinFunctionCall - Checks the constant value passed to the
2998 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The
2999 // ordering for DSP is unspecified. MSA is ordered by the data format used
3000 // by the underlying instruction i.e., df/m, df/n and then by size.
3002 // FIXME: The size tests here should instead be tablegen'd along with the
3003 // definitions from include/clang/Basic/BuiltinsMips.def.
3004 // FIXME: GCC is strict on signedness for some of these intrinsics, we should
3006 bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
3007 unsigned i = 0, l = 0, u = 0, m = 0;
3008 switch (BuiltinID) {
3009 default: return false;
3010 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
3011 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
3012 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
3013 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
3014 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
3015 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
3016 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
3017 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
3019 // These intrinsics take an unsigned 3 bit immediate.
3020 case Mips::BI__builtin_msa_bclri_b:
3021 case Mips::BI__builtin_msa_bnegi_b:
3022 case Mips::BI__builtin_msa_bseti_b:
3023 case Mips::BI__builtin_msa_sat_s_b:
3024 case Mips::BI__builtin_msa_sat_u_b:
3025 case Mips::BI__builtin_msa_slli_b:
3026 case Mips::BI__builtin_msa_srai_b:
3027 case Mips::BI__builtin_msa_srari_b:
3028 case Mips::BI__builtin_msa_srli_b:
3029 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
3030 case Mips::BI__builtin_msa_binsli_b:
3031 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
3032 // These intrinsics take an unsigned 4 bit immediate.
3033 case Mips::BI__builtin_msa_bclri_h:
3034 case Mips::BI__builtin_msa_bnegi_h:
3035 case Mips::BI__builtin_msa_bseti_h:
3036 case Mips::BI__builtin_msa_sat_s_h:
3037 case Mips::BI__builtin_msa_sat_u_h:
3038 case Mips::BI__builtin_msa_slli_h:
3039 case Mips::BI__builtin_msa_srai_h:
3040 case Mips::BI__builtin_msa_srari_h:
3041 case Mips::BI__builtin_msa_srli_h:
3042 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
3043 case Mips::BI__builtin_msa_binsli_h:
3044 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
3045 // These intrinsics take an unsigned 5 bit immediate.
3046 // The first block of intrinsics actually have an unsigned 5 bit field,
3047 // not a df/n field.
3048 case Mips::BI__builtin_msa_cfcmsa:
3049 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
3050 case Mips::BI__builtin_msa_clei_u_b:
3051 case Mips::BI__builtin_msa_clei_u_h:
3052 case Mips::BI__builtin_msa_clei_u_w:
3053 case Mips::BI__builtin_msa_clei_u_d:
3054 case Mips::BI__builtin_msa_clti_u_b:
3055 case Mips::BI__builtin_msa_clti_u_h:
3056 case Mips::BI__builtin_msa_clti_u_w:
3057 case Mips::BI__builtin_msa_clti_u_d:
3058 case Mips::BI__builtin_msa_maxi_u_b:
3059 case Mips::BI__builtin_msa_maxi_u_h:
3060 case Mips::BI__builtin_msa_maxi_u_w:
3061 case Mips::BI__builtin_msa_maxi_u_d:
3062 case Mips::BI__builtin_msa_mini_u_b:
3063 case Mips::BI__builtin_msa_mini_u_h:
3064 case Mips::BI__builtin_msa_mini_u_w:
3065 case Mips::BI__builtin_msa_mini_u_d:
3066 case Mips::BI__builtin_msa_addvi_b:
3067 case Mips::BI__builtin_msa_addvi_h:
3068 case Mips::BI__builtin_msa_addvi_w:
3069 case Mips::BI__builtin_msa_addvi_d:
3070 case Mips::BI__builtin_msa_bclri_w:
3071 case Mips::BI__builtin_msa_bnegi_w:
3072 case Mips::BI__builtin_msa_bseti_w:
3073 case Mips::BI__builtin_msa_sat_s_w:
3074 case Mips::BI__builtin_msa_sat_u_w:
3075 case Mips::BI__builtin_msa_slli_w:
3076 case Mips::BI__builtin_msa_srai_w:
3077 case Mips::BI__builtin_msa_srari_w:
3078 case Mips::BI__builtin_msa_srli_w:
3079 case Mips::BI__builtin_msa_srlri_w:
3080 case Mips::BI__builtin_msa_subvi_b:
3081 case Mips::BI__builtin_msa_subvi_h:
3082 case Mips::BI__builtin_msa_subvi_w:
3083 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
3084 case Mips::BI__builtin_msa_binsli_w:
3085 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
3086 // These intrinsics take an unsigned 6 bit immediate.
3087 case Mips::BI__builtin_msa_bclri_d:
3088 case Mips::BI__builtin_msa_bnegi_d:
3089 case Mips::BI__builtin_msa_bseti_d:
3090 case Mips::BI__builtin_msa_sat_s_d:
3091 case Mips::BI__builtin_msa_sat_u_d:
3092 case Mips::BI__builtin_msa_slli_d:
3093 case Mips::BI__builtin_msa_srai_d:
3094 case Mips::BI__builtin_msa_srari_d:
3095 case Mips::BI__builtin_msa_srli_d:
3096 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
3097 case Mips::BI__builtin_msa_binsli_d:
3098 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
3099 // These intrinsics take a signed 5 bit immediate.
3100 case Mips::BI__builtin_msa_ceqi_b:
3101 case Mips::BI__builtin_msa_ceqi_h:
3102 case Mips::BI__builtin_msa_ceqi_w:
3103 case Mips::BI__builtin_msa_ceqi_d:
3104 case Mips::BI__builtin_msa_clti_s_b:
3105 case Mips::BI__builtin_msa_clti_s_h:
3106 case Mips::BI__builtin_msa_clti_s_w:
3107 case Mips::BI__builtin_msa_clti_s_d:
3108 case Mips::BI__builtin_msa_clei_s_b:
3109 case Mips::BI__builtin_msa_clei_s_h:
3110 case Mips::BI__builtin_msa_clei_s_w:
3111 case Mips::BI__builtin_msa_clei_s_d:
3112 case Mips::BI__builtin_msa_maxi_s_b:
3113 case Mips::BI__builtin_msa_maxi_s_h:
3114 case Mips::BI__builtin_msa_maxi_s_w:
3115 case Mips::BI__builtin_msa_maxi_s_d:
3116 case Mips::BI__builtin_msa_mini_s_b:
3117 case Mips::BI__builtin_msa_mini_s_h:
3118 case Mips::BI__builtin_msa_mini_s_w:
3119 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
3120 // These intrinsics take an unsigned 8 bit immediate.
3121 case Mips::BI__builtin_msa_andi_b:
3122 case Mips::BI__builtin_msa_nori_b:
3123 case Mips::BI__builtin_msa_ori_b:
3124 case Mips::BI__builtin_msa_shf_b:
3125 case Mips::BI__builtin_msa_shf_h:
3126 case Mips::BI__builtin_msa_shf_w:
3127 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
3128 case Mips::BI__builtin_msa_bseli_b:
3129 case Mips::BI__builtin_msa_bmnzi_b:
3130 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
3132 // These intrinsics take an unsigned 4 bit immediate.
3133 case Mips::BI__builtin_msa_copy_s_b:
3134 case Mips::BI__builtin_msa_copy_u_b:
3135 case Mips::BI__builtin_msa_insve_b:
3136 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
3137 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
3138 // These intrinsics take an unsigned 3 bit immediate.
3139 case Mips::BI__builtin_msa_copy_s_h:
3140 case Mips::BI__builtin_msa_copy_u_h:
3141 case Mips::BI__builtin_msa_insve_h:
3142 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
3143 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
3144 // These intrinsics take an unsigned 2 bit immediate.
3145 case Mips::BI__builtin_msa_copy_s_w:
3146 case Mips::BI__builtin_msa_copy_u_w:
3147 case Mips::BI__builtin_msa_insve_w:
3148 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
3149 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
3150 // These intrinsics take an unsigned 1 bit immediate.
3151 case Mips::BI__builtin_msa_copy_s_d:
3152 case Mips::BI__builtin_msa_copy_u_d:
3153 case Mips::BI__builtin_msa_insve_d:
3154 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
3155 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
3156 // Memory offsets and immediate loads.
3157 // These intrinsics take a signed 10 bit immediate.
3158 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
3159 case Mips::BI__builtin_msa_ldi_h:
3160 case Mips::BI__builtin_msa_ldi_w:
3161 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
3162 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
3163 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
3164 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
3165 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
3166 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
3167 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
3168 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
3169 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
3173 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3175 return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
3176 SemaBuiltinConstantArgMultiple(TheCall, i, m);
3179 bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
3180 unsigned i = 0, l = 0, u = 0;
3181 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde ||
3182 BuiltinID == PPC::BI__builtin_divdeu ||
3183 BuiltinID == PPC::BI__builtin_bpermd;
3184 bool IsTarget64Bit = Context.getTargetInfo()
3185 .getTypeWidth(Context
3187 .getIntPtrType()) == 64;
3188 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe ||
3189 BuiltinID == PPC::BI__builtin_divweu ||
3190 BuiltinID == PPC::BI__builtin_divde ||
3191 BuiltinID == PPC::BI__builtin_divdeu;
3193 if (Is64BitBltin && !IsTarget64Bit)
3194 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
3195 << TheCall->getSourceRange();
3197 if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) ||
3198 (BuiltinID == PPC::BI__builtin_bpermd &&
3199 !Context.getTargetInfo().hasFeature("bpermd")))
3200 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7)
3201 << TheCall->getSourceRange();
3203 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool {
3204 if (!Context.getTargetInfo().hasFeature("vsx"))
3205 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7)
3206 << TheCall->getSourceRange();
3210 switch (BuiltinID) {
3211 default: return false;
3212 case PPC::BI__builtin_altivec_crypto_vshasigmaw:
3213 case PPC::BI__builtin_altivec_crypto_vshasigmad:
3214 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
3215 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
3216 case PPC::BI__builtin_tbegin:
3217 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break;
3218 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break;
3219 case PPC::BI__builtin_tabortwc:
3220 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break;
3221 case PPC::BI__builtin_tabortwci:
3222 case PPC::BI__builtin_tabortdci:
3223 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
3224 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
3225 case PPC::BI__builtin_vsx_xxpermdi:
3226 case PPC::BI__builtin_vsx_xxsldwi:
3227 return SemaBuiltinVSX(TheCall);
3228 case PPC::BI__builtin_unpack_vector_int128:
3229 return SemaVSXCheck(TheCall) ||
3230 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3231 case PPC::BI__builtin_pack_vector_int128:
3232 return SemaVSXCheck(TheCall);
3234 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3237 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
3238 CallExpr *TheCall) {
3239 if (BuiltinID == SystemZ::BI__builtin_tabort) {
3240 Expr *Arg = TheCall->getArg(0);
3241 llvm::APSInt AbortCode(32);
3242 if (Arg->isIntegerConstantExpr(AbortCode, Context) &&
3243 AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256)
3244 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
3245 << Arg->getSourceRange();
3248 // For intrinsics which take an immediate value as part of the instruction,
3249 // range check them here.
3250 unsigned i = 0, l = 0, u = 0;
3251 switch (BuiltinID) {
3252 default: return false;
3253 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
3254 case SystemZ::BI__builtin_s390_verimb:
3255 case SystemZ::BI__builtin_s390_verimh:
3256 case SystemZ::BI__builtin_s390_verimf:
3257 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
3258 case SystemZ::BI__builtin_s390_vfaeb:
3259 case SystemZ::BI__builtin_s390_vfaeh:
3260 case SystemZ::BI__builtin_s390_vfaef:
3261 case SystemZ::BI__builtin_s390_vfaebs:
3262 case SystemZ::BI__builtin_s390_vfaehs:
3263 case SystemZ::BI__builtin_s390_vfaefs:
3264 case SystemZ::BI__builtin_s390_vfaezb:
3265 case SystemZ::BI__builtin_s390_vfaezh:
3266 case SystemZ::BI__builtin_s390_vfaezf:
3267 case SystemZ::BI__builtin_s390_vfaezbs:
3268 case SystemZ::BI__builtin_s390_vfaezhs:
3269 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
3270 case SystemZ::BI__builtin_s390_vfisb:
3271 case SystemZ::BI__builtin_s390_vfidb:
3272 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
3273 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
3274 case SystemZ::BI__builtin_s390_vftcisb:
3275 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
3276 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
3277 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
3278 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
3279 case SystemZ::BI__builtin_s390_vstrcb:
3280 case SystemZ::BI__builtin_s390_vstrch:
3281 case SystemZ::BI__builtin_s390_vstrcf:
3282 case SystemZ::BI__builtin_s390_vstrczb:
3283 case SystemZ::BI__builtin_s390_vstrczh:
3284 case SystemZ::BI__builtin_s390_vstrczf:
3285 case SystemZ::BI__builtin_s390_vstrcbs:
3286 case SystemZ::BI__builtin_s390_vstrchs:
3287 case SystemZ::BI__builtin_s390_vstrcfs:
3288 case SystemZ::BI__builtin_s390_vstrczbs:
3289 case SystemZ::BI__builtin_s390_vstrczhs:
3290 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
3291 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
3292 case SystemZ::BI__builtin_s390_vfminsb:
3293 case SystemZ::BI__builtin_s390_vfmaxsb:
3294 case SystemZ::BI__builtin_s390_vfmindb:
3295 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
3296 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
3297 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
3299 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3302 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
3303 /// This checks that the target supports __builtin_cpu_supports and
3304 /// that the string argument is constant and valid.
3305 static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
3306 Expr *Arg = TheCall->getArg(0);
3308 // Check if the argument is a string literal.
3309 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3310 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3311 << Arg->getSourceRange();
3313 // Check the contents of the string.
3315 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3316 if (!S.Context.getTargetInfo().validateCpuSupports(Feature))
3317 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
3318 << Arg->getSourceRange();
3322 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
3323 /// This checks that the target supports __builtin_cpu_is and
3324 /// that the string argument is constant and valid.
3325 static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) {
3326 Expr *Arg = TheCall->getArg(0);
3328 // Check if the argument is a string literal.
3329 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3330 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3331 << Arg->getSourceRange();
3333 // Check the contents of the string.
3335 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3336 if (!S.Context.getTargetInfo().validateCpuIs(Feature))
3337 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
3338 << Arg->getSourceRange();
3342 // Check if the rounding mode is legal.
3343 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
3344 // Indicates if this instruction has rounding control or just SAE.
3347 unsigned ArgNum = 0;
3348 switch (BuiltinID) {
3351 case X86::BI__builtin_ia32_vcvttsd2si32:
3352 case X86::BI__builtin_ia32_vcvttsd2si64:
3353 case X86::BI__builtin_ia32_vcvttsd2usi32:
3354 case X86::BI__builtin_ia32_vcvttsd2usi64:
3355 case X86::BI__builtin_ia32_vcvttss2si32:
3356 case X86::BI__builtin_ia32_vcvttss2si64:
3357 case X86::BI__builtin_ia32_vcvttss2usi32:
3358 case X86::BI__builtin_ia32_vcvttss2usi64:
3361 case X86::BI__builtin_ia32_maxpd512:
3362 case X86::BI__builtin_ia32_maxps512:
3363 case X86::BI__builtin_ia32_minpd512:
3364 case X86::BI__builtin_ia32_minps512:
3367 case X86::BI__builtin_ia32_cvtps2pd512_mask:
3368 case X86::BI__builtin_ia32_cvttpd2dq512_mask:
3369 case X86::BI__builtin_ia32_cvttpd2qq512_mask:
3370 case X86::BI__builtin_ia32_cvttpd2udq512_mask:
3371 case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
3372 case X86::BI__builtin_ia32_cvttps2dq512_mask:
3373 case X86::BI__builtin_ia32_cvttps2qq512_mask:
3374 case X86::BI__builtin_ia32_cvttps2udq512_mask:
3375 case X86::BI__builtin_ia32_cvttps2uqq512_mask:
3376 case X86::BI__builtin_ia32_exp2pd_mask:
3377 case X86::BI__builtin_ia32_exp2ps_mask:
3378 case X86::BI__builtin_ia32_getexppd512_mask:
3379 case X86::BI__builtin_ia32_getexpps512_mask:
3380 case X86::BI__builtin_ia32_rcp28pd_mask:
3381 case X86::BI__builtin_ia32_rcp28ps_mask:
3382 case X86::BI__builtin_ia32_rsqrt28pd_mask:
3383 case X86::BI__builtin_ia32_rsqrt28ps_mask:
3384 case X86::BI__builtin_ia32_vcomisd:
3385 case X86::BI__builtin_ia32_vcomiss:
3386 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
3389 case X86::BI__builtin_ia32_cmppd512_mask:
3390 case X86::BI__builtin_ia32_cmpps512_mask:
3391 case X86::BI__builtin_ia32_cmpsd_mask:
3392 case X86::BI__builtin_ia32_cmpss_mask:
3393 case X86::BI__builtin_ia32_cvtss2sd_round_mask:
3394 case X86::BI__builtin_ia32_getexpsd128_round_mask:
3395 case X86::BI__builtin_ia32_getexpss128_round_mask:
3396 case X86::BI__builtin_ia32_getmantpd512_mask:
3397 case X86::BI__builtin_ia32_getmantps512_mask:
3398 case X86::BI__builtin_ia32_maxsd_round_mask:
3399 case X86::BI__builtin_ia32_maxss_round_mask:
3400 case X86::BI__builtin_ia32_minsd_round_mask:
3401 case X86::BI__builtin_ia32_minss_round_mask:
3402 case X86::BI__builtin_ia32_rcp28sd_round_mask:
3403 case X86::BI__builtin_ia32_rcp28ss_round_mask:
3404 case X86::BI__builtin_ia32_reducepd512_mask:
3405 case X86::BI__builtin_ia32_reduceps512_mask:
3406 case X86::BI__builtin_ia32_rndscalepd_mask:
3407 case X86::BI__builtin_ia32_rndscaleps_mask:
3408 case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
3409 case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
3412 case X86::BI__builtin_ia32_fixupimmpd512_mask:
3413 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
3414 case X86::BI__builtin_ia32_fixupimmps512_mask:
3415 case X86::BI__builtin_ia32_fixupimmps512_maskz:
3416 case X86::BI__builtin_ia32_fixupimmsd_mask:
3417 case X86::BI__builtin_ia32_fixupimmsd_maskz:
3418 case X86::BI__builtin_ia32_fixupimmss_mask:
3419 case X86::BI__builtin_ia32_fixupimmss_maskz:
3420 case X86::BI__builtin_ia32_getmantsd_round_mask:
3421 case X86::BI__builtin_ia32_getmantss_round_mask:
3422 case X86::BI__builtin_ia32_rangepd512_mask:
3423 case X86::BI__builtin_ia32_rangeps512_mask:
3424 case X86::BI__builtin_ia32_rangesd128_round_mask:
3425 case X86::BI__builtin_ia32_rangess128_round_mask:
3426 case X86::BI__builtin_ia32_reducesd_mask:
3427 case X86::BI__builtin_ia32_reducess_mask:
3428 case X86::BI__builtin_ia32_rndscalesd_round_mask:
3429 case X86::BI__builtin_ia32_rndscaless_round_mask:
3432 case X86::BI__builtin_ia32_vcvtsd2si64:
3433 case X86::BI__builtin_ia32_vcvtsd2si32:
3434 case X86::BI__builtin_ia32_vcvtsd2usi32:
3435 case X86::BI__builtin_ia32_vcvtsd2usi64:
3436 case X86::BI__builtin_ia32_vcvtss2si32:
3437 case X86::BI__builtin_ia32_vcvtss2si64:
3438 case X86::BI__builtin_ia32_vcvtss2usi32:
3439 case X86::BI__builtin_ia32_vcvtss2usi64:
3440 case X86::BI__builtin_ia32_sqrtpd512:
3441 case X86::BI__builtin_ia32_sqrtps512:
3445 case X86::BI__builtin_ia32_addpd512:
3446 case X86::BI__builtin_ia32_addps512:
3447 case X86::BI__builtin_ia32_divpd512:
3448 case X86::BI__builtin_ia32_divps512:
3449 case X86::BI__builtin_ia32_mulpd512:
3450 case X86::BI__builtin_ia32_mulps512:
3451 case X86::BI__builtin_ia32_subpd512:
3452 case X86::BI__builtin_ia32_subps512:
3453 case X86::BI__builtin_ia32_cvtsi2sd64:
3454 case X86::BI__builtin_ia32_cvtsi2ss32:
3455 case X86::BI__builtin_ia32_cvtsi2ss64:
3456 case X86::BI__builtin_ia32_cvtusi2sd64:
3457 case X86::BI__builtin_ia32_cvtusi2ss32:
3458 case X86::BI__builtin_ia32_cvtusi2ss64:
3462 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
3463 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
3464 case X86::BI__builtin_ia32_cvtpd2ps512_mask:
3465 case X86::BI__builtin_ia32_cvtpd2dq512_mask:
3466 case X86::BI__builtin_ia32_cvtpd2qq512_mask:
3467 case X86::BI__builtin_ia32_cvtpd2udq512_mask:
3468 case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
3469 case X86::BI__builtin_ia32_cvtps2dq512_mask:
3470 case X86::BI__builtin_ia32_cvtps2qq512_mask:
3471 case X86::BI__builtin_ia32_cvtps2udq512_mask:
3472 case X86::BI__builtin_ia32_cvtps2uqq512_mask:
3473 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
3474 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
3475 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
3476 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
3480 case X86::BI__builtin_ia32_addss_round_mask:
3481 case X86::BI__builtin_ia32_addsd_round_mask:
3482 case X86::BI__builtin_ia32_divss_round_mask:
3483 case X86::BI__builtin_ia32_divsd_round_mask:
3484 case X86::BI__builtin_ia32_mulss_round_mask:
3485 case X86::BI__builtin_ia32_mulsd_round_mask:
3486 case X86::BI__builtin_ia32_subss_round_mask:
3487 case X86::BI__builtin_ia32_subsd_round_mask:
3488 case X86::BI__builtin_ia32_scalefpd512_mask:
3489 case X86::BI__builtin_ia32_scalefps512_mask:
3490 case X86::BI__builtin_ia32_scalefsd_round_mask:
3491 case X86::BI__builtin_ia32_scalefss_round_mask:
3492 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
3493 case X86::BI__builtin_ia32_sqrtsd_round_mask:
3494 case X86::BI__builtin_ia32_sqrtss_round_mask:
3495 case X86::BI__builtin_ia32_vfmaddsd3_mask:
3496 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
3497 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
3498 case X86::BI__builtin_ia32_vfmaddss3_mask:
3499 case X86::BI__builtin_ia32_vfmaddss3_maskz:
3500 case X86::BI__builtin_ia32_vfmaddss3_mask3:
3501 case X86::BI__builtin_ia32_vfmaddpd512_mask:
3502 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
3503 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
3504 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
3505 case X86::BI__builtin_ia32_vfmaddps512_mask:
3506 case X86::BI__builtin_ia32_vfmaddps512_maskz:
3507 case X86::BI__builtin_ia32_vfmaddps512_mask3:
3508 case X86::BI__builtin_ia32_vfmsubps512_mask3:
3509 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
3510 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
3511 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
3512 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
3513 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
3514 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
3515 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
3516 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
3522 llvm::APSInt Result;
3524 // We can't check the value of a dependent argument.
3525 Expr *Arg = TheCall->getArg(ArgNum);
3526 if (Arg->isTypeDependent() || Arg->isValueDependent())
3529 // Check constant-ness first.
3530 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3533 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit
3534 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only
3535 // combined with ROUND_NO_EXC.
3536 if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
3537 Result == 8/*ROUND_NO_EXC*/ ||
3538 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
3541 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
3542 << Arg->getSourceRange();
// Check if the gather/scatter scale is legal.
// Returns true (and emits err_x86_builtin_invalid_scale) when the scale
// immediate is a constant other than 1, 2, 4, or 8.
bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                             CallExpr *TheCall) {
  // Index of the scale immediate in the argument list; chosen per builtin
  // family in the switch below.
  unsigned ArgNum = 0;
  switch (BuiltinID) {
  // AVX-512PF prefetch gather/scatter builtins.
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
  // AVX2 / AVX-512 gather builtins.
  case X86::BI__builtin_ia32_gatherd_pd:
  case X86::BI__builtin_ia32_gatherd_pd256:
  case X86::BI__builtin_ia32_gatherq_pd:
  case X86::BI__builtin_ia32_gatherq_pd256:
  case X86::BI__builtin_ia32_gatherd_ps:
  case X86::BI__builtin_ia32_gatherd_ps256:
  case X86::BI__builtin_ia32_gatherq_ps:
  case X86::BI__builtin_ia32_gatherq_ps256:
  case X86::BI__builtin_ia32_gatherd_q:
  case X86::BI__builtin_ia32_gatherd_q256:
  case X86::BI__builtin_ia32_gatherq_q:
  case X86::BI__builtin_ia32_gatherq_q256:
  case X86::BI__builtin_ia32_gatherd_d:
  case X86::BI__builtin_ia32_gatherd_d256:
  case X86::BI__builtin_ia32_gatherq_d:
  case X86::BI__builtin_ia32_gatherq_d256:
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si:
  // AVX-512 scatter builtins.
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))

  // Hardware-supported scales are exactly the element sizes 1, 2, 4 and 8.
  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
/// Returns true if \p BuiltinID names an EFLAGS read/write builtin that is
/// only available when targeting 32-bit x86.
static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
/// Perform target-specific semantic checks for an X86 builtin call:
/// cpu_supports/cpu_is argument validation, 32-bit-only builtins on 64-bit
/// targets, rounding/SAE immediates, gather/scatter scale immediates, and
/// range checks of instruction immediates.
bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = Context.getTargetInfo().getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE make sure it's valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))

  // If the intrinsic has a gather/scatter scale immediate make sure it's
  // valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here. 'i' is the argument index of the immediate,
  // [l, u] the inclusive range it must fall in.
  int i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  // 1-bit immediate in argument 1.
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
    i = 1; l = 0; u = 1;
  // 1-bit immediate in argument 2.
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
    i = 2; l = 0; u = 1;
  // 2-bit immediate in argument 1.
  case X86::BI__builtin_ia32_vpermilpd:
  case X86::BI__builtin_ia32_vec_ext_v4hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v4di:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask:
    i = 1; l = 0; u = 3;
  // 3-bit immediate in argument 1.
  case X86::BI_mm_prefetch:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
    i = 1; l = 0; u = 7;
  // 2-bit immediate in argument 2.
  case X86::BI__builtin_ia32_sha1rnds4:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_shufpd:
  case X86::BI__builtin_ia32_vec_set_v4hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v4di:
  case X86::BI__builtin_ia32_shuf_f32x4_256:
  case X86::BI__builtin_ia32_shuf_f64x2_256:
  case X86::BI__builtin_ia32_shuf_i32x4_256:
  case X86::BI__builtin_ia32_shuf_i64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti32x4:
    i = 2; l = 0; u = 3;
  // 2-bit immediate in argument 3 (XOP permutes).
  case X86::BI__builtin_ia32_vpermil2pd:
  case X86::BI__builtin_ia32_vpermil2pd256:
  case X86::BI__builtin_ia32_vpermil2ps:
  case X86::BI__builtin_ia32_vpermil2ps256:
    i = 3; l = 0; u = 3;
  // 3-bit immediate in argument 2 (integer compares / XOP vpcom).
  case X86::BI__builtin_ia32_cmpb128_mask:
  case X86::BI__builtin_ia32_cmpw128_mask:
  case X86::BI__builtin_ia32_cmpd128_mask:
  case X86::BI__builtin_ia32_cmpq128_mask:
  case X86::BI__builtin_ia32_cmpb256_mask:
  case X86::BI__builtin_ia32_cmpw256_mask:
  case X86::BI__builtin_ia32_cmpd256_mask:
  case X86::BI__builtin_ia32_cmpq256_mask:
  case X86::BI__builtin_ia32_cmpb512_mask:
  case X86::BI__builtin_ia32_cmpw512_mask:
  case X86::BI__builtin_ia32_cmpd512_mask:
  case X86::BI__builtin_ia32_cmpq512_mask:
  case X86::BI__builtin_ia32_ucmpb128_mask:
  case X86::BI__builtin_ia32_ucmpw128_mask:
  case X86::BI__builtin_ia32_ucmpd128_mask:
  case X86::BI__builtin_ia32_ucmpq128_mask:
  case X86::BI__builtin_ia32_ucmpb256_mask:
  case X86::BI__builtin_ia32_ucmpw256_mask:
  case X86::BI__builtin_ia32_ucmpd256_mask:
  case X86::BI__builtin_ia32_ucmpq256_mask:
  case X86::BI__builtin_ia32_ucmpb512_mask:
  case X86::BI__builtin_ia32_ucmpw512_mask:
  case X86::BI__builtin_ia32_ucmpd512_mask:
  case X86::BI__builtin_ia32_ucmpq512_mask:
  case X86::BI__builtin_ia32_vpcomub:
  case X86::BI__builtin_ia32_vpcomuw:
  case X86::BI__builtin_ia32_vpcomud:
  case X86::BI__builtin_ia32_vpcomuq:
  case X86::BI__builtin_ia32_vpcomb:
  case X86::BI__builtin_ia32_vpcomw:
  case X86::BI__builtin_ia32_vpcomd:
  case X86::BI__builtin_ia32_vpcomq:
  case X86::BI__builtin_ia32_vec_set_v8hi:
  case X86::BI__builtin_ia32_vec_set_v8si:
    i = 2; l = 0; u = 7;
  // 4-bit immediate in argument 1.
  case X86::BI__builtin_ia32_vpermilpd256:
  case X86::BI__builtin_ia32_roundps:
  case X86::BI__builtin_ia32_roundpd:
  case X86::BI__builtin_ia32_roundps256:
  case X86::BI__builtin_ia32_roundpd256:
  case X86::BI__builtin_ia32_getmantpd128_mask:
  case X86::BI__builtin_ia32_getmantpd256_mask:
  case X86::BI__builtin_ia32_getmantps128_mask:
  case X86::BI__builtin_ia32_getmantps256_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_vec_ext_v16qi:
  case X86::BI__builtin_ia32_vec_ext_v16hi:
    i = 1; l = 0; u = 15;
  // 4-bit immediate in argument 2.
  case X86::BI__builtin_ia32_pblendd128:
  case X86::BI__builtin_ia32_blendps:
  case X86::BI__builtin_ia32_blendpd256:
  case X86::BI__builtin_ia32_shufpd256:
  case X86::BI__builtin_ia32_roundss:
  case X86::BI__builtin_ia32_roundsd:
  case X86::BI__builtin_ia32_rangepd128_mask:
  case X86::BI__builtin_ia32_rangepd256_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps128_mask:
  case X86::BI__builtin_ia32_rangeps256_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_vec_set_v16qi:
  case X86::BI__builtin_ia32_vec_set_v16hi:
    i = 2; l = 0; u = 15;
  // 5-bit immediate in argument 1.
  case X86::BI__builtin_ia32_vec_ext_v32qi:
    i = 1; l = 0; u = 31;
  // 5-bit immediate in argument 2 (FP compare predicates).
  case X86::BI__builtin_ia32_cmpps:
  case X86::BI__builtin_ia32_cmpss:
  case X86::BI__builtin_ia32_cmppd:
  case X86::BI__builtin_ia32_cmpsd:
  case X86::BI__builtin_ia32_cmpps256:
  case X86::BI__builtin_ia32_cmppd256:
  case X86::BI__builtin_ia32_cmpps128_mask:
  case X86::BI__builtin_ia32_cmppd128_mask:
  case X86::BI__builtin_ia32_cmpps256_mask:
  case X86::BI__builtin_ia32_cmppd256_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_vec_set_v32qi:
    i = 2; l = 0; u = 31;
  // 8-bit immediate in argument 1.
  case X86::BI__builtin_ia32_permdf256:
  case X86::BI__builtin_ia32_permdi256:
  case X86::BI__builtin_ia32_permdf512:
  case X86::BI__builtin_ia32_permdi512:
  case X86::BI__builtin_ia32_vpermilps:
  case X86::BI__builtin_ia32_vpermilps256:
  case X86::BI__builtin_ia32_vpermilpd512:
  case X86::BI__builtin_ia32_vpermilps512:
  case X86::BI__builtin_ia32_pshufd:
  case X86::BI__builtin_ia32_pshufd256:
  case X86::BI__builtin_ia32_pshufd512:
  case X86::BI__builtin_ia32_pshufhw:
  case X86::BI__builtin_ia32_pshufhw256:
  case X86::BI__builtin_ia32_pshufhw512:
  case X86::BI__builtin_ia32_pshuflw:
  case X86::BI__builtin_ia32_pshuflw256:
  case X86::BI__builtin_ia32_pshuflw512:
  case X86::BI__builtin_ia32_vcvtps2ph:
  case X86::BI__builtin_ia32_vcvtps2ph_mask:
  case X86::BI__builtin_ia32_vcvtps2ph256:
  case X86::BI__builtin_ia32_vcvtps2ph256_mask:
  case X86::BI__builtin_ia32_vcvtps2ph512_mask:
  case X86::BI__builtin_ia32_rndscaleps_128_mask:
  case X86::BI__builtin_ia32_rndscalepd_128_mask:
  case X86::BI__builtin_ia32_rndscaleps_256_mask:
  case X86::BI__builtin_ia32_rndscalepd_256_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_reducepd128_mask:
  case X86::BI__builtin_ia32_reducepd256_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps128_mask:
  case X86::BI__builtin_ia32_reduceps256_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_prold512:
  case X86::BI__builtin_ia32_prolq512:
  case X86::BI__builtin_ia32_prold128:
  case X86::BI__builtin_ia32_prold256:
  case X86::BI__builtin_ia32_prolq128:
  case X86::BI__builtin_ia32_prolq256:
  case X86::BI__builtin_ia32_prord512:
  case X86::BI__builtin_ia32_prorq512:
  case X86::BI__builtin_ia32_prord128:
  case X86::BI__builtin_ia32_prord256:
  case X86::BI__builtin_ia32_prorq128:
  case X86::BI__builtin_ia32_prorq256:
  case X86::BI__builtin_ia32_fpclasspd128_mask:
  case X86::BI__builtin_ia32_fpclasspd256_mask:
  case X86::BI__builtin_ia32_fpclassps128_mask:
  case X86::BI__builtin_ia32_fpclassps256_mask:
  case X86::BI__builtin_ia32_fpclassps512_mask:
  case X86::BI__builtin_ia32_fpclasspd512_mask:
  case X86::BI__builtin_ia32_fpclasssd_mask:
  case X86::BI__builtin_ia32_fpclassss_mask:
  case X86::BI__builtin_ia32_pslldqi128_byteshift:
  case X86::BI__builtin_ia32_pslldqi256_byteshift:
  case X86::BI__builtin_ia32_pslldqi512_byteshift:
  case X86::BI__builtin_ia32_psrldqi128_byteshift:
  case X86::BI__builtin_ia32_psrldqi256_byteshift:
  case X86::BI__builtin_ia32_psrldqi512_byteshift:
  case X86::BI__builtin_ia32_kshiftliqi:
  case X86::BI__builtin_ia32_kshiftlihi:
  case X86::BI__builtin_ia32_kshiftlisi:
  case X86::BI__builtin_ia32_kshiftlidi:
  case X86::BI__builtin_ia32_kshiftriqi:
  case X86::BI__builtin_ia32_kshiftrihi:
  case X86::BI__builtin_ia32_kshiftrisi:
  case X86::BI__builtin_ia32_kshiftridi:
    i = 1; l = 0; u = 255;
  // 8-bit immediate in argument 2.
  case X86::BI__builtin_ia32_vperm2f128_pd256:
  case X86::BI__builtin_ia32_vperm2f128_ps256:
  case X86::BI__builtin_ia32_vperm2f128_si256:
  case X86::BI__builtin_ia32_permti256:
  case X86::BI__builtin_ia32_pblendw128:
  case X86::BI__builtin_ia32_pblendw256:
  case X86::BI__builtin_ia32_blendps256:
  case X86::BI__builtin_ia32_pblendd256:
  case X86::BI__builtin_ia32_palignr128:
  case X86::BI__builtin_ia32_palignr256:
  case X86::BI__builtin_ia32_palignr512:
  case X86::BI__builtin_ia32_alignq512:
  case X86::BI__builtin_ia32_alignd512:
  case X86::BI__builtin_ia32_alignd128:
  case X86::BI__builtin_ia32_alignd256:
  case X86::BI__builtin_ia32_alignq128:
  case X86::BI__builtin_ia32_alignq256:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_shuf_f32x4:
  case X86::BI__builtin_ia32_shuf_f64x2:
  case X86::BI__builtin_ia32_shuf_i32x4:
  case X86::BI__builtin_ia32_shuf_i64x2:
  case X86::BI__builtin_ia32_shufpd512:
  case X86::BI__builtin_ia32_shufps:
  case X86::BI__builtin_ia32_shufps256:
  case X86::BI__builtin_ia32_shufps512:
  case X86::BI__builtin_ia32_dbpsadbw128:
  case X86::BI__builtin_ia32_dbpsadbw256:
  case X86::BI__builtin_ia32_dbpsadbw512:
  case X86::BI__builtin_ia32_vpshldd128:
  case X86::BI__builtin_ia32_vpshldd256:
  case X86::BI__builtin_ia32_vpshldd512:
  case X86::BI__builtin_ia32_vpshldq128:
  case X86::BI__builtin_ia32_vpshldq256:
  case X86::BI__builtin_ia32_vpshldq512:
  case X86::BI__builtin_ia32_vpshldw128:
  case X86::BI__builtin_ia32_vpshldw256:
  case X86::BI__builtin_ia32_vpshldw512:
  case X86::BI__builtin_ia32_vpshrdd128:
  case X86::BI__builtin_ia32_vpshrdd256:
  case X86::BI__builtin_ia32_vpshrdd512:
  case X86::BI__builtin_ia32_vpshrdq128:
  case X86::BI__builtin_ia32_vpshrdq256:
  case X86::BI__builtin_ia32_vpshrdq512:
  case X86::BI__builtin_ia32_vpshrdw128:
  case X86::BI__builtin_ia32_vpshrdw256:
  case X86::BI__builtin_ia32_vpshrdw512:
    i = 2; l = 0; u = 255;
  // 8-bit immediate in argument 3 (fixupimm / ternary logic).
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_fixupimmpd128_mask:
  case X86::BI__builtin_ia32_fixupimmpd128_maskz:
  case X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
    i = 3; l = 0; u = 255;
  // Prefetch hint in argument 4 must be 2 or 3.
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
  // 8-bit immediate in argument 4.
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
    i = 4; l = 0; u = 255;

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These need to code generate, but don't need to necessarily
  // make any sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
/// Given a FunctionDecl's FormatAttr, attempts to populate the
/// FormatStringInfo parameter with the FormatAttr's correct format_idx and
/// firstDataArg. Returns true when the format fits the function and the
/// FormatStringInfo has been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  // A first-arg of 0 in the attribute marks a va_list-style callee.
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  // Attribute indices are 1-based; convert to 0-based argument indices.
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if(FSI->FormatIdx == 0)
  if (FSI->FirstDataArg != 0)
    --FSI->FirstDataArg;
/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability
        = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
    if (*nullability == NullabilityKind::NonNull)

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
              dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
                dyn_cast<InitListExpr>(CLE->getInitializer()))
          // Evaluate the union's first member in place of the union itself.
          Expr = ILE->getInit(0);

  // Dependent expressions can't be evaluated; treat them as non-null.
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
/// Emit the null-argument warning (warn_null_arg) at \p CallSiteLoc if
/// \p ArgExpr evaluates to null.
static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    // DiagRuntimeBehavior suppresses the warning in unreachable contexts.
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                              << ArgExpr->getSourceRange());
/// If \p Format describes an NSString-style format attribute, set \p Idx to
/// the (zero-based) index of the format-string argument.
bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, &FSI)) {
    Idx = FSI.FormatIdx;
/// Diagnose use of %s directive in an NSString which is being passed
/// as formatting string to formatting method.
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
  bool Format = false;
  // Only CFString-formatting functions are considered here.
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
  // Find the format-string argument index from the function's format attrs.
  for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
    if (S.GetFormatNSStringIdx(I, Idx)) {
  if (!Format || NumArgs <= Idx)
  const Expr *FormatExpr = Args[Idx];
  // Look through an explicit C-style cast around the format argument.
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
          dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  // Warn if the literal contains a %s directive, and note where the callee
  // was declared.
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
        << FDecl->getDeclName();
/// Determine whether the given type has a non-null nullability annotation.
static bool isNonNullType(ASTContext &ctx, QualType type) {
  // Only an explicit _Nonnull annotation yields true; an unannotated type is
  // not treated as non-null.
  if (auto nullability = type->getNullability(ctx))
    return *nullability == NullabilityKind::NonNull;
/// Collect which argument positions are required to be non-null (from
/// nonnull attributes on the declaration, on individual parameters, and from
/// _Nonnull parameter types), then warn on any argument that evaluates to
/// null at such a position.
static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by constant evaluator.
  if (S.isConstantEvaluated())
  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;

  // Handle the nonnull attribute on the function/method declaration itself.
  for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
    if (!NonNull->args_size()) {
      // Easy case: all pointer arguments are nonnull.
      for (const auto *Arg : Args)
        if (S.isValidPointerAttrType(Arg->getType()))
          CheckNonNullArgument(S, Arg, CallSiteLoc);

    // Record each attribute-listed index in the bit vector for later.
    for (const ParamIdx &Idx : NonNull->args()) {
      unsigned IdxAST = Idx.getASTIndex();
      if (IdxAST >= Args.size())
      if (NonNullArgs.empty())
        NonNullArgs.resize(Args.size());
      NonNullArgs.set(IdxAST);

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      // A per-parameter nonnull attribute or a _Nonnull type both count.
      if (PVD->hasAttr<NonNullAttr>() ||
          isNonNullType(S.Context, PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
      QualType type = VD->getType().getNonReferenceType();
      if (auto pointerType = type->getAs<PointerType>())
        type = pointerType->getPointeeType();
      else if (auto blockType = type->getAs<BlockPointerType>())
        type = blockType->getPointeeType();
      // FIXME: data member pointers?

      // Dig out the function prototype, if there is one.
      Proto = type->getAs<FunctionProtoType>();

  // Fill in non-null argument information from the nullability
  // information on the parameter types (if we have them).
  for (auto paramType : Proto->getParamTypes()) {
    if (isNonNullType(S.Context, paramType)) {
      if (NonNullArgs.empty())
        NonNullArgs.resize(Args.size());

      NonNullArgs.set(Index);

  // Check for non-null arguments.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
/// attributes. This is the shared worker behind CheckFunctionCall,
/// CheckObjCMethodCall, CheckPointerCall, CheckOtherCall, and
/// CheckConstructorCall.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
                     SourceRange Range, VariadicCallType CallType) {
  // FIXME: We should check as much as we can in the template definition.
  if (CurContext->isDependentContext())

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
    // Only create vector if there are format attributes.
    CheckedVarArgs.resize(Args.size());
    CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,

  // Refuse POD arguments that weren't caught by the format string
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  // __noop is exempt from variadic-argument checking.
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    // Number of declared parameters; the variadic arguments start after it.
    unsigned NumParams = Proto ? Proto->getNumParams()
                       : FDecl && isa<FunctionDecl>(FDecl)
                           ? cast<FunctionDecl>(FDecl)->getNumParams()
                       : FDecl && isa<ObjCMethodDecl>(FDecl)
                           ? cast<ObjCMethodDecl>(FDecl)->param_size()
    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        // Skip arguments already validated by format-string checking.
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
      CheckArgumentWithTypeTag(I, Args, Loc);

  diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
/// CheckConstructorCall - Check a constructor call for correctness and safety
/// properties not enforced by the C type system.
void Sema::CheckConstructorCall(FunctionDecl *FDecl,
                                ArrayRef<const Expr *> Args,
                                const FunctionProtoType *Proto,
                                SourceLocation Loc) {
  // Constructors are treated as member functions; a variadic prototype makes
  // this a variadic-constructor call for vararg checking purposes.
  VariadicCallType CallType =
      Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
            Loc, SourceRange(), CallType);
/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                             const FunctionProtoType *Proto) {
  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
                              isa<CXXMethodDecl>(FDecl);
  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
                          IsMemberOperatorCall;
  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                  TheCall->getCallee());
  Expr** Args = TheCall->getArgs();
  unsigned NumArgs = TheCall->getNumArgs();

  Expr *ImplicitThis = nullptr;
  if (IsMemberOperatorCall) {
    // If this is a call to a member operator, hide the first argument
    // (the implicit object) from the checks below.
    // FIXME: Our choice of AST representation here is less than ideal.
    ImplicitThis = Args[0];
  } else if (IsMemberFunction)
        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();

  checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs),
            IsMemberFunction, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  IdentifierInfo *FnInfo = FDecl->getIdentifier();
  // None of the checks below are needed for functions that don't have
  // simple names (e.g., C++ conversion functions).

  CheckAbsoluteValueFunction(TheCall, FDecl);
  CheckMaxUnsignedZero(TheCall, FDecl);

  if (getLangOpts().ObjC)
    DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);

  // Classify the callee as a known memory/string builtin, if any.
  unsigned CMId = FDecl->getMemoryFunctionKind();

  // Handle memory setting and copying functions.
  if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat)
    CheckStrlcpycatArguments(TheCall, FnInfo);
  else if (CMId == Builtin::BIstrncat)
    CheckStrncatArguments(TheCall, FnInfo);
    CheckMemaccessArguments(TheCall, CMId, FnInfo);
/// Check an Objective-C message send for the shared call diagnostics
/// (format strings, nonnull arguments, variadic arguments).
bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
                               ArrayRef<const Expr *> Args) {
  // Message sends have no FunctionProtoType; classify variadic-ness directly
  // from the method declaration.
  VariadicCallType CallType =
      Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;

  checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
            /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
/// Check a call made through a variable or field of function-pointer,
/// block-pointer, or function type.
bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                            const FunctionProtoType *Proto) {
  if (const auto *V = dyn_cast<VarDecl>(NDecl))
    Ty = V->getType().getNonReferenceType();
  else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
    Ty = F->getType().getNonReferenceType();

  // Only block pointers, function pointers, and prototyped function types
  // are subject to these checks.
  if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
      !Ty->isFunctionProtoType())

  VariadicCallType CallType;
  if (!Proto || !Proto->isVariadic()) {
    CallType = VariadicDoesNotApply;
  } else if (Ty->isBlockPointerType()) {
    CallType = VariadicBlock;
  } else { // Ty->isFunctionPointerType()
    CallType = VariadicFunction;

  checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
            llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);
/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
/// such as function pointers returned from functions.
bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
  // With no declaration, variadic-ness comes solely from the prototype.
  VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
                                                  TheCall->getCallee());
  checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
            llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);
/// Returns true if \p Ordering is a memory ordering that the C ABI permits
/// for the given atomic operation (e.g. a load may not be 'release').
static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
  // Values outside the C ABI atomic-ordering enumeration are never valid.
  if (!llvm::isValidAtomicOrderingCABI(Ordering))

  auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("There is no ordering argument for an init");

  // Loads may not use 'release' or 'acq_rel' ordering.
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
    return OrderingCABI != llvm::AtomicOrderingCABI::release &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  // Stores may not use 'consume', 'acquire', or 'acq_rel' ordering.
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
           OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
// Type-check one of the overloaded __c11/__opencl/__atomic builtins and
// rewrite the CallExpr into an AtomicExpr with arguments permuted into a
// canonical order (ptr, order, val1, [orderfail,] [val2,] [weak,] [scope]).
// NOTE(review): this copy is heavily elided — the 'Form' enumeration, the
// bodies of the Form-classification switch, most early returns, and several
// closing braces are missing. Comments describe only the visible code.
4448 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
4449 AtomicExpr::AtomicOp Op) {
4450   CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
4451   DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
4453   // All the non-OpenCL operations take one of the following forms.
4454   // The OpenCL operations take the __c11 forms with one extra argument for
4455   // synchronization scope.
4457   //   C    __c11_atomic_init(A *, C)
4460   //   C    __c11_atomic_load(A *, int)
4463   //   void __atomic_load(A *, CP, int)
4466   //   void __atomic_store(A *, CP, int)
4469   //   C    __c11_atomic_add(A *, M, int)
4472   //   C    __atomic_exchange_n(A *, CP, int)
4475   //   void __atomic_exchange(A *, C *, CP, int)
4478   //   bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
4481   //   bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
// Per-Form tables: total argument count and how many of those are "value"
// operands (as opposed to ordering/weak/scope arguments).
4485   const unsigned NumForm = GNUCmpXchg + 1;
4486   const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
4487   const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
4489   // C is an appropriate type,
4490   // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
4491   // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
4492   // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
4493   // the int parameters are for orderings.
// Compile-time guards: the tables and the AtomicOp enum layout must stay in
// sync with the Form list above.
4495   static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
4496       && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
4497       "need to update code for modified forms");
4498   static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
4499                     AtomicExpr::AO__c11_atomic_fetch_xor + 1 ==
4500                         AtomicExpr::AO__atomic_load,
4501                 "need to update code for modified C11 atomics");
// Classify the builtin family; these flags drive type rules below.
4502   bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
4503                   Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
4504   bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
4505                 Op <= AtomicExpr::AO__c11_atomic_fetch_xor) ||
4507   bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
4508              Op == AtomicExpr::AO__atomic_store_n ||
4509              Op == AtomicExpr::AO__atomic_exchange_n ||
4510              Op == AtomicExpr::AO__atomic_compare_exchange_n;
4511   bool IsAddSub = false;
4512   bool IsMinMax = false;
// Classify Op into a Form (Init/Load/LoadCopy/Copy/Arithmetic/Xchg/GNUXchg/
// C11CmpXchg/GNUCmpXchg per the tables above). NOTE(review): the switch
// header and the 'Form = ...; break;' bodies are elided in this copy.
4515   case AtomicExpr::AO__c11_atomic_init:
4516   case AtomicExpr::AO__opencl_atomic_init:
4520   case AtomicExpr::AO__c11_atomic_load:
4521   case AtomicExpr::AO__opencl_atomic_load:
4522   case AtomicExpr::AO__atomic_load_n:
4526   case AtomicExpr::AO__atomic_load:
4530   case AtomicExpr::AO__c11_atomic_store:
4531   case AtomicExpr::AO__opencl_atomic_store:
4532   case AtomicExpr::AO__atomic_store:
4533   case AtomicExpr::AO__atomic_store_n:
4537   case AtomicExpr::AO__c11_atomic_fetch_add:
4538   case AtomicExpr::AO__c11_atomic_fetch_sub:
4539   case AtomicExpr::AO__opencl_atomic_fetch_add:
4540   case AtomicExpr::AO__opencl_atomic_fetch_sub:
4541   case AtomicExpr::AO__opencl_atomic_fetch_min:
4542   case AtomicExpr::AO__opencl_atomic_fetch_max:
4543   case AtomicExpr::AO__atomic_fetch_add:
4544   case AtomicExpr::AO__atomic_fetch_sub:
4545   case AtomicExpr::AO__atomic_add_fetch:
4546   case AtomicExpr::AO__atomic_sub_fetch:
4549   case AtomicExpr::AO__c11_atomic_fetch_and:
4550   case AtomicExpr::AO__c11_atomic_fetch_or:
4551   case AtomicExpr::AO__c11_atomic_fetch_xor:
4552   case AtomicExpr::AO__opencl_atomic_fetch_and:
4553   case AtomicExpr::AO__opencl_atomic_fetch_or:
4554   case AtomicExpr::AO__opencl_atomic_fetch_xor:
4555   case AtomicExpr::AO__atomic_fetch_and:
4556   case AtomicExpr::AO__atomic_fetch_or:
4557   case AtomicExpr::AO__atomic_fetch_xor:
4558   case AtomicExpr::AO__atomic_fetch_nand:
4559   case AtomicExpr::AO__atomic_and_fetch:
4560   case AtomicExpr::AO__atomic_or_fetch:
4561   case AtomicExpr::AO__atomic_xor_fetch:
4562   case AtomicExpr::AO__atomic_nand_fetch:
4566   case AtomicExpr::AO__atomic_fetch_min:
4567   case AtomicExpr::AO__atomic_fetch_max:
4572   case AtomicExpr::AO__c11_atomic_exchange:
4573   case AtomicExpr::AO__opencl_atomic_exchange:
4574   case AtomicExpr::AO__atomic_exchange_n:
4578   case AtomicExpr::AO__atomic_exchange:
4582   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
4583   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
4584   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
4585   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
4589   case AtomicExpr::AO__atomic_compare_exchange:
4590   case AtomicExpr::AO__atomic_compare_exchange_n:
// OpenCL builtins (except init) take one extra trailing scope argument.
4595   unsigned AdjustedNumArgs = NumArgs[Form];
4596   if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init)
4598   // Check we have the right number of arguments.
4599   if (TheCall->getNumArgs() < AdjustedNumArgs) {
4600     Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
4601         << 0 << AdjustedNumArgs << TheCall->getNumArgs()
4602         << TheCall->getCallee()->getSourceRange();
4604   } else if (TheCall->getNumArgs() > AdjustedNumArgs) {
4605     Diag(TheCall->getArg(AdjustedNumArgs)->getBeginLoc(),
4606          diag::err_typecheck_call_too_many_args)
4607         << 0 << AdjustedNumArgs << TheCall->getNumArgs()
4608         << TheCall->getCallee()->getSourceRange();
4612   // Inspect the first argument of the atomic operation.
4613   Expr *Ptr = TheCall->getArg(0);
4614   ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr);
4615   if (ConvertedPtr.isInvalid())
4618   Ptr = ConvertedPtr.get();
4619   const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
4621     Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
4622         << Ptr->getType() << Ptr->getSourceRange();
4626   // For a __c11 builtin, this should be a pointer to an _Atomic type.
4627   QualType AtomTy = pointerType->getPointeeType(); // 'A'
4628   QualType ValType = AtomTy; // 'C'
// C11/OpenCL path (presumably guarded by an elided 'if (IsC11)'): require a
// pointer to _Atomic, non-const (except plain loads), then unwrap to 'C'.
4630     if (!AtomTy->isAtomicType()) {
4631       Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic)
4632           << Ptr->getType() << Ptr->getSourceRange();
4635     if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
4636         AtomTy.getAddressSpace() == LangAS::opencl_constant) {
4637       Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_atomic)
4638           << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
4639           << Ptr->getSourceRange();
4642     ValType = AtomTy->getAs<AtomicType>()->getValueType();
4643   } else if (Form != Load && Form != LoadCopy) {
4644     if (ValType.isConstQualified()) {
4645       Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_pointer)
4646           << Ptr->getType() << Ptr->getSourceRange();
4651   // For an arithmetic operation, the implied arithmetic must be well-formed.
4652   if (Form == Arithmetic) {
4653     // gcc does not enforce these rules for GNU atomics, but we do so for sanity.
4654     if (IsAddSub && !ValType->isIntegerType()
4655         && !ValType->isPointerType()) {
4656       Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr)
4657           << IsC11 << Ptr->getType() << Ptr->getSourceRange();
// Min/max (presumably under an elided 'if (IsMinMax)'): restricted to
// exactly 'int'/'unsigned int' here.
4661       const BuiltinType *BT = ValType->getAs<BuiltinType>();
4662       if (!BT || (BT->getKind() != BuiltinType::Int &&
4663                   BT->getKind() != BuiltinType::UInt)) {
4664         Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_int32_or_ptr);
4668     if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) {
4669       Diag(DRE->getBeginLoc(), diag::err_atomic_op_bitwise_needs_atomic_int)
4670           << IsC11 << Ptr->getType() << Ptr->getSourceRange();
// Pointer arithmetic needs a complete pointee so ptrdiff scaling is defined.
4673     if (IsC11 && ValType->isPointerType() &&
4674         RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
4675                             diag::err_incomplete_type)) {
4678   } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
4679     // For __atomic_*_n operations, the value type must be a scalar integral or
4680     // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
4681     Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr)
4682         << IsC11 << Ptr->getType() << Ptr->getSourceRange();
4686   if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
4687       !AtomTy->isScalarType()) {
4688     // For GNU atomics, require a trivially-copyable type. This is not part of
4689     // the GNU atomics specification, but we enforce it for sanity.
4690     Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_trivial_copy)
4691         << Ptr->getType() << Ptr->getSourceRange();
// ARC ownership-qualified pointees cannot be operated on atomically.
4695   switch (ValType.getObjCLifetime()) {
4696   case Qualifiers::OCL_None:
4697   case Qualifiers::OCL_ExplicitNone:
4701   case Qualifiers::OCL_Weak:
4702   case Qualifiers::OCL_Strong:
4703   case Qualifiers::OCL_Autoreleasing:
4704     // FIXME: Can this happen? By this point, ValType should be known
4705     // to be trivially copyable.
4706     Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
4707         << ValType << Ptr->getSourceRange();
4711   // All atomic operations have an overload which takes a pointer to a volatile
4712   // 'A'.  We shouldn't let the volatile-ness of the pointee-type inject itself
4713   // into the result or the other operands. Similarly atomic_load takes a
4714   // pointer to a const 'A'.
4715   ValType.removeLocalVolatile();
4716   ValType.removeLocalConst();
4717   QualType ResultType = ValType;
4718   if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
4720     ResultType = Context.VoidTy;
4721   else if (Form == C11CmpXchg || Form == GNUCmpXchg)
4722     ResultType = Context.BoolTy;
4724   // The type of a parameter passed 'by value'. In the GNU atomics, such
4725   // arguments are actually passed as pointers.
4726   QualType ByValType = ValType; // 'CP'
4727   bool IsPassedByAddress = false;
4728   if (!IsC11 && !IsN) {
4729     ByValType = Ptr->getType();
4730     IsPassedByAddress = true;
4733   // The first argument's non-CV pointer type is used to deduce the type of
4734   // subsequent arguments, except for:
4735   //  - weak flag (always converted to bool)
4736   //  - memory order (always converted to int)
4737   //  - scope (always converted to int)
4738   for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
// Value operands (indices 1..NumVals[Form]) get a deduced type 'Ty';
// trailing order/weak/scope arguments fall through to fixed types.
// NOTE(review): the declaration of 'Ty' and the inner 'switch (i)'-style
// dispatch appear elided in this copy.
4740     if (i < NumVals[Form] + 1) {
4743         // The first argument is always a pointer. It has a fixed type.
4744         // It is always dereferenced, a nullptr is undefined.
4745         CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc());
4746         // Nothing else to do: we already know all we want about this pointer.
4749         // The second argument is the non-atomic operand. For arithmetic, this
4750         // is always passed by value, and for a compare_exchange it is always
4751         // passed by address. For the rest, GNU uses by-address and C11 uses
4753         assert(Form != Load);
4754         if (Form == Init || (Form == Arithmetic && ValType->isIntegerType()))
4756         else if (Form == Copy || Form == Xchg) {
4757           if (IsPassedByAddress)
4758             // The value pointer is always dereferenced, a nullptr is undefined.
4759             CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc());
4761         } else if (Form == Arithmetic)
4762           Ty = Context.getPointerDiffType();
4764           Expr *ValArg = TheCall->getArg(i);
4765           // The value pointer is always dereferenced, a nullptr is undefined.
4766           CheckNonNullArgument(*this, ValArg, DRE->getBeginLoc());
4767           LangAS AS = LangAS::Default;
4768           // Keep address space of non-atomic pointer type.
4769           if (const PointerType *PtrTy =
4770                   ValArg->getType()->getAs<PointerType>()) {
4771             AS = PtrTy->getPointeeType().getAddressSpace();
4773           Ty = Context.getPointerType(
4774               Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
4778         // The third argument to compare_exchange / GNU exchange is the desired
4779         // value, either by-value (for the C11 and *_n variant) or as a pointer.
4780         if (IsPassedByAddress)
4781           CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc());
4785         // The fourth argument to GNU compare_exchange is a 'weak' flag.
4786         Ty = Context.BoolTy;
4790       // The order(s) and scope are always converted to int.
// Convert the argument to the deduced type 'Ty' as if passing a parameter,
// and store the converted expression back into the call.
4794     InitializedEntity Entity =
4795         InitializedEntity::InitializeParameter(Context, Ty, false);
4796     ExprResult Arg = TheCall->getArg(i);
4797     Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
4798     if (Arg.isInvalid())
4800     TheCall->setArg(i, Arg.get());
4803   // Permute the arguments into a 'consistent' order.
4804   SmallVector<Expr*, 5> SubExprs;
4805   SubExprs.push_back(Ptr);
// Per-Form permutations. NOTE(review): the 'switch (Form)'/'case' scaffolding
// is elided here; the groupings below are Init, Load, store/copy-like,
// arithmetic/exchange, C11 cmpxchg, and GNU cmpxchg respectively.
4808     // Note, AtomicExpr::getVal1() has a special case for this atomic.
4809     SubExprs.push_back(TheCall->getArg(1)); // Val1
4812     SubExprs.push_back(TheCall->getArg(1)); // Order
4818     SubExprs.push_back(TheCall->getArg(2)); // Order
4819     SubExprs.push_back(TheCall->getArg(1)); // Val1
4822     // Note, AtomicExpr::getVal2() has a special case for this atomic.
4823     SubExprs.push_back(TheCall->getArg(3)); // Order
4824     SubExprs.push_back(TheCall->getArg(1)); // Val1
4825     SubExprs.push_back(TheCall->getArg(2)); // Val2
4828     SubExprs.push_back(TheCall->getArg(3)); // Order
4829     SubExprs.push_back(TheCall->getArg(1)); // Val1
4830     SubExprs.push_back(TheCall->getArg(4)); // OrderFail
4831     SubExprs.push_back(TheCall->getArg(2)); // Val2
4834     SubExprs.push_back(TheCall->getArg(4)); // Order
4835     SubExprs.push_back(TheCall->getArg(1)); // Val1
4836     SubExprs.push_back(TheCall->getArg(5)); // OrderFail
4837     SubExprs.push_back(TheCall->getArg(2)); // Val2
4838     SubExprs.push_back(TheCall->getArg(3)); // Weak
// If the success ordering is a constant, warn when it is invalid for this
// operation (e.g. 'release' on a load). Non-constant orderings are accepted.
4842   if (SubExprs.size() >= 2 && Form != Init) {
4843     llvm::APSInt Result(32);
4844     if (SubExprs[1]->isIntegerConstantExpr(Result, Context) &&
4845         !isValidOrderingForOp(Result.getSExtValue(), Op))
4846       Diag(SubExprs[1]->getBeginLoc(),
4847            diag::warn_atomic_op_has_invalid_memory_order)
4848           << SubExprs[1]->getSourceRange();
// OpenCL-style scope argument: validate constant scopes against the model.
4851   if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
4852     auto *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);
4853     llvm::APSInt Result(32);
4854     if (Scope->isIntegerConstantExpr(Result, Context) &&
4855         !ScopeModel->isValid(Result.getZExtValue())) {
4856       Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
4857           << Scope->getSourceRange();
4859     SubExprs.push_back(Scope);
// Build the AtomicExpr replacing the CallExpr. NOTE(review): the 'AtomicExpr
// *AE =' portion of this statement appears elided in this copy.
4863       new (Context) AtomicExpr(TheCall->getCallee()->getBeginLoc(), SubExprs,
4864                                ResultType, Op, TheCall->getRParenLoc());
// Loads/stores that would lower to an unsupported libcall are errors.
4866   if ((Op == AtomicExpr::AO__c11_atomic_load ||
4867        Op == AtomicExpr::AO__c11_atomic_store ||
4868        Op == AtomicExpr::AO__opencl_atomic_load ||
4869        Op == AtomicExpr::AO__opencl_atomic_store ) &&
4870       Context.AtomicUsesUnsupportedLibcall(AE))
4871     Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
4872         << ((Op == AtomicExpr::AO__c11_atomic_load ||
4873              Op == AtomicExpr::AO__opencl_atomic_load)
4880 /// checkBuiltinArgument - Given a call to a builtin function, perform
4881 /// normal type-checking on the given argument, updating the call in
4882 /// place. This is useful when a builtin function requires custom
4883 /// type-checking for some of its arguments but not necessarily all of
4886 /// Returns true on error.
4887 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
4888 FunctionDecl *Fn = E->getDirectCallee();
4889 assert(Fn && "builtin call without direct callee!");
4891 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
4892 InitializedEntity Entity =
4893 InitializedEntity::InitializeParameter(S.Context, Param);
4895 ExprResult Arg = E->getArg(0);
4896 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
4897 if (Arg.isInvalid())
4900 E->setArg(ArgIndex, Arg.get());
4904 /// We have a call to a function like __sync_fetch_and_add, which is an
4905 /// overloaded function based on the pointer type of its first argument.
4906 /// The main BuildCallExpr routines have already promoted the types of
4907 /// arguments because all of these calls are prototyped as void(...).
4909 /// This function goes through and does final semantic checking for these
4910 /// builtins, as well as generating any warnings.
// NOTE(review): this copy is elided in places — the 'ExprResult' return type
// line, many 'return ExprError();'/'break;' statements, the 'BuiltinIndex = N'
// assignments in the big switch, and several braces are missing. Comments
// describe only the visible code.
4912 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
4913   CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
4914   Expr *Callee = TheCall->getCallee();
4915   DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
4916   FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
4918   // Ensure that we have at least one argument to do type inference from.
4919   if (TheCall->getNumArgs() < 1) {
4920     Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
4921         << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange();
4925   // Inspect the first argument of the atomic builtin.  This should always be
4926   // a pointer type, whose element is an integral scalar or pointer type.
4927   // Because it is a pointer type, we don't have to worry about any implicit
4929   // FIXME: We don't allow floating point scalars as input.
4930   Expr *FirstArg = TheCall->getArg(0);
4931   ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
4932   if (FirstArgResult.isInvalid())
4934   FirstArg = FirstArgResult.get();
4935   TheCall->setArg(0, FirstArg);
4937   const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
4939     Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
4940         << FirstArg->getType() << FirstArg->getSourceRange();
// The pointee must be an integer, pointer, or block pointer, and non-const,
// and (for ObjC ARC) free of ownership qualifiers.
4944   QualType ValType = pointerType->getPointeeType();
4945   if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
4946       !ValType->isBlockPointerType()) {
4947     Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
4948         << FirstArg->getType() << FirstArg->getSourceRange();
4952   if (ValType.isConstQualified()) {
4953     Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
4954         << FirstArg->getType() << FirstArg->getSourceRange();
4958   switch (ValType.getObjCLifetime()) {
4959   case Qualifiers::OCL_None:
4960   case Qualifiers::OCL_ExplicitNone:
4964   case Qualifiers::OCL_Weak:
4965   case Qualifiers::OCL_Strong:
4966   case Qualifiers::OCL_Autoreleasing:
4967     Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
4968         << ValType << FirstArg->getSourceRange();
4972   // Strip any qualifiers off ValType.
4973   ValType = ValType.getUnqualifiedType();
4975   // The majority of builtins return a value, but a few have special return
4976   // types, so allow them to override appropriately below.
4977   QualType ResultType = ValType;
4979   // We need to figure out which concrete builtin this maps onto.  For example,
4980   // __sync_fetch_and_add with a 2 byte object turns into
4981   // __sync_fetch_and_add_2.
4982 #define BUILTIN_ROW(x) \
4983   { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
4984     Builtin::BI##x##_8, Builtin::BI##x##_16 }
// Rows are indexed by BuiltinIndex (set in the switch below); columns by
// SizeIndex (pointee size in bytes: 1/2/4/8/16).
4986   static const unsigned BuiltinIndices[][5] = {
4987     BUILTIN_ROW(__sync_fetch_and_add),
4988     BUILTIN_ROW(__sync_fetch_and_sub),
4989     BUILTIN_ROW(__sync_fetch_and_or),
4990     BUILTIN_ROW(__sync_fetch_and_and),
4991     BUILTIN_ROW(__sync_fetch_and_xor),
4992     BUILTIN_ROW(__sync_fetch_and_nand),
4994     BUILTIN_ROW(__sync_add_and_fetch),
4995     BUILTIN_ROW(__sync_sub_and_fetch),
4996     BUILTIN_ROW(__sync_and_and_fetch),
4997     BUILTIN_ROW(__sync_or_and_fetch),
4998     BUILTIN_ROW(__sync_xor_and_fetch),
4999     BUILTIN_ROW(__sync_nand_and_fetch),
5001     BUILTIN_ROW(__sync_val_compare_and_swap),
5002     BUILTIN_ROW(__sync_bool_compare_and_swap),
5003     BUILTIN_ROW(__sync_lock_test_and_set),
5004     BUILTIN_ROW(__sync_lock_release),
5005     BUILTIN_ROW(__sync_swap)
5009   // Determine the index of the size.
5011   switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
5012   case 1: SizeIndex = 0; break;
5013   case 2: SizeIndex = 1; break;
5014   case 4: SizeIndex = 2; break;
5015   case 8: SizeIndex = 3; break;
5016   case 16: SizeIndex = 4; break;
// Any other pointee size has no _N variant and is rejected (elided default:).
5018     Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
5019         << FirstArg->getType() << FirstArg->getSourceRange();
5023   // Each of these builtins has one pointer argument, followed by some number of
5024   // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
5025   // that we ignore.  Find out which row of BuiltinIndices to read from as well
5026   // as the number of fixed args.
5027   unsigned BuiltinID = FDecl->getBuiltinID();
5028   unsigned BuiltinIndex, NumFixed = 1;
5029   bool WarnAboutSemanticsChange = false;
5030   switch (BuiltinID) {
5031   default: llvm_unreachable("Unknown overloaded atomic builtin!");
5032   case Builtin::BI__sync_fetch_and_add:
5033   case Builtin::BI__sync_fetch_and_add_1:
5034   case Builtin::BI__sync_fetch_and_add_2:
5035   case Builtin::BI__sync_fetch_and_add_4:
5036   case Builtin::BI__sync_fetch_and_add_8:
5037   case Builtin::BI__sync_fetch_and_add_16:
5041   case Builtin::BI__sync_fetch_and_sub:
5042   case Builtin::BI__sync_fetch_and_sub_1:
5043   case Builtin::BI__sync_fetch_and_sub_2:
5044   case Builtin::BI__sync_fetch_and_sub_4:
5045   case Builtin::BI__sync_fetch_and_sub_8:
5046   case Builtin::BI__sync_fetch_and_sub_16:
5050   case Builtin::BI__sync_fetch_and_or:
5051   case Builtin::BI__sync_fetch_and_or_1:
5052   case Builtin::BI__sync_fetch_and_or_2:
5053   case Builtin::BI__sync_fetch_and_or_4:
5054   case Builtin::BI__sync_fetch_and_or_8:
5055   case Builtin::BI__sync_fetch_and_or_16:
5059   case Builtin::BI__sync_fetch_and_and:
5060   case Builtin::BI__sync_fetch_and_and_1:
5061   case Builtin::BI__sync_fetch_and_and_2:
5062   case Builtin::BI__sync_fetch_and_and_4:
5063   case Builtin::BI__sync_fetch_and_and_8:
5064   case Builtin::BI__sync_fetch_and_and_16:
5068   case Builtin::BI__sync_fetch_and_xor:
5069   case Builtin::BI__sync_fetch_and_xor_1:
5070   case Builtin::BI__sync_fetch_and_xor_2:
5071   case Builtin::BI__sync_fetch_and_xor_4:
5072   case Builtin::BI__sync_fetch_and_xor_8:
5073   case Builtin::BI__sync_fetch_and_xor_16:
5077   case Builtin::BI__sync_fetch_and_nand:
5078   case Builtin::BI__sync_fetch_and_nand_1:
5079   case Builtin::BI__sync_fetch_and_nand_2:
5080   case Builtin::BI__sync_fetch_and_nand_4:
5081   case Builtin::BI__sync_fetch_and_nand_8:
5082   case Builtin::BI__sync_fetch_and_nand_16:
// GCC changed __sync_fetch_and_nand semantics in GCC 4.4; warn users.
5084     WarnAboutSemanticsChange = true;
5087   case Builtin::BI__sync_add_and_fetch:
5088   case Builtin::BI__sync_add_and_fetch_1:
5089   case Builtin::BI__sync_add_and_fetch_2:
5090   case Builtin::BI__sync_add_and_fetch_4:
5091   case Builtin::BI__sync_add_and_fetch_8:
5092   case Builtin::BI__sync_add_and_fetch_16:
5096   case Builtin::BI__sync_sub_and_fetch:
5097   case Builtin::BI__sync_sub_and_fetch_1:
5098   case Builtin::BI__sync_sub_and_fetch_2:
5099   case Builtin::BI__sync_sub_and_fetch_4:
5100   case Builtin::BI__sync_sub_and_fetch_8:
5101   case Builtin::BI__sync_sub_and_fetch_16:
5105   case Builtin::BI__sync_and_and_fetch:
5106   case Builtin::BI__sync_and_and_fetch_1:
5107   case Builtin::BI__sync_and_and_fetch_2:
5108   case Builtin::BI__sync_and_and_fetch_4:
5109   case Builtin::BI__sync_and_and_fetch_8:
5110   case Builtin::BI__sync_and_and_fetch_16:
5114   case Builtin::BI__sync_or_and_fetch:
5115   case Builtin::BI__sync_or_and_fetch_1:
5116   case Builtin::BI__sync_or_and_fetch_2:
5117   case Builtin::BI__sync_or_and_fetch_4:
5118   case Builtin::BI__sync_or_and_fetch_8:
5119   case Builtin::BI__sync_or_and_fetch_16:
5123   case Builtin::BI__sync_xor_and_fetch:
5124   case Builtin::BI__sync_xor_and_fetch_1:
5125   case Builtin::BI__sync_xor_and_fetch_2:
5126   case Builtin::BI__sync_xor_and_fetch_4:
5127   case Builtin::BI__sync_xor_and_fetch_8:
5128   case Builtin::BI__sync_xor_and_fetch_16:
5132   case Builtin::BI__sync_nand_and_fetch:
5133   case Builtin::BI__sync_nand_and_fetch_1:
5134   case Builtin::BI__sync_nand_and_fetch_2:
5135   case Builtin::BI__sync_nand_and_fetch_4:
5136   case Builtin::BI__sync_nand_and_fetch_8:
5137   case Builtin::BI__sync_nand_and_fetch_16:
5139     WarnAboutSemanticsChange = true;
5142   case Builtin::BI__sync_val_compare_and_swap:
5143   case Builtin::BI__sync_val_compare_and_swap_1:
5144   case Builtin::BI__sync_val_compare_and_swap_2:
5145   case Builtin::BI__sync_val_compare_and_swap_4:
5146   case Builtin::BI__sync_val_compare_and_swap_8:
5147   case Builtin::BI__sync_val_compare_and_swap_16:
5152   case Builtin::BI__sync_bool_compare_and_swap:
5153   case Builtin::BI__sync_bool_compare_and_swap_1:
5154   case Builtin::BI__sync_bool_compare_and_swap_2:
5155   case Builtin::BI__sync_bool_compare_and_swap_4:
5156   case Builtin::BI__sync_bool_compare_and_swap_8:
5157   case Builtin::BI__sync_bool_compare_and_swap_16:
5160     ResultType = Context.BoolTy;
5163   case Builtin::BI__sync_lock_test_and_set:
5164   case Builtin::BI__sync_lock_test_and_set_1:
5165   case Builtin::BI__sync_lock_test_and_set_2:
5166   case Builtin::BI__sync_lock_test_and_set_4:
5167   case Builtin::BI__sync_lock_test_and_set_8:
5168   case Builtin::BI__sync_lock_test_and_set_16:
5172   case Builtin::BI__sync_lock_release:
5173   case Builtin::BI__sync_lock_release_1:
5174   case Builtin::BI__sync_lock_release_2:
5175   case Builtin::BI__sync_lock_release_4:
5176   case Builtin::BI__sync_lock_release_8:
5177   case Builtin::BI__sync_lock_release_16:
5180     ResultType = Context.VoidTy;
5183   case Builtin::BI__sync_swap:
5184   case Builtin::BI__sync_swap_1:
5185   case Builtin::BI__sync_swap_2:
5186   case Builtin::BI__sync_swap_4:
5187   case Builtin::BI__sync_swap_8:
5188   case Builtin::BI__sync_swap_16:
5193   // Now that we know how many fixed arguments we expect, first check that we
5194   // have at least that many.
5195   if (TheCall->getNumArgs() < 1+NumFixed) {
5196     Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
5197         << 0 << 1 + NumFixed << TheCall->getNumArgs()
5198         << Callee->getSourceRange();
// Warn that __sync builtins imply seq_cst ordering (elided guard presumably
// checks the relevant -Watomic-implicit-seq-cst diagnostic state).
5202   Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
5203       << Callee->getSourceRange();
5205   if (WarnAboutSemanticsChange) {
5206     Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
5207         << Callee->getSourceRange();
5210   // Get the decl for the concrete builtin from this, we can tell what the
5211   // concrete integer type we should convert to is.
5212   unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
5213   const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
5214   FunctionDecl *NewBuiltinDecl;
5215   if (NewBuiltinID == BuiltinID)
5216     NewBuiltinDecl = FDecl;
// Otherwise look the sized builtin up by name (elided 'else {').
5218     // Perform builtin lookup to avoid redeclaring it.
5219     DeclarationName DN(&Context.Idents.get(NewBuiltinName));
5220     LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
5221     LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
5222     assert(Res.getFoundDecl());
5223     NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
5224     if (!NewBuiltinDecl)
5228   // The first argument --- the pointer --- has a fixed type; we
5229   // deduce the types of the rest of the arguments accordingly.  Walk
5230   // the remaining arguments, converting them to the deduced value type.
5231   for (unsigned i = 0; i != NumFixed; ++i) {
5232     ExprResult Arg = TheCall->getArg(i+1);
5234     // GCC does an implicit conversion to the pointer or integer ValType.  This
5235     // can fail in some cases (1i -> int**), check for this error case now.
5236     // Initialize the argument.
5237     InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
5238                                                    ValType, /*consume*/ false);
5239     Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
5240     if (Arg.isInvalid())
5243     // Okay, we have something that *can* be converted to the right type.  Check
5244     // to see if there is a potentially weird extension going on here.  This can
5245     // happen when you do an atomic operation on something like an char* and
5246     // pass in 42.  The 42 gets converted to char.  This is even more strange
5247     // for things like 45.123 -> char, etc.
5248     // FIXME: Do this check.
5249     TheCall->setArg(i+1, Arg.get());
5252   // Create a new DeclRefExpr to refer to the new decl.
5253   DeclRefExpr *NewDRE = DeclRefExpr::Create(
5254       Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
5255       /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
5256       DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());
5258   // Set the callee in the CallExpr.
5259   // FIXME: This loses syntactic information.
5260   QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
5261   ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
5262                                               CK_BuiltinFnToFnPtr);
5263   TheCall->setCallee(PromotedCall.get());
5265   // Change the result type of the call to match the original value type. This
5266   // is arbitrary, but the codegen for these builtins is designed to handle it
5268   TheCall->setType(ResultType);
5270   return TheCallResult;
5273 /// SemaBuiltinNontemporalOverloaded - We have a call to
5274 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
5275 /// overloaded function based on the pointer type of its last argument.
5277 /// This function goes through and does final semantic checking for these
// NOTE(review): this copy is elided in places — some 'return ExprError();'
// lines and the load/store branch structure near the end are missing.
5279 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
// NOTE(review): C-style cast; the sibling SemaBuiltinAtomicOverloaded uses
// static_cast and SemaAtomicOpsOverloaded uses cast<> for the same thing.
5280   CallExpr *TheCall = (CallExpr *)TheCallResult.get();
5282       cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
5283   FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
5284   unsigned BuiltinID = FDecl->getBuiltinID();
5285   assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
5286           BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
5287          "Unexpected nontemporal load/store builtin!");
5288   bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
5289   unsigned numArgs = isStore ? 2 : 1;
5291   // Ensure that we have the proper number of arguments.
5292   if (checkArgCount(*this, TheCall, numArgs))
5295   // Inspect the last argument of the nontemporal builtin.  This should always
5296   // be a pointer type, from which we imply the type of the memory access.
5297   // Because it is a pointer type, we don't have to worry about any implicit
5299   Expr *PointerArg = TheCall->getArg(numArgs - 1);
5300   ExprResult PointerArgResult =
5301       DefaultFunctionArrayLvalueConversion(PointerArg);
5303   if (PointerArgResult.isInvalid())
5305   PointerArg = PointerArgResult.get();
5306   TheCall->setArg(numArgs - 1, PointerArg);
5308   const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
5310     Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
5311         << PointerArg->getType() << PointerArg->getSourceRange();
5315   QualType ValType = pointerType->getPointeeType();
5317   // Strip any qualifiers off ValType.
5318   ValType = ValType.getUnqualifiedType();
// Unlike the atomic builtins, floating-point and vector pointees are allowed.
5319   if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
5320       !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
5321       !ValType->isVectorType()) {
5322     Diag(DRE->getBeginLoc(),
5323          diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
5324         << PointerArg->getType() << PointerArg->getSourceRange();
// Load path (presumably under an elided '!isStore' guard): result type is the
// pointee type and there is no value argument.
5329     TheCall->setType(ValType);
5330     return TheCallResult;
// Store path: convert the value argument (arg 0) to the pointee type; the
// call as a whole yields void.
5333   ExprResult ValArg = TheCall->getArg(0);
5334   InitializedEntity Entity = InitializedEntity::InitializeParameter(
5335       Context, ValType, /*consume*/ false);
5336   ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
5337   if (ValArg.isInvalid())
5340   TheCall->setArg(0, ValArg.get());
5341   TheCall->setType(Context.VoidTy);
5342   return TheCallResult;
5345 /// CheckObjCString - Checks that the argument to the builtin
5346 /// CFString constructor is correct
5347 /// Note: It might also make sense to do the UTF-16 conversion here (would
5348 /// simplify the backend).
5349 bool Sema::CheckObjCString(Expr *Arg) {
// The argument must be a narrow (non-wide/UTF) string literal.
5350   Arg = Arg->IgnoreParenCasts();
5351   StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
5353   if (!Literal || !Literal->isAscii()) {
5354     Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
5355         << Arg->getSourceRange();
// If the literal contains non-ASCII or NUL bytes, do a trial UTF-8 -> UTF-16
// conversion and warn if the content would be truncated/garbled.
5359   if (Literal->containsNonAsciiOrNull()) {
5360     StringRef String = Literal->getString();
5361     unsigned NumBytes = String.size();
5362     SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
5363     const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
5364     llvm::UTF16 *ToPtr = &ToBuf[0];
5366     llvm::ConversionResult Result =
5367         llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
5368                                  ToPtr + NumBytes, llvm::strictConversion);
5369     // Check for conversion failure.
5370     if (Result != llvm::conversionOK)
5371       Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
5372           << Arg->getSourceRange();
5377 /// CheckOSLogFormatStringArg - Checks that the format string argument to the
5378 /// os_log() and os_trace() functions is correct, and converts it to const char *.
5379 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
5380 Arg = Arg->IgnoreParenCasts();
5381 auto *Literal = dyn_cast<StringLiteral>(Arg);
// An @"..." ObjC string literal wraps an ordinary StringLiteral; unwrap it
// so it is accepted as a format string too.
5383 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
5384 Literal = ObjcLiteral->getString();
// Only plain-ASCII or UTF-8 literals are valid os_log/os_trace formats.
5388 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
5390 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
5391 << Arg->getSourceRange());
// Convert the literal to `const char *` by copy-initializing a parameter of
// that type, which applies the usual array-to-pointer decay and
// qualification conversions.
5394 ExprResult Result(Literal);
5395 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
5396 InitializedEntity Entity =
5397 InitializedEntity::InitializeParameter(Context, ResultTy, false);
5398 Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
5402 /// Check that the user is calling the appropriate va_start builtin for the
5403 /// target and calling convention.
5404 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
5405 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
5406 bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
5407 bool IsAArch64 = TT.getArch() == llvm::Triple::aarch64;
5408 bool IsWindows = TT.isOSWindows();
5409 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
5410 if (IsX64 || IsAArch64) {
// Determine the calling convention of the enclosing function; default to
// the C convention when there is no function context (e.g. blocks).
5411 CallingConv CC = CC_C;
5412 if (const FunctionDecl *FD = S.getCurFunctionDecl())
5413 CC = FD->getType()->getAs<FunctionType>()->getCallConv();
// NOTE(review): the check below is the __builtin_ms_va_start case
// (IsMSVAStart) — confirm against the full branch structure, parts of
// which are not visible here.
5415 // Don't allow this in System V ABI functions.
5416 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
5417 return S.Diag(Fn->getBeginLoc(),
5418 diag::err_ms_va_start_used_in_sysv_function);
5420 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
5421 // On x64 Windows, don't allow this in System V ABI functions.
5422 // (Yes, that means there's no corresponding way to support variadic
5423 // System V ABI functions on Windows.)
5424 if ((IsWindows && CC == CC_X86_64SysV) ||
5425 (!IsWindows && CC == CC_Win64))
5426 return S.Diag(Fn->getBeginLoc(),
5427 diag::err_va_start_used_in_wrong_abi_function)
// __builtin_ms_va_start only exists on x86-64/AArch64 targets.
5434 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
/// Check that va_start is used inside a variadic function, block, or ObjC
/// method. Returns true (after diagnosing) on failure. On success, if
/// \p LastParam is non-null it receives the last named parameter of the
/// caller (or null if the caller has no named parameters).
5438 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
5439 ParmVarDecl **LastParam = nullptr) {
5440 // Determine whether the current function, block, or obj-c method is variadic
5441 // and get its parameter list.
5442 bool IsVariadic = false;
5443 ArrayRef<ParmVarDecl *> Params;
5444 DeclContext *Caller = S.CurContext;
5445 if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
5446 IsVariadic = Block->isVariadic();
5447 Params = Block->parameters();
5448 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
5449 IsVariadic = FD->isVariadic();
5450 Params = FD->parameters();
5451 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
5452 IsVariadic = MD->isVariadic();
5453 // FIXME: This isn't correct for methods (results in bogus warning).
5454 Params = MD->parameters();
5455 } else if (isa<CapturedDecl>(Caller)) {
5456 // We don't support va_start in a CapturedDecl.
5457 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
5460 // This must be some other declcontext that parses exprs.
5461 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
// va_start in a fixed-argument (non-variadic) caller is an error.
5466 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
5471 *LastParam = Params.empty() ? nullptr : Params.back();
5476 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
5477 /// for validity. Emit an error and return true on failure; return false
5479 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
5480 Expr *Fn = TheCall->getCallee();
// First make sure the right va_start flavor is used for this target/ABI.
5482 if (checkVAStartABI(*this, BuiltinID, Fn))
// Enforce exactly two arguments (va_list and the last named parameter).
5485 if (TheCall->getNumArgs() > 2) {
5486 Diag(TheCall->getArg(2)->getBeginLoc(),
5487 diag::err_typecheck_call_too_many_args)
5488 << 0 /*function call*/ << 2 << TheCall->getNumArgs()
5489 << Fn->getSourceRange()
5490 << SourceRange(TheCall->getArg(2)->getBeginLoc(),
5491 (*(TheCall->arg_end() - 1))->getEndLoc());
5495 if (TheCall->getNumArgs() < 2) {
5496 return Diag(TheCall->getEndLoc(),
5497 diag::err_typecheck_call_too_few_args_at_least)
5498 << 0 /*function call*/ << 2 << TheCall->getNumArgs();
5501 // Type-check the first argument normally.
5502 if (checkBuiltinArgument(*this, TheCall, 0))
5505 // Check that the current function is variadic, and get its last parameter.
5506 ParmVarDecl *LastParam;
5507 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
5510 // Verify that the second argument to the builtin is the last argument of the
5511 // current function or method.
5512 bool SecondArgIsLastNamedArgument = false;
5513 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
5515 // These are valid if SecondArgIsLastNamedArgument is false after the next
5518 SourceLocation ParamLoc;
5519 bool IsCRegister = false;
5521 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
5522 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
5523 SecondArgIsLastNamedArgument = PV == LastParam;
5525 Type = PV->getType();
5526 ParamLoc = PV->getLocation();
// In C (not C++), a `register` parameter makes va_start undefined.
5528 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
5532 if (!SecondArgIsLastNamedArgument)
5533 Diag(TheCall->getArg(1)->getBeginLoc(),
5534 diag::warn_second_arg_of_va_start_not_last_named_param);
// Passing a reference, a `register` parameter (in C), a float, or a
// promotable integer type as the last named argument is undefined
// behavior per the C standard's va_start rules.
5535 else if (IsCRegister || Type->isReferenceType() ||
5536 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
5537 // Promotable integers are UB, but enumerations need a bit of
5538 // extra checking to see what their promotable type actually is.
5539 if (!Type->isPromotableIntegerType())
5541 if (!Type->isEnumeralType())
5543 const EnumDecl *ED = Type->getAs<EnumType>()->getDecl();
5545 Context.typesAreCompatible(ED->getPromotionType(), Type));
// Reason selects the wording of the diagnostic: 0 = promotable type,
// 1 = reference type, 2 = `register` storage class.
5547 unsigned Reason = 0;
5548 if (Type->isReferenceType()) Reason = 1;
5549 else if (IsCRegister) Reason = 2;
5550 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
5551 Diag(ParamLoc, diag::note_parameter_type) << Type;
// va_start returns void.
5554 TheCall->setType(Context.VoidTy);
/// Check a call to the Windows-on-ARM '__va_start' builtin. Emits an error
/// and returns true on failure.
5558 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
5559 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
5560 // const char *named_addr);
5562 Expr *Func = Call->getCallee();
5564 if (Call->getNumArgs() < 3)
5565 return Diag(Call->getEndLoc(),
5566 diag::err_typecheck_call_too_few_args_at_least)
5567 << 0 /*function call*/ << 3 << Call->getNumArgs();
5569 // Type-check the first argument normally.
5570 if (checkBuiltinArgument(*this, Call, 0))
5573 // Check that the current function is variadic.
5574 if (checkVAStartIsInVariadicFunction(*this, Func))
5577 // __va_start on Windows does not validate the parameter qualifiers
// Arguments 1 and 2 are checked structurally (canonical types) rather than
// through the normal conversion machinery.
5579 const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
5580 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();
5582 const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
5583 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();
5585 const QualType &ConstCharPtrTy =
5586 Context.getPointerType(Context.CharTy.withConst());
// Second argument must be (possibly fast-qualified) pointer-to-char.
5587 if (!Arg1Ty->isPointerType() ||
5588 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy)
5589 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
5590 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
5591 << 0 /* qualifier difference */
5592 << 3 /* parameter mismatch */
5593 << 2 << Arg1->getType() << ConstCharPtrTy;
// Third argument must be size_t.
5595 const QualType SizeTy = Context.getSizeType();
5596 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
5597 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
5598 << Arg2->getType() << SizeTy << 1 /* different class */
5599 << 0 /* qualifier difference */
5600 << 3 /* parameter mismatch */
5601 << 3 << Arg2->getType() << SizeTy;
5606 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
5607 /// friends. This is declared to take (...), so we have to check everything.
5608 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
// These builtins take exactly two arguments.
5609 if (TheCall->getNumArgs() < 2)
5610 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
5611 << 0 << 2 << TheCall->getNumArgs() /*function call*/;
5612 if (TheCall->getNumArgs() > 2)
5613 return Diag(TheCall->getArg(2)->getBeginLoc(),
5614 diag::err_typecheck_call_too_many_args)
5615 << 0 /*function call*/ << 2 << TheCall->getNumArgs()
5616 << SourceRange(TheCall->getArg(2)->getBeginLoc(),
5617 (*(TheCall->arg_end() - 1))->getEndLoc());
5619 ExprResult OrigArg0 = TheCall->getArg(0);
5620 ExprResult OrigArg1 = TheCall->getArg(1);
5622 // Do standard promotions between the two arguments, returning their common
5624 QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
5625 if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
5628 // Make sure any conversions are pushed back into the call; this is
5629 // type safe since unordered compare builtins are declared as "_Bool
5631 TheCall->setArg(0, OrigArg0.get());
5632 TheCall->setArg(1, OrigArg1.get());
// Dependent arguments can't be checked until instantiation.
5634 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
5637 // If the common type isn't a real floating type, then the arguments were
5638 // invalid for this operation.
5639 if (Res.isNull() || !Res->isRealFloatingType())
5640 return Diag(OrigArg0.get()->getBeginLoc(),
5641 diag::err_typecheck_call_invalid_ordered_compare)
5642 << OrigArg0.get()->getType() << OrigArg1.get()->getType()
5643 << SourceRange(OrigArg0.get()->getBeginLoc(),
5644 OrigArg1.get()->getEndLoc());
5649 /// SemaBuiltinFPClassification - Handle functions like
5650 /// __builtin_isnan and friends. This is declared to take (...), so we have
5651 /// to check everything. We expect the last argument to be a floating point
5653 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
// Enforce exactly NumArgs arguments (varies per builtin, e.g. 1 for
// __builtin_isnan, 5 for __builtin_fpclassify).
5654 if (TheCall->getNumArgs() < NumArgs)
5655 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
5656 << 0 << NumArgs << TheCall->getNumArgs() /*function call*/;
5657 if (TheCall->getNumArgs() > NumArgs)
5658 return Diag(TheCall->getArg(NumArgs)->getBeginLoc(),
5659 diag::err_typecheck_call_too_many_args)
5660 << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
5661 << SourceRange(TheCall->getArg(NumArgs)->getBeginLoc(),
5662 (*(TheCall->arg_end() - 1))->getEndLoc());
// Only the last argument is the value to classify.
5664 Expr *OrigArg = TheCall->getArg(NumArgs-1);
5666 if (OrigArg->isTypeDependent())
5669 // This operation requires a non-_Complex floating-point number.
5670 if (!OrigArg->getType()->isRealFloatingType())
5671 return Diag(OrigArg->getBeginLoc(),
5672 diag::err_typecheck_call_invalid_unary_fp)
5673 << OrigArg->getType() << OrigArg->getSourceRange();
5675 // If this is an implicit conversion from float -> float, double, or
5676 // long double, remove it.
5677 if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
5678 // Only remove standard FloatCasts, leaving other casts inplace
5679 if (Cast->getCastKind() == CK_FloatingCast) {
5680 Expr *CastArg = Cast->getSubExpr();
5681 if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
5683 (Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) ||
5684 Cast->getType()->isSpecificBuiltinType(BuiltinType::Float) ||
5685 Cast->getType()->isSpecificBuiltinType(BuiltinType::LongDouble)) &&
5686 "promotion from float to either float, double, or long double is "
5687 "the only expected cast here");
// Detach the sub-expression from the cast before reusing it, so the
// cast no longer owns it, then classify on the unpromoted float.
5688 Cast->setSubExpr(nullptr);
5689 TheCall->setArg(NumArgs-1, CastArg);
5697 // Customized Sema Checking for VSX builtins that have the following signature:
5698 // vector [...] builtinName(vector [...], vector [...], const int);
5699 // Which takes the same type of vectors (any legal vector type) for the first
5700 // two arguments and takes compile time constant for the third argument.
5701 // Example builtins are :
5702 // vector double vec_xxpermdi(vector double, vector double, int);
5703 // vector short vec_xxsldwi(vector short, vector short, int);
5704 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
5705 unsigned ExpectedNumArgs = 3;
5706 if (TheCall->getNumArgs() < ExpectedNumArgs)
5707 return Diag(TheCall->getEndLoc(),
5708 diag::err_typecheck_call_too_few_args_at_least)
5709 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
5710 << TheCall->getSourceRange();
5712 if (TheCall->getNumArgs() > ExpectedNumArgs)
5713 return Diag(TheCall->getEndLoc(),
5714 diag::err_typecheck_call_too_many_args_at_most)
5715 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
5716 << TheCall->getSourceRange();
5718 // Check the third argument is a compile time constant
5720 if(!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context))
5721 return Diag(TheCall->getBeginLoc(),
5722 diag::err_vsx_builtin_nonconstant_argument)
5723 << 3 /* argument index */ << TheCall->getDirectCallee()
5724 << SourceRange(TheCall->getArg(2)->getBeginLoc(),
5725 TheCall->getArg(2)->getEndLoc());
5727 QualType Arg1Ty = TheCall->getArg(0)->getType();
5728 QualType Arg2Ty = TheCall->getArg(1)->getType();
5730 // Check the type of argument 1 and argument 2 are vectors.
// Dependent types are deferred to instantiation time.
5731 SourceLocation BuiltinLoc = TheCall->getBeginLoc();
5732 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
5733 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
5734 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
5735 << TheCall->getDirectCallee()
5736 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
5737 TheCall->getArg(1)->getEndLoc());
5740 // Check the first two arguments are the same type.
5741 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
5742 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
5743 << TheCall->getDirectCallee()
5744 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
5745 TheCall->getArg(1)->getEndLoc());
5748 // When default clang type checking is turned off and the customized type
5749 // checking is used, the returning type of the function must be explicitly
5750 // set. Otherwise it is _Bool by default.
5751 TheCall->setType(Arg1Ty);
5756 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
5757 // This is declared to take (...), so we have to check everything.
5758 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
5759 if (TheCall->getNumArgs() < 2)
5760 return ExprError(Diag(TheCall->getEndLoc(),
5761 diag::err_typecheck_call_too_few_args_at_least)
5762 << 0 /*function call*/ << 2 << TheCall->getNumArgs()
5763 << TheCall->getSourceRange());
5765 // Determine which of the following types of shufflevector we're checking:
5766 // 1) unary, vector mask: (lhs, mask)
5767 // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
5768 QualType resType = TheCall->getArg(0)->getType();
5769 unsigned numElements = 0;
// All checks below are skipped for dependent arguments (template context).
5771 if (!TheCall->getArg(0)->isTypeDependent() &&
5772 !TheCall->getArg(1)->isTypeDependent()) {
5773 QualType LHSType = TheCall->getArg(0)->getType();
5774 QualType RHSType = TheCall->getArg(1)->getType();
5776 if (!LHSType->isVectorType() || !RHSType->isVectorType())
5778 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
5779 << TheCall->getDirectCallee()
5780 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
5781 TheCall->getArg(1)->getEndLoc()));
5783 numElements = LHSType->getAs<VectorType>()->getNumElements();
5784 unsigned numResElements = TheCall->getNumArgs() - 2;
5786 // Check to see if we have a call with 2 vector arguments, the unary shuffle
5787 // with mask. If so, verify that RHS is an integer vector type with the
5788 // same number of elts as lhs.
5789 if (TheCall->getNumArgs() == 2) {
5790 if (!RHSType->hasIntegerRepresentation() ||
5791 RHSType->getAs<VectorType>()->getNumElements() != numElements)
5792 return ExprError(Diag(TheCall->getBeginLoc(),
5793 diag::err_vec_builtin_incompatible_vector)
5794 << TheCall->getDirectCallee()
5795 << SourceRange(TheCall->getArg(1)->getBeginLoc(),
5796 TheCall->getArg(1)->getEndLoc()));
5797 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
5798 return ExprError(Diag(TheCall->getBeginLoc(),
5799 diag::err_vec_builtin_incompatible_vector)
5800 << TheCall->getDirectCallee()
5801 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
5802 TheCall->getArg(1)->getEndLoc()));
5803 } else if (numElements != numResElements) {
// The result vector has as many elements as there are indices, which may
// differ from the operand width; build the narrower/wider vector type.
5804 QualType eltType = LHSType->getAs<VectorType>()->getElementType();
5805 resType = Context.getVectorType(eltType, numResElements,
5806 VectorType::GenericVector);
// Validate each index argument: must be an integer constant expression.
5810 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
5811 if (TheCall->getArg(i)->isTypeDependent() ||
5812 TheCall->getArg(i)->isValueDependent())
5815 llvm::APSInt Result(32);
5816 if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context))
5817 return ExprError(Diag(TheCall->getBeginLoc(),
5818 diag::err_shufflevector_nonconstant_argument)
5819 << TheCall->getArg(i)->getSourceRange());
5821 // Allow -1 which will be translated to undef in the IR.
5822 if (Result.isSigned() && Result.isAllOnesValue())
// Indices select from the concatenation of both operands, hence the
// valid range is [0, 2*numElements).
5825 if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2)
5826 return ExprError(Diag(TheCall->getBeginLoc(),
5827 diag::err_shufflevector_argument_too_large)
5828 << TheCall->getArg(i)->getSourceRange());
5831 SmallVector<Expr*, 32> exprs;
// Move the arguments out of the CallExpr (nulling them in place) and
// rebuild the call as a dedicated ShuffleVectorExpr node.
5833 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
5834 exprs.push_back(TheCall->getArg(i));
5835 TheCall->setArg(i, nullptr);
5838 return new (Context) ShuffleVectorExpr(Context, exprs, resType,
5839 TheCall->getCallee()->getBeginLoc(),
5840 TheCall->getRParenLoc());
5843 /// SemaConvertVectorExpr - Handle __builtin_convertvector
5844 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
5845 SourceLocation BuiltinLoc,
5846 SourceLocation RParenLoc) {
5847 ExprValueKind VK = VK_RValue;
5848 ExprObjectKind OK = OK_Ordinary;
5849 QualType DstTy = TInfo->getType();
5850 QualType SrcTy = E->getType();
// Both source and destination must be vector types (dependent types are
// deferred to instantiation).
5852 if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
5853 return ExprError(Diag(BuiltinLoc,
5854 diag::err_convertvector_non_vector)
5855 << E->getSourceRange());
5856 if (!DstTy->isVectorType() && !DstTy->isDependentType())
5857 return ExprError(Diag(BuiltinLoc,
5858 diag::err_convertvector_non_vector_type));
// __builtin_convertvector is element-wise, so both vectors must have the
// same number of elements.
5860 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
5861 unsigned SrcElts = SrcTy->getAs<VectorType>()->getNumElements();
5862 unsigned DstElts = DstTy->getAs<VectorType>()->getNumElements();
5863 if (SrcElts != DstElts)
5864 return ExprError(Diag(BuiltinLoc,
5865 diag::err_convertvector_incompatible_vector)
5866 << E->getSourceRange());
5869 return new (Context)
5870 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
5873 /// SemaBuiltinPrefetch - Handle __builtin_prefetch.
5874 // This is declared to take (const void*, ...) and can take two
5875 // optional constant int args.
5876 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
5877 unsigned NumArgs = TheCall->getNumArgs();
// At most three arguments: address, rw flag, locality hint.
5880 return Diag(TheCall->getEndLoc(),
5881 diag::err_typecheck_call_too_many_args_at_most)
5882 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();
5884 // Argument 0 is checked for us and the remaining arguments must be
5885 // constant integers.
// Arg 1 (read/write) must be 0..1; arg 2 (locality) must be 0..3.
5886 for (unsigned i = 1; i != NumArgs; ++i)
5887 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
5893 /// SemaBuiltinAssume - Handle __assume (MS Extension).
5894 // __assume does not evaluate its arguments, and should warn if its argument
5895 // has side effects.
5896 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
5897 Expr *Arg = TheCall->getArg(0);
// Defer checking until instantiation for dependent arguments.
5898 if (Arg->isInstantiationDependent()) return false;
// Since the argument is never evaluated, side effects in it are silently
// discarded; warn so the user knows they won't happen.
5900 if (Arg->HasSideEffects(Context))
5901 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects)
5902 << Arg->getSourceRange()
5903 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
5908 /// Handle __builtin_alloca_with_align. This is declared
5909 /// as (size_t, size_t) where the second size_t must be a power of 2 greater
5911 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
5912 // The alignment must be a constant integer.
5913 Expr *Arg = TheCall->getArg(1);
5915 // We can't check the value of a dependent argument.
5916 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
// Warn when the alignment is spelled alignof(...): the builtin wants an
// alignment in bits... NOTE(review): actually it takes bits while alignof
// yields bytes — confirm exact rationale of warn_alloca_align_alignof.
5917 if (const auto *UE =
5918 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
5919 if (UE->getKind() == UETT_AlignOf ||
5920 UE->getKind() == UETT_PreferredAlignOf)
5921 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof)
5922 << Arg->getSourceRange();
5924 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context);
// Alignment must be a power of two...
5926 if (!Result.isPowerOf2())
5927 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
5928 << Arg->getSourceRange();
// ...at least the width of a char in bits...
5930 if (Result < Context.getCharWidth())
5931 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small)
5932 << (unsigned)Context.getCharWidth() << Arg->getSourceRange();
// ...and must fit in a signed 32-bit integer.
5934 if (Result > std::numeric_limits<int32_t>::max())
5935 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big)
5936 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange();
5942 /// Handle __builtin_assume_aligned. This is declared
5943 /// as (const void*, size_t, ...) and can take one optional constant int arg.
5944 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
5945 unsigned NumArgs = TheCall->getNumArgs();
// At most three arguments: pointer, alignment, optional offset.
5948 return Diag(TheCall->getEndLoc(),
5949 diag::err_typecheck_call_too_many_args_at_most)
5950 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();
5952 // The alignment must be a constant integer.
5953 Expr *Arg = TheCall->getArg(1);
5955 // We can't check the value of a dependent argument.
5956 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
5957 llvm::APSInt Result;
5958 if (SemaBuiltinConstantArg(TheCall, 1, Result))
5961 if (!Result.isPowerOf2())
5962 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
5963 << Arg->getSourceRange();
5965 // Alignment calculations can wrap around if it's greater than 2**29.
// Note: this is only a warning, not an error — the call is still accepted.
5966 unsigned MaximumAlignment = 536870912;
5967 if (Result > MaximumAlignment)
5968 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
5969 << Arg->getSourceRange() << MaximumAlignment;
// If the optional third (offset) argument is present, convert it to size_t
// via copy-initialization of a parameter of that type.
5973 ExprResult Arg(TheCall->getArg(2));
5974 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
5975 Context.getSizeType(), false);
5976 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
5977 if (Arg.isInvalid()) return true;
5978 TheCall->setArg(2, Arg.get());
/// Check calls to __builtin_os_log_format and
/// __builtin_os_log_format_buffer_size: validates argument count, the buffer
/// argument (format call only), the format string, and each variadic value,
/// then sets the call's result type. Returns true on error.
5984 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
5985 unsigned BuiltinID =
5986 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
// The buffer-size variant has no buffer argument, so it requires one fewer
// leading argument than the format variant.
5987 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;
5989 unsigned NumArgs = TheCall->getNumArgs();
5990 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
5991 if (NumArgs < NumRequiredArgs) {
5992 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
5993 << 0 /* function call */ << NumRequiredArgs << NumArgs
5994 << TheCall->getSourceRange();
// Cap the number of data arguments at 255 (encoded in a single byte).
5996 if (NumArgs >= NumRequiredArgs + 0x100) {
5997 return Diag(TheCall->getEndLoc(),
5998 diag::err_typecheck_call_too_many_args_at_most)
5999 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
6000 << TheCall->getSourceRange();
6004 // For formatting call, check buffer arg.
6006 ExprResult Arg(TheCall->getArg(i));
6007 InitializedEntity Entity = InitializedEntity::InitializeParameter(
6008 Context, Context.VoidPtrTy, false);
6009 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
6010 if (Arg.isInvalid())
6012 TheCall->setArg(i, Arg.get());
6016 // Check string literal arg.
6017 unsigned FormatIdx = i;
6019 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
6020 if (Arg.isInvalid())
6022 TheCall->setArg(i, Arg.get());
6026 // Make sure variadic args are scalar.
6027 unsigned FirstDataArg = i;
6028 while (i < NumArgs) {
6029 ExprResult Arg = DefaultVariadicArgumentPromotion(
6030 TheCall->getArg(i), VariadicFunction, nullptr);
6031 if (Arg.isInvalid())
// Each argument's size is encoded in one byte of the log buffer, so it
// must be smaller than 256 bytes.
6033 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
6034 if (ArgSize.getQuantity() >= 0x100) {
6035 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
6036 << i << (int)ArgSize.getQuantity() << 0xff
6037 << TheCall->getSourceRange();
6039 TheCall->setArg(i, Arg.get());
6043 // Check formatting specifiers. NOTE: We're only doing this for the non-size
6044 // call to avoid duplicate diagnostics.
6046 llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
6047 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
6048 bool Success = CheckFormatArguments(
6049 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
6050 VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
// Buffer-size calls yield a size_t; format calls yield the buffer pointer.
6057 TheCall->setType(Context.getSizeType());
6059 TheCall->setType(Context.VoidPtrTy);
6064 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
6065 /// TheCall is a constant expression.
6066 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
6067 llvm::APSInt &Result) {
6068 Expr *Arg = TheCall->getArg(ArgNum);
// The callee of a builtin call is always a DeclRefExpr to a FunctionDecl;
// fetch it only to name the builtin in the diagnostic below.
6069 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
6070 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
// Dependent arguments cannot be evaluated yet; accept and defer.
6072 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
// On success Result holds the evaluated constant for the caller.
6074 if (!Arg->isIntegerConstantExpr(Result, Context))
6075 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
6076 << FDecl->getDeclName() << Arg->getSourceRange();
6081 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
6082 /// TheCall is a constant expression in the range [Low, High].
6083 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
6084 int Low, int High, bool RangeIsError) {
// Skip the check entirely inside constant evaluation contexts.
6085 if (isConstantEvaluated())
6087 llvm::APSInt Result;
6089 // We can't check the value of a dependent argument.
6090 Expr *Arg = TheCall->getArg(ArgNum);
6091 if (Arg->isTypeDependent() || Arg->isValueDependent())
6094 // Check constant-ness first.
6095 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
// Out-of-range values are an error or a deferred warning depending on
// RangeIsError.
6098 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
6100 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
6101 << Result.toString(10) << Low << High << Arg->getSourceRange();
6103 // Defer the warning until we know if the code will be emitted so that
6104 // dead code can ignore this.
6105 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
6106 PDiag(diag::warn_argument_invalid_range)
6107 << Result.toString(10) << Low << High
6108 << Arg->getSourceRange());
6114 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
6115 /// TheCall is a constant expression is a multiple of Num..
6116 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
6118 llvm::APSInt Result;
6120 // We can't check the value of a dependent argument.
6121 Expr *Arg = TheCall->getArg(ArgNum);
6122 if (Arg->isTypeDependent() || Arg->isValueDependent())
6125 // Check constant-ness first.
6126 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
// Diagnose values not evenly divisible by Num.
6129 if (Result.getSExtValue() % Num != 0)
6130 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
6131 << Num << Arg->getSourceRange();
6136 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
// Dispatches on the specific AArch64 MTE builtin; each branch validates
// argument kinds (pointer vs. integer), applies the usual lvalue
// conversions, and sets the call's result type. Returns true on error.
6137 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
// __builtin_arm_irg(ptr, mask): insert a random tag; returns tagged pointer.
6138 if (BuiltinID == AArch64::BI__builtin_arm_irg) {
6139 if (checkArgCount(*this, TheCall, 2))
6141 Expr *Arg0 = TheCall->getArg(0);
6142 Expr *Arg1 = TheCall->getArg(1);
6144 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6145 if (FirstArg.isInvalid())
6147 QualType FirstArgType = FirstArg.get()->getType();
6148 if (!FirstArgType->isAnyPointerType())
6149 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6150 << "first" << FirstArgType << Arg0->getSourceRange();
6151 TheCall->setArg(0, FirstArg.get());
6153 ExprResult SecArg = DefaultLvalueConversion(Arg1);
6154 if (SecArg.isInvalid())
6156 QualType SecArgType = SecArg.get()->getType();
6157 if (!SecArgType->isIntegerType())
6158 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
6159 << "second" << SecArgType << Arg1->getSourceRange();
6161 // Derive the return type from the pointer argument.
6162 TheCall->setType(FirstArgType);
// __builtin_arm_addg(ptr, offset): add to a pointer's tag; returns pointer.
6166 if (BuiltinID == AArch64::BI__builtin_arm_addg) {
6167 if (checkArgCount(*this, TheCall, 2))
6170 Expr *Arg0 = TheCall->getArg(0);
6171 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6172 if (FirstArg.isInvalid())
6174 QualType FirstArgType = FirstArg.get()->getType();
6175 if (!FirstArgType->isAnyPointerType())
6176 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6177 << "first" << FirstArgType << Arg0->getSourceRange();
6178 TheCall->setArg(0, FirstArg.get());
6180 // Derive the return type from the pointer argument.
6181 TheCall->setType(FirstArgType);
6183 // Second arg must be an constant in range [0,15]
6184 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
// __builtin_arm_gmi(ptr, mask): gather tag mask; returns int.
6187 if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
6188 if (checkArgCount(*this, TheCall, 2))
6190 Expr *Arg0 = TheCall->getArg(0);
6191 Expr *Arg1 = TheCall->getArg(1);
6193 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6194 if (FirstArg.isInvalid())
6196 QualType FirstArgType = FirstArg.get()->getType();
6197 if (!FirstArgType->isAnyPointerType())
6198 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6199 << "first" << FirstArgType << Arg0->getSourceRange();
6201 QualType SecArgType = Arg1->getType();
6202 if (!SecArgType->isIntegerType())
6203 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
6204 << "second" << SecArgType << Arg1->getSourceRange();
6205 TheCall->setType(Context.IntTy);
// __builtin_arm_ldg / __builtin_arm_stg(ptr): load/store a tag. Only ldg
// produces a value (the tagged pointer); stg is void.
6209 if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
6210 BuiltinID == AArch64::BI__builtin_arm_stg) {
6211 if (checkArgCount(*this, TheCall, 1))
6213 Expr *Arg0 = TheCall->getArg(0);
6214 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6215 if (FirstArg.isInvalid())
6218 QualType FirstArgType = FirstArg.get()->getType();
6219 if (!FirstArgType->isAnyPointerType())
6220 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6221 << "first" << FirstArgType << Arg0->getSourceRange();
6222 TheCall->setArg(0, FirstArg.get());
6224 // Derive the return type from the pointer argument.
6225 if (BuiltinID == AArch64::BI__builtin_arm_ldg)
6226 TheCall->setType(FirstArgType);
// __builtin_arm_subp(a, b): tag-aware pointer difference; returns long long.
// Each operand may be a pointer or a null pointer constant.
6230 if (BuiltinID == AArch64::BI__builtin_arm_subp) {
6231 Expr *ArgA = TheCall->getArg(0);
6232 Expr *ArgB = TheCall->getArg(1);
6234 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
6235 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);
6237 if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
6240 QualType ArgTypeA = ArgExprA.get()->getType();
6241 QualType ArgTypeB = ArgExprB.get()->getType();
6243 auto isNull = [&] (Expr *E) -> bool {
6244 return E->isNullPointerConstant(
6245 Context, Expr::NPC_ValueDependentIsNotNull); };
6247 // argument should be either a pointer or null
6248 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
6249 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
6250 << "first" << ArgTypeA << ArgA->getSourceRange();
6252 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
6253 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
6254 << "second" << ArgTypeB << ArgB->getSourceRange();
6256 // Ensure Pointee types are compatible
6257 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
6258 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
6259 QualType pointeeA = ArgTypeA->getPointeeType();
6260 QualType pointeeB = ArgTypeB->getPointeeType();
// Compare canonical unqualified pointees, mirroring the constraint on
// ordinary pointer subtraction.
6261 if (!Context.typesAreCompatible(
6262 Context.getCanonicalType(pointeeA).getUnqualifiedType(),
6263 Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
6264 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
6265 << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
6266 << ArgB->getSourceRange();
6270 // at least one argument should be pointer type
6271 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
6272 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
6273 << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
6275 if (isNull(ArgA)) // adopt type of the other pointer
6276 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);
6279 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);
6281 TheCall->setArg(0, ArgExprA.get());
6282 TheCall->setArg(1, ArgExprB.get());
6283 TheCall->setType(Context.LongLongTy);
// Callers must route only MTE builtins here.
6286 assert(false && "Unhandled ARM MTE intrinsic");
/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
///
/// The argument must be a non-dependent string literal: either a bare
/// register name (accepted only when AllowName is set and exactly one field
/// is present) or a colon-separated field encoding whose numeric fields are
/// range-checked below.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
  // Classify which flavour of rsr/wsr builtin we were handed; both ISAs share
  // this helper.
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    // Strip the "cp"/"p"/"c" textual prefixes so the remainders can be
    // parsed as plain integers below.
    ValidString &= Fields[0].startswith_lower("cp") ||
                   Fields[0].startswith_lower("p");
      Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1);
    ValidString &= Fields[2].startswith_lower("c");
      Fields[2] = Fields[2].drop_front(1);
    ValidString &= Fields[3].startswith_lower("c");
      Fields[3] = Fields[3].drop_front(1);

    // Per-field upper bounds; note the first field is capped at 1 on AArch64
    // but 15 on 32-bit ARM.
    SmallVector<int, 5> Ranges;
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
      Ranges.append({15, 7, 15});

    // Each remaining field must parse as a base-10 integer within its range.
    for (unsigned i=0; i<Fields.size(); ++i) {
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);

      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // If the register name is one of those that appear in the condition below
    // and the special register builtin being used is one of the write builtins,
    // then we require that the argument provided for writing to the register
    // is an integer constant expression. This is because it will be lowered to
    // an MSR (immediate) instruction, so we need to know the immediate at
    if (TheCall->getNumArgs() != 2)

    std::string RegLower = Reg.lower();
    if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" &&
        RegLower != "pan" && RegLower != "uao")

    // MSR (immediate) encodes a 4-bit value, hence the [0, 15] range.
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
/// This checks that the target supports __builtin_longjmp and
/// that val is a constant 1.
bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
  // __builtin_longjmp is only meaningful on targets with SjLj EH lowering.
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());

  // Second argument: must be an integer constant expression (checked below).
  Expr *Arg = TheCall->getArg(1);
  llvm::APSInt Result;

  // TODO: This is less than ideal. Overload this to take a value.
  if (SemaBuiltinConstantArg(TheCall, 1, Result))

  // Values other than 1 are rejected; the builtin's contract fixes val == 1.
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
           << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());
/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
/// This checks that the target supports __builtin_setjmp.
bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
  // Like __builtin_longjmp above, this requires SjLj lowering support from
  // the target; otherwise emit a hard error covering the whole call.
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
// Accumulates, across all candidate format strings for one call, the highest
// data-argument index not consumed by any conversion specifier, together with
// every format-string expression exhibiting that index, so a single
// diagnostic can be emitted after all strings are processed.
class UncoveredArgHandler {
  // Sentinels for FirstUncoveredArg: Unknown = nothing recorded yet;
  // AllCovered = some string consumed every argument, suppressing the warning.
  enum { Unknown = -1, AllCovered = -2 };

  // >= 0 means a real uncovered argument index; otherwise one of the
  // sentinels above.
  signed FirstUncoveredArg = Unknown;
  SmallVector<const Expr *, 4> DiagnosticExprs;

  UncoveredArgHandler() = default;

  bool hasUncoveredArg() const {
    return (FirstUncoveredArg >= 0);

  unsigned getUncoveredArg() const {
    assert(hasUncoveredArg() && "no uncovered argument");
    return FirstUncoveredArg;

  void setAllCovered() {
    // A string has been found with all arguments covered, so clear out
    DiagnosticExprs.clear();
    FirstUncoveredArg = AllCovered;

  void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
    assert(NewFirstUncoveredArg >= 0 && "Outside range");

    // Don't update if a previous string covers all arguments.
    if (FirstUncoveredArg == AllCovered)

    // UncoveredArgHandler tracks the highest uncovered argument index
    // and with it all the strings that match this index.
    if (NewFirstUncoveredArg == FirstUncoveredArg)
      DiagnosticExprs.push_back(StrExpr);
    else if (NewFirstUncoveredArg > FirstUncoveredArg) {
      DiagnosticExprs.clear();
      DiagnosticExprs.push_back(StrExpr);
      FirstUncoveredArg = NewFirstUncoveredArg;

  // Emits the warning (plus notes) for the recorded expressions; defined
  // later in this file.
  void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
// Classification returned by checkFormatStringExpr: whether the format
// argument was a literal and, if so, whether it was actually checked.
enum StringLiteralCheckType {
  // A literal we could not (or chose not to) verify, e.g. __func__ or a
  // null pointer constant; treated as "not a problem" but not fully checked.
  SLCT_UncheckedLiteral,
// Accumulates a compile-time constant Addend into the running string Offset
// used by checkFormatStringExpr, widening both APSInts as needed so no
// intermediate result overflows. BinOpKind must be BO_Add or BO_Sub; for
// BO_Sub the addend must be on the right (asserted below). On (signed)
// overflow the function widens Offset to twice the bit width and retries.
static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
                       BinaryOperatorKind BinOpKind,
                       bool AddendIsRight) {
  unsigned BitWidth = Offset.getBitWidth();
  unsigned AddendBitWidth = Addend.getBitWidth();
  // There might be negative interim results.
  if (Addend.isUnsigned()) {
    // Widen by one bit first so the sign flip cannot change the value.
    Addend = Addend.zext(++AddendBitWidth);
    Addend.setIsSigned(true);
  // Adjust the bit width of the APSInts.
  if (AddendBitWidth > BitWidth) {
    Offset = Offset.sext(AddendBitWidth);
    BitWidth = AddendBitWidth;
  } else if (BitWidth > AddendBitWidth) {
    Addend = Addend.sext(BitWidth);

  // Perform the arithmetic with overflow detection (sadd_ov/ssub_ov report
  // signed overflow through their out-parameter).
  llvm::APSInt ResOffset = Offset;
  if (BinOpKind == BO_Add)
    ResOffset = Offset.sadd_ov(Addend, Ov);
  assert(AddendIsRight && BinOpKind == BO_Sub &&
         "operator must be add or sub with addend on the right");
  ResOffset = Offset.ssub_ov(Addend, Ov);

  // We add an offset to a pointer here so we should support an offset as big as
  assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
         "index (intermediate) result too big");
  // Double the width and recurse; recursion terminates because the widened
  // operation cannot overflow again at this magnitude.
  Offset = Offset.sext(2 * BitWidth);
  sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
// This is a wrapper class around StringLiteral to support offsetted string
// literals as format strings. It takes the offset into account when returning
// the string and its length or the source locations to display notes correctly.
class FormatStringLiteral {
  const StringLiteral *FExpr;

  // Offset is measured in code units of the literal (see getByteLength,
  // which scales by the character byte width).
  FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
      : FExpr(fexpr), Offset(Offset) {}

  StringRef getString() const {
    return FExpr->getString().drop_front(Offset);

  unsigned getByteLength() const {
    return FExpr->getByteLength() - getCharByteWidth() * Offset;

  unsigned getLength() const { return FExpr->getLength() - Offset; }
  unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }

  StringLiteral::StringKind getKind() const { return FExpr->getKind(); }

  QualType getType() const { return FExpr->getType(); }

  // Kind predicates simply forward to the wrapped literal.
  bool isAscii() const { return FExpr->isAscii(); }
  bool isWide() const { return FExpr->isWide(); }
  bool isUTF8() const { return FExpr->isUTF8(); }
  bool isUTF16() const { return FExpr->isUTF16(); }
  bool isUTF32() const { return FExpr->isUTF32(); }
  bool isPascal() const { return FExpr->isPascal(); }

  // Byte numbers from callers are relative to the offsetted view; shift them
  // back into the underlying literal before asking it for a location.
  SourceLocation getLocationOfByte(
      unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
      const TargetInfo &Target, unsigned *StartToken = nullptr,
      unsigned *StartTokenByteOffset = nullptr) const {
    return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
                                    StartToken, StartTokenByteOffset);

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return FExpr->getBeginLoc().getLocWithOffset(Offset);

  SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
// Forward declaration: parses a fully-resolved format string literal and runs
// the printf/scanf checkers over it; results are reported via diagnostics,
// CheckedVarArgs and UncoveredArg. Defined later in this file; called from
// checkFormatStringExpr once a literal is found.
static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
                              const Expr *OrigFormatExpr,
                              ArrayRef<const Expr *> Args,
                              bool HasVAListArg, unsigned format_idx,
                              unsigned firstDataArg,
                              Sema::FormatStringType Type,
                              bool inFunctionCall,
                              Sema::VariadicCallType CallType,
                              llvm::SmallBitVector &CheckedVarArgs,
                              UncoveredArgHandler &UncoveredArg);
// Determine if an expression is a string literal or constant string.
// If this function returns false on the arguments to a function expecting a
// format string, we will usually need to emit a warning.
// True string literals are then checked by CheckFormatString.
//
// Offset carries the accumulated constant pointer adjustment (from
// `literal + k` or `&literal[k]` forms) into the eventual literal; it is
// combined via sumOffsets and applied in the StringLiteral case below.
static StringLiteralCheckType
checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
                      bool HasVAListArg, unsigned format_idx,
                      unsigned firstDataArg, Sema::FormatStringType Type,
                      Sema::VariadicCallType CallType, bool InFunctionCall,
                      llvm::SmallBitVector &CheckedVarArgs,
                      UncoveredArgHandler &UncoveredArg,
                      llvm::APSInt Offset) {
  if (S.isConstantEvaluated())
    return SLCT_NotALiteral;

  assert(Offset.isSigned() && "invalid offset");

  if (E->isTypeDependent() || E->isValueDependent())
    return SLCT_NotALiteral;

  E = E->IgnoreParenCasts();

  if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
    // Technically -Wformat-nonliteral does not warn about this case.
    // The behavior of printf and friends in this case is implementation
    // dependent. Ideally if the format string cannot be null then
    // it should have a 'nonnull' attribute in the function prototype.
    return SLCT_UncheckedLiteral;

  switch (E->getStmtClass()) {
  case Stmt::BinaryConditionalOperatorClass:
  case Stmt::ConditionalOperatorClass: {
    // The expression is a literal if both sub-expressions were, and it was
    // completely checked only if both sub-expressions were checked.
    const AbstractConditionalOperator *C =
        cast<AbstractConditionalOperator>(E);

    // Determine whether it is necessary to check both sub-expressions, for
    // example, because the condition expression is a constant that can be
    // evaluated at compile time.
    bool CheckLeft = true, CheckRight = true;

    if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(),
                                                 S.isConstantEvaluated())) {

    // We need to maintain the offsets for the right and the left hand side
    // separately to check if every possible indexed expression is a valid
    // string literal. They might have different offsets for different string
    // literals in the end.
    StringLiteralCheckType Left;
      Left = SLCT_UncheckedLiteral;
      Left = checkFormatStringExpr(S, C->getTrueExpr(), Args,
                                   HasVAListArg, format_idx, firstDataArg,
                                   Type, CallType, InFunctionCall,
                                   CheckedVarArgs, UncoveredArg, Offset);
    if (Left == SLCT_NotALiteral || !CheckRight) {

    StringLiteralCheckType Right =
        checkFormatStringExpr(S, C->getFalseExpr(), Args,
                              HasVAListArg, format_idx, firstDataArg,
                              Type, CallType, InFunctionCall, CheckedVarArgs,
                              UncoveredArg, Offset);

    // The combined verdict is the weaker of the two branches (enum order:
    // NotALiteral < UncheckedLiteral < CheckedLiteral).
    return (CheckLeft && Left < Right) ? Left : Right;

  case Stmt::ImplicitCastExprClass:
    E = cast<ImplicitCastExpr>(E)->getSubExpr();

  case Stmt::OpaqueValueExprClass:
    if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
    return SLCT_NotALiteral;

  case Stmt::PredefinedExprClass:
    // While __func__, etc., are technically not string literals, they
    // cannot contain format specifiers and thus are not a security
    return SLCT_UncheckedLiteral;

  case Stmt::DeclRefExprClass: {
    const DeclRefExpr *DR = cast<DeclRefExpr>(E);

    // As an exception, do not flag errors for variables binding to
    // const string literals.
    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
      bool isConstant = false;
      QualType T = DR->getType();

      if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
        isConstant = AT->getElementType().isConstant(S.Context);
      } else if (const PointerType *PT = T->getAs<PointerType>()) {
        isConstant = T.isConstant(S.Context) &&
                     PT->getPointeeType().isConstant(S.Context);
      } else if (T->isObjCObjectPointerType()) {
        // In ObjC, there is usually no "const ObjectPointer" type,
        // so don't check if the pointee type is constant.
        isConstant = T.isConstant(S.Context);

        if (const Expr *Init = VD->getAnyInitializer()) {
          // Look through initializers like const char c[] = { "foo" }
          if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
            if (InitList->isStringLiteralInit())
              Init = InitList->getInit(0)->IgnoreParenImpCasts();
          // Recurse into the initializer; InFunctionCall=false so notes
          // point at the variable's definition rather than the call site.
          return checkFormatStringExpr(S, Init, Args,
                                       HasVAListArg, format_idx,
                                       firstDataArg, Type, CallType,
                                       /*InFunctionCall*/ false, CheckedVarArgs,
                                       UncoveredArg, Offset);

      // For vprintf* functions (i.e., HasVAListArg==true), we add a
      // special check to see if the format string is a function parameter
      // of the function calling the printf function. If the function
      // has an attribute indicating it is a printf-like function, then we
      // should suppress warnings concerning non-literals being used in a call
      // to a vprintf function. For example:
      //
      // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){
      //   va_start(ap, fmt);
      //   vprintf(fmt, ap);  // Do NOT emit a warning about "fmt".
      //
      if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) {
        if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) {
          int PVIndex = PV->getFunctionScopeIndex() + 1;
          for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) {
            // adjust for implicit parameter
            if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
              if (MD->isInstance())
            // We also check if the formats are compatible.
            // We can't pass a 'scanf' string to a 'printf' function.
            if (PVIndex == PVFormat->getFormatIdx() &&
                Type == S.GetFormatStringType(PVFormat))
              return SLCT_UncheckedLiteral;

    return SLCT_NotALiteral;

  case Stmt::CallExprClass:
  case Stmt::CXXMemberCallExprClass: {
    const CallExpr *CE = cast<CallExpr>(E);
    if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
      // format_arg attributes let us follow the literal through a call such
      // as gettext(); every such attribute must agree for the call to count.
      bool IsFirst = true;
      StringLiteralCheckType CommonResult;
      for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
        const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
        StringLiteralCheckType Result = checkFormatStringExpr(
            S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
            CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset);
          CommonResult = Result;
        return CommonResult;

      if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
        unsigned BuiltinID = FD->getBuiltinID();
        if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
            BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
          // These builtins just wrap their first argument; look through them.
          const Expr *Arg = CE->getArg(0);
          return checkFormatStringExpr(S, Arg, Args,
                                       HasVAListArg, format_idx,
                                       firstDataArg, Type, CallType,
                                       InFunctionCall, CheckedVarArgs,
                                       UncoveredArg, Offset);

    return SLCT_NotALiteral;

  case Stmt::ObjCMessageExprClass: {
    const auto *ME = cast<ObjCMessageExpr>(E);
    if (const auto *ND = ME->getMethodDecl()) {
      if (const auto *FA = ND->getAttr<FormatArgAttr>()) {
        const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
        return checkFormatStringExpr(
            S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
            CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset);

    return SLCT_NotALiteral;

  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    const StringLiteral *StrE = nullptr;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
      StrE = cast<StringLiteral>(E);

      // Reject accumulated offsets that land outside the literal; a negative
      // or past-the-end offset would index out of bounds.
      if (Offset.isNegative() || Offset > StrE->getLength()) {
        // TODO: It would be better to have an explicit warning for out of
        return SLCT_NotALiteral;

      FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
      CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
                        firstDataArg, Type, InFunctionCall, CallType,
                        CheckedVarArgs, UncoveredArg);
      return SLCT_CheckedLiteral;

    return SLCT_NotALiteral;

  case Stmt::BinaryOperatorClass: {
    const BinaryOperator *BinOp = cast<BinaryOperator>(E);

    // A string literal + an int offset is still a string literal.
    if (BinOp->isAdditiveOp()) {
      Expr::EvalResult LResult, RResult;

      bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
          LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());
      bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
          RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());

      // Exactly one side is a constant integer: fold it into Offset and keep
      // analyzing the other side.
      if (LIsInt != RIsInt) {
        BinaryOperatorKind BinOpKind = BinOp->getOpcode();

        if (BinOpKind == BO_Add) {
          sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt);
          E = BinOp->getRHS();

          sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt);
          E = BinOp->getLHS();

    return SLCT_NotALiteral;

  case Stmt::UnaryOperatorClass: {
    const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
    auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
    // &str[k] is equivalent to str + k; fold the constant index into Offset.
    if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
      Expr::EvalResult IndexResult;
      if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context,
                                       Expr::SE_NoSideEffects,
                                       S.isConstantEvaluated())) {
        sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
                   /*RHS is int*/ true);

    return SLCT_NotALiteral;

    return SLCT_NotALiteral;
6873 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
6874 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
6875 .Case("scanf", FST_Scanf)
6876 .Cases("printf", "printf0", FST_Printf)
6877 .Cases("NSString", "CFString", FST_NSString)
6878 .Case("strftime", FST_Strftime)
6879 .Case("strfmon", FST_Strfmon)
6880 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
6881 .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
6882 .Case("os_trace", FST_OSLog)
6883 .Case("os_log", FST_OSLog)
6884 .Default(FST_Unknown);
/// CheckFormatArguments - Check calls to printf and scanf (and similar
/// functions) for correct use of format strings.
/// Returns true if a format string has been fully checked.
///
/// This overload decodes the format attribute (index of the format string,
/// index of the first data argument, va_list-ness) via getFormatStringInfo
/// and forwards to the main overload below.
bool Sema::CheckFormatArguments(const FormatAttr *Format,
                                ArrayRef<const Expr *> Args,
                                VariadicCallType CallType,
                                SourceLocation Loc, SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  FormatStringInfo FSI;
  if (getFormatStringInfo(Format, IsCXXMember, &FSI))
    return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx,
                                FSI.FirstDataArg, GetFormatStringType(Format),
                                CallType, Loc, Range, CheckedVarArgs);
/// Main format-checking driver: resolves the format argument to a literal
/// (if possible) via checkFormatStringExpr, then either reports uncovered
/// data arguments or emits the appropriate -Wformat-nonliteral /
/// -Wformat-security diagnostics for non-literal formats. Returns true iff
/// the format string was fully checked.
bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
                                bool HasVAListArg, unsigned format_idx,
                                unsigned firstDataArg, FormatStringType Type,
                                VariadicCallType CallType,
                                SourceLocation Loc, SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // CHECK: printf/scanf-like function is called with no format string.
  if (format_idx >= Args.size()) {
    Diag(Loc, diag::warn_missing_format_string) << Range;

  const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();

  // CHECK: format string is not a string literal.
  //
  // Dynamically generated format strings are difficult to
  // automatically vet at compile time. Requiring that format strings
  // are string literals: (1) permits the checking of format strings by
  // the compiler and thereby (2) can practically remove the source of
  // many format string exploits.

  // Format string can be either ObjC string (e.g. @"%d") or
  // C string (e.g. "%d")
  // ObjC string uses the same format specifiers as C string, so we can use
  // the same format string checking logic for both ObjC and C strings.
  UncoveredArgHandler UncoveredArg;
  StringLiteralCheckType CT =
      checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg,
                            format_idx, firstDataArg, Type, CallType,
                            /*IsFunctionCall*/ true, CheckedVarArgs,
                            /*no string offset*/ llvm::APSInt(64, false) = 0);

  // Generate a diagnostic where an uncovered argument is detected.
  if (UncoveredArg.hasUncoveredArg()) {
    unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
    assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
    UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]);

  if (CT != SLCT_NotALiteral)
    // Literal format string found, check done!
    return CT == SLCT_CheckedLiteral;

  // Strftime is particular as it always uses a single 'time' argument,
  // so it is safe to pass a non-literal string.
  if (Type == FST_Strftime)

  // Do not emit diag when the string param is a macro expansion and the
  // format is either NSString or CFString. This is a hack to prevent
  // diag when using the NSLocalizedString and CFCopyLocalizedString macros
  // which are usually used in place of NS and CF string literals.
  SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
  if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))

  // If there are no arguments specified, warn with -Wformat-security, otherwise
  // warn only with -Wformat-nonliteral.
  if (Args.size() == firstDataArg) {
    Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
        << OrigFormatExpr->getSourceRange();
    case FST_FreeBSDKPrintf:
      // Suggest prefixing the non-literal with a "%s" literal so arbitrary
      // '%' sequences in the value cannot be interpreted as specifiers.
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
      // ObjC flavour of the same fix-it, using %@ and an @-string.
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
    Diag(FormatLoc, diag::warn_format_nonliteral)
        << OrigFormatExpr->getSourceRange();
// Base callback handler driven by the format-string parser: records which
// data arguments each conversion specifier consumes (CoveredArgs) and emits
// the shared family of -Wformat diagnostics. printf/scanf-specific subclasses
// extend it elsewhere in this file.
class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
  const FormatStringLiteral *FExpr;
  const Expr *OrigFormatExpr;
  const Sema::FormatStringType FSType;
  const unsigned FirstDataArg;
  const unsigned NumDataArgs;
  const char *Beg; // Start of format string.
  const bool HasVAListArg;
  ArrayRef<const Expr *> Args;

  // One bit per data argument; set when a specifier consumes that argument.
  llvm::SmallBitVector CoveredArgs;
  bool usesPositionalArgs = false;
  bool atFirstArg = true;
  bool inFunctionCall;
  Sema::VariadicCallType CallType;
  llvm::SmallBitVector &CheckedVarArgs;
  UncoveredArgHandler &UncoveredArg;

  CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, const char *beg, bool hasVAListArg,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType callType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
        FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
        HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx),
        inFunctionCall(inFunctionCall), CallType(callType),
        CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
    // Start with no arguments covered.
    CoveredArgs.resize(numDataArgs);
    CoveredArgs.reset();

  void DoneProcessing();

  void HandleIncompleteSpecifier(const char *startSpecifier,
                                 unsigned specifierLen) override;

  void HandleInvalidLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen,

  void HandleNonStandardLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const char *startSpecifier, unsigned specifierLen);

  void HandleNonStandardConversionSpecifier(
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen);

  void HandlePosition(const char *startPos, unsigned posLen) override;

  void HandleInvalidPosition(const char *startSpecifier,
                             unsigned specifierLen,
                             analyze_format_string::PositionContext p) override;

  void HandleZeroPosition(const char *startPos, unsigned posLen) override;

  void HandleNullChar(const char *nullCharacter) override;

  // Static so UncoveredArgHandler::Diagnose can reuse it without a handler
  // instance.
  template <typename Range>
  EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
                       const PartialDiagnostic &PDiag, SourceLocation StringLoc,
                       bool IsStringLocation, Range StringRange,
                       ArrayRef<FixItHint> Fixit = None);

  bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
                                        const char *startSpec,
                                        unsigned specifierLen,
                                        const char *csStart, unsigned csLen);

  void HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                         const char *startSpec,
                                         unsigned specifierLen);

  SourceRange getFormatStringRange();
  CharSourceRange getSpecifierRange(const char *startSpecifier,
                                    unsigned specifierLen);
  SourceLocation getLocationOfByte(const char *x);

  const Expr *getDataArg(unsigned i) const;

  bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
                    const analyze_format_string::ConversionSpecifier &CS,
                    const char *startSpecifier, unsigned specifierLen,

  template <typename Range>
  void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
                            bool IsStringLocation, Range StringRange,
                            ArrayRef<FixItHint> Fixit = None);
7094 SourceRange CheckFormatHandler::getFormatStringRange() {
7095 return OrigFormatExpr->getSourceRange();
7098 CharSourceRange CheckFormatHandler::
7099 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
7100 SourceLocation Start = getLocationOfByte(startSpecifier);
7101 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1);
7103 // Advance the end SourceLocation by one due to half-open ranges.
7104 End = End.getLocWithOffset(1);
7106 return CharSourceRange::getCharRange(Start, End);
7109 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
7110 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(),
7111 S.getLangOpts(), S.Context.getTargetInfo());
7114 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
7115 unsigned specifierLen){
7116 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
7117 getLocationOfByte(startSpecifier),
7118 /*IsStringLocation*/true,
7119 getSpecifierRange(startSpecifier, specifierLen));
// Diagnose a length modifier that is invalid for its conversion specifier
// (DiagID selects the exact warning). When the parser knows a corrected
// modifier, a note with a replacement fix-it is attached; for the
// "nonsensical length" case a removal fix-it is offered instead.
void CheckFormatHandler::HandleInvalidLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    // Note with a concrete replacement for the bad modifier.
    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

    if (DiagID == diag::warn_format_nonsensical_length)
      Hint = FixItHint::CreateRemoval(LMRange);

    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen),
// Diagnose a length modifier that is accepted but not part of the relevant
// standard (-Wformat-non-iso style warning, selector 0 = length modifier).
// If a standard replacement is known, a fix-it note is attached.
void CheckFormatHandler::HandleNonStandardLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    // Suggest the standard-conforming modifier as a replacement.
    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
// Diagnose a conversion specifier that is accepted but not part of the
// relevant standard (selector 1 = conversion specifier). If a standard
// equivalent exists, a fix-it note proposing it is attached.
void CheckFormatHandler::HandleNonStandardConversionSpecifier(
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  // See if we know how to fix this conversion specifier.
  Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    // Suggest the standard-conforming specifier as a replacement.
    CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
    S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
      << FixedCS->toString()
      << FixItHint::CreateReplacement(CSRange, FixedCS->toString());

    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
// Warn that positional arguments ('%1$d') are a POSIX extension, not
// standard C.
7213 void CheckFormatHandler::HandlePosition(const char *startPos,
7215 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
7216 getLocationOfByte(startPos),
7217 /*IsStringLocation*/true,
7218 getSpecifierRange(startPos, posLen));
// Diagnose a malformed positional specifier; 'p' indicates which part of
// the specifier (field width, precision, ...) the bad position appeared in.
7222 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
7223 analyze_format_string::PositionContext p) {
7224 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
7226 getLocationOfByte(startPos), /*IsStringLocation*/true,
7227 getSpecifierRange(startPos, posLen));
// Diagnose a '%0$' positional specifier: positions are 1-based, so zero is
// never valid.
7230 void CheckFormatHandler::HandleZeroPosition(const char *startPos,
7232 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
7233 getLocationOfByte(startPos),
7234 /*IsStringLocation*/true,
7235 getSpecifierRange(startPos, posLen));
// Diagnose an embedded NUL in the format string. ObjC string literals are
// exempt here — the check below only fires for non-ObjC literals.
7238 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
7239 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
7240 // The presence of a null character is likely an error.
7241 EmitFormatDiagnostic(
7242 S.PDiag(diag::warn_printf_format_string_contains_null_char),
7243 getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
7244 getFormatStringRange());
7248 // Note that this may return NULL if there was an error parsing or building
7249 // one of the argument expressions.
// 'i' is relative to the first data argument, not to the full call-arg list.
7250 const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
7251 return Args[FirstDataArg + i];
// Called after the whole format string has been walked; records the first
// data argument that no specifier consumed so a -Wformat "data argument not
// used" warning can be issued later.
7254 void CheckFormatHandler::DoneProcessing() {
7255 // Does the number of data arguments exceed the number of
7256 // format conversions in the format string?
7257 if (!HasVAListArg) {
7258 // Find any arguments that weren't covered.
7260 signed notCoveredArg = CoveredArgs.find_first();
7261 if (notCoveredArg >= 0) {
7262 assert((unsigned)notCoveredArg < NumDataArgs);
7263 UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
7265 UncoveredArg.setAllCovered();
// Emit the deferred "data argument not used" warning, highlighting every
// recorded format-string expression. Suppressed inside system macros.
7270 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
7271 const Expr *ArgExpr) {
7272 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
7278 SourceLocation Loc = ArgExpr->getBeginLoc();
7280 if (S.getSourceManager().isInSystemMacro(Loc))
// Attach the source range of each format string that failed to cover the
// argument, so all of them are highlighted in the single diagnostic.
7283 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
7284 for (auto E : DiagnosticExprs)
7285 PDiag << E->getSourceRange();
7287 CheckFormatHandler::EmitFormatDiagnostic(
7288 S, IsFunctionCall, DiagnosticExprs[0],
7289 PDiag, Loc, /*IsStringLocation*/false,
7290 DiagnosticExprs[0]->getSourceRange());
// Diagnose an unknown conversion specifier. Non-printable specifier bytes
// are rendered as an escaped code point so the diagnostic stays readable.
7294 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
7296 const char *startSpec,
7297 unsigned specifierLen,
7298 const char *csStart,
7300 bool keepGoing = true;
7301 if (argIndex < NumDataArgs) {
7302 // Consider the argument covered, even though the specifier doesn't
7304 CoveredArgs.set(argIndex);
7307 // If argIndex exceeds the number of data arguments we
7308 // don't issue a warning because that is just a cascade of warnings (and
7309 // they may have intended '%%' anyway). We don't want to continue processing
7310 // the format string after this point, however, as we will likely just get
7311 // gibberish when trying to match arguments.
7315 StringRef Specifier(csStart, csLen);
7317 // If the specifier is non-printable, it could be the first byte of a UTF-8
7318 // sequence. In that case, print the UTF-8 code point. If not, print the byte
7320 std::string CodePointStr;
7321 if (!llvm::sys::locale::isPrint(*csStart)) {
7322 llvm::UTF32 CodePoint;
7323 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
7324 const llvm::UTF8 *E =
7325 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
7326 llvm::ConversionResult Result =
7327 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);
// Fall back to the raw first byte when the bytes are not valid UTF-8.
7329 if (Result != llvm::conversionOK) {
7330 unsigned char FirstChar = *csStart;
7331 CodePoint = (llvm::UTF32)FirstChar;
// Format as \xNN, \uNNNN or \UNNNNNNNN depending on code-point magnitude.
7334 llvm::raw_string_ostream OS(CodePointStr);
7335 if (CodePoint < 256)
7336 OS << "\\x" << llvm::format("%02x", CodePoint);
7337 else if (CodePoint <= 0xFFFF)
7338 OS << "\\u" << llvm::format("%04x", CodePoint);
7340 OS << "\\U" << llvm::format("%08x", CodePoint);
7342 Specifier = CodePointStr;
7345 EmitFormatDiagnostic(
7346 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
7347 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));
// Diagnose a format string that mixes positional ('%1$d') and
// non-positional ('%d') specifiers, which is undefined behavior.
7353 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
7354 const char *startSpec,
7355 unsigned specifierLen) {
7356 EmitFormatDiagnostic(
7357 S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
7358 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
// Verify that the data argument referenced by this specifier actually
// exists; emits a positional- or count-specific warning when it does not.
7362 CheckFormatHandler::CheckNumArgs(
7363 const analyze_format_string::FormatSpecifier &FS,
7364 const analyze_format_string::ConversionSpecifier &CS,
7365 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {
7367 if (argIndex >= NumDataArgs) {
// Positional specifiers name the exact (1-based) argument they reference,
// so that variant of the diagnostic can report it.
7368 PartialDiagnostic PDiag = FS.usesPositionalArg()
7369 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
7370 << (argIndex+1) << NumDataArgs)
7371 : S.PDiag(diag::warn_printf_insufficient_data_args);
7372 EmitFormatDiagnostic(
7373 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
7374 getSpecifierRange(startSpecifier, specifierLen));
7376 // Since more arguments than conversion tokens are given, by extension
7377 // all arguments are covered, so mark this as so.
7378 UncoveredArg.setAllCovered();
// Convenience overload: forwards to the static EmitFormatDiagnostic using
// this handler's Sema, call-context flag, and format-argument expression.
7384 template<typename Range>
7385 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
7387 bool IsStringLocation,
7389 ArrayRef<FixItHint> FixIt) {
7390 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
7391 Loc, IsStringLocation, StringRange, FixIt);
7394 /// If the format string is not within the function call, emit a note
7395 /// so that the function call and string are in diagnostic messages.
7397 /// \param InFunctionCall if true, the format string is within the function
7398 /// call and only one diagnostic message will be produced. Otherwise, an
7399 /// extra note will be emitted pointing to location of the format string.
7401 /// \param ArgumentExpr the expression that is passed as the format string
7402 /// argument in the function call. Used for getting locations when two
7403 /// diagnostics are emitted.
7405 /// \param PDiag the callee should already have provided any strings for the
7406 /// diagnostic message. This function only adds locations and fixits
7409 /// \param Loc primary location for diagnostic. If two diagnostics are
7410 /// required, one will be at Loc and a new SourceLocation will be created for
7413 /// \param IsStringLocation if true, Loc points into the format string and is
7414 /// used for the note. Otherwise, Loc points to the argument list and will
7415 /// be used with PDiag.
7417 /// \param StringRange some or all of the string to highlight. This is
7418 /// templated so it can accept either a CharSourceRange or a SourceRange.
7420 /// \param FixIt optional fix it hint for the format string.
7421 template <typename Range>
7422 void CheckFormatHandler::EmitFormatDiagnostic(
7423 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
7424 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
7425 Range StringRange, ArrayRef<FixItHint> FixIt) {
// Single-diagnostic case: the format string is lexically inside the call.
7426 if (InFunctionCall) {
7427 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
// Two-diagnostic case: warn at the call site, then note where the format
// string itself was defined.
7431 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
7432 << ArgumentExpr->getSourceRange();
7434 const Sema::SemaDiagnosticBuilder &Note =
7435 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
7436 diag::note_format_string_defined);
7438 Note << StringRange;
7443 //===--- CHECK: Printf format string checking ------------------------------===//
// Handler that implements the printf-family specific checks (specifier
// validity, flag sanity, argument type matching) on top of the generic
// CheckFormatHandler callbacks.
7447 class CheckPrintfHandler : public CheckFormatHandler {
7449 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
7450 const Expr *origFormatExpr,
7451 const Sema::FormatStringType type, unsigned firstDataArg,
7452 unsigned numDataArgs, bool isObjC, const char *beg,
7453 bool hasVAListArg, ArrayRef<const Expr *> Args,
7454 unsigned formatIdx, bool inFunctionCall,
7455 Sema::VariadicCallType CallType,
7456 llvm::SmallBitVector &CheckedVarArgs,
7457 UncoveredArgHandler &UncoveredArg)
7458 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
7459 numDataArgs, beg, hasVAListArg, Args, formatIdx,
7460 inFunctionCall, CallType, CheckedVarArgs,
// True when checking an NSString-style format (e.g. [NSString
// stringWithFormat:]).
7463 bool isObjCContext() const { return FSType == Sema::FST_NSString; }
7465 /// Returns true if '%@' specifiers are allowed in the format string.
7466 bool allowsObjCArg() const {
7467 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
7468 FSType == Sema::FST_OSTrace;
7471 bool HandleInvalidPrintfConversionSpecifier(
7472 const analyze_printf::PrintfSpecifier &FS,
7473 const char *startSpecifier,
7474 unsigned specifierLen) override;
7476 void handleInvalidMaskType(StringRef MaskType) override;
7478 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
7479 const char *startSpecifier,
7480 unsigned specifierLen) override;
7481 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
7482 const char *StartSpecifier,
7483 unsigned SpecifierLen,
// Validates a '*' or constant field width / precision; 'k' selects which
// kind is being checked in the diagnostics.
7486 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k,
7487 const char *startSpecifier, unsigned specifierLen);
7488 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
7489 const analyze_printf::OptionalAmount &Amt,
7491 const char *startSpecifier, unsigned specifierLen);
7492 void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
7493 const analyze_printf::OptionalFlag &flag,
7494 const char *startSpecifier, unsigned specifierLen);
7495 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
7496 const analyze_printf::OptionalFlag &ignoredFlag,
7497 const analyze_printf::OptionalFlag &flag,
7498 const char *startSpecifier, unsigned specifierLen);
7499 bool checkForCStrMembers(const analyze_printf::ArgType &AT,
// ObjC '%[...]' modifier-flag diagnostics.
7502 void HandleEmptyObjCModifierFlag(const char *startFlag,
7503 unsigned flagLen) override;
7505 void HandleInvalidObjCModifierFlag(const char *startFlag,
7506 unsigned flagLen) override;
7508 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
7509 const char *flagsEnd,
7510 const char *conversionPosition)
// Printf-specific wrapper: routes an unknown specifier to the generic
// HandleInvalidConversionSpecifier with printf location information.
7516 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
7517 const analyze_printf::PrintfSpecifier &FS,
7518 const char *startSpecifier,
7519 unsigned specifierLen) {
7520 const analyze_printf::PrintfConversionSpecifier &CS =
7521 FS.getConversionSpecifier();
7523 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
7524 getLocationOfByte(CS.getStart()),
7525 startSpecifier, specifierLen,
7526 CS.getStart(), CS.getLength());
// Diagnose an invalid mask-type size annotation at its byte position in the
// format string.
7529 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
7530 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
// Check a '*' field width or precision that consumes a data argument: the
// argument must exist and must type-match (int, or unsigned int as a GCC-
// compatible extension). 'k' selects the field-width/precision wording.
7533 bool CheckPrintfHandler::HandleAmount(
7534 const analyze_format_string::OptionalAmount &Amt,
7535 unsigned k, const char *startSpecifier,
7536 unsigned specifierLen) {
7537 if (Amt.hasDataArgument()) {
7538 if (!HasVAListArg) {
7539 unsigned argIndex = Amt.getArgIndex();
7540 if (argIndex >= NumDataArgs) {
7541 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
7543 getLocationOfByte(Amt.getStart()),
7544 /*IsStringLocation*/true,
7545 getSpecifierRange(startSpecifier, specifierLen));
7546 // Don't do any more checking. We will just emit
7551 // Type check the data argument. It should be an 'int'.
7552 // Although not in conformance with C99, we also allow the argument to be
7553 // an 'unsigned int' as that is a reasonably safe case. GCC also
7554 // doesn't emit a warning for that case.
7555 CoveredArgs.set(argIndex);
7556 const Expr *Arg = getDataArg(argIndex);
7560 QualType T = Arg->getType();
7562 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
7563 assert(AT.isValid());
7565 if (!AT.matchesType(S.Context, T)) {
7566 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
7567 << k << AT.getRepresentativeTypeName(S.Context)
7568 << T << Arg->getSourceRange(),
7569 getLocationOfByte(Amt.getStart()),
7570 /*IsStringLocation*/true,
7571 getSpecifierRange(startSpecifier, specifierLen));
7572 // Don't do any more checking. We will just emit
// Warn about a field width/precision that is meaningless for the given
// conversion specifier; constant amounts get a removal fixit.
7581 void CheckPrintfHandler::HandleInvalidAmount(
7582 const analyze_printf::PrintfSpecifier &FS,
7583 const analyze_printf::OptionalAmount &Amt,
7585 const char *startSpecifier,
7586 unsigned specifierLen) {
7587 const analyze_printf::PrintfConversionSpecifier &CS =
7588 FS.getConversionSpecifier();
// Only a literal constant amount can be removed safely with a fixit.
7591 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
7592 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
7593 Amt.getConstantLength()))
7596 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
7597 << type << CS.toString(),
7598 getLocationOfByte(Amt.getStart()),
7599 /*IsStringLocation*/true,
7600 getSpecifierRange(startSpecifier, specifierLen),
// Warn about a flag (e.g. '#', '0') that has no effect with the given
// conversion specifier, offering a fixit to remove the single flag char.
7604 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
7605 const analyze_printf::OptionalFlag &flag,
7606 const char *startSpecifier,
7607 unsigned specifierLen) {
7608 // Warn about pointless flag with a fixit removal.
7609 const analyze_printf::PrintfConversionSpecifier &CS =
7610 FS.getConversionSpecifier();
7611 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
7612 << flag.toString() << CS.toString(),
7613 getLocationOfByte(flag.getPosition()),
7614 /*IsStringLocation*/true,
7615 getSpecifierRange(startSpecifier, specifierLen),
7616 FixItHint::CreateRemoval(
7617 getSpecifierRange(flag.getPosition(), 1)));
// Warn when one flag is overridden by another (e.g. ' ' ignored by '+'),
// with a fixit removing the ignored flag character.
7620 void CheckPrintfHandler::HandleIgnoredFlag(
7621 const analyze_printf::PrintfSpecifier &FS,
7622 const analyze_printf::OptionalFlag &ignoredFlag,
7623 const analyze_printf::OptionalFlag &flag,
7624 const char *startSpecifier,
7625 unsigned specifierLen) {
7626 // Warn about ignored flag with a fixit removal.
7627 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
7628 << ignoredFlag.toString() << flag.toString(),
7629 getLocationOfByte(ignoredFlag.getPosition()),
7630 /*IsStringLocation*/true,
7631 getSpecifierRange(startSpecifier, specifierLen),
7632 FixItHint::CreateRemoval(
7633 getSpecifierRange(ignoredFlag.getPosition(), 1)));
// Warn about an empty ObjC modifier flag: '%[]...'.
7636 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
7638 // Warn about an empty flag.
7639 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
7640 getLocationOfByte(startFlag),
7641 /*IsStringLocation*/true,
7642 getSpecifierRange(startFlag, flagLen));
// Warn about an unrecognized ObjC modifier flag, with a fixit to remove it.
7645 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
7647 // Warn about an invalid flag.
7648 auto Range = getSpecifierRange(startFlag, flagLen);
7649 StringRef flag(startFlag, flagLen);
7650 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
7651 getLocationOfByte(startFlag),
7652 /*IsStringLocation*/true,
7653 Range, FixItHint::CreateRemoval(Range));
// Warn when ObjC '[...]' modifier flags precede a conversion other than
// '%@'; the whole bracketed span is offered for removal.
7656 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
7657 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
7658 // Warn about using '[...]' without a '@' conversion.
7659 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
7660 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
7661 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
7662 getLocationOfByte(conversionPosition),
7663 /*IsStringLocation*/true,
7664 Range, FixItHint::CreateRemoval(Range));
7667 // Determines if the specified type is a C++ class or struct containing
7668 // a member with the specified name and kind (e.g. a CXXMethodDecl named
7670 template<typename MemberKind>
7671 static llvm::SmallPtrSet<MemberKind*, 1>
7672 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
7673 const RecordType *RT = Ty->getAs<RecordType>();
7674 llvm::SmallPtrSet<MemberKind*, 1> Results;
// Only complete C++ record types can be searched for members.
7678 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
7679 if (!RD || !RD->getDefinition())
// Diagnostics are suppressed: this lookup is purely speculative.
7682 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
7683 Sema::LookupMemberName);
7684 R.suppressDiagnostics();
7686 // We just need to include all members of the right kind turned up by the
7687 // filter, at this point.
7688 if (S.LookupQualifiedName(R, RT->getDecl()))
7689 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
7690 NamedDecl *decl = (*I)->getUnderlyingDecl();
7691 if (MemberKind *FK = dyn_cast<MemberKind>(decl))
7697 /// Check if we could call '.c_str()' on an object.
7699 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
7700 /// allow the call, or if it would be ambiguous).
7701 bool Sema::hasCStrMethod(const Expr *E) {
7702 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
// Look for any zero-argument member named 'c_str' on the expression's type.
7705 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
7706 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
7708 if ((*MI)->getMinRequiredArguments() == 0)
7713 // Check if a (w)string was passed when a (w)char* was needed, and offer a
7714 // better diagnostic if so. AT is assumed to be valid.
7715 // Returns true when a c_str() conversion method is found.
7716 bool CheckPrintfHandler::checkForCStrMembers(
7717 const analyze_printf::ArgType &AT, const Expr *E) {
7718 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
7721 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
// Accept only a zero-argument c_str() whose return type matches what the
// format specifier expects (char* vs wchar_t*).
7723 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
7725 const CXXMethodDecl *Method = *MI;
7726 if (Method->getMinRequiredArguments() == 0 &&
7727 AT.matchesType(S.Context, Method->getReturnType())) {
7728 // FIXME: Suggest parens if the expression needs them.
7729 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
7730 S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
7731 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
// Main per-specifier callback for printf-family format strings: validates
// positional-argument consistency, field width/precision, ObjC/os_log
// restrictions, flags, the length modifier, and finally the data argument's
// type. Returns false to stop walking the format string.
7740 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
7742 const char *startSpecifier,
7743 unsigned specifierLen) {
7744 using namespace analyze_format_string;
7745 using namespace analyze_printf;
7747 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();
// The first argument-consuming specifier fixes whether this format string
// is positional; later specifiers must agree.
7749 if (FS.consumesDataArgument()) {
7752 usesPositionalArgs = FS.usesPositionalArg();
7754 else if (usesPositionalArgs != FS.usesPositionalArg()) {
7755 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
7756 startSpecifier, specifierLen);
7761 // First check if the field width, precision, and conversion specifier
7762 // have matching data arguments.
7763 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
7764 startSpecifier, specifierLen)) {
7768 if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
7769 startSpecifier, specifierLen)) {
7773 if (!CS.consumesDataArgument()) {
7774 // FIXME: Technically specifying a precision or field width here
7775 // makes no sense. Worth issuing a warning at some point.
7779 // Consume the argument.
7780 unsigned argIndex = FS.getArgIndex();
7781 if (argIndex < NumDataArgs) {
7782 // The check to see if the argIndex is valid will come later.
7783 // We set the bit here because we may exit early from this
7784 // function if we encounter some other error.
7785 CoveredArgs.set(argIndex);
7788 // FreeBSD kernel extensions.
7789 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
7790 CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
7791 // We need at least two arguments.
7792 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
7795 // Claim the second argument.
7796 CoveredArgs.set(argIndex + 1);
7798 // Type check the first argument (int for %b, pointer for %D)
7799 const Expr *Ex = getDataArg(argIndex);
7800 const analyze_printf::ArgType &AT =
7801 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
7802 ArgType(S.Context.IntTy) : ArgType::CPointerTy;
7803 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
7804 EmitFormatDiagnostic(
7805 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
7806 << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
7807 << false << Ex->getSourceRange(),
7808 Ex->getBeginLoc(), /*IsStringLocation*/ false,
7809 getSpecifierRange(startSpecifier, specifierLen));
7811 // Type check the second argument (char * for both %b and %D)
7812 Ex = getDataArg(argIndex + 1);
7813 const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
7814 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
7815 EmitFormatDiagnostic(
7816 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
7817 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
7818 << false << Ex->getSourceRange(),
7819 Ex->getBeginLoc(), /*IsStringLocation*/ false,
7820 getSpecifierRange(startSpecifier, specifierLen));
7825 // Check for using an Objective-C specific conversion specifier
7826 // in a non-ObjC literal.
7827 if (!allowsObjCArg() && CS.isObjCArg()) {
7828 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
7832 // %P can only be used with os_log.
7833 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
7834 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
7838 // %n is not allowed with os_log.
7839 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
7840 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
7841 getLocationOfByte(CS.getStart()),
7842 /*IsStringLocation*/ false,
7843 getSpecifierRange(startSpecifier, specifierLen));
7848 // Only scalars are allowed for os_trace.
7849 if (FSType == Sema::FST_OSTrace &&
7850 (CS.getKind() == ConversionSpecifier::PArg ||
7851 CS.getKind() == ConversionSpecifier::sArg ||
7852 CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
7853 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
7857 // Check for use of public/private annotation outside of os_log().
7858 if (FSType != Sema::FST_OSLog) {
7859 if (FS.isPublic().isSet()) {
7860 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
7862 getLocationOfByte(FS.isPublic().getPosition()),
7863 /*IsStringLocation*/ false,
7864 getSpecifierRange(startSpecifier, specifierLen));
7866 if (FS.isPrivate().isSet()) {
7867 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
7869 getLocationOfByte(FS.isPrivate().getPosition()),
7870 /*IsStringLocation*/ false,
7871 getSpecifierRange(startSpecifier, specifierLen));
7875 // Check for invalid use of field width
7876 if (!FS.hasValidFieldWidth()) {
7877 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
7878 startSpecifier, specifierLen);
7881 // Check for invalid use of precision
7882 if (!FS.hasValidPrecision()) {
7883 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
7884 startSpecifier, specifierLen);
7887 // Precision is mandatory for %P specifier.
7888 if (CS.getKind() == ConversionSpecifier::PArg &&
7889 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
7890 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
7891 getLocationOfByte(startSpecifier),
7892 /*IsStringLocation*/ false,
7893 getSpecifierRange(startSpecifier, specifierLen));
7896 // Check each flag does not conflict with any other component.
7897 if (!FS.hasValidThousandsGroupingPrefix())
7898 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
7899 if (!FS.hasValidLeadingZeros())
7900 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
7901 if (!FS.hasValidPlusPrefix())
7902 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
7903 if (!FS.hasValidSpacePrefix())
7904 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
7905 if (!FS.hasValidAlternativeForm())
7906 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
7907 if (!FS.hasValidLeftJustified())
7908 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);
7910 // Check that flags are not ignored by another flag
7911 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
7912 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
7913 startSpecifier, specifierLen);
7914 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
7915 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
7916 startSpecifier, specifierLen);
7918 // Check the length modifier is valid with the given conversion specifier.
7919 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
7921 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
7922 diag::warn_format_nonsensical_length);
7923 else if (!FS.hasStandardLengthModifier())
7924 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
7925 else if (!FS.hasStandardLengthConversionCombination())
7926 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
7927 diag::warn_format_non_standard_conversion_spec);
7929 if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
7930 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
7932 // The remaining checks depend on the data arguments.
7936 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
7939 const Expr *Arg = getDataArg(argIndex);
// Finally, type-check the matched data argument against the specifier.
7943 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
// Returns whether wrapping E in a C-style cast fixit would require adding
// parentheses around E to preserve precedence.
7946 static bool requiresParensToAddCast(const Expr *E) {
7947 // FIXME: We should have a general way to reason about operator
7948 // precedence and whether parens are actually needed here.
7949 // Take care of a few common cases where they aren't.
7950 const Expr *Inside = E->IgnoreImpCasts();
// For pseudo-object expressions (e.g. ObjC property access), classify the
// syntactic form the user actually wrote.
7951 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
7952 Inside = POE->getSyntacticForm()->IgnoreImpCasts();
// These expression forms bind at least as tightly as a cast, so no
// parentheses are needed around them.
7954 switch (Inside->getStmtClass()) {
7955 case Stmt::ArraySubscriptExprClass:
7956 case Stmt::CallExprClass:
7957 case Stmt::CharacterLiteralClass:
7958 case Stmt::CXXBoolLiteralExprClass:
7959 case Stmt::DeclRefExprClass:
7960 case Stmt::FloatingLiteralClass:
7961 case Stmt::IntegerLiteralClass:
7962 case Stmt::MemberExprClass:
7963 case Stmt::ObjCArrayLiteralClass:
7964 case Stmt::ObjCBoolLiteralExprClass:
7965 case Stmt::ObjCBoxedExprClass:
7966 case Stmt::ObjCDictionaryLiteralClass:
7967 case Stmt::ObjCEncodeExprClass:
7968 case Stmt::ObjCIvarRefExprClass:
7969 case Stmt::ObjCMessageExprClass:
7970 case Stmt::ObjCPropertyRefExprClass:
7971 case Stmt::ObjCStringLiteralClass:
7972 case Stmt::ObjCSubscriptRefExprClass:
7973 case Stmt::ParenExprClass:
7974 case Stmt::StringLiteralClass:
7975 case Stmt::UnaryOperatorClass:
// Decide whether a value of IntendedTy (e.g. NSInteger, CFIndex) should be
// printed through a cast to a known-wide primitive type rather than
// directly; returns the cast type and the typedef name to mention, or a
// null QualType/empty name when direct printing is fine.
7982 static std::pair<QualType, StringRef>
7983 shouldNotPrintDirectly(const ASTContext &Context,
7984 QualType IntendedTy,
7986 // Use a 'while' to peel off layers of typedefs.
7987 QualType TyTy = IntendedTy;
7988 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
7989 StringRef Name = UserTy->getDecl()->getName();
// Known Darwin platform-independence typedefs mapped to the primitive
// type that is guaranteed large enough to hold them.
7990 QualType CastTy = llvm::StringSwitch<QualType>(Name)
7991 .Case("CFIndex", Context.getNSIntegerType())
7992 .Case("NSInteger", Context.getNSIntegerType())
7993 .Case("NSUInteger", Context.getNSUIntegerType())
7994 .Case("SInt32", Context.IntTy)
7995 .Case("UInt32", Context.UnsignedIntTy)
7996 .Default(QualType())
7998 if (!CastTy.isNull())
7999 return std::make_pair(CastTy, Name);
8001 TyTy = UserTy->desugar();
8004 // Strip parens if necessary.
8005 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
8006 return shouldNotPrintDirectly(Context,
8007 PE->getSubExpr()->getType(),
8010 // If this is a conditional expression, then its result type is constructed
8011 // via usual arithmetic conversions and thus there might be no necessary
8012 // typedef sugar there. Recurse to operands to check for NSInteger &
8013 // Co. usage condition.
8014 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
8015 QualType TrueTy, FalseTy;
8016 StringRef TrueName, FalseName;
8018 std::tie(TrueTy, TrueName) =
8019 shouldNotPrintDirectly(Context,
8020 CO->getTrueExpr()->getType(),
8022 std::tie(FalseTy, FalseName) =
8023 shouldNotPrintDirectly(Context,
8024 CO->getFalseExpr()->getType(),
8025 CO->getFalseExpr());
// Both arms agreeing (or one arm being null) still yields a usable answer.
8027 if (TrueTy == FalseTy)
8028 return std::make_pair(TrueTy, TrueName);
8029 else if (TrueTy.isNull())
8030 return std::make_pair(FalseTy, FalseName);
8031 else if (FalseTy.isNull())
8032 return std::make_pair(TrueTy, TrueName);
8035 return std::make_pair(QualType(), StringRef());
8038 /// Return true if \p ICE is an implicit argument promotion of an arithmetic
8039 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked
8040 /// type do not count.
8042 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
8043 QualType From = ICE->getSubExpr()->getType();
8044 QualType To = ICE->getType();
8045 // It's an integer promotion if the destination type is the promoted
8047 if (ICE->getCastKind() == CK_IntegralCast &&
8048 From->isPromotableIntegerType() &&
8049 S.Context.getPromotedIntegerType(From) == To)
8051 // Look through vector types, since we do default argument promotion for
8053 if (const auto *VecTy = From->getAs<ExtVectorType>())
8054 From = VecTy->getElementType();
8055 if (const auto *VecTy = To->getAs<ExtVectorType>())
8056 To = VecTy->getElementType();
8057 // It's a floating promotion if the source type is a lower rank.
8058 return ICE->getCastKind() == CK_FloatingCast &&
8059 S.Context.getFloatingTypeOrder(From, To) < 0;
// Type-checks one printf-style data argument E against its conversion
// specifier FS, emitting -Wformat diagnostics (with FixItHints that either
// rewrite the specifier or suggest casting the argument) and marking the
// argument as covered in CheckedVarArgs.
// NOTE(review): interior lines are elided in this excerpt; the leading
// numbers are the original file's line numbers.
8063 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
8064 const char *StartSpecifier,
8065 unsigned SpecifierLen,
8067 using namespace analyze_format_string;
8068 using namespace analyze_printf;
8070 // Now type check the data expression that matches the
8071 // format specifier.
8072 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext());
// Strip typeof(expr) sugar so matching sees the underlying type.
8076 QualType ExprTy = E->getType();
8077 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) {
8078 ExprTy = TET->getUnderlyingExpr()->getType();
8081 const analyze_printf::ArgType::MatchKind Match =
8082 AT.matchesType(S.Context, ExprTy);
8083 bool Pedantic = Match == analyze_printf::ArgType::NoMatchPedantic;
8084 if (Match == analyze_printf::ArgType::Match)
8087 // Look through argument promotions for our error message's reported type.
8088 // This includes the integral and floating promotions, but excludes array
8089 // and function pointer decay (seeing that an argument intended to be a
8090 // string has type 'char [6]' is probably more confusing than 'char *') and
8091 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type).
8092 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
8093 if (isArithmeticArgumentPromotion(S, ICE)) {
8094 E = ICE->getSubExpr();
8095 ExprTy = E->getType();
8097 // Check if we didn't match because of an implicit cast from a 'char'
8098 // or 'short' to an 'int'. This is done because printf is a varargs
8100 if (ICE->getType() == S.Context.IntTy ||
8101 ICE->getType() == S.Context.UnsignedIntTy) {
8102 // All further checking is done on the subexpression.
8103 if (AT.matchesType(S.Context, ExprTy))
8107 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
8108 // Special case for 'a', which has type 'int' in C.
8109 // Note, however, that we do /not/ want to treat multibyte constants like
8110 // 'MooV' as characters! This form is deprecated but still exists.
8111 if (ExprTy == S.Context.IntTy)
8112 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue()))
8113 ExprTy = S.Context.CharTy;
8116 // Look through enums to their underlying type.
8117 bool IsEnum = false;
8118 if (auto EnumTy = ExprTy->getAs<EnumType>()) {
8119 ExprTy = EnumTy->getDecl()->getIntegerType();
8123 // %C in an Objective-C context prints a unichar, not a wchar_t.
8124 // If the argument is an integer of some kind, believe the %C and suggest
8125 // a cast instead of changing the conversion specifier.
8126 QualType IntendedTy = ExprTy;
8127 if (isObjCContext() &&
8128 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
8129 if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
8130 !ExprTy->isCharType()) {
8131 // 'unichar' is defined as a typedef of unsigned short, but we should
8132 // prefer using the typedef if it is visible.
8133 IntendedTy = S.Context.UnsignedShortTy;
8135 // While we are here, check if the value is an IntegerLiteral that happens
8136 // to be within the valid range.
8137 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
8138 const llvm::APInt &V = IL->getValue();
8139 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy))
// Prefer naming the 'unichar' typedef in the diagnostic when it is
// visible and matches the intended underlying type.
8143 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(),
8144 Sema::LookupOrdinaryName);
8145 if (S.LookupName(Result, S.getCurScope())) {
8146 NamedDecl *ND = Result.getFoundDecl();
8147 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
8148 if (TD->getUnderlyingType() == IntendedTy)
8149 IntendedTy = S.Context.getTypedefType(TD);
8154 // Special-case some of Darwin's platform-independence types by suggesting
8155 // casts to primitive types that are known to be large enough.
8156 bool ShouldNotPrintDirectly = false; StringRef CastTyName;
8157 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
8159 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
8160 if (!CastTy.isNull()) {
8161 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
8162 // (long in ASTContext). Only complain to pedants.
8163 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
8164 (AT.isSizeT() || AT.isPtrdiffT()) &&
8165 AT.matchesType(S.Context, CastTy))
8167 IntendedTy = CastTy;
8168 ShouldNotPrintDirectly = true;
8172 // We may be able to offer a FixItHint if it is a supported type.
8173 PrintfSpecifier fixedFS = FS;
8175 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());
8178 // Get the fix string from the fixed format specifier
8179 SmallString<16> buf;
8180 llvm::raw_svector_ostream os(buf);
8181 fixedFS.toString(os);
8183 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);
8185 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) {
8188 ? diag::warn_format_conversion_argument_type_mismatch_pedantic
8189 : diag::warn_format_conversion_argument_type_mismatch;
8190 // In this case, the specifier is wrong and should be changed to match
8192 EmitFormatDiagnostic(S.PDiag(Diag)
8193 << AT.getRepresentativeTypeName(S.Context)
8194 << IntendedTy << IsEnum << E->getSourceRange(),
8196 /*IsStringLocation*/ false, SpecRange,
8197 FixItHint::CreateReplacement(SpecRange, os.str()));
8199 // The canonical type for formatting this value is different from the
8200 // actual type of the expression. (This occurs, for example, with Darwin's
8201 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
8202 // should be printed as 'long' for 64-bit compatibility.)
8203 // Rather than emitting a normal format/argument mismatch, we want to
8204 // add a cast to the recommended type (and correct the format string
8206 SmallString<16> CastBuf;
8207 llvm::raw_svector_ostream CastFix(CastBuf);
8209 IntendedTy.print(CastFix, S.Context.getPrintingPolicy())
8212 SmallVector<FixItHint,4> Hints;
8213 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly)
8214 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));
8216 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
8217 // If there's already a cast present, just replace it.
8218 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
8219 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));
8221 } else if (!requiresParensToAddCast(E)) {
8222 // If the expression has high enough precedence,
8223 // just write the C-style cast.
8225 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
8227 // Otherwise, add parens around the expression as well as the cast.
8230 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
8232 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc());
8233 Hints.push_back(FixItHint::CreateInsertion(After, ")"));
8236 if (ShouldNotPrintDirectly) {
8237 // The expression has a type that should not be printed directly.
8238 // We extract the name from the typedef because we don't want to show
8239 // the underlying type in the diagnostic.
8241 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy))
8242 Name = TypedefTy->getDecl()->getName();
8245 unsigned Diag = Pedantic
8246 ? diag::warn_format_argument_needs_cast_pedantic
8247 : diag::warn_format_argument_needs_cast;
8248 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
8249 << E->getSourceRange(),
8250 E->getBeginLoc(), /*IsStringLocation=*/false,
8253 // In this case, the expression could be printed using a different
8254 // specifier, but we've decided that the specifier is probably correct
8255 // and we should cast instead. Just use the normal warning message.
8256 EmitFormatDiagnostic(
8257 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
8258 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
8259 << E->getSourceRange(),
8260 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
8264 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
8266 // Since the warning for passing non-POD types to variadic functions
8267 // was deferred until now, we emit a warning for non-POD
8269 switch (S.isValidVarArgType(ExprTy)) {
8270 case Sema::VAK_Valid:
8271 case Sema::VAK_ValidInCXX11: {
8274 ? diag::warn_format_conversion_argument_type_mismatch_pedantic
8275 : diag::warn_format_conversion_argument_type_mismatch;
8277 EmitFormatDiagnostic(
8278 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
8279 << IsEnum << CSR << E->getSourceRange(),
8280 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
8283 case Sema::VAK_Undefined:
8284 case Sema::VAK_MSVCUndefined:
8285 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string)
8286 << S.getLangOpts().CPlusPlus11 << ExprTy
8288 << AT.getRepresentativeTypeName(S.Context) << CSR
8289 << E->getSourceRange(),
8290 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
// For misused non-POD string-like types, suggest .c_str()-style members.
8291 checkForCStrMembers(AT, E);
8294 case Sema::VAK_Invalid:
8295 if (ExprTy->isObjCObjectType())
8296 EmitFormatDiagnostic(
8297 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
8298 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
8299 << AT.getRepresentativeTypeName(S.Context) << CSR
8300 << E->getSourceRange(),
8301 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
8303 // FIXME: If this is an initializer list, suggest removing the braces
8304 // or inserting a cast to the target type.
8305 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
8306 << isa<InitListExpr>(E) << ExprTy << CallType
8307 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
// Record that this variadic argument was checked by a format specifier so
// the generic non-POD-vararg warning is not emitted again for it.
8311 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
8312 "format string specifier index out of range");
8313 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
8319 //===--- CHECK: Scanf format string checking ------------------------------===//
// Format-string callback handler for scanf-family functions; parallels
// CheckPrintfHandler. All state is forwarded to the CheckFormatHandler base.
8323 class CheckScanfHandler : public CheckFormatHandler {
8325 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
8326 const Expr *origFormatExpr, Sema::FormatStringType type,
8327 unsigned firstDataArg, unsigned numDataArgs,
8328 const char *beg, bool hasVAListArg,
8329 ArrayRef<const Expr *> Args, unsigned formatIdx,
8330 bool inFunctionCall, Sema::VariadicCallType CallType,
8331 llvm::SmallBitVector &CheckedVarArgs,
8332 UncoveredArgHandler &UncoveredArg)
8333 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
8334 numDataArgs, beg, hasVAListArg, Args, formatIdx,
8335 inFunctionCall, CallType, CheckedVarArgs,
// Called for each well-formed scanf conversion specifier.
8338 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
8339 const char *startSpecifier,
8340 unsigned specifierLen) override;
// Called for a specifier whose conversion character is not recognized.
8342 bool HandleInvalidScanfConversionSpecifier(
8343 const analyze_scanf::ScanfSpecifier &FS,
8344 const char *startSpecifier,
8345 unsigned specifierLen) override;
// Called when a '[' scanlist is not terminated by ']'.
8347 void HandleIncompleteScanList(const char *start, const char *end) override;
// Diagnoses a scanf '[' scanlist that reaches the end of the format string
// without a closing ']'.
8352 void CheckScanfHandler::HandleIncompleteScanList(const char *start,
8354 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
8355 getLocationOfByte(end), /*IsStringLocation*/true,
8356 getSpecifierRange(start, end - start));
// Routes an unrecognized scanf conversion character to the shared
// invalid-specifier diagnostic in CheckFormatHandler.
8359 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
8360 const analyze_scanf::ScanfSpecifier &FS,
8361 const char *startSpecifier,
8362 unsigned specifierLen) {
8363 const analyze_scanf::ScanfConversionSpecifier &CS =
8364 FS.getConversionSpecifier();
8366 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
8367 getLocationOfByte(CS.getStart()),
8368 startSpecifier, specifierLen,
8369 CS.getStart(), CS.getLength());
// Validates one scanf conversion specifier: positional-argument consistency,
// zero field width, length-modifier validity, standard conformance, and the
// type of the corresponding pointer argument (with a fix-it when possible).
8372 bool CheckScanfHandler::HandleScanfSpecifier(
8373 const analyze_scanf::ScanfSpecifier &FS,
8374 const char *startSpecifier,
8375 unsigned specifierLen) {
8376 using namespace analyze_scanf;
8377 using namespace analyze_format_string;
8379 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
8381 // Handle case where '%' and '*' don't consume an argument. These shouldn't
8382 // be used to decide if we are using positional arguments consistently.
8383 if (FS.consumesDataArgument()) {
8386 usesPositionalArgs = FS.usesPositionalArg();
8388 else if (usesPositionalArgs != FS.usesPositionalArg()) {
8389 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
8390 startSpecifier, specifierLen);
8395 // Check if the field width is non-zero.
8396 const OptionalAmount &Amt = FS.getFieldWidth();
8397 if (Amt.getHowSpecified() == OptionalAmount::Constant) {
8398 if (Amt.getConstantAmount() == 0) {
8399 const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
8400 Amt.getConstantLength());
8401 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
8402 getLocationOfByte(Amt.getStart()),
8403 /*IsStringLocation*/true, R,
8404 FixItHint::CreateRemoval(R));
8408 if (!FS.consumesDataArgument()) {
8409 // FIXME: Technically specifying a precision or field width here
8410 // makes no sense. Worth issuing a warning at some point.
8414 // Consume the argument.
8415 unsigned argIndex = FS.getArgIndex();
8416 if (argIndex < NumDataArgs) {
8417 // The check to see if the argIndex is valid will come later.
8418 // We set the bit here because we may exit early from this
8419 // function if we encounter some other error.
8420 CoveredArgs.set(argIndex);
8423 // Check the length modifier is valid with the given conversion specifier.
8424 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
8426 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
8427 diag::warn_format_nonsensical_length);
8428 else if (!FS.hasStandardLengthModifier())
8429 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
8430 else if (!FS.hasStandardLengthConversionCombination())
8431 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
8432 diag::warn_format_non_standard_conversion_spec);
8434 if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
8435 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
8437 // The remaining checks depend on the data arguments.
8441 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
8444 // Check that the argument type matches the format specifier.
8445 const Expr *Ex = getDataArg(argIndex);
8449 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);
8451 if (!AT.isValid()) {
8455 analyze_format_string::ArgType::MatchKind Match =
8456 AT.matchesType(S.Context, Ex->getType());
8457 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
8458 if (Match == analyze_format_string::ArgType::Match)
// Try to repair the specifier to match the argument's actual type; only
// offer the fix-it when fixType succeeds.
8461 ScanfSpecifier fixedFS = FS;
8462 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
8463 S.getLangOpts(), S.Context);
8466 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
8467 : diag::warn_format_conversion_argument_type_mismatch;
8470 // Get the fix string from the fixed format specifier.
8471 SmallString<128> buf;
8472 llvm::raw_svector_ostream os(buf);
8473 fixedFS.toString(os);
8475 EmitFormatDiagnostic(
8476 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
8477 << Ex->getType() << false << Ex->getSourceRange(),
8479 /*IsStringLocation*/ false,
8480 getSpecifierRange(startSpecifier, specifierLen),
8481 FixItHint::CreateReplacement(
8482 getSpecifierRange(startSpecifier, specifierLen), os.str()));
// Same mismatch diagnostic, but without a fix-it replacement.
8484 EmitFormatDiagnostic(S.PDiag(Diag)
8485 << AT.getRepresentativeTypeName(S.Context)
8486 << Ex->getType() << false << Ex->getSourceRange(),
8488 /*IsStringLocation*/ false,
8489 getSpecifierRange(startSpecifier, specifierLen));
// Entry point for checking a single (resolved) format-string literal: rejects
// wide literals, warns on truncated/non-null-terminated and empty format
// strings, then dispatches to the printf- or scanf-family parser/handler.
8495 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
8496 const Expr *OrigFormatExpr,
8497 ArrayRef<const Expr *> Args,
8498 bool HasVAListArg, unsigned format_idx,
8499 unsigned firstDataArg,
8500 Sema::FormatStringType Type,
8501 bool inFunctionCall,
8502 Sema::VariadicCallType CallType,
8503 llvm::SmallBitVector &CheckedVarArgs,
8504 UncoveredArgHandler &UncoveredArg) {
8505 // CHECK: is the format string a wide literal?
8506 if (!FExpr->isAscii() && !FExpr->isUTF8()) {
8507 CheckFormatHandler::EmitFormatDiagnostic(
8508 S, inFunctionCall, Args[format_idx],
8509 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
8510 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
8514 // Str - The format string. NOTE: this is NOT null-terminated!
8515 StringRef StrRef = FExpr->getString();
8516 const char *Str = StrRef.data();
8517 // Account for cases where the string literal is truncated in a declaration.
8518 const ConstantArrayType *T =
8519 S.Context.getAsConstantArrayType(FExpr->getType());
8520 assert(T && "String literal not of constant array type!");
8521 size_t TypeSize = T->getSize().getZExtValue();
// Clamp to the array size minus the implicit terminator (never underflowing
// for a zero-sized array), and never beyond the literal's own length.
8522 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
8523 const unsigned numDataArgs = Args.size() - firstDataArg;
8525 // Emit a warning if the string literal is truncated and does not contain an
8526 // embedded null character.
8527 if (TypeSize <= StrRef.size() &&
8528 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) {
8529 CheckFormatHandler::EmitFormatDiagnostic(
8530 S, inFunctionCall, Args[format_idx],
8531 S.PDiag(diag::warn_printf_format_string_not_null_terminated),
8532 FExpr->getBeginLoc(),
8533 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
8537 // CHECK: empty format string?
8538 if (StrLen == 0 && numDataArgs > 0) {
8539 CheckFormatHandler::EmitFormatDiagnostic(
8540 S, inFunctionCall, Args[format_idx],
8541 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
8542 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
8546 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
8547 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
8548 Type == Sema::FST_OSTrace) {
8549 CheckPrintfHandler H(
8550 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
8551 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
8552 HasVAListArg, Args, format_idx, inFunctionCall, CallType,
8553 CheckedVarArgs, UncoveredArg);
8555 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
8557 S.Context.getTargetInfo(),
8558 Type == Sema::FST_FreeBSDKPrintf))
8560 } else if (Type == Sema::FST_Scanf) {
8561 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
8562 numDataArgs, Str, HasVAListArg, Args, format_idx,
8563 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
8565 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
8567 S.Context.getTargetInfo()))
8569 } // TODO: handle other formats
// Returns true if the given format-string literal contains a '%s'-style
// conversion (used elsewhere to gate security-related diagnostics).
8572 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
8573 // Str - The format string. NOTE: this is NOT null-terminated!
8574 StringRef StrRef = FExpr->getString();
8575 const char *Str = StrRef.data();
8576 // Account for cases where the string literal is truncated in a declaration.
8577 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
8578 assert(T && "String literal not of constant array type!");
8579 size_t TypeSize = T->getSize().getZExtValue();
// Same truncation handling as CheckFormatString above.
8580 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
8581 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
8583 Context.getTargetInfo());
8586 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
8588 // Returns the related absolute value function that is larger, or 0 if one
// Maps an abs-family builtin ID to the next-larger variant in the same
// family (abs -> labs -> llabs, fabsf -> fabs -> fabsl, cabsf -> cabs ->
// cabsl, for both BI__builtin_* and library forms). Used to iterate upward
// when searching for a variant wide enough for the argument.
8590 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
8591 switch (AbsFunction) {
8595 case Builtin::BI__builtin_abs:
8596 return Builtin::BI__builtin_labs;
8597 case Builtin::BI__builtin_labs:
8598 return Builtin::BI__builtin_llabs;
8599 case Builtin::BI__builtin_llabs:
8602 case Builtin::BI__builtin_fabsf:
8603 return Builtin::BI__builtin_fabs;
8604 case Builtin::BI__builtin_fabs:
8605 return Builtin::BI__builtin_fabsl;
8606 case Builtin::BI__builtin_fabsl:
8609 case Builtin::BI__builtin_cabsf:
8610 return Builtin::BI__builtin_cabs;
8611 case Builtin::BI__builtin_cabs:
8612 return Builtin::BI__builtin_cabsl;
8613 case Builtin::BI__builtin_cabsl:
8616 case Builtin::BIabs:
8617 return Builtin::BIlabs;
8618 case Builtin::BIlabs:
8619 return Builtin::BIllabs;
8620 case Builtin::BIllabs:
8623 case Builtin::BIfabsf:
8624 return Builtin::BIfabs;
8625 case Builtin::BIfabs:
8626 return Builtin::BIfabsl;
8627 case Builtin::BIfabsl:
8630 case Builtin::BIcabsf:
8631 return Builtin::BIcabs;
8632 case Builtin::BIcabs:
8633 return Builtin::BIcabsl;
8634 case Builtin::BIcabsl:
8639 // Returns the argument type of the absolute value function.
// Returns a null QualType if the builtin's type cannot be formed or it is
// not a single-parameter prototype.
8640 static QualType getAbsoluteValueArgumentType(ASTContext &Context,
8645 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
8646 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
8647 if (Error != ASTContext::GE_None)
8650 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
8654 if (FT->getNumParams() != 1)
8657 return FT->getParamType(0);
8660 // Returns the best absolute value function, or zero, based on type and
8661 // current absolute value function.
// Walks from AbsFunctionKind to ever-larger variants until one's parameter
// is at least as wide as ArgType (preferring an exact type match).
8662 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
8663 unsigned AbsFunctionKind) {
8664 unsigned BestKind = 0;
8665 uint64_t ArgSize = Context.getTypeSize(ArgType);
8666 for (unsigned Kind = AbsFunctionKind; Kind != 0;
8667 Kind = getLargerAbsoluteValueFunction(Kind)) {
8668 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
8669 if (Context.getTypeSize(ParamType) >= ArgSize) {
8672 else if (Context.hasSameType(ParamType, ArgType)) {
// Coarse classification of an abs argument/parameter: integer, floating,
// or complex.
8681 enum AbsoluteValueKind {
// Classifies T into one of the AbsoluteValueKind categories; any other
// type is a caller error.
8687 static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
8688 if (T->isIntegralOrEnumerationType())
8690 if (T->isRealFloatingType())
8691 return AVK_Floating;
8692 if (T->isAnyComplexType())
8695 llvm_unreachable("Type not integer, floating, or complex");
8698 // Changes the absolute value function to a different type. Preserves whether
8699 // the function is a builtin.
// Returns the smallest member of the target family; getBestAbsFunction is
// then used to widen it to fit the argument.
8700 static unsigned changeAbsFunction(unsigned AbsKind,
8701 AbsoluteValueKind ValueKind) {
8702 switch (ValueKind) {
// Target family: integer abs.
8707 case Builtin::BI__builtin_fabsf:
8708 case Builtin::BI__builtin_fabs:
8709 case Builtin::BI__builtin_fabsl:
8710 case Builtin::BI__builtin_cabsf:
8711 case Builtin::BI__builtin_cabs:
8712 case Builtin::BI__builtin_cabsl:
8713 return Builtin::BI__builtin_abs;
8714 case Builtin::BIfabsf:
8715 case Builtin::BIfabs:
8716 case Builtin::BIfabsl:
8717 case Builtin::BIcabsf:
8718 case Builtin::BIcabs:
8719 case Builtin::BIcabsl:
8720 return Builtin::BIabs;
// Target family: floating-point fabs.
8726 case Builtin::BI__builtin_abs:
8727 case Builtin::BI__builtin_labs:
8728 case Builtin::BI__builtin_llabs:
8729 case Builtin::BI__builtin_cabsf:
8730 case Builtin::BI__builtin_cabs:
8731 case Builtin::BI__builtin_cabsl:
8732 return Builtin::BI__builtin_fabsf;
8733 case Builtin::BIabs:
8734 case Builtin::BIlabs:
8735 case Builtin::BIllabs:
8736 case Builtin::BIcabsf:
8737 case Builtin::BIcabs:
8738 case Builtin::BIcabsl:
8739 return Builtin::BIfabsf;
// Target family: complex cabs.
8745 case Builtin::BI__builtin_abs:
8746 case Builtin::BI__builtin_labs:
8747 case Builtin::BI__builtin_llabs:
8748 case Builtin::BI__builtin_fabsf:
8749 case Builtin::BI__builtin_fabs:
8750 case Builtin::BI__builtin_fabsl:
8751 return Builtin::BI__builtin_cabsf:
8752 case Builtin::BIabs:
8753 case Builtin::BIlabs:
8754 case Builtin::BIllabs:
8755 case Builtin::BIfabsf:
8756 case Builtin::BIfabs:
8757 case Builtin::BIfabsl:
8758 return Builtin::BIcabsf;
8761 llvm_unreachable("Unable to convert function");
// Returns the builtin ID if FDecl is one of the recognized abs-family
// functions (builtin or library form); otherwise falls through the switch's
// elided default path.
8764 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
8765 const IdentifierInfo *FnInfo = FDecl->getIdentifier();
8769 switch (FDecl->getBuiltinID()) {
8772 case Builtin::BI__builtin_abs:
8773 case Builtin::BI__builtin_fabs:
8774 case Builtin::BI__builtin_fabsf:
8775 case Builtin::BI__builtin_fabsl:
8776 case Builtin::BI__builtin_labs:
8777 case Builtin::BI__builtin_llabs:
8778 case Builtin::BI__builtin_cabs:
8779 case Builtin::BI__builtin_cabsf:
8780 case Builtin::BI__builtin_cabsl:
8781 case Builtin::BIabs:
8782 case Builtin::BIlabs:
8783 case Builtin::BIllabs:
8784 case Builtin::BIfabs:
8785 case Builtin::BIfabsf:
8786 case Builtin::BIfabsl:
8787 case Builtin::BIcabs:
8788 case Builtin::BIcabsf:
8789 case Builtin::BIcabsl:
8790 return FDecl->getBuiltinID();
8792 llvm_unreachable("Unknown Builtin type");
8795 // If the replacement is valid, emit a note with replacement function.
8796 // Additionally, suggest including the proper header if not already included.
// In C++ (non-complex) the suggestion is std::abs; otherwise the C builtin
// family member AbsKind. A header hint is added only when the replacement
// is not already declared/visible.
8797 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
8798 unsigned AbsKind, QualType ArgType) {
8799 bool EmitHeaderHint = true;
8800 const char *HeaderName = nullptr;
8801 const char *FunctionName = nullptr;
8802 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
8803 FunctionName = "std::abs";
8804 if (ArgType->isIntegralOrEnumerationType()) {
8805 HeaderName = "cstdlib";
8806 } else if (ArgType->isRealFloatingType()) {
8807 HeaderName = "cmath";
8809 llvm_unreachable("Invalid Type");
8812 // Lookup all std::abs
8813 if (NamespaceDecl *Std = S.getStdNamespace()) {
8814 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
8815 R.suppressDiagnostics();
8816 S.LookupQualifiedName(R, Std);
8818 for (const auto *I : R) {
8819 const FunctionDecl *FDecl = nullptr;
8820 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
8821 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
8823 FDecl = dyn_cast<FunctionDecl>(I);
8828 // Found std::abs(), check that they are the right ones.
8829 if (FDecl->getNumParams() != 1)
8832 // Check that the parameter type can handle the argument.
8833 QualType ParamType = FDecl->getParamDecl(0)->getType();
8834 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
8835 S.Context.getTypeSize(ArgType) <=
8836 S.Context.getTypeSize(ParamType)) {
8837 // Found a function, don't need the header hint.
8838 EmitHeaderHint = false;
// C (or complex) path: suggest the concrete builtin family member.
8844 FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
8845 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);
8848 DeclarationName DN(&S.Context.Idents.get(FunctionName));
8849 LookupResult R(S, DN, Loc, Sema::LookupAnyName);
8850 R.suppressDiagnostics();
8851 S.LookupName(R, S.getCurScope());
8853 if (R.isSingleResult()) {
8854 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
8855 if (FD && FD->getBuiltinID() == AbsKind) {
8856 EmitHeaderHint = false;
8860 } else if (!R.empty()) {
8866 S.Diag(Loc, diag::note_replace_abs_function)
8867 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);
8872 if (!EmitHeaderHint)
8875 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
// Returns true if FDecl is a function named Str declared directly in
// namespace std (compile-time string length avoids a strlen).
8879 template <std::size_t StrLen>
8880 static bool IsStdFunction(const FunctionDecl *FDecl,
8881 const char (&Str)[StrLen]) {
8884 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
8886 if (!FDecl->isInStdNamespace())
8892 // Warn when using the wrong abs() function.
// Diagnoses: abs of an unsigned value (suggest removal), abs of a
// pointer/array/function, an abs variant too narrow for its argument, and
// an abs variant of the wrong kind (int/float/complex), each with a
// replacement note via emitReplacement.
8893 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
8894 const FunctionDecl *FDecl) {
8895 if (Call->getNumArgs() != 1)
8898 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
8899 bool IsStdAbs = IsStdFunction(FDecl, "abs");
8900 if (AbsKind == 0 && !IsStdAbs)
// ArgType: the argument's type before implicit conversions; ParamType:
// the type the call actually converts it to.
8903 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
8904 QualType ParamType = Call->getArg(0)->getType();
8906 // Unsigned types cannot be negative. Suggest removing the absolute value
8908 if (ArgType->isUnsignedIntegerType()) {
8909 const char *FunctionName =
8910 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
8911 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
8912 Diag(Call->getExprLoc(), diag::note_remove_abs)
8914 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
8918 // Taking the absolute value of a pointer is very suspicious, they probably
8919 // wanted to index into an array, dereference a pointer, call a function, etc.
8920 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
8921 unsigned DiagType = 0;
8922 if (ArgType->isFunctionType())
8924 else if (ArgType->isArrayType())
8927 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
8931 // std::abs has overloads which prevent most of the absolute value problems
8936 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
8937 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
8939 // The argument and parameter are the same kind. Check if they are the right
8941 if (ArgValueKind == ParamValueKind) {
8942 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
8945 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
8946 Diag(Call->getExprLoc(), diag::warn_abs_too_small)
8947 << FDecl << ArgType << ParamType;
8949 if (NewAbsKind == 0)
8952 emitReplacement(*this, Call->getExprLoc(),
8953 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
8957 // ArgValueKind != ParamValueKind
8958 // The wrong type of absolute value function was used. Attempt to find the
8960 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
8961 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
8962 if (NewAbsKind == 0)
8965 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
8966 << FDecl << ParamValueKind << ArgValueKind;
8968 emitReplacement(*this, Call->getExprLoc(),
8969 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
8972 //===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
// Warns on std::max<unsigned-T>(0, x) / std::max<unsigned-T>(x, 0): the
// zero argument is a no-op for unsigned types, so the call likely hides an
// intended signed comparison. Suggests removing the call entirely.
8973 void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
8974 const FunctionDecl *FDecl) {
8975 if (!Call || !FDecl) return;
8977 // Ignore template specializations and macros.
8978 if (inTemplateInstantiation()) return;
8979 if (Call->getExprLoc().isMacroID()) return;
8981 // Only care about the one template argument, two function parameter std::max
8982 if (Call->getNumArgs() != 2) return;
8983 if (!IsStdFunction(FDecl, "max")) return;
8984 const auto * ArgList = FDecl->getTemplateSpecializationArgs();
8985 if (!ArgList) return;
8986 if (ArgList->size() != 1) return;
8988 // Check that template type argument is unsigned integer.
8989 const auto& TA = ArgList->get(0);
8990 if (TA.getKind() != TemplateArgument::Type) return;
8991 QualType ArgType = TA.getAsType();
8992 if (!ArgType->isUnsignedIntegerType()) return;
8994 // See if either argument is a literal zero.
8995 auto IsLiteralZeroArg = [](const Expr* E) -> bool {
8996 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
8997 if (!MTE) return false;
8998 const auto *Num = dyn_cast<IntegerLiteral>(MTE->GetTemporaryExpr());
8999 if (!Num) return false;
9000 if (Num->getValue() != 0) return false;
9004 const Expr *FirstArg = Call->getArg(0);
9005 const Expr *SecondArg = Call->getArg(1);
9006 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
9007 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);
9009 // Only warn when exactly one argument is zero.
9010 if (IsFirstArgZero == IsSecondArgZero) return;
9012 SourceRange FirstRange = FirstArg->getSourceRange();
9013 SourceRange SecondRange = SecondArg->getSourceRange();
9015 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;
9017 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
9018 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;
9020 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
9021 SourceRange RemovalRange;
9022 if (IsFirstArgZero) {
9023 RemovalRange = SourceRange(FirstRange.getBegin(),
9024 SecondRange.getBegin().getLocWithOffset(-1));
9026 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
9027 SecondRange.getEnd());
9030 Diag(Call->getExprLoc(), diag::note_remove_max_call)
9031 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
9032 << FixItHint::CreateRemoval(RemovalRange);
9035 //===--- CHECK: Standard memory functions ---------------------------------===//
9037 /// Takes the expression passed to the size_t parameter of functions
9038 /// such as memcmp, strncat, etc and warns if it's a comparison.
9040 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
// Returns a value (elided here) and offers two fix-its: move the call's
// closing paren before the comparison, or cast the size to (size_t) to
// silence the warning.
9041 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
9042 IdentifierInfo *FnName,
9043 SourceLocation FnLoc,
9044 SourceLocation RParenLoc) {
9045 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
9049 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
9050 if (!Size->isComparisonOp() && !Size->isLogicalOp())
9053 SourceRange SizeRange = Size->getSourceRange();
9054 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
9055 << SizeRange << FnName;
9056 S.Diag(FnLoc, diag::note_memsize_comparison_paren)
9058 << FixItHint::CreateInsertion(
9059 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
9060 << FixItHint::CreateRemoval(RParenLoc);
9061 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
9062 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
9063 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
9069 /// Determine whether the given type is or contains a dynamic class type
9070 /// (e.g., whether it has a vtable).
// Returns the dynamic class found (or recurses into fields); IsContained
// distinguishes "T itself is dynamic" from "T contains a dynamic member".
9071 static const CXXRecordDecl *getContainedDynamicClass(QualType T,
9072 bool &IsContained) {
9073 // Look through array types while ignoring qualifiers.
9074 const Type *Ty = T->getBaseElementTypeUnsafe();
9075 IsContained = false;
9077 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
9078 RD = RD ? RD->getDefinition() : nullptr;
9079 if (!RD || RD->isInvalidDecl())
9082 if (RD->isDynamicClass())
9085 // Check all the fields. If any bases were dynamic, the class is dynamic.
9086 // It's impossible for a class to transitively contain itself by value, so
9087 // infinite recursion is impossible.
9088 for (auto *FD : RD->fields()) {
9090 if (const CXXRecordDecl *ContainedRD =
9091 getContainedDynamicClass(FD->getType(), SubContained)) {
9100 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
9101 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E))
9102 if (Unary->getKind() == UETT_SizeOf)
9107 /// If E is a sizeof expression, returns its argument expression,
9108 /// otherwise returns NULL.
9109 static const Expr *getSizeOfExprArg(const Expr *E) {
9110 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
9111 if (!SizeOf->isArgumentType())
9112 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
9116 /// If E is a sizeof expression, returns its argument type.
9117 static QualType getSizeOfArgType(const Expr *E) {
9118 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
9119 return SizeOf->getTypeOfArgument();
9125 struct SearchNonTrivialToInitializeField
9126 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
9128 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;
9130 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}
9132 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
9133 SourceLocation SL) {
9134 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
9135 asDerived().visitArray(PDIK, AT, SL);
9139 Super::visitWithKind(PDIK, FT, SL);
9142 void visitARCStrong(QualType FT, SourceLocation SL) {
9143 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
9145 void visitARCWeak(QualType FT, SourceLocation SL) {
9146 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
9148 void visitStruct(QualType FT, SourceLocation SL) {
9149 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
9150 visit(FD->getType(), FD->getLocation());
9152 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
9153 const ArrayType *AT, SourceLocation SL) {
9154 visit(getContext().getBaseElementType(AT), SL);
9156 void visitTrivial(QualType FT, SourceLocation SL) {}
9158 static void diag(QualType RT, const Expr *E, Sema &S) {
9159 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
9162 ASTContext &getContext() { return S.getASTContext(); }
9168 struct SearchNonTrivialToCopyField
9169 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
9170 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;
9172 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}
9174 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
9175 SourceLocation SL) {
9176 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
9177 asDerived().visitArray(PCK, AT, SL);
9181 Super::visitWithKind(PCK, FT, SL);
9184 void visitARCStrong(QualType FT, SourceLocation SL) {
9185 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
9187 void visitARCWeak(QualType FT, SourceLocation SL) {
9188 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
9190 void visitStruct(QualType FT, SourceLocation SL) {
9191 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
9192 visit(FD->getType(), FD->getLocation());
9194 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
9195 SourceLocation SL) {
9196 visit(getContext().getBaseElementType(AT), SL);
9198 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
9199 SourceLocation SL) {}
9200 void visitTrivial(QualType FT, SourceLocation SL) {}
9201 void visitVolatileTrivial(QualType FT, SourceLocation SL) {}
9203 static void diag(QualType RT, const Expr *E, Sema &S) {
9204 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
9207 ASTContext &getContext() { return S.getASTContext(); }
9215 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
9216 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
9217 SizeofExpr = SizeofExpr->IgnoreParenImpCasts();
9219 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
9220 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
9223 return doesExprLikelyComputeSize(BO->getLHS()) ||
9224 doesExprLikelyComputeSize(BO->getRHS());
9227 return getAsSizeOfExpr(SizeofExpr) != nullptr;
9230 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
9238 /// This should return true for the first call to foo, but not for the second
9239 /// (regardless of whether foo is a macro or function).
9240 static bool isArgumentExpandedFromMacro(SourceManager &SM,
9241 SourceLocation CallLoc,
9242 SourceLocation ArgLoc) {
9243 if (!CallLoc.isMacroID())
9244 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);
9246 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
9247 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
9250 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
9251 /// last two arguments transposed.
9252 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
9253 if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
9256 const Expr *SizeArg =
9257 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();
9259 auto isLiteralZero = [](const Expr *E) {
9260 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0;
9263 // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
9264 SourceLocation CallLoc = Call->getRParenLoc();
9265 SourceManager &SM = S.getSourceManager();
9266 if (isLiteralZero(SizeArg) &&
9267 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {
9269 SourceLocation DiagLoc = SizeArg->getExprLoc();
9271 // Some platforms #define bzero to __builtin_memset. See if this is the
9272 // case, and if so, emit a better diagnostic.
9273 if (BId == Builtin::BIbzero ||
9274 (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
9275 CallLoc, SM, S.getLangOpts()) == "bzero")) {
9276 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
9277 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
9278 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
9279 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
9280 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
9285 // If the second argument to a memset is a sizeof expression and the third
9286 // isn't, this is also likely an error. This should catch
9287 // 'memset(buf, sizeof(buf), 0xff)'.
9288 if (BId == Builtin::BImemset &&
9289 doesExprLikelyComputeSize(Call->getArg(1)) &&
9290 !doesExprLikelyComputeSize(Call->getArg(2))) {
9291 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
9292 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
9293 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
9298 /// Check for dangerous or invalid arguments to memset().
9300 /// This issues warnings on known problematic, dangerous or unspecified
9301 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
9304 /// \param Call The call expression to diagnose.
9305 void Sema::CheckMemaccessArguments(const CallExpr *Call,
9307 IdentifierInfo *FnName) {
9310 // It is possible to have a non-standard definition of memset. Validate
9311 // we have enough arguments, and if not, abort further checking.
9312 unsigned ExpectedNumArgs =
9313 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
9314 if (Call->getNumArgs() < ExpectedNumArgs)
9317 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
9318 BId == Builtin::BIstrndup ? 1 : 2);
9320 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
9321 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();
9323 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
9324 Call->getBeginLoc(), Call->getRParenLoc()))
9327 // Catch cases like 'memset(buf, sizeof(buf), 0)'.
9328 CheckMemaccessSize(*this, BId, Call);
9330 // We have special checking when the length is a sizeof expression.
9331 QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
9332 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
9333 llvm::FoldingSetNodeID SizeOfArgID;
9335 // Although widely used, 'bzero' is not a standard function. Be more strict
9336 // with the argument types before allowing diagnostics and only allow the
9337 // form bzero(ptr, sizeof(...)).
9338 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
9339 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
9342 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
9343 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
9344 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();
9346 QualType DestTy = Dest->getType();
9348 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
9349 PointeeTy = DestPtrTy->getPointeeType();
9351 // Never warn about void type pointers. This can be used to suppress
9353 if (PointeeTy->isVoidType())
9356 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
9357 // actually comparing the expressions for equality. Because computing the
9358 // expression IDs can be expensive, we only do this if the diagnostic is
9361 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
9362 SizeOfArg->getExprLoc())) {
9363 // We only compute IDs for expressions if the warning is enabled, and
9364 // cache the sizeof arg's ID.
9365 if (SizeOfArgID == llvm::FoldingSetNodeID())
9366 SizeOfArg->Profile(SizeOfArgID, Context, true);
9367 llvm::FoldingSetNodeID DestID;
9368 Dest->Profile(DestID, Context, true);
9369 if (DestID == SizeOfArgID) {
9370 // TODO: For strncpy() and friends, this could suggest sizeof(dst)
9371 // over sizeof(src) as well.
9372 unsigned ActionIdx = 0; // Default is to suggest dereferencing.
9373 StringRef ReadableName = FnName->getName();
9375 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
9376 if (UnaryOp->getOpcode() == UO_AddrOf)
9377 ActionIdx = 1; // If its an address-of operator, just remove it.
9378 if (!PointeeTy->isIncompleteType() &&
9379 (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
9380 ActionIdx = 2; // If the pointee's size is sizeof(char),
9381 // suggest an explicit length.
9383 // If the function is defined as a builtin macro, do not show macro
9385 SourceLocation SL = SizeOfArg->getExprLoc();
9386 SourceRange DSR = Dest->getSourceRange();
9387 SourceRange SSR = SizeOfArg->getSourceRange();
9388 SourceManager &SM = getSourceManager();
9390 if (SM.isMacroArgExpansion(SL)) {
9391 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
9392 SL = SM.getSpellingLoc(SL);
9393 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
9394 SM.getSpellingLoc(DSR.getEnd()));
9395 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
9396 SM.getSpellingLoc(SSR.getEnd()));
9399 DiagRuntimeBehavior(SL, SizeOfArg,
9400 PDiag(diag::warn_sizeof_pointer_expr_memaccess)
9406 DiagRuntimeBehavior(SL, SizeOfArg,
9407 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
9415 // Also check for cases where the sizeof argument is the exact same
9416 // type as the memory argument, and where it points to a user-defined
9418 if (SizeOfArgTy != QualType()) {
9419 if (PointeeTy->isRecordType() &&
9420 Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
9421 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
9422 PDiag(diag::warn_sizeof_pointer_type_memaccess)
9423 << FnName << SizeOfArgTy << ArgIdx
9424 << PointeeTy << Dest->getSourceRange()
9425 << LenExpr->getSourceRange());
9429 } else if (DestTy->isArrayType()) {
9433 if (PointeeTy == QualType())
9436 // Always complain about dynamic classes.
9438 if (const CXXRecordDecl *ContainedRD =
9439 getContainedDynamicClass(PointeeTy, IsContained)) {
9441 unsigned OperationType = 0;
9442 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
9443 // "overwritten" if we're warning about the destination for any call
9444 // but memcmp; otherwise a verb appropriate to the call.
9445 if (ArgIdx != 0 || IsCmp) {
9446 if (BId == Builtin::BImemcpy)
9448 else if(BId == Builtin::BImemmove)
9454 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
9455 PDiag(diag::warn_dyn_class_memaccess)
9456 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
9457 << IsContained << ContainedRD << OperationType
9458 << Call->getCallee()->getSourceRange());
9459 } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
9460 BId != Builtin::BImemset)
9461 DiagRuntimeBehavior(
9462 Dest->getExprLoc(), Dest,
9463 PDiag(diag::warn_arc_object_memaccess)
9464 << ArgIdx << FnName << PointeeTy
9465 << Call->getCallee()->getSourceRange());
9466 else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
9467 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
9468 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
9469 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
9470 PDiag(diag::warn_cstruct_memaccess)
9471 << ArgIdx << FnName << PointeeTy << 0);
9472 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
9473 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
9474 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
9475 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
9476 PDiag(diag::warn_cstruct_memaccess)
9477 << ArgIdx << FnName << PointeeTy << 1);
9478 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
9485 DiagRuntimeBehavior(
9486 Dest->getExprLoc(), Dest,
9487 PDiag(diag::note_bad_memaccess_silence)
9488 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
9493 // A little helper routine: ignore addition and subtraction of integer literals.
9494 // This intentionally does not ignore all integer constant expressions because
9495 // we don't want to remove sizeof().
9496 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
9497 Ex = Ex->IgnoreParenCasts();
9500 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex);
9501 if (!BO || !BO->isAdditiveOp())
9504 const Expr *RHS = BO->getRHS()->IgnoreParenCasts();
9505 const Expr *LHS = BO->getLHS()->IgnoreParenCasts();
9507 if (isa<IntegerLiteral>(RHS))
9509 else if (isa<IntegerLiteral>(LHS))
9518 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
9519 ASTContext &Context) {
9520 // Only handle constant-sized or VLAs, but not flexible members.
9521 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) {
9522 // Only issue the FIXIT for arrays of size > 1.
9523 if (CAT->getSize().getSExtValue() <= 1)
9525 } else if (!Ty->isVariableArrayType()) {
9531 // Warn if the user has made the 'size' argument to strlcpy or strlcat
9532 // be the size of the source, instead of the destination.
9533 void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
9534 IdentifierInfo *FnName) {
9536 // Don't crash if the user has the wrong number of arguments
9537 unsigned NumArgs = Call->getNumArgs();
9538 if ((NumArgs != 3) && (NumArgs != 4))
9541 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
9542 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context);
9543 const Expr *CompareWithSrc = nullptr;
9545 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName,
9546 Call->getBeginLoc(), Call->getRParenLoc()))
9549 // Look for 'strlcpy(dst, x, sizeof(x))'
9550 if (const Expr *Ex = getSizeOfExprArg(SizeArg))
9551 CompareWithSrc = Ex;
9553 // Look for 'strlcpy(dst, x, strlen(x))'
9554 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
9555 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
9556 SizeCall->getNumArgs() == 1)
9557 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
9561 if (!CompareWithSrc)
9564 // Determine if the argument to sizeof/strlen is equal to the source
9565 // argument. In principle there's all kinds of things you could do
9566 // here, for instance creating an == expression and evaluating it with
9567 // EvaluateAsBooleanCondition, but this uses a more direct technique:
9568 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg);
9572 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc);
9573 if (!CompareWithSrcDRE ||
9574 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
9577 const Expr *OriginalSizeArg = Call->getArg(2);
9578 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size)
9579 << OriginalSizeArg->getSourceRange() << FnName;
9581 // Output a FIXIT hint if the destination is an array (rather than a
9582 // pointer to an array). This could be enhanced to handle some
9583 // pointers if we know the actual size, like if DstArg is 'array+2'
9584 // we could say 'sizeof(array)-2'.
9585 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
9586 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
9589 SmallString<128> sizeString;
9590 llvm::raw_svector_ostream OS(sizeString);
9592 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
9595 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
9596 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
9600 /// Check if two expressions refer to the same declaration.
9601 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
9602 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
9603 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
9604 return D1->getDecl() == D2->getDecl();
9608 static const Expr *getStrlenExprArg(const Expr *E) {
9609 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
9610 const FunctionDecl *FD = CE->getDirectCallee();
9611 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
9613 return CE->getArg(0)->IgnoreParenCasts();
9618 // Warn on anti-patterns as the 'size' argument to strncat.
9619 // The correct size argument should look like following:
9620 // strncat(dst, src, sizeof(dst) - strlen(dest) - 1);
9621 void Sema::CheckStrncatArguments(const CallExpr *CE,
9622 IdentifierInfo *FnName) {
9623 // Don't crash if the user has the wrong number of arguments.
9624 if (CE->getNumArgs() < 3)
9626 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
9627 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
9628 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
9630 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
9631 CE->getRParenLoc()))
9634 // Identify common expressions, which are wrongly used as the size argument
9635 // to strncat and may lead to buffer overflows.
9636 unsigned PatternType = 0;
9637 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
9639 if (referToTheSameDecl(SizeOfArg, DstArg))
9642 else if (referToTheSameDecl(SizeOfArg, SrcArg))
9644 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
9645 if (BE->getOpcode() == BO_Sub) {
9646 const Expr *L = BE->getLHS()->IgnoreParenCasts();
9647 const Expr *R = BE->getRHS()->IgnoreParenCasts();
9648 // - sizeof(dst) - strlen(dst)
9649 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
9650 referToTheSameDecl(DstArg, getStrlenExprArg(R)))
9652 // - sizeof(src) - (anything)
9653 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
9658 if (PatternType == 0)
9661 // Generate the diagnostic.
9662 SourceLocation SL = LenArg->getBeginLoc();
9663 SourceRange SR = LenArg->getSourceRange();
9664 SourceManager &SM = getSourceManager();
9666 // If the function is defined as a builtin macro, do not show macro expansion.
9667 if (SM.isMacroArgExpansion(SL)) {
9668 SL = SM.getSpellingLoc(SL);
9669 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
9670 SM.getSpellingLoc(SR.getEnd()));
9673 // Check if the destination is an array (rather than a pointer to an array).
9674 QualType DstTy = DstArg->getType();
9675 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
9677 if (!isKnownSizeArray) {
9678 if (PatternType == 1)
9679 Diag(SL, diag::warn_strncat_wrong_size) << SR;
9681 Diag(SL, diag::warn_strncat_src_size) << SR;
9685 if (PatternType == 1)
9686 Diag(SL, diag::warn_strncat_large_size) << SR;
9688 Diag(SL, diag::warn_strncat_src_size) << SR;
9690 SmallString<128> sizeString;
9691 llvm::raw_svector_ostream OS(sizeString);
9693 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
9696 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
9699 Diag(SL, diag::note_strncat_wrong_size)
9700 << FixItHint::CreateReplacement(SR, OS.str());
9704 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
9705 SourceLocation ReturnLoc,
9707 const AttrVec *Attrs,
9708 const FunctionDecl *FD) {
9709 // Check if the return value is null but should not be.
9710 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
9711 (!isObjCMethod && isNonNullType(Context, lhsType))) &&
9712 CheckNonNullExpr(*this, RetValExp))
9713 Diag(ReturnLoc, diag::warn_null_ret)
9714 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
9716 // C++11 [basic.stc.dynamic.allocation]p4:
9717 // If an allocation function declared with a non-throwing
9718 // exception-specification fails to allocate storage, it shall return
9719 // a null pointer. Any other allocation function that fails to allocate
9720 // storage shall indicate failure only by throwing an exception [...]
9722 OverloadedOperatorKind Op = FD->getOverloadedOperator();
9723 if (Op == OO_New || Op == OO_Array_New) {
9724 const FunctionProtoType *Proto
9725 = FD->getType()->castAs<FunctionProtoType>();
9726 if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
9727 CheckNonNullExpr(*this, RetValExp))
9728 Diag(ReturnLoc, diag::warn_operator_new_returns_null)
9729 << FD << getLangOpts().CPlusPlus11;
9734 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
9736 /// Check for comparisons of floating point operands using != and ==.
9737 /// Issue a warning if these are no self-comparisons, as they are not likely
9738 /// to do what the programmer intended.
9739 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
9740 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
9741 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
9743 // Special case: check for x == x (which is OK).
9744 // Do not emit warnings for such cases.
9745 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
9746 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
9747 if (DRL->getDecl() == DRR->getDecl())
9750 // Special case: check for comparisons against literals that can be exactly
9751 // represented by APFloat. In such cases, do not emit a warning. This
9752 // is a heuristic: often comparison against such literals are used to
9753 // detect if a value in a variable has not changed. This clearly can
9754 // lead to false negatives.
9755 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
9759 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
9763 // Check for comparisons with builtin types.
9764 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
9765 if (CL->getBuiltinCallee())
9768 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
9769 if (CR->getBuiltinCallee())
9772 // Emit the diagnostic.
9773 Diag(Loc, diag::warn_floatingpoint_eq)
9774 << LHS->getSourceRange() << RHS->getSourceRange();
9777 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
9778 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
9782 /// Structure recording the 'active' range of an integer-valued
9785 /// The number of bits active in the int.
9788 /// True if the int is known not to have negative values.
9791 IntRange(unsigned Width, bool NonNegative)
9792 : Width(Width), NonNegative(NonNegative) {}
9794 /// Returns the range of the bool type.
9795 static IntRange forBoolType() {
9796 return IntRange(1, true);
9799 /// Returns the range of an opaque value of the given integral type.
9800 static IntRange forValueOfType(ASTContext &C, QualType T) {
9801 return forValueOfCanonicalType(C,
9802 T->getCanonicalTypeInternal().getTypePtr());
9805 /// Returns the range of an opaque value of a canonical integral type.
9806 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
9807 assert(T->isCanonicalUnqualified());
9809 if (const VectorType *VT = dyn_cast<VectorType>(T))
9810 T = VT->getElementType().getTypePtr();
9811 if (const ComplexType *CT = dyn_cast<ComplexType>(T))
9812 T = CT->getElementType().getTypePtr();
9813 if (const AtomicType *AT = dyn_cast<AtomicType>(T))
9814 T = AT->getValueType().getTypePtr();
9816 if (!C.getLangOpts().CPlusPlus) {
9817 // For enum types in C code, use the underlying datatype.
9818 if (const EnumType *ET = dyn_cast<EnumType>(T))
9819 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
9820 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) {
9821 // For enum types in C++, use the known bit width of the enumerators.
9822 EnumDecl *Enum = ET->getDecl();
9823 // In C++11, enums can have a fixed underlying type. Use this type to
9824 // compute the range.
9825 if (Enum->isFixed()) {
9826 return IntRange(C.getIntWidth(QualType(T, 0)),
9827 !ET->isSignedIntegerOrEnumerationType());
9830 unsigned NumPositive = Enum->getNumPositiveBits();
9831 unsigned NumNegative = Enum->getNumNegativeBits();
9833 if (NumNegative == 0)
9834 return IntRange(NumPositive, true/*NonNegative*/);
9836 return IntRange(std::max(NumPositive + 1, NumNegative),
9837 false/*NonNegative*/);
9840 const BuiltinType *BT = cast<BuiltinType>(T);
9841 assert(BT->isInteger());
9843 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
9846 /// Returns the "target" range of a canonical integral type, i.e.
9847 /// the range of values expressible in the type.
9849 /// This matches forValueOfCanonicalType except that enums have the
9850 /// full range of their type, not the range of their enumerators.
9851 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
9852 assert(T->isCanonicalUnqualified());
9854 if (const VectorType *VT = dyn_cast<VectorType>(T))
9855 T = VT->getElementType().getTypePtr();
9856 if (const ComplexType *CT = dyn_cast<ComplexType>(T))
9857 T = CT->getElementType().getTypePtr();
9858 if (const AtomicType *AT = dyn_cast<AtomicType>(T))
9859 T = AT->getValueType().getTypePtr();
9860 if (const EnumType *ET = dyn_cast<EnumType>(T))
9861 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
9863 const BuiltinType *BT = cast<BuiltinType>(T);
9864 assert(BT->isInteger());
9866 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
9869 /// Returns the supremum of two ranges: i.e. their conservative merge.
9870 static IntRange join(IntRange L, IntRange R) {
9871 return IntRange(std::max(L.Width, R.Width),
9872 L.NonNegative && R.NonNegative);
9875 /// Returns the infinum of two ranges: i.e. their aggressive merge.
9876 static IntRange meet(IntRange L, IntRange R) {
9877 return IntRange(std::min(L.Width, R.Width),
9878 L.NonNegative || R.NonNegative);
9884 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
9885 unsigned MaxWidth) {
9886 if (value.isSigned() && value.isNegative())
9887 return IntRange(value.getMinSignedBits(), false);
9889 if (value.getBitWidth() > MaxWidth)
9890 value = value.trunc(MaxWidth);
9892 // isNonNegative() just checks the sign bit without considering
9894 return IntRange(value.getActiveBits(), true);
9897 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
9898 unsigned MaxWidth) {
9900 return GetValueRange(C, result.getInt(), MaxWidth);
9902 if (result.isVector()) {
9903 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
9904 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
9905 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
9906 R = IntRange::join(R, El);
9911 if (result.isComplexInt()) {
9912 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
9913 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
9914 return IntRange::join(R, I);
9917 // This can happen with lossless casts to intptr_t of "based" lvalues.
9918 // Assume it might use arbitrary bits.
9919 // FIXME: The only reason we need to pass the type in here is to get
9920 // the sign right on this one case. It would be nice if APValue
9922 assert(result.isLValue() || result.isAddrLabelDiff());
9923 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
9926 static QualType GetExprType(const Expr *E) {
9927 QualType Ty = E->getType();
9928 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
9929 Ty = AtomicRHS->getValueType();
9933 /// Pseudo-evaluate the given integer expression, estimating the
9934 /// range of values it might take.
9936 /// \param MaxWidth - the width to which the value will be truncated
9937 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
9938 bool InConstantContext) {
9939 E = E->IgnoreParens();
9941 // Try a full evaluation first.
9942 Expr::EvalResult result;
9943 if (E->EvaluateAsRValue(result, C, InConstantContext))
9944 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);
9946 // I think we only want to look through implicit casts here; if the
9947 // user has an explicit widening cast, we should treat the value as
9948 // being of the new, wider type.
9949 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
9950 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
9951 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext);
9953 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));
9955 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
9956 CE->getCastKind() == CK_BooleanToSignedIntegral;
9958 // Assume that non-integer casts can span the full range of the type.
9960 return OutputTypeRange;
9962 IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
9963 std::min(MaxWidth, OutputTypeRange.Width),
9966 // Bail out if the subexpr's range is as wide as the cast type.
9967 if (SubRange.Width >= OutputTypeRange.Width)
9968 return OutputTypeRange;
9970 // Otherwise, we take the smaller width, and we're non-negative if
9971 // either the output type or the subexpr is.
9972 return IntRange(SubRange.Width,
9973 SubRange.NonNegative || OutputTypeRange.NonNegative);
9976 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
9977 // If we can fold the condition, just take that operand.
9979 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
9980 return GetExprRange(C,
9981 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
9982 MaxWidth, InConstantContext);
9984 // Otherwise, conservatively merge.
9986 GetExprRange(C, CO->getTrueExpr(), MaxWidth, InConstantContext);
9988 GetExprRange(C, CO->getFalseExpr(), MaxWidth, InConstantContext);
9989 return IntRange::join(L, R);
9992 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
9993 switch (BO->getOpcode()) {
9995 llvm_unreachable("builtin <=> should have class type");
9997 // Boolean-valued operations are single-bit and positive.
10006 return IntRange::forBoolType();
10008 // The type of the assignments is the type of the LHS, so the RHS
10009 // is not necessarily the same type.
10017 // TODO: bitfields?
10018 return IntRange::forValueOfType(C, GetExprType(E));
10020 // Simple assignments just pass through the RHS, which will have
10021 // been coerced to the LHS type.
10023 // TODO: bitfields?
10024 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext);
10026 // Operations with opaque sources are black-listed.
10029 return IntRange::forValueOfType(C, GetExprType(E));
10031 // Bitwise-and uses the *infinum* of the two source ranges.
10034 return IntRange::meet(
10035 GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext),
10036 GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext));
10038 // Left shift gets black-listed based on a judgement call.
10040 // ...except that we want to treat '1 << (blah)' as logically
10041 // positive. It's an important idiom.
10042 if (IntegerLiteral *I
10043 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
10044 if (I->getValue() == 1) {
10045 IntRange R = IntRange::forValueOfType(C, GetExprType(E));
10046 return IntRange(R.Width, /*NonNegative*/ true);
10052 return IntRange::forValueOfType(C, GetExprType(E));
10054 // Right shift by a constant can narrow its left argument.
10056 case BO_ShrAssign: {
10057 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext);
10059 // If the shift amount is a positive constant, drop the width by
10061 llvm::APSInt shift;
10062 if (BO->getRHS()->isIntegerConstantExpr(shift, C) &&
10063 shift.isNonNegative()) {
10064 unsigned zext = shift.getZExtValue();
10065 if (zext >= L.Width)
10066 L.Width = (L.NonNegative ? 0 : 1);
10074 // Comma acts as its right operand.
10076 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext);
10078 // Black-list pointer subtractions.
10080 if (BO->getLHS()->getType()->isPointerType())
10081 return IntRange::forValueOfType(C, GetExprType(E));
10084 // The width of a division result is mostly determined by the size
10087 // Don't 'pre-truncate' the operands.
10088 unsigned opWidth = C.getIntWidth(GetExprType(E));
10089 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext);
10091 // If the divisor is constant, use that.
10092 llvm::APSInt divisor;
10093 if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) {
10094 unsigned log2 = divisor.logBase2(); // floor(log_2(divisor))
10095 if (log2 >= L.Width)
10096 L.Width = (L.NonNegative ? 0 : 1);
10098 L.Width = std::min(L.Width - log2, MaxWidth);
10102 // Otherwise, just use the LHS's width.
10103 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext);
10104 return IntRange(L.Width, L.NonNegative && R.NonNegative);
10107 // The result of a remainder can't be larger than the result of
10110 // Don't 'pre-truncate' the operands.
10111 unsigned opWidth = C.getIntWidth(GetExprType(E));
10112 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext);
10113 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext);
10115 IntRange meet = IntRange::meet(L, R);
10116 meet.Width = std::min(meet.Width, MaxWidth);
10120 // The default behavior is okay for these.
10128 // The default case is to treat the operation as if it were closed
10129 // on the narrowest type that encompasses both operands.
10130 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext);
10131 IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext);
10132 return IntRange::join(L, R);
10135 if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
10136 switch (UO->getOpcode()) {
10137 // Boolean-valued operations are white-listed.
10139 return IntRange::forBoolType();
10141 // Operations with opaque sources are black-listed.
10143 case UO_AddrOf: // should be impossible
10144 return IntRange::forValueOfType(C, GetExprType(E));
10147 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext);
10151 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
10152 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext);
10154 if (const auto *BitField = E->getSourceBitField())
10155 return IntRange(BitField->getBitWidthValue(C),
10156 BitField->getType()->isUnsignedIntegerOrEnumerationType());
10158 return IntRange::forValueOfType(C, GetExprType(E));
/// Convenience overload: compute the range of \p E, capping the result
/// width at the full bit width of E's own (promoted) type.
static IntRange GetExprRange(ASTContext &C, const Expr *E,
                             bool InConstantContext) {
  return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext);
10166 /// Checks whether the given value, which currently has the given
10167 /// source semantics, has the same value when coerced through the
10168 /// target semantics.
10169 static bool IsSameFloatAfterCast(const llvm::APFloat &value,
10170 const llvm::fltSemantics &Src,
10171 const llvm::fltSemantics &Tgt) {
10172 llvm::APFloat truncated = value;
10175 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
10176 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);
10178 return truncated.bitwiseIsEqual(value);
10181 /// Checks whether the given value, which currently has the given
10182 /// source semantics, has the same value when coerced through the
10183 /// target semantics.
10185 /// The value might be a vector of floats (or a complex number).
10186 static bool IsSameFloatAfterCast(const APValue &value,
10187 const llvm::fltSemantics &Src,
10188 const llvm::fltSemantics &Tgt) {
10189 if (value.isFloat())
10190 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);
10192 if (value.isVector()) {
10193 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
10194 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
10199 assert(value.isComplexFloat());
10200 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
10201 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
10204 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC);
10206 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
10207 // Suppress cases where we are comparing against an enum constant.
10208 if (const DeclRefExpr *DR =
10209 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
10210 if (isa<EnumConstantDecl>(DR->getDecl()))
10213 // Suppress cases where the value is expanded from a macro, unless that macro
10214 // is how a language represents a boolean literal. This is the case in both C
10215 // and Objective-C.
10216 SourceLocation BeginLoc = E->getBeginLoc();
10217 if (BeginLoc.isMacroID()) {
10218 StringRef MacroName = Lexer::getImmediateMacroName(
10219 BeginLoc, S.getSourceManager(), S.getLangOpts());
10220 return MacroName != "YES" && MacroName != "NO" &&
10221 MacroName != "true" && MacroName != "false";
10227 static bool isKnownToHaveUnsignedValue(Expr *E) {
10228 return E->getType()->isIntegerType() &&
10229 (!E->getType()->isSignedIntegerType() ||
10230 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
10234 /// The promoted range of values of a type. In general this has the
10235 /// following structure:
10237 /// |-----------| . . . |-----------|
10239 /// Min HoleMin HoleMax Max
10241 /// ... where there is only a hole if a signed type is promoted to unsigned
10242 /// (in which case Min and Max are the smallest and largest representable
struct PromotedRange {
  // Min, or HoleMax if there is a hole.
  llvm::APSInt PromotedMin;
  // Max, or HoleMin if there is a hole.
  llvm::APSInt PromotedMax;
  // Build the promoted range for a value of range R after conversion to an
  // integer type with the given bit width and signedness.
  PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
    PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
    else if (R.Width >= BitWidth && !Unsigned) {
      // Promotion made the type *narrower*. This happens when promoting
      // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
      // Treat all values of 'signed int' as being in range for now.
      PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned);
      PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned);
      PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMin.setIsUnsigned(Unsigned);
      PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMax.setIsUnsigned(Unsigned);
  // Determine whether this range is contiguous (has no hole).
  bool isContiguous() const { return PromotedMin <= PromotedMax; }
  // Where a constant value is within the range.
  // The low bits encode which relational results remain possible
  // (LT/LE/EQ/GE/GT/NE); InRangeFlag records that the constant lies inside
  // the promoted range rather than outside it.
  enum ComparisonResult {
    InRangeFlag = 0x40,
    Less = LE | LT | NE,
    Min = LE | InRangeFlag,
    InRange = InRangeFlag,
    Max = GE | InRangeFlag,
    Greater = GE | GT | NE,
    OnlyValue = LE | GE | EQ | InRangeFlag,
  // Classify where Value falls relative to this promoted range; Value must
  // already be in the promoted width/signedness.
  ComparisonResult compare(const llvm::APSInt &Value) const {
    assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&
           Value.isUnsigned() == PromotedMin.isUnsigned());
    if (!isContiguous()) {
      // With a hole, PromotedMin is HoleMax and PromotedMax is HoleMin (see
      // the member comments above), so the tests below are inverted.
      assert(Value.isUnsigned() && "discontiguous range for signed compare");
      if (Value.isMinValue()) return Min;
      if (Value.isMaxValue()) return Max;
      if (Value >= PromotedMin) return InRange;
      if (Value <= PromotedMax) return InRange;
    switch (llvm::APSInt::compareValues(Value, PromotedMin)) {
    case -1: return Less;
    case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
    switch (llvm::APSInt::compareValues(Value, PromotedMax)) {
    case -1: return InRange;
    case 0: return Max;
    case 1: return Greater;
    llvm_unreachable("impossible compare result");
  // If comparison Op against a constant classified as R always folds to the
  // same value, return a printable spelling of that value; otherwise None.
  static llvm::Optional<StringRef>
  constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
    if (Op == BO_Cmp) {
      // For <=>, swap the orientation when the constant is on the RHS so the
      // flags describe "other <=> constant".
      ComparisonResult LTFlag = LT, GTFlag = GT;
      if (ConstantOnRHS) std::swap(LTFlag, GTFlag);
      if (R & EQ) return StringRef("'std::strong_ordering::equal'");
      if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
      if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
    ComparisonResult TrueFlag, FalseFlag;
    } else if (Op == BO_NE) {
    if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
    if (Op == BO_GE || Op == BO_LE)
      std::swap(TrueFlag, FalseFlag);
      return StringRef("true");
      return StringRef("false");
10358 static bool HasEnumType(Expr *E) {
10359 // Strip off implicit integral promotions.
10360 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
10361 if (ICE->getCastKind() != CK_IntegralCast &&
10362 ICE->getCastKind() != CK_NoOp)
10364 E = ICE->getSubExpr();
10367 return E->getType()->isEnumeralType();
10370 static int classifyConstantValue(Expr *Constant) {
10371 // The values of this enumeration are used in the diagnostics
10372 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
10373 enum ConstantValueKind {
10378 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
10379 return BL->getValue() ? ConstantValueKind::LiteralTrue
10380 : ConstantValueKind::LiteralFalse;
10381 return ConstantValueKind::Miscellaneous;
/// Check a comparison of an integer constant against another operand for
/// being tautological (always true / always false) given the promoted range
/// of the other operand, and emit the appropriate diagnostic if so.
static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
                                        bool RhsConstant) {
  // Don't diagnose inside template instantiations; values may be
  // instantiation-dependent.
  if (S.inTemplateInstantiation())
  Expr *OriginalOther = Other;
  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();
  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  // - If the constant is outside the range of representable values of
  // the enumeration. In such a case, we should warn about the cast
  // to enumeration type, not about the comparison.
  // - If the constant is the maximum / minimum in-range value. For an
  // enumeration type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
  // TODO: Investigate using GetExprRange() to get tighter bounds
  // on the bit ranges.
  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT);
  // Special case for ObjC BOOL on targets where it's a typedef for a signed
  // char (Namely, macOS).
  bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
                              S.NSAPIObj->isObjCBOOLType(OtherT) &&
                              OtherT->isSpecificBuiltinType(BuiltinType::SChar);
  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
    OtherRange = IntRange::forBoolType();
  // Determine the promoted range of the other type and see if a comparison of
  // the constant against that range is tautological.
  PromotedRange OtherPromotedRange(OtherRange, Value.getBitWidth(),
                                   Value.isUnsigned());
  auto Cmp = OtherPromotedRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator. We don't want to diagnose
  // some_long_value <= INT_MAX
  // when sizeof(int) == sizeof(long).
  bool InRange = Cmp & PromotedRange::InRangeFlag;
  if (InRange && IsEnumConstOrFromMacro(S, Constant))
  // If this is a comparison to an enum constant, include that
  // constant in the diagnostic.
  const EnumConstantDecl *ED = nullptr;
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
    ED = dyn_cast<EnumConstantDecl>(DR->getDecl());
  // Should be enough for uint128 (39 decimal digits)
  SmallString<64> PrettySourceValue;
  llvm::raw_svector_ostream OS(PrettySourceValue);
    OS << '\'' << *ED << "' (" << Value << ")";
  } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
                 Constant->IgnoreParenImpCasts())) {
    OS << (BL->getValue() ? "YES" : "NO");
  // ObjC BOOL comparisons get their own dedicated diagnostic.
  if (IsObjCSignedCharBool) {
    S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                          S.PDiag(diag::warn_tautological_compare_objc_bool)
                              << OS.str() << *Result);
  // FIXME: We use a somewhat different formatting for the in-range cases and
  // cases involving boolean values for historical reasons. We should pick a
  // consistent way of presenting these diagnostics.
  if (!InRange || Other->isKnownToHaveBooleanValue()) {
    S.DiagRuntimeBehavior(
        E->getOperatorLoc(), E,
        S.PDiag(!InRange ? diag::warn_out_of_range_compare
                         : diag::warn_tautological_bool_compare)
            << OS.str() << classifyConstantValue(Constant) << OtherT
            << OtherIsBooleanDespiteType << *Result
            << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
    // In-range, non-boolean case: pick the diagnostic that best explains why
    // the comparison is always true/false (unsigned-vs-zero gets its own).
    unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
                        ? (HasEnumType(OriginalOther)
                               ? diag::warn_unsigned_enum_always_true_comparison
                               : diag::warn_unsigned_always_true_comparison)
                        : diag::warn_tautological_constant_compare;
    S.Diag(E->getOperatorLoc(), Diag)
        << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
10498 /// Analyze the operands of the given comparison. Implements the
10499 /// fallback case from AnalyzeComparison.
10500 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
10501 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
10502 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
/// Implements -Wsign-compare.
///
/// Also drives the tautological-comparison checks for integral comparisons
/// with one constant operand.
///
/// \param E the binary operator to check for warnings
static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
  // The type the comparison is being performed in.
  QualType T = E->getLHS()->getType();
  // Only analyze comparison operators where both sides have been converted to
  // the same type.
  if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()))
    return AnalyzeImpConvsInComparison(S, E);
  // Don't analyze value-dependent comparisons directly.
  if (E->isValueDependent())
    return AnalyzeImpConvsInComparison(S, E);
  Expr *LHS = E->getLHS();
  Expr *RHS = E->getRHS();
  if (T->isIntegralType(S.Context)) {
    llvm::APSInt RHSValue;
    llvm::APSInt LHSValue;
    bool IsRHSIntegralLiteral = RHS->isIntegerConstantExpr(RHSValue, S.Context);
    bool IsLHSIntegralLiteral = LHS->isIntegerConstantExpr(LHSValue, S.Context);
    // We don't care about expressions whose result is a constant.
    if (IsRHSIntegralLiteral && IsLHSIntegralLiteral)
      return AnalyzeImpConvsInComparison(S, E);
    // We only care about expressions where just one side is literal
    if (IsRHSIntegralLiteral ^ IsLHSIntegralLiteral) {
      // Is the constant on the RHS or LHS?
      const bool RhsConstant = IsRHSIntegralLiteral;
      Expr *Const = RhsConstant ? RHS : LHS;
      Expr *Other = RhsConstant ? LHS : RHS;
      const llvm::APSInt &Value = RhsConstant ? RHSValue : LHSValue;
      // Check whether an integer constant comparison results in a value
      // of 'true' or 'false'.
      if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant))
        return AnalyzeImpConvsInComparison(S, E);
  if (!T->hasUnsignedIntegerRepresentation()) {
    // We don't do anything special if this isn't an unsigned integral
    // comparison: we're only interested in integral comparisons, and
    // signed comparisons only happen in cases we don't care to warn about.
    return AnalyzeImpConvsInComparison(S, E);
  LHS = LHS->IgnoreParenImpCasts();
  RHS = RHS->IgnoreParenImpCasts();
  if (!S.getLangOpts().CPlusPlus) {
    // Avoid warning about comparison of integers with different signs when
    // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
    // the type of `E`.
    if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
      LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
    if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
      RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
  // Check to see if one of the (unmodified) operands is of different
  // signedness.
  Expr *signedOperand, *unsignedOperand;
  if (LHS->getType()->hasSignedIntegerRepresentation()) {
    assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
           "unsigned comparison between two signed integer expressions?");
    signedOperand = LHS;
    unsignedOperand = RHS;
  } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
    signedOperand = RHS;
    unsignedOperand = LHS;
    return AnalyzeImpConvsInComparison(S, E);
  // Otherwise, calculate the effective range of the signed operand.
  IntRange signedRange =
      GetExprRange(S.Context, signedOperand, S.isConstantEvaluated());
  // Go ahead and analyze implicit conversions in the operands. Note
  // that we skip the implicit conversions on both sides.
  AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
  AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
  // If the signed range is non-negative, -Wsign-compare won't fire.
  if (signedRange.NonNegative)
  // For (in)equality comparisons, if the unsigned operand is a
  // constant which cannot collide with a overflowed signed operand,
  // then reinterpreting the signed operand as unsigned will not
  // change the result of the comparison.
  if (E->isEqualityOp()) {
    unsigned comparisonWidth = S.Context.getIntWidth(T);
    IntRange unsignedRange =
        GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated());
    // We should never be unable to prove that the unsigned operand is
    // non-negative.
    assert(unsignedRange.NonNegative && "unsigned range includes negative?");
    if (unsignedRange.Width < comparisonWidth)
  S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                        S.PDiag(diag::warn_mixed_sign_comparison)
                            << LHS->getType() << RHS->getType()
                            << LHS->getSourceRange() << RHS->getSourceRange());
/// Analyzes an attempt to assign the given value to a bitfield.
///
/// Returns true if there was something fishy about the attempt.
static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
                                      SourceLocation InitLoc) {
  assert(Bitfield->isBitField());
  if (Bitfield->isInvalidDecl())
  // White-list bool bitfields.
  QualType BitfieldType = Bitfield->getType();
  if (BitfieldType->isBooleanType())
  if (BitfieldType->isEnumeralType()) {
    EnumDecl *BitfieldEnumDecl = BitfieldType->getAs<EnumType>()->getDecl();
    // If the underlying enum type was not explicitly specified as an unsigned
    // type and the enum contain only positive values, MSVC++ will cause an
    // inconsistency by storing this as a signed type.
    if (S.getLangOpts().CPlusPlus11 &&
        !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
        BitfieldEnumDecl->getNumPositiveBits() > 0 &&
        BitfieldEnumDecl->getNumNegativeBits() == 0) {
      S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
          << BitfieldEnumDecl->getNameAsString();
  if (Bitfield->getType()->isBooleanType())
  // Ignore value- or type-dependent expressions.
  if (Bitfield->getBitWidth()->isValueDependent() ||
      Bitfield->getBitWidth()->isTypeDependent() ||
      Init->isValueDependent() ||
      Init->isTypeDependent())
  Expr *OriginalInit = Init->IgnoreParenImpCasts();
  unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
  Expr::EvalResult Result;
  if (!OriginalInit->EvaluateAsInt(Result, S.Context,
                                   Expr::SE_AllowSideEffects)) {
    // The RHS is not constant. If the RHS has an enum type, make sure the
    // bitfield is wide enough to hold all the values of the enum without
    // truncation.
    if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
      EnumDecl *ED = EnumTy->getDecl();
      bool SignedBitfield = BitfieldType->isSignedIntegerType();
      // Enum types are implicitly signed on Windows, so check if there are any
      // negative enumerators to see if the enum was intended to be signed or
      // not.
      bool SignedEnum = ED->getNumNegativeBits() > 0;
      // Check for surprising sign changes when assigning enum values to a
      // bitfield of different signedness. If the bitfield is signed and we
      // have exactly the right number of bits to store this unsigned enum,
      // suggest changing the enum to an unsigned type. This typically happens
      // on Windows where unfixed enums always use an underlying type of 'int'.
      unsigned DiagID = 0;
      if (SignedEnum && !SignedBitfield) {
        DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
      } else if (SignedBitfield && !SignedEnum &&
                 ED->getNumPositiveBits() == FieldWidth) {
        DiagID = diag::warn_signed_bitfield_enum_conversion;
        S.Diag(InitLoc, DiagID) << Bitfield << ED;
        TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
        SourceRange TypeRange =
            TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
        S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
            << SignedEnum << TypeRange;
      // Compute the required bitwidth. If the enum has negative values, we
      // need one more bit than the normal number of positive bits to
      // represent the sign bit.
      unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1,
                                                  ED->getNumNegativeBits())
                                       : ED->getNumPositiveBits();
      // Check the bitwidth.
      if (BitsNeeded > FieldWidth) {
        Expr *WidthExpr = Bitfield->getBitWidth();
        S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
        S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
            << BitsNeeded << ED << WidthExpr->getSourceRange();
  llvm::APSInt Value = Result.Val.getInt();
  unsigned OriginalWidth = Value.getBitWidth();
  // If the RHS is a negated or complemented literal, measure the width of the
  // value as a signed quantity so e.g. '-1' counts as fitting in any width.
  if (!Value.isSigned() || Value.isNegative())
    if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
      if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
        OriginalWidth = Value.getMinSignedBits();
  if (OriginalWidth <= FieldWidth)
  // Compute the value which the bitfield will contain.
  llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
  TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());
  // Check whether the stored value is equal to the original value.
  TruncatedValue = TruncatedValue.extend(OriginalWidth);
  if (llvm::APSInt::isSameValue(Value, TruncatedValue))
  // Special-case bitfields of width 1: booleans are naturally 0/1, and
  // therefore don't strictly fit into a signed bitfield of width 1.
  if (FieldWidth == 1 && Value == 1)
  std::string PrettyValue = Value.toString(10);
  std::string PrettyTrunc = TruncatedValue.toString(10);
  S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant)
      << PrettyValue << PrettyTrunc << OriginalInit->getType()
      << Init->getSourceRange();
10755 /// Analyze the given simple or compound assignment for warning-worthy
10757 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
10758 // Just recurse on the LHS.
10759 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
10761 // We want to recurse on the RHS as normal unless we're assigning to
10763 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
10764 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
10765 E->getOperatorLoc())) {
10766 // Recurse, ignoring any implicit conversions on the RHS.
10767 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
10768 E->getOperatorLoc());
10772 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
10774 // Diagnose implicitly sequentially-consistent atomic assignment.
10775 if (E->getLHS()->getType()->isAtomicType())
10776 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
10779 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
10780 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
10781 SourceLocation CContext, unsigned diag,
10782 bool pruneControlFlow = false) {
10783 if (pruneControlFlow) {
10784 S.DiagRuntimeBehavior(E->getExprLoc(), E,
10786 << SourceType << T << E->getSourceRange()
10787 << SourceRange(CContext));
10790 S.Diag(E->getExprLoc(), diag)
10791 << SourceType << T << E->getSourceRange() << SourceRange(CContext);
/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
/// Convenience overload that uses E's own type as the cast's source type.
static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
                            SourceLocation CContext,
                            unsigned diag, bool pruneControlFlow = false) {
  DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
/// Diagnose an implicit cast from a floating point value to an integer value.
///
/// Chooses among several diagnostics (generic float->int, literal->int,
/// out-of-range, non-zero->zero) based on whether the source value is a
/// constant and whether the conversion is exact.
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
                                    SourceLocation CContext) {
  const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
  const bool PruneWarnings = S.inTemplateInstantiation();
  Expr *InnerE = E->IgnoreParenImpCasts();
  // We also want to warn on, e.g., "int i = -1.234"
  if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
    if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
      InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();
  const bool IsLiteral =
      isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);
  llvm::APFloat Value(0.0);
      E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
    // Not a compile-time constant: only the generic diagnostic applies.
    return DiagnoseImpCast(S, E, T, CContext,
                           diag::warn_impcast_float_integer, PruneWarnings);
  bool isExact = false;
  llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
                            T->hasUnsignedIntegerRepresentation());
  llvm::APFloat::opStatus Result = Value.convertToInteger(
      IntegerValue, llvm::APFloat::rmTowardZero, &isExact);
  // An exact conversion to a literal value never deserves a warning.
  if (Result == llvm::APFloat::opOK && isExact) {
    if (IsLiteral) return;
    return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
  // Conversion of a floating-point value to a non-bool integer where the
  // integral part cannot be represented by the integer type is undefined.
  if (!IsBool && Result == llvm::APFloat::opInvalidOp)
    return DiagnoseImpCast(
        IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
                  : diag::warn_impcast_float_to_integer_out_of_range,
  unsigned DiagID = 0;
    // Warn on floating point literal to integer.
    DiagID = diag::warn_impcast_literal_float_to_integer;
  } else if (IntegerValue == 0) {
    if (Value.isZero()) { // Skip -0.0 to 0 conversion.
      return DiagnoseImpCast(S, E, T, CContext,
                             diag::warn_impcast_float_integer, PruneWarnings);
    // Warn on non-zero to zero conversion.
    DiagID = diag::warn_impcast_float_to_integer_zero;
    if (IntegerValue.isUnsigned()) {
      if (!IntegerValue.isMaxValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
    } else { // IntegerValue.isSigned()
      if (!IntegerValue.isMaxSignedValue() &&
          !IntegerValue.isMinSignedValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
    // Warn on evaluatable floating point expression to integer conversion.
    DiagID = diag::warn_impcast_float_to_integer;
  // FIXME: Force the precision of the source value down so we don't print
  // digits which are usually useless (we don't really care here if we
  // truncate a digit by accident in edge cases). Ideally, APFloat::toString
  // would automatically print the shortest representation, but it's a bit
  // tricky to implement.
  SmallString<16> PrettySourceValue;
  unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
  // 59/196 is slightly below log10(2): convert a bit count to a safe
  // decimal-digit count.
  precision = (precision * 59 + 195) / 196;
  Value.toString(PrettySourceValue, precision);
  SmallString<16> PrettyTargetValue;
    PrettyTargetValue = Value.isZero() ? "false" : "true";
    IntegerValue.toString(PrettyTargetValue);
  if (PruneWarnings) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          << E->getType() << T.getUnqualifiedType()
                          << PrettySourceValue << PrettyTargetValue
                          << E->getSourceRange() << SourceRange(CContext));
    S.Diag(E->getExprLoc(), DiagID)
        << E->getType() << T.getUnqualifiedType() << PrettySourceValue
        << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
10903 /// Analyze the given compound assignment for the possible losing of
10904 /// floating-point precision.
10905 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
10906 assert(isa<CompoundAssignOperator>(E) &&
10907 "Must be compound assignment operation");
10908 // Recurse on the LHS and RHS in here
10909 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
10910 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
10912 if (E->getLHS()->getType()->isAtomicType())
10913 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);
10915 // Now check the outermost expression
10916 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
10917 const auto *RBT = cast<CompoundAssignOperator>(E)
10918 ->getComputationResultType()
10919 ->getAs<BuiltinType>();
10921 // The below checks assume source is floating point.
10922 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;
10924 // If source is floating point but target is an integer.
10925 if (ResultBT->isInteger())
10926 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
10927 E->getExprLoc(), diag::warn_impcast_float_integer);
10929 if (!ResultBT->isFloatingPoint())
10932 // If both source and target are floating points, warn about losing precision.
10933 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
10934 QualType(ResultBT, 0), QualType(RBT, 0));
10935 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
10936 // warn about dropping FP rank.
10937 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
10938 diag::warn_impcast_float_result_precision);
10941 static std::string PrettyPrintInRange(const llvm::APSInt &Value,
10943 if (!Range.Width) return "0";
10945 llvm::APSInt ValueInRange = Value;
10946 ValueInRange.setIsSigned(!Range.NonNegative);
10947 ValueInRange = ValueInRange.trunc(Range.Width);
10948 return ValueInRange.toString(10);
10951 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
10952 if (!isa<ImplicitCastExpr>(Ex))
10955 Expr *InnerE = Ex->IgnoreParenImpCasts();
10956 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
10957 const Type *Source =
10958 S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
10959 if (Target->isDependentType())
10962 const BuiltinType *FloatCandidateBT =
10963 dyn_cast<BuiltinType>(ToBool ? Source : Target);
10964 const Type *BoolCandidateType = ToBool ? Target : Source;
10966 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
10967 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
10970 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
10971 SourceLocation CC) {
10972 unsigned NumArgs = TheCall->getNumArgs();
10973 for (unsigned i = 0; i < NumArgs; ++i) {
10974 Expr *CurrA = TheCall->getArg(i);
10975 if (!IsImplicitBoolFloatConversion(S, CurrA, true))
10978 bool IsSwapped = ((i > 0) &&
10979 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
10980 IsSwapped |= ((i < (NumArgs - 1)) &&
10981 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
10983 // Warn on this floating-point to bool conversion.
10984 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
10985 CurrA->getType(), CC,
10986 diag::warn_impcast_floating_point_to_bool);
10991 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
10992 SourceLocation CC) {
10993 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
10997 // Don't warn on functions which have return type nullptr_t.
10998 if (isa<CallExpr>(E))
11001 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
11002 const Expr::NullPointerConstantKind NullKind =
11003 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull);
11004 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr)
11007 // Return if target type is a safe conversion.
11008 if (T->isAnyPointerType() || T->isBlockPointerType() ||
11009 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
11012 SourceLocation Loc = E->getSourceRange().getBegin();
11014 // Venture through the macro stacks to get to the source of macro arguments.
11015 // The new location is a better location than the complete location that was
11017 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
11018 CC = S.SourceMgr.getTopMacroCallerLoc(CC);
11020 // __null is usually wrapped in a macro. Go up a macro if that is the case.
11021 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) {
11022 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
11023 Loc, S.SourceMgr, S.getLangOpts());
11024 if (MacroName == "NULL")
11025 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
11028 // Only warn if the null and context location are in the same macro expansion.
11029 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
11032 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
11033 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC)
11034 << FixItHint::CreateReplacement(Loc,
11035 S.getFixItZeroLiteralForType(T, Loc));
11038 static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
11039 ObjCArrayLiteral *ArrayLiteral);
11042 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
11043 ObjCDictionaryLiteral *DictionaryLiteral);
11045 /// Check a single element within a collection literal against the
11046 /// target element type.
11047 static void checkObjCCollectionLiteralElement(Sema &S,
11048 QualType TargetElementType,
11050 unsigned ElementKind) {
11051 // Skip a bitcast to 'id' or qualified 'id'.
11052 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
11053 if (ICE->getCastKind() == CK_BitCast &&
11054 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
11055 Element = ICE->getSubExpr();
11058 QualType ElementType = Element->getType();
11059 ExprResult ElementResult(Element);
11060 if (ElementType->getAs<ObjCObjectPointerType>() &&
11061 S.CheckSingleAssignmentConstraints(TargetElementType,
11064 != Sema::Compatible) {
11065 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
11066 << ElementType << ElementKind << TargetElementType
11067 << Element->getSourceRange();
11070 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
11071 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
11072 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
11073 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
11076 /// Check an Objective-C array literal being converted to the given
11078 static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
11079 ObjCArrayLiteral *ArrayLiteral) {
11080 if (!S.NSArrayDecl)
11083 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
11084 if (!TargetObjCPtr)
11087 if (TargetObjCPtr->isUnspecialized() ||
11088 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
11089 != S.NSArrayDecl->getCanonicalDecl())
11092 auto TypeArgs = TargetObjCPtr->getTypeArgs();
11093 if (TypeArgs.size() != 1)
11096 QualType TargetElementType = TypeArgs[0];
11097 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) {
11098 checkObjCCollectionLiteralElement(S, TargetElementType,
11099 ArrayLiteral->getElement(I),
11104 /// Check an Objective-C dictionary literal being converted to the given
11107 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
11108 ObjCDictionaryLiteral *DictionaryLiteral) {
11109 if (!S.NSDictionaryDecl)
11112 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
11113 if (!TargetObjCPtr)
11116 if (TargetObjCPtr->isUnspecialized() ||
11117 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
11118 != S.NSDictionaryDecl->getCanonicalDecl())
11121 auto TypeArgs = TargetObjCPtr->getTypeArgs();
11122 if (TypeArgs.size() != 2)
11125 QualType TargetKeyType = TypeArgs[0];
11126 QualType TargetObjectType = TypeArgs[1];
11127 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
11128 auto Element = DictionaryLiteral->getKeyValueElement(I);
11129 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
11130 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
11134 // Helper function to filter out cases for constant width constant conversion.
11135 // Don't warn on char array initialization or for non-decimal values.
11136 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
11137 SourceLocation CC) {
11138 // If initializing from a constant, and the constant starts with '0',
11139 // then it is a binary, octal, or hexadecimal. Allow these constants
11140 // to fill all the bits, even if there is a sign change.
11141 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
11142 const char FirstLiteralCharacter =
11143 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
11144 if (FirstLiteralCharacter == '0')
11148 // If the CC location points to a '{', and the type is char, then assume
11149 // assume it is an array initialization.
11150 if (CC.isValid() && T->isCharType()) {
11151 const char FirstContextCharacter =
11152 S.getSourceManager().getCharacterData(CC)[0];
11153 if (FirstContextCharacter == '{')
11160 static bool isObjCSignedCharBool(Sema &S, QualType Ty) {
11161 return Ty->isSpecificBuiltinType(BuiltinType::SChar) &&
11162 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty);
11166 CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
11167 bool *ICContext = nullptr) {
11168 if (E->isTypeDependent() || E->isValueDependent()) return;
11170 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
11171 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
11172 if (Source == Target) return;
11173 if (Target->isDependentType()) return;
11175 // If the conversion context location is invalid don't complain. We also
11176 // don't want to emit a warning if the issue occurs from the expansion of
11177 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
11178 // delay this check as long as possible. Once we detect we are in that
11179 // scenario, we just return.
11180 if (CC.isInvalid())
11183 if (Source->isAtomicType())
11184 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
11186 // Diagnose implicit casts to bool.
11187 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
11188 if (isa<StringLiteral>(E))
11189 // Warn on string literal to bool. Checks for string literals in logical
11190 // and expressions, for instance, assert(0 && "error here"), are
11191 // prevented by a check in AnalyzeImplicitConversions().
11192 return DiagnoseImpCast(S, E, T, CC,
11193 diag::warn_impcast_string_literal_to_bool);
11194 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
11195 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
11196 // This covers the literal expressions that evaluate to Objective-C
11198 return DiagnoseImpCast(S, E, T, CC,
11199 diag::warn_impcast_objective_c_literal_to_bool);
11201 if (Source->isPointerType() || Source->canDecayToPointerType()) {
11202 // Warn on pointer to bool conversion that is always true.
11203 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
11208 // If the we're converting a constant to an ObjC BOOL on a platform where BOOL
11209 // is a typedef for signed char (macOS), then that constant value has to be 1
11211 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) {
11212 Expr::EvalResult Result;
11213 if (E->EvaluateAsInt(Result, S.getASTContext(),
11214 Expr::SE_AllowSideEffects) &&
11215 Result.Val.getInt() != 1 && Result.Val.getInt() != 0) {
11216 auto Builder = S.Diag(CC, diag::warn_impcast_constant_int_to_objc_bool)
11217 << Result.Val.getInt().toString(10);
11218 Expr *Ignored = E->IgnoreImplicit();
11219 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) ||
11220 isa<BinaryOperator>(Ignored) ||
11221 isa<CXXOperatorCallExpr>(Ignored);
11222 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
11224 Builder << FixItHint::CreateInsertion(E->getBeginLoc(), "(")
11225 << FixItHint::CreateInsertion(EndLoc, ")");
11226 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO");
11231 // Check implicit casts from Objective-C collection literals to specialized
11232 // collection types, e.g., NSArray<NSString *> *.
11233 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E))
11234 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral);
11235 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E))
11236 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);
11238 // Strip vector types.
11239 if (isa<VectorType>(Source)) {
11240 if (!isa<VectorType>(Target)) {
11241 if (S.SourceMgr.isInSystemMacro(CC))
11243 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
11246 // If the vector cast is cast between two vectors of the same size, it is
11247 // a bitcast, not a conversion.
11248 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
11251 Source = cast<VectorType>(Source)->getElementType().getTypePtr();
11252 Target = cast<VectorType>(Target)->getElementType().getTypePtr();
11254 if (auto VecTy = dyn_cast<VectorType>(Target))
11255 Target = VecTy->getElementType().getTypePtr();
11257 // Strip complex types.
11258 if (isa<ComplexType>(Source)) {
11259 if (!isa<ComplexType>(Target)) {
11260 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType())
11263 return DiagnoseImpCast(S, E, T, CC,
11264 S.getLangOpts().CPlusPlus
11265 ? diag::err_impcast_complex_scalar
11266 : diag::warn_impcast_complex_scalar);
11269 Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
11270 Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
11273 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
11274 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);
11276 // If the source is floating point...
11277 if (SourceBT && SourceBT->isFloatingPoint()) {
11278 // ...and the target is floating point...
11279 if (TargetBT && TargetBT->isFloatingPoint()) {
11280 // ...then warn if we're dropping FP rank.
11282 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
11283 QualType(SourceBT, 0), QualType(TargetBT, 0));
11285 // Don't warn about float constants that are precisely
11286 // representable in the target type.
11287 Expr::EvalResult result;
11288 if (E->EvaluateAsRValue(result, S.Context)) {
11289 // Value might be a float, a float vector, or a float complex.
11290 if (IsSameFloatAfterCast(result.Val,
11291 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
11292 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
11296 if (S.SourceMgr.isInSystemMacro(CC))
11299 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
11301 // ... or possibly if we're increasing rank, too
11302 else if (Order < 0) {
11303 if (S.SourceMgr.isInSystemMacro(CC))
11306 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
11311 // If the target is integral, always warn.
11312 if (TargetBT && TargetBT->isInteger()) {
11313 if (S.SourceMgr.isInSystemMacro(CC))
11316 DiagnoseFloatingImpCast(S, E, T, CC);
11319 // Detect the case where a call result is converted from floating-point to
11320 // to bool, and the final argument to the call is converted from bool, to
11321 // discover this typo:
11323 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
11325 // FIXME: This is an incredibly special case; is there some more general
11326 // way to detect this class of misplaced-parentheses bug?
11327 if (Target->isBooleanType() && isa<CallExpr>(E)) {
11328 // Check last argument of function call to see if it is an
11329 // implicit cast from a type matching the type the result
11330 // is being cast to.
11331 CallExpr *CEx = cast<CallExpr>(E);
11332 if (unsigned NumArgs = CEx->getNumArgs()) {
11333 Expr *LastA = CEx->getArg(NumArgs - 1);
11334 Expr *InnerE = LastA->IgnoreParenImpCasts();
11335 if (isa<ImplicitCastExpr>(LastA) &&
11336 InnerE->getType()->isBooleanType()) {
11337 // Warn on this floating-point to bool conversion
11338 DiagnoseImpCast(S, E, T, CC,
11339 diag::warn_impcast_floating_point_to_bool);
11346 // Valid casts involving fixed point types should be accounted for here.
11347 if (Source->isFixedPointType()) {
11348 if (Target->isUnsaturatedFixedPointType()) {
11349 Expr::EvalResult Result;
11350 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects,
11351 S.isConstantEvaluated())) {
11352 APFixedPoint Value = Result.Val.getFixedPoint();
11353 APFixedPoint MaxVal = S.Context.getFixedPointMax(T);
11354 APFixedPoint MinVal = S.Context.getFixedPointMin(T);
11355 if (Value > MaxVal || Value < MinVal) {
11356 S.DiagRuntimeBehavior(E->getExprLoc(), E,
11357 S.PDiag(diag::warn_impcast_fixed_point_range)
11358 << Value.toString() << T
11359 << E->getSourceRange()
11360 << clang::SourceRange(CC));
11364 } else if (Target->isIntegerType()) {
11365 Expr::EvalResult Result;
11366 if (!S.isConstantEvaluated() &&
11367 E->EvaluateAsFixedPoint(Result, S.Context,
11368 Expr::SE_AllowSideEffects)) {
11369 APFixedPoint FXResult = Result.Val.getFixedPoint();
11372 llvm::APSInt IntResult = FXResult.convertToInt(
11373 S.Context.getIntWidth(T),
11374 Target->isSignedIntegerOrEnumerationType(), &Overflowed);
11377 S.DiagRuntimeBehavior(E->getExprLoc(), E,
11378 S.PDiag(diag::warn_impcast_fixed_point_range)
11379 << FXResult.toString() << T
11380 << E->getSourceRange()
11381 << clang::SourceRange(CC));
11386 } else if (Target->isUnsaturatedFixedPointType()) {
11387 if (Source->isIntegerType()) {
11388 Expr::EvalResult Result;
11389 if (!S.isConstantEvaluated() &&
11390 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
11391 llvm::APSInt Value = Result.Val.getInt();
11394 APFixedPoint IntResult = APFixedPoint::getFromIntValue(
11395 Value, S.Context.getFixedPointSemantics(T), &Overflowed);
11398 S.DiagRuntimeBehavior(E->getExprLoc(), E,
11399 S.PDiag(diag::warn_impcast_fixed_point_range)
11400 << Value.toString(/*Radix=*/10) << T
11401 << E->getSourceRange()
11402 << clang::SourceRange(CC));
11409 DiagnoseNullConversion(S, E, T, CC);
11411 S.DiscardMisalignedMemberAddress(Target, E);
11413 if (!Source->isIntegerType() || !Target->isIntegerType())
11416 // TODO: remove this early return once the false positives for constant->bool
11417 // in templates, macros, etc, are reduced or removed.
11418 if (Target->isSpecificBuiltinType(BuiltinType::Bool))
11421 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated());
11422 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);
11424 if (SourceRange.Width > TargetRange.Width) {
11425 // If the source is a constant, use a default-on diagnostic.
11426 // TODO: this should happen for bitfield stores, too.
11427 Expr::EvalResult Result;
11428 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects,
11429 S.isConstantEvaluated())) {
11430 llvm::APSInt Value(32);
11431 Value = Result.Val.getInt();
11433 if (S.SourceMgr.isInSystemMacro(CC))
11436 std::string PrettySourceValue = Value.toString(10);
11437 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
11439 S.DiagRuntimeBehavior(
11440 E->getExprLoc(), E,
11441 S.PDiag(diag::warn_impcast_integer_precision_constant)
11442 << PrettySourceValue << PrettyTargetValue << E->getType() << T
11443 << E->getSourceRange() << clang::SourceRange(CC));
11447 // People want to build with -Wshorten-64-to-32 and not -Wconversion.
11448 if (S.SourceMgr.isInSystemMacro(CC))
11451 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
11452 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
11453 /* pruneControlFlow */ true);
11454 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
11457 if (TargetRange.Width > SourceRange.Width) {
11458 if (auto *UO = dyn_cast<UnaryOperator>(E))
11459 if (UO->getOpcode() == UO_Minus)
11460 if (Source->isUnsignedIntegerType()) {
11461 if (Target->isUnsignedIntegerType())
11462 return DiagnoseImpCast(S, E, T, CC,
11463 diag::warn_impcast_high_order_zero_bits);
11464 if (Target->isSignedIntegerType())
11465 return DiagnoseImpCast(S, E, T, CC,
11466 diag::warn_impcast_nonnegative_result);
11470 if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative &&
11471 SourceRange.NonNegative && Source->isSignedIntegerType()) {
11472 // Warn when doing a signed to signed conversion, warn if the positive
11473 // source value is exactly the width of the target type, which will
11474 // cause a negative value to be stored.
11476 Expr::EvalResult Result;
11477 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
11478 !S.SourceMgr.isInSystemMacro(CC)) {
11479 llvm::APSInt Value = Result.Val.getInt();
11480 if (isSameWidthConstantConversion(S, E, T, CC)) {
11481 std::string PrettySourceValue = Value.toString(10);
11482 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
11484 S.DiagRuntimeBehavior(
11485 E->getExprLoc(), E,
11486 S.PDiag(diag::warn_impcast_integer_precision_constant)
11487 << PrettySourceValue << PrettyTargetValue << E->getType() << T
11488 << E->getSourceRange() << clang::SourceRange(CC));
11493 // Fall through for non-constants to give a sign conversion warning.
11496 if ((TargetRange.NonNegative && !SourceRange.NonNegative) ||
11497 (!TargetRange.NonNegative && SourceRange.NonNegative &&
11498 SourceRange.Width == TargetRange.Width)) {
11499 if (S.SourceMgr.isInSystemMacro(CC))
11502 unsigned DiagID = diag::warn_impcast_integer_sign;
11504 // Traditionally, gcc has warned about this under -Wsign-compare.
11505 // We also want to warn about it in -Wconversion.
11506 // So if -Wconversion is off, use a completely identical diagnostic
11507 // in the sign-compare group.
11508 // The conditional-checking code will
11510 DiagID = diag::warn_impcast_integer_sign_conditional;
11514 return DiagnoseImpCast(S, E, T, CC, DiagID);
11517 // Diagnose conversions between different enumeration types.
11518 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
11519 // type, to give us better diagnostics.
11520 QualType SourceType = E->getType();
11521 if (!S.getLangOpts().CPlusPlus) {
11522 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
11523 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
11524 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
11525 SourceType = S.Context.getTypeDeclType(Enum);
11526 Source = S.Context.getCanonicalType(SourceType).getTypePtr();
11530 if (const EnumType *SourceEnum = Source->getAs<EnumType>())
11531 if (const EnumType *TargetEnum = Target->getAs<EnumType>())
11532 if (SourceEnum->getDecl()->hasNameForLinkage() &&
11533 TargetEnum->getDecl()->hasNameForLinkage() &&
11534 SourceEnum != TargetEnum) {
11535 if (S.SourceMgr.isInSystemMacro(CC))
11538 return DiagnoseImpCast(S, E, SourceType, T, CC,
11539 diag::warn_impcast_different_enum_types);
11543 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
11544 SourceLocation CC, QualType T);
11546 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
11547 SourceLocation CC, bool &ICContext) {
11548 E = E->IgnoreParenImpCasts();
11550 if (isa<ConditionalOperator>(E))
11551 return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T);
11553 AnalyzeImplicitConversions(S, E, CC);
11554 if (E->getType() != T)
11555 return CheckImplicitConversion(S, E, T, CC, &ICContext);
11558 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
11559 SourceLocation CC, QualType T) {
11560 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());
11562 bool Suspicious = false;
11563 CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious);
11564 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);
11566 // If -Wconversion would have warned about either of the candidates
11567 // for a signedness conversion to the context type...
11568 if (!Suspicious) return;
11570 // ...but it's currently ignored...
11571 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
11574 // ...then check whether it would have warned about either of the
11575 // candidates for a signedness conversion to the condition type.
11576 if (E->getType() == T) return;
11578 Suspicious = false;
11579 CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(),
11580 E->getType(), CC, &Suspicious);
11582 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
11583 E->getType(), CC, &Suspicious);
11586 /// Check conversion of given expression to boolean.
11587 /// Input argument E is a logical expression.
11588 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
11589 if (S.getLangOpts().Bool)
11591 if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
11593 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
11596 /// AnalyzeImplicitConversions - Find and report any interesting
11597 /// implicit conversions in the given expression. There are a couple
11598 /// of competing diagnostics here, -Wconversion and -Wsign-compare.
11599 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE,
11600 SourceLocation CC) {
11601 QualType T = OrigE->getType();
11602 Expr *E = OrigE->IgnoreParenImpCasts();
11604 if (E->isTypeDependent() || E->isValueDependent())
11607 // For conditional operators, we analyze the arguments as if they
11608 // were being fed directly into the output.
11609 if (isa<ConditionalOperator>(E)) {
11610 ConditionalOperator *CO = cast<ConditionalOperator>(E);
11611 CheckConditionalOperator(S, CO, CC, T);
11615 // Check implicit argument conversions for function calls.
11616 if (CallExpr *Call = dyn_cast<CallExpr>(E))
11617 CheckImplicitArgumentConversions(S, Call, CC);
11619 // Go ahead and check any implicit conversions we might have skipped.
11620 // The non-canonical typecheck is just an optimization;
11621 // CheckImplicitConversion will filter out dead implicit conversions.
11622 if (E->getType() != T)
11623 CheckImplicitConversion(S, E, T, CC);
11625 // Now continue drilling into this expression.
11627 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
11628 // The bound subexpressions in a PseudoObjectExpr are not reachable
11629 // as transitive children.
11630 // FIXME: Use a more uniform representation for this.
11631 for (auto *SE : POE->semantics())
11632 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
11633 AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC);
11636 // Skip past explicit casts.
11637 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
11638 E = CE->getSubExpr()->IgnoreParenImpCasts();
11639 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
11640 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
11641 return AnalyzeImplicitConversions(S, E, CC);
11644 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
11645 // Do a somewhat different check with comparison operators.
11646 if (BO->isComparisonOp())
11647 return AnalyzeComparison(S, BO);
11649 // And with simple assignments.
11650 if (BO->getOpcode() == BO_Assign)
11651 return AnalyzeAssignment(S, BO);
11652 // And with compound assignments.
11653 if (BO->isAssignmentOp())
11654 return AnalyzeCompoundAssignment(S, BO);
11657 // These break the otherwise-useful invariant below. Fortunately,
11658 // we don't really need to recurse into them, because any internal
11659 // expressions should have been analyzed already when they were
11660 // built into statements.
11661 if (isa<StmtExpr>(E)) return;
11663 // Don't descend into unevaluated contexts.
11664 if (isa<UnaryExprOrTypeTraitExpr>(E)) return;
11666 // Now just recurse over the expression's children.
11667 CC = E->getExprLoc();
11668 BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
11669 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
11670 for (Stmt *SubStmt : E->children()) {
11671 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
11675 if (IsLogicalAndOperator &&
11676 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
11677 // Ignore checking string literals that are in logical and operators.
11678 // This is a common pattern for asserts.
11680 AnalyzeImplicitConversions(S, ChildExpr, CC);
11683 if (BO && BO->isLogicalOp()) {
11684 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
11685 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
11686 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
11688 SubExpr = BO->getRHS()->IgnoreParenImpCasts();
11689 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
11690 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
11693 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
11694 if (U->getOpcode() == UO_LNot) {
11695 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
11696 } else if (U->getOpcode() != UO_AddrOf) {
11697 if (U->getSubExpr()->getType()->isAtomicType())
11698 S.Diag(U->getSubExpr()->getBeginLoc(),
11699 diag::warn_atomic_implicit_seq_cst);
11704 /// Diagnose integer type and any valid implicit conversion to it.
11705 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
11706 // Taking into account implicit conversions,
11707 // allow any integer.
11708 if (!E->getType()->isIntegerType()) {
11709 S.Diag(E->getBeginLoc(),
11710 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
11713 // Potentially emit standard warnings for implicit conversions if enabled
11714 // using -Wconversion.
11715 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
11719 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
11720 // Returns true when emitting a warning about taking the address of a reference.
11721 static bool CheckForReference(Sema &SemaRef, const Expr *E,
11722 const PartialDiagnostic &PD) {
11723 E = E->IgnoreParenImpCasts();
11725 const FunctionDecl *FD = nullptr;
11727 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
11728 if (!DRE->getDecl()->getType()->isReferenceType())
11730 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
11731 if (!M->getMemberDecl()->getType()->isReferenceType())
11733 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
11734 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
11736 FD = Call->getDirectCallee();
11741 SemaRef.Diag(E->getExprLoc(), PD);
11743 // If possible, point to location of function.
11745 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
11751 // Returns true if the SourceLocation is expanded from any macro body.
11752 // Returns false if the SourceLocation is invalid, is from not in a macro
11753 // expansion, or is from expanded from a top-level macro argument.
11754 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
11755 if (Loc.isInvalid())
11758 while (Loc.isMacroID()) {
11759 if (SM.isMacroBodyExpansion(Loc))
11761 Loc = SM.getImmediateMacroCallerLoc(Loc);
11767 /// Diagnose pointers that are always non-null.
11768 /// \param E the expression containing the pointer
11769 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
11770 /// compared to a null pointer
11771 /// \param IsEqual True when the comparison is equal to a null pointer
11772 /// \param Range Extra SourceRange to highlight in the diagnostic
11773 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
11774 Expr::NullPointerConstantKind NullKind,
11775 bool IsEqual, SourceRange Range) {
11779 // Don't warn inside macros.
11780 if (E->getExprLoc().isMacroID()) {
11781 const SourceManager &SM = getSourceManager();
11782 if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
11783 IsInAnyMacroBody(SM, Range.getBegin()))
11786 E = E->IgnoreImpCasts();
11788 const bool IsCompare = NullKind != Expr::NPCK_NotNull;
11790 if (isa<CXXThisExpr>(E)) {
11791 unsigned DiagID = IsCompare ? diag::warn_this_null_compare
11792 : diag::warn_this_bool_conversion;
11793 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
11797 bool IsAddressOf = false;
11799 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
11800 if (UO->getOpcode() != UO_AddrOf)
11802 IsAddressOf = true;
11803 E = UO->getSubExpr();
11807 unsigned DiagID = IsCompare
11808 ? diag::warn_address_of_reference_null_compare
11809 : diag::warn_address_of_reference_bool_conversion;
11810 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
11812 if (CheckForReference(*this, E, PD)) {
11817 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
11818 bool IsParam = isa<NonNullAttr>(NonnullAttr);
11820 llvm::raw_string_ostream S(Str);
11821 E->printPretty(S, nullptr, getPrintingPolicy());
11822 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
11823 : diag::warn_cast_nonnull_to_bool;
11824 Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
11825 << E->getSourceRange() << Range << IsEqual;
11826 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
11829 // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
11830 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
11831 if (auto *Callee = Call->getDirectCallee()) {
11832 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
11833 ComplainAboutNonnullParamOrCall(A);
11839 // Expect to find a single Decl. Skip anything more complicated.
11840 ValueDecl *D = nullptr;
11841 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
11843 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
11844 D = M->getMemberDecl();
11847 // Weak Decls can be null.
11848 if (!D || D->isWeak())
11851 // Check for parameter decl with nonnull attribute
11852 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
11853 if (getCurFunction() &&
11854 !getCurFunction()->ModifiedNonNullParams.count(PV)) {
11855 if (const Attr *A = PV->getAttr<NonNullAttr>()) {
11856 ComplainAboutNonnullParamOrCall(A);
11860 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
11861 // Skip function template not specialized yet.
11862 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
11864 auto ParamIter = llvm::find(FD->parameters(), PV);
11865 assert(ParamIter != FD->param_end());
11866 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
11868 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
11869 if (!NonNull->args_size()) {
11870 ComplainAboutNonnullParamOrCall(NonNull);
11874 for (const ParamIdx &ArgNo : NonNull->args()) {
11875 if (ArgNo.getASTIndex() == ParamNo) {
11876 ComplainAboutNonnullParamOrCall(NonNull);
11885 QualType T = D->getType();
11886 const bool IsArray = T->isArrayType();
11887 const bool IsFunction = T->isFunctionType();
11889 // Address of function is used to silence the function warning.
11890 if (IsAddressOf && IsFunction) {
11895 if (!IsAddressOf && !IsFunction && !IsArray)
11898 // Pretty print the expression for the diagnostic.
11900 llvm::raw_string_ostream S(Str);
11901 E->printPretty(S, nullptr, getPrintingPolicy());
11903 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
11904 : diag::warn_impcast_pointer_to_bool;
11911 DiagType = AddressOf;
11912 else if (IsFunction)
11913 DiagType = FunctionPointer;
11915 DiagType = ArrayPointer;
11917 llvm_unreachable("Could not determine diagnostic.");
11918 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
11919 << Range << IsEqual;
11924 // Suggest '&' to silence the function warning.
11925 Diag(E->getExprLoc(), diag::note_function_warning_silence)
11926 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
11928 // Check to see if '()' fixit should be emitted.
11929 QualType ReturnType;
11930 UnresolvedSet<4> NonTemplateOverloads;
11931 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
11932 if (ReturnType.isNull())
11936 // There are two cases here. If there is null constant, the only suggest
11937 // for a pointer return type. If the null is 0, then suggest if the return
11938 // type is a pointer or an integer type.
11939 if (!ReturnType->isPointerType()) {
11940 if (NullKind == Expr::NPCK_ZeroExpression ||
11941 NullKind == Expr::NPCK_ZeroLiteral) {
11942 if (!ReturnType->isIntegerType())
11948 } else { // !IsCompare
11949 // For function to bool, only suggest if the function pointer has bool
11951 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
11954 Diag(E->getExprLoc(), diag::note_function_to_function_call)
11955 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
11958 /// Diagnoses "dangerous" implicit conversions within the given
11959 /// expression (which is a full expression). Implements -Wconversion
11960 /// and -Wsign-compare.
11962 /// \param CC the "context" location of the implicit conversion, i.e.
11963 /// the most specific location of the syntactic entity requiring the implicit
11965 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
11966 // Don't diagnose in unevaluated contexts.
11967 if (isUnevaluatedContext())
// NOTE(review): this listing is elided here — the `return;` body of this
// guard (and of the dependence guard below) is not visible; confirm
// against the upstream file.
11970 // Don't diagnose for value- or type-dependent expressions.
11971 if (E->isTypeDependent() || E->isValueDependent())
11974 // Check for array bounds violations in cases where the check isn't triggered
11975 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
11976 // ArraySubscriptExpr is on the RHS of a variable initialization.
11977 CheckArrayAccess(E);
11979 // This is not the right CC for (e.g.) a variable initialization.
11980 AnalyzeImplicitConversions(*this, E, CC);
11983 /// CheckBoolLikeConversion - Check conversion of given expression to boolean.
11984 /// Input argument E is a logical expression.
// Thin public entry point: forwards to the file-static helper of the same
// name (defined earlier in this file, outside this view). Closing brace of
// this function is elided from this listing.
11985 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
11986 ::CheckBoolLikeConversion(*this, E, CC);
11989 /// Diagnose when expression is an integer constant expression and its evaluation
11990 /// results in integer overflow
11991 void Sema::CheckForIntOverflow (Expr *E) {
11992 // Use a work list to deal with nested struct initializers.
// Worklist-driven walk: pop an expression, evaluate BinaryOperators for
// overflow, and push the children of aggregate initializers / calls /
// ObjC messages so nested expressions are also checked.
11993 SmallVector<Expr *, 2> Exprs(1, E);
// NOTE(review): the `do {` opening this loop (and an early `continue;`
// after the BinaryOperator case) appear elided from this listing —
// confirm against the upstream file.
11996 Expr *OriginalE = Exprs.pop_back_val();
11997 Expr *E = OriginalE->IgnoreParenCasts();
11999 if (isa<BinaryOperator>(E)) {
12000 E->EvaluateForOverflow(Context);
12004 if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
12005 Exprs.append(InitList->inits().begin(), InitList->inits().end());
12006 else if (isa<ObjCBoxedExpr>(OriginalE))
12007 E->EvaluateForOverflow(Context);
12008 else if (auto Call = dyn_cast<CallExpr>(E))
12009 Exprs.append(Call->arg_begin(), Call->arg_end())
12010 else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
12011 Exprs.append(Message->arg_begin(), Message->arg_end());
12012 } while (!Exprs.empty());
// NOTE(review): this entire class is a lossily elided listing — the gaps in
// the embedded line numbers indicate missing lines throughout (access
// specifiers, early returns, closing braces, variable declarations such as
// the `bool Result` used by the short-circuit visitors, and the class's own
// closing `};`). Do not edit code here without first restoring the full
// upstream text. Comments below annotate only what is visible.
12017 /// Visitor for expressions which looks for unsequenced operations on the
12019 class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
12020 using Base = EvaluatedExprVisitor<SequenceChecker>;
12022 /// A tree of sequenced regions within an expression. Two regions are
12023 /// unsequenced if one is an ancestor or a descendent of the other. When we
12024 /// finish processing an expression with sequencing, such as a comma
12025 /// expression, we fold its tree nodes into its parent, since they are
12026 /// unsequenced with respect to nodes we will visit later.
12027 class SequenceTree {
12029 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
12030 unsigned Parent : 31;
12031 unsigned Merged : 1;
12033 SmallVector<Value, 8> Values;
12036 /// A region within an expression which may be sequenced with respect
12037 /// to some other region.
12039 friend class SequenceTree;
12043 explicit Seq(unsigned N) : Index(N) {}
12046 Seq() : Index(0) {}
12049 SequenceTree() { Values.push_back(Value(0)); }
12050 Seq root() const { return Seq(0); }
12052 /// Create a new sequence of operations, which is an unsequenced
12053 /// subset of \p Parent. This sequence of operations is sequenced with
12054 /// respect to other children of \p Parent.
12055 Seq allocate(Seq Parent) {
12056 Values.push_back(Value(Parent.Index));
12057 return Seq(Values.size() - 1);
12060 /// Merge a sequence of operations into its parent.
12061 void merge(Seq S) {
12062 Values[S.Index].Merged = true;
12065 /// Determine whether two operations are unsequenced. This operation
12066 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
12067 /// should have been merged into its parent as appropriate.
12068 bool isUnsequenced(Seq Cur, Seq Old) {
// Walk up from Cur toward the root looking for Old's representative;
// the loop's interior (the equality test / return) is elided here.
12069 unsigned C = representative(Cur.Index);
12070 unsigned Target = representative(Old.Index);
12071 while (C >= Target) {
12074 C = Values[C].Parent;
12080 /// Pick a representative for a sequence.
12081 unsigned representative(unsigned K) {
12082 if (Values[K].Merged)
12083 // Perform path compression as we go.
12084 return Values[K].Parent = representative(Values[K].Parent);
12089 /// An object for which we can track unsequenced uses.
12090 using Object = NamedDecl *;
12092 /// Different flavors of object usage which we track. We only track the
12093 /// least-sequenced usage of each kind.
12095 /// A read of an object. Multiple unsequenced reads are OK.
12098 /// A modification of an object which is sequenced before the value
12099 /// computation of the expression, such as ++n in C++.
12102 /// A modification of an object which is not sequenced before the value
12103 /// computation of the expression, such as n++.
12104 UK_ModAsSideEffect,
12106 UK_Count = UK_ModAsSideEffect + 1
12111 SequenceTree::Seq Seq;
12113 Usage() : Use(nullptr), Seq() {}
12117 Usage Uses[UK_Count];
12119 /// Have we issued a diagnostic for this variable already?
12122 UsageInfo() : Uses(), Diagnosed(false) {}
12124 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;
12128 /// Sequenced regions within the expression.
12131 /// Declaration modifications and references which we have seen.
12132 UsageInfoMap UsageMap;
12134 /// The region we are currently within.
12135 SequenceTree::Seq Region;
12137 /// Filled in with declarations which were modified as a side-effect
12138 /// (that is, post-increment operations).
12139 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;
12141 /// Expressions to check later. We defer checking these to reduce
12143 SmallVectorImpl<Expr *> &WorkList;
12145 /// RAII object wrapping the visitation of a sequenced subexpression of an
12146 /// expression. At the end of this process, the side-effects of the evaluation
12147 /// become sequenced with respect to the value computation of the result, so
12148 /// we downgrade any UK_ModAsSideEffect within the evaluation to
12150 struct SequencedSubexpression {
12151 SequencedSubexpression(SequenceChecker &Self)
12152 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
12153 Self.ModAsSideEffect = &ModAsSideEffect;
12156 ~SequencedSubexpression() {
// On scope exit, promote each side-effect modification recorded during
// this subexpression to UK_ModAsValue and restore the prior usage slot.
12157 for (auto &M : llvm::reverse(ModAsSideEffect)) {
12158 UsageInfo &U = Self.UsageMap[M.first];
12159 auto &SideEffectUsage = U.Uses[UK_ModAsSideEffect];
12160 Self.addUsage(U, M.first, SideEffectUsage.Use, UK_ModAsValue);
12161 SideEffectUsage = M.second;
12163 Self.ModAsSideEffect = OldModAsSideEffect;
12166 SequenceChecker &Self;
12167 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
12168 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
12171 /// RAII object wrapping the visitation of a subexpression which we might
12172 /// choose to evaluate as a constant. If any subexpression is evaluated and
12173 /// found to be non-constant, this allows us to suppress the evaluation of
12174 /// the outer expression.
12175 class EvaluationTracker {
12177 EvaluationTracker(SequenceChecker &Self)
12178 : Self(Self), Prev(Self.EvalTracker) {
12179 Self.EvalTracker = this;
12182 ~EvaluationTracker() {
12183 Self.EvalTracker = Prev;
12185 Prev->EvalOK &= EvalOK;
12188 bool evaluate(const Expr *E, bool &Result) {
12189 if (!EvalOK || E->isValueDependent())
12191 EvalOK = E->EvaluateAsBooleanCondition(
12192 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated());
12197 SequenceChecker &Self;
12198 EvaluationTracker *Prev;
12199 bool EvalOK = true;
12200 } *EvalTracker = nullptr;
12202 /// Find the object which is produced by the specified expression,
12204 Object getObject(Expr *E, bool Mod) const {
12205 E = E->IgnoreParenCasts();
12206 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
12207 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
12208 return getObject(UO->getSubExpr(), Mod);
12209 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
12210 if (BO->getOpcode() == BO_Comma)
12211 return getObject(BO->getRHS(), Mod);
12212 if (Mod && BO->isAssignmentOp())
12213 return getObject(BO->getLHS(), Mod);
12214 } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
12215 // FIXME: Check for more interesting cases, like "x.n = ++x.n".
12216 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
12217 return ME->getMemberDecl();
12218 } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
12219 // FIXME: If this is a reference, map through to its value.
12220 return DRE->getDecl();
12224 /// Note that an object was modified or used by an expression.
12225 void addUsage(UsageInfo &UI, Object O, Expr *Ref, UsageKind UK) {
12226 Usage &U = UI.Uses[UK];
12227 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) {
12228 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
12229 ModAsSideEffect->push_back(std::make_pair(O, U));
12235 /// Check whether a modification or use conflicts with a prior usage.
12236 void checkUsage(Object O, UsageInfo &UI, Expr *Ref, UsageKind OtherKind,
// NOTE(review): the `bool IsModMod` parameter line and early-exit guards
// of this function are elided from this listing.
12241 const Usage &U = UI.Uses[OtherKind];
12242 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq))
12246 Expr *ModOrUse = Ref;
12247 if (OtherKind == UK_Use)
12248 std::swap(Mod, ModOrUse);
12250 SemaRef.DiagRuntimeBehavior(
12251 Mod->getExprLoc(), {Mod, ModOrUse},
12252 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
12253 : diag::warn_unsequenced_mod_use)
12254 << O << SourceRange(ModOrUse->getExprLoc()));
12255 UI.Diagnosed = true;
12258 void notePreUse(Object O, Expr *Use) {
12259 UsageInfo &U = UsageMap[O];
12260 // Uses conflict with other modifications.
12261 checkUsage(O, U, Use, UK_ModAsValue, false);
12264 void notePostUse(Object O, Expr *Use) {
12265 UsageInfo &U = UsageMap[O];
12266 checkUsage(O, U, Use, UK_ModAsSideEffect, false);
12267 addUsage(U, O, Use, UK_Use);
12270 void notePreMod(Object O, Expr *Mod) {
12271 UsageInfo &U = UsageMap[O];
12272 // Modifications conflict with other modifications and with uses.
12273 checkUsage(O, U, Mod, UK_ModAsValue, true);
12274 checkUsage(O, U, Mod, UK_Use, false);
12277 void notePostMod(Object O, Expr *Use, UsageKind UK) {
12278 UsageInfo &U = UsageMap[O];
12279 checkUsage(O, U, Use, UK_ModAsSideEffect, true);
12280 addUsage(U, O, Use, UK);
12284 SequenceChecker(Sema &S, Expr *E, SmallVectorImpl<Expr *> &WorkList)
12285 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
12289 void VisitStmt(Stmt *S) {
12290 // Skip all statements which aren't expressions for now.
12293 void VisitExpr(Expr *E) {
12294 // By default, just recurse to evaluated subexpressions.
12295 Base::VisitStmt(E);
12298 void VisitCastExpr(CastExpr *E) {
12299 Object O = Object();
12300 if (E->getCastKind() == CK_LValueToRValue)
12301 O = getObject(E->getSubExpr(), false);
12310 void VisitSequencedExpressions(Expr *SequencedBefore, Expr *SequencedAfter) {
12311 SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
12312 SequenceTree::Seq AfterRegion = Tree.allocate(Region);
12313 SequenceTree::Seq OldRegion = Region;
12316 SequencedSubexpression SeqBefore(*this);
12317 Region = BeforeRegion;
12318 Visit(SequencedBefore);
12321 Region = AfterRegion;
12322 Visit(SequencedAfter);
12324 Region = OldRegion;
12326 Tree.merge(BeforeRegion);
12327 Tree.merge(AfterRegion);
12330 void VisitArraySubscriptExpr(ArraySubscriptExpr *ASE) {
12331 // C++17 [expr.sub]p1:
12332 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
12333 // expression E1 is sequenced before the expression E2.
12334 if (SemaRef.getLangOpts().CPlusPlus17)
12335 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
12337 Base::VisitStmt(ASE);
12340 void VisitBinComma(BinaryOperator *BO) {
12341 // C++11 [expr.comma]p1:
12342 // Every value computation and side effect associated with the left
12343 // expression is sequenced before every value computation and side
12344 // effect associated with the right expression.
12345 VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
12348 void VisitBinAssign(BinaryOperator *BO) {
12349 // The modification is sequenced after the value computation of the LHS
12350 // and RHS, so check it before inspecting the operands and update the
12352 Object O = getObject(BO->getLHS(), true);
12354 return VisitExpr(BO);
12358 // C++11 [expr.ass]p7:
12359 // E1 op= E2 is equivalent to E1 = E1 op E2, except that E1 is evaluated
12362 // Therefore, for a compound assignment operator, O is considered used
12363 // everywhere except within the evaluation of E1 itself.
12364 if (isa<CompoundAssignOperator>(BO))
12367 Visit(BO->getLHS());
12369 if (isa<CompoundAssignOperator>(BO))
12370 notePostUse(O, BO);
12372 Visit(BO->getRHS());
12374 // C++11 [expr.ass]p1:
12375 // the assignment is sequenced [...] before the value computation of the
12376 // assignment expression.
12377 // C11 6.5.16/3 has no such rule.
12378 notePostMod(O, BO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
12379 : UK_ModAsSideEffect);
12382 void VisitCompoundAssignOperator(CompoundAssignOperator *CAO) {
12383 VisitBinAssign(CAO);
12386 void VisitUnaryPreInc(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
12387 void VisitUnaryPreDec(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
12388 void VisitUnaryPreIncDec(UnaryOperator *UO) {
12389 Object O = getObject(UO->getSubExpr(), true);
12391 return VisitExpr(UO);
12394 Visit(UO->getSubExpr());
12395 // C++11 [expr.pre.incr]p1:
12396 // the expression ++x is equivalent to x+=1
12397 notePostMod(O, UO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
12398 : UK_ModAsSideEffect);
12401 void VisitUnaryPostInc(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
12402 void VisitUnaryPostDec(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
12403 void VisitUnaryPostIncDec(UnaryOperator *UO) {
12404 Object O = getObject(UO->getSubExpr(), true);
12406 return VisitExpr(UO);
12409 Visit(UO->getSubExpr());
12410 notePostMod(O, UO, UK_ModAsSideEffect);
12413 /// Don't visit the RHS of '&&' or '||' if it might not be evaluated.
12414 void VisitBinLOr(BinaryOperator *BO) {
12415 // The side-effects of the LHS of an '&&' are sequenced before the
12416 // value computation of the RHS, and hence before the value computation
12417 // of the '&&' itself, unless the LHS evaluates to zero. We treat them
12418 // as if they were unconditionally sequenced.
12419 EvaluationTracker Eval(*this);
12421 SequencedSubexpression Sequenced(*this);
12422 Visit(BO->getLHS());
// NOTE(review): the `bool Result;` declaration consumed by evaluate()
// below is elided from this listing (here and in the two visitors after).
12426 if (Eval.evaluate(BO->getLHS(), Result)) {
12428 Visit(BO->getRHS());
12430 // Check for unsequenced operations in the RHS, treating it as an
12431 // entirely separate evaluation.
12433 // FIXME: If there are operations in the RHS which are unsequenced
12434 // with respect to operations outside the RHS, and those operations
12435 // are unconditionally evaluated, diagnose them.
12436 WorkList.push_back(BO->getRHS());
12439 void VisitBinLAnd(BinaryOperator *BO) {
12440 EvaluationTracker Eval(*this);
12442 SequencedSubexpression Sequenced(*this);
12443 Visit(BO->getLHS());
12447 if (Eval.evaluate(BO->getLHS(), Result)) {
12449 Visit(BO->getRHS());
12451 WorkList.push_back(BO->getRHS());
12455 // Only visit the condition, unless we can be sure which subexpression will
12457 void VisitAbstractConditionalOperator(AbstractConditionalOperator *CO) {
12458 EvaluationTracker Eval(*this);
12460 SequencedSubexpression Sequenced(*this);
12461 Visit(CO->getCond());
12465 if (Eval.evaluate(CO->getCond(), Result))
12466 Visit(Result ? CO->getTrueExpr() : CO->getFalseExpr());
12468 WorkList.push_back(CO->getTrueExpr());
12469 WorkList.push_back(CO->getFalseExpr());
12473 void VisitCallExpr(CallExpr *CE) {
12474 // C++11 [intro.execution]p15:
12475 // When calling a function [...], every value computation and side effect
12476 // associated with any argument expression, or with the postfix expression
12477 // designating the called function, is sequenced before execution of every
12478 // expression or statement in the body of the function [and thus before
12479 // the value computation of its result].
12480 SequencedSubexpression Sequenced(*this);
12481 Base::VisitCallExpr(CE);
12483 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
12486 void VisitCXXConstructExpr(CXXConstructExpr *CCE) {
12487 // This is a call, so all subexpressions are sequenced before the result.
12488 SequencedSubexpression Sequenced(*this);
12490 if (!CCE->isListInitialization())
12491 return VisitExpr(CCE);
12493 // In C++11, list initializations are sequenced.
12494 SmallVector<SequenceTree::Seq, 32> Elts;
12495 SequenceTree::Seq Parent = Region;
12496 for (CXXConstructExpr::arg_iterator I = CCE->arg_begin(),
12497 E = CCE->arg_end();
12499 Region = Tree.allocate(Parent);
12500 Elts.push_back(Region);
12504 // Forget that the initializers are sequenced.
12506 for (unsigned I = 0; I < Elts.size(); ++I)
12507 Tree.merge(Elts[I]);
12510 void VisitInitListExpr(InitListExpr *ILE) {
12511 if (!SemaRef.getLangOpts().CPlusPlus11)
12512 return VisitExpr(ILE);
12514 // In C++11, list initializations are sequenced.
12515 SmallVector<SequenceTree::Seq, 32> Elts;
12516 SequenceTree::Seq Parent = Region;
12517 for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
12518 Expr *E = ILE->getInit(I);
12520 Region = Tree.allocate(Parent);
12521 Elts.push_back(Region);
12525 // Forget that the initializers are sequenced.
12527 for (unsigned I = 0; I < Elts.size(); ++I)
12528 Tree.merge(Elts[I]);
// Drive the SequenceChecker over a full expression. Constructing the
// checker visits Item; visitors may push deferred subexpressions
// (short-circuit RHS, conditional-operator arms) back onto WorkList,
// and each is then checked as an independent evaluation.
12534 void Sema::CheckUnsequencedOperations(Expr *E) {
12535 SmallVector<Expr *, 8> WorkList;
12536 WorkList.push_back(E);
12537 while (!WorkList.empty()) {
12538 Expr *Item = WorkList.pop_back_val();
12539 SequenceChecker(*this, Item, WorkList);
// Run the battery of full-expression checks: implicit-conversion
// diagnostics, unsequenced-operation detection, integer-overflow
// detection, and any queued misaligned-member diagnostics. The
// SaveAndRestore temporarily marks the context as constant-evaluated
// for constexpr contexts and ConstantExprs.
12543 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
12544 bool IsConstexpr) {
12545 llvm::SaveAndRestore<bool> ConstantContext(
12546 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E));
12547 CheckImplicitConversions(E, CheckLoc);
12548 if (!E->isInstantiationDependent())
12549 CheckUnsequencedOperations(E);
12550 if (!IsConstexpr && !E->isValueDependent())
12551 CheckForIntOverflow(E);
12552 DiagnoseMisalignedMembers();
// Delegate bit-field initialization checking (value-truncation warnings)
// to the file-static AnalyzeBitFieldAssignment helper; the boolean result
// is deliberately discarded here.
// NOTE(review): the third parameter line (`Expr *Init) {`) is elided from
// this listing — confirm against the upstream file.
12555 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
12556 FieldDecl *BitField,
12558 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
// Recursively look through pointers, references, parens and array element
// types of a variably-modified parameter type, and diagnose any use of the
// [*] size modifier, which is only valid in declarations, not definitions.
// NOTE(review): the early `return;` lines and closing braces between the
// recursive cases are elided from this listing.
12561 static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
12562 SourceLocation Loc) {
12563 if (!PType->isVariablyModifiedType())
12565 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) {
12566 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc);
12569 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) {
12570 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc);
12573 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) {
12574 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc);
12578 const ArrayType *AT = S.Context.getAsArrayType(PType);
12582 if (AT->getSizeModifier() != ArrayType::Star) {
12583 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
12587 S.Diag(Loc, diag::err_array_star_in_function_definition);
12590 /// CheckParmsForFunctionDef - Check that the parameters of the given
12591 /// function are appropriate for the definition of a function. This
12592 /// takes care of any checks that cannot be performed on the
12593 /// declaration itself, e.g., that the types of each of the function
12594 /// parameters are complete.
// Returns true if any parameter was found invalid.
12595 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
12596 bool CheckParameterNames) {
12597 bool HasInvalidParm = false;
12598 for (ParmVarDecl *Param : Parameters) {
12599 // C99 6.7.5.3p4: the parameters in a parameter type list in a
12600 // function declarator that is part of a function definition of
12601 // that function shall not have incomplete type.
12603 // This is also C++ [dcl.fct]p6.
12604 if (!Param->isInvalidDecl() &&
12605 RequireCompleteType(Param->getLocation(), Param->getType(),
12606 diag::err_typecheck_decl_incomplete_type)) {
12607 Param->setInvalidDecl();
12608 HasInvalidParm = true;
12611 // C99 6.9.1p5: If the declarator includes a parameter type list, the
12612 // declaration of each parameter shall include an identifier.
12613 if (CheckParameterNames &&
12614 Param->getIdentifier() == nullptr &&
12615 !Param->isImplicit() &&
12616 !getLangOpts().CPlusPlus)
12617 Diag(Param->getLocation(), diag::err_parameter_name_omitted);
12620 // If the function declarator is not part of a definition of that
12621 // function, parameters may have incomplete type and may use the [*]
12622 // notation in their sequences of declarator specifiers to specify
12623 // variable length array types.
12624 QualType PType = Param->getOriginalType();
12625 // FIXME: This diagnostic should point the '[*]' if source-location
12626 // information is added for it.
12627 diagnoseArrayStarInParamType(*this, PType, Param->getLocation());
12629 // If the parameter is a c++ class type and it has to be destructed in the
12630 // callee function, declare the destructor so that it can be called by the
12631 // callee function. Do not perform any direct access check on the dtor here.
12632 if (!Param->isInvalidDecl()) {
12633 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
12634 if (!ClassDecl->isInvalidDecl() &&
12635 !ClassDecl->hasIrrelevantDestructor() &&
12636 !ClassDecl->isDependentContext() &&
12637 ClassDecl->isParamDestroyedInCallee()) {
12638 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
12639 MarkFunctionReferenced(Param->getLocation(), Destructor);
12640 DiagnoseUseOfDecl(Destructor, Param->getLocation());
12645 // Parameters with the pass_object_size attribute only need to be marked
12646 // constant at function definitions. Because we lack information about
12647 // whether we're on a declaration or definition when we're instantiating the
12648 // attribute, we need to check for constness here.
12649 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
12650 if (!Param->getType().isConstQualified())
12651 Diag(Param->getLocation(), diag::err_attribute_pointers_only)
12652 << Attr->getSpelling() << 1;
12654 // Check for parameter names shadowing fields from the class.
12655 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
12656 // The owning context for the parameter should be the function, but we
12657 // want to see if this function's declaration context is a record.
12658 DeclContext *DC = Param->getDeclContext();
12659 if (DC && DC->isFunctionOrMethod()) {
12660 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
12661 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
12662 RD, /*DeclIsField*/ false);
12667 return HasInvalidParm;
12670 /// A helper function to get the alignment of a Decl referred to by DeclRefExpr
// or MemberExpr; used by CheckCastAlign to refine the source alignment.
// NOTE(review): the fall-through `return TypeAlign;` at the end of this
// function is elided from this listing.
12672 static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign,
12673 ASTContext &Context) {
12674 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
12675 return Context.getDeclAlign(DRE->getDecl());
12677 if (const auto *ME = dyn_cast<MemberExpr>(E))
12678 return Context.getDeclAlign(ME->getMemberDecl());
12683 /// CheckCastAlign - Implements -Wcast-align, which warns when a
12684 /// pointer cast increases the alignment requirements.
12685 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
12686 // This is actually a lot of work to potentially be doing on every
12687 // cast; don't do it if we're ignoring -Wcast_align (as is the default).
12688 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
12691 // Ignore dependent types.
12692 if (T->isDependentType() || Op->getType()->isDependentType())
12695 // Require that the destination be a pointer type.
12696 const PointerType *DestPtr = T->getAs<PointerType>();
12697 if (!DestPtr) return;
12699 // If the destination has alignment 1, we're done.
12700 QualType DestPointee = DestPtr->getPointeeType();
12701 if (DestPointee->isIncompleteType()) return;
12702 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
12703 if (DestAlign.isOne()) return;
12705 // Require that the source be a pointer type.
12706 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
12707 if (!SrcPtr) return;
12708 QualType SrcPointee = SrcPtr->getPointeeType();
12710 // Allow casts from cv void*. We already implicitly
12711 // allowed casts to cv void*, since they have alignment 1.
12712 // Also allow casts involving incomplete types, which implicitly
12713 // includes 'void'.
12714 if (SrcPointee->isIncompleteType()) return;
12716 CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee);
12718 if (auto *CE = dyn_cast<CastExpr>(Op)) {
12719 if (CE->getCastKind() == CK_ArrayToPointerDecay)
12720 SrcAlign = getDeclAlign(CE->getSubExpr(), SrcAlign, Context);
12721 } else if (auto *UO = dyn_cast<UnaryOperator>(Op)) {
12722 if (UO->getOpcode() == UO_AddrOf)
12723 SrcAlign = getDeclAlign(UO->getSubExpr(), SrcAlign, Context);
12726 if (SrcAlign >= DestAlign) return;
12728 Diag(TRange.getBegin(), diag::warn_cast_align)
12729 << Op->getType() << T
12730 << static_cast<unsigned>(SrcAlign.getQuantity())
12731 << static_cast<unsigned>(DestAlign.getQuantity())
12732 << TRange << Op->getSourceRange();
12735 /// Check whether this array fits the idiom of a size-one tail padded
12736 /// array member of a struct.
12738 /// We avoid emitting out-of-bounds access warnings for such arrays as they are
12739 /// commonly used to emulate flexible arrays in C89 code.
// NOTE(review): several early-`return false` / closing-brace lines of this
// function are elided from this listing — confirm against upstream.
12740 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size,
12741 const NamedDecl *ND) {
12742 if (Size != 1 || !ND) return false;
12744 const FieldDecl *FD = dyn_cast<FieldDecl>(ND);
12745 if (!FD) return false;
12747 // Don't consider sizes resulting from macro expansions or template argument
12748 // substitution to form C89 tail-padded arrays.
12750 TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
12752 TypeLoc TL = TInfo->getTypeLoc();
12753 // Look through typedefs.
12754 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) {
12755 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
12756 TInfo = TDL->getTypeSourceInfo();
12759 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) {
12760 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr());
12761 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
12767 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext());
12768 if (!RD) return false;
12769 if (RD->isUnion()) return false;
12770 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
12771 if (!CRD->isStandardLayout()) return false;
12774 // See if this is the last field decl in the record.
12775 const Decl *D = FD;
12776 while ((D = D->getNextDeclInContext()))
12777 if (isa<FieldDecl>(D))
12782 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
12783 const ArraySubscriptExpr *ASE,
12784 bool AllowOnePastEnd, bool IndexNegated) {
// Diagnose constant out-of-bounds indexing of constant-size arrays
// (-Warray-bounds).  ASE is null when called for pointer arithmetic rather
// than a real subscript expression; AllowOnePastEnd accepts the legal
// one-past-the-end address (&a[size]).
// NOTE(review): IndexNegated is not referenced in the lines visible here —
// presumably it negates the evaluated index; confirm against full source.
12785 // Already diagnosed by the constant evaluator.
12786 if (isConstantEvaluated())
12789 IndexExpr = IndexExpr->IgnoreParenImpCasts();
12790 if (IndexExpr->isValueDependent())
// The element type as seen by the pointer arithmetic (before stripping
// casts), versus the declared element type of the underlying array.
12793 const Type *EffectiveType =
12794 BaseExpr->getType()->getPointeeOrArrayElementType();
12795 BaseExpr = BaseExpr->IgnoreParenCasts();
12796 const ConstantArrayType *ArrayTy =
12797 Context.getAsConstantArrayType(BaseExpr->getType());
12802 const Type *BaseType = ArrayTy->getElementType().getTypePtr();
12803 if (EffectiveType->isDependentType() || BaseType->isDependentType())
// Only constant indices can be checked here.
12806 Expr::EvalResult Result;
12807 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
12810 llvm::APSInt index = Result.Val.getInt();
// Identify the named array (variable or member) for the trailing note.
12814 const NamedDecl *ND = nullptr;
12815 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
12816 ND = DRE->getDecl();
12817 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
12818 ND = ME->getMemberDecl();
12820 if (index.isUnsigned() || !index.isNegative()) {
12821 // It is possible that the type of the base expression after
12822 // IgnoreParenCasts is incomplete, even though the type of the base
12823 // expression before IgnoreParenCasts is complete (see PR39746 for an
12824 // example). In this case we have no information about whether the array
12825 // access exceeds the array bounds. However we can still diagnose an array
12826 // access which precedes the array bounds.
12827 if (BaseType->isIncompleteType())
12830 llvm::APInt size = ArrayTy->getSize();
12831 if (!size.isStrictlyPositive())
12834 if (BaseType != EffectiveType) {
12835 // Make sure we're comparing apples to apples when comparing index to size
12836 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
12837 uint64_t array_typesize = Context.getTypeSize(BaseType);
12838 // Handle ptrarith_typesize being zero, such as when casting to void*
12839 if (!ptrarith_typesize) ptrarith_typesize = 1;
12840 if (ptrarith_typesize != array_typesize) {
12841 // There's a cast to a different size type involved
12842 uint64_t ratio = array_typesize / ptrarith_typesize;
12843 // TODO: Be smarter about handling cases where array_typesize is not a
12844 // multiple of ptrarith_typesize
12845 if (ptrarith_typesize * ratio == array_typesize)
12846 size *= llvm::APInt(size.getBitWidth(), ratio);
// Widen index and size to a common bit width before comparing.
12850 if (size.getBitWidth() > index.getBitWidth())
12851 index = index.zext(size.getBitWidth());
12852 else if (size.getBitWidth() < index.getBitWidth())
12853 size = size.zext(index.getBitWidth());
12855 // For array subscripting the index must be less than size, but for pointer
12856 // arithmetic also allow the index (offset) to be equal to size since
12857 // computing the next address after the end of the array is legal and
12858 // commonly done e.g. in C++ iterators and range-based for loops.
12859 if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
12862 // Also don't warn for arrays of size 1 which are members of some
12863 // structure. These are often used to approximate flexible arrays in C89
12865 if (IsTailPaddedMemberArray(*this, size, ND))
12868 // Suppress the warning if the subscript expression (as identified by the
12869 // ']' location) and the index expression are both from macro expansions
12870 // within a system header.
12872 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
12873 ASE->getRBracketLoc());
12874 if (SourceMgr.isInSystemHeader(RBracketLoc)) {
12875 SourceLocation IndexLoc =
12876 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc());
12877 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
// Choose the pointer-arithmetic vs. array-subscript flavour of the
// "exceeds bounds" warning.
12882 unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds;
12884 DiagID = diag::warn_array_index_exceeds_bounds;
12886 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
12887 PDiag(DiagID) << index.toString(10, true)
12888 << size.toString(10, true)
12889 << (unsigned)size.getLimitedValue(~0U)
12890 << IndexExpr->getSourceRange());
// Negative index: the access precedes the start of the array.
12892 unsigned DiagID = diag::warn_array_index_precedes_bounds;
12894 DiagID = diag::warn_ptr_arith_precedes_bounds;
12895 if (index.isNegative()) index = -index;
12898 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
12899 PDiag(DiagID) << index.toString(10, true)
12900 << IndexExpr->getSourceRange());
12904 // Try harder to find a NamedDecl to point at in the note.
12905 while (const ArraySubscriptExpr *ASE =
12906 dyn_cast<ArraySubscriptExpr>(BaseExpr))
12907 BaseExpr = ASE->getBase()->IgnoreParenCasts();
12908 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
12909 ND = DRE->getDecl();
12910 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
12911 ND = ME->getMemberDecl();
12915 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
12916 PDiag(diag::note_array_index_out_of_bounds)
12917 << ND->getDeclName());
12920 void Sema::CheckArrayAccess(const Expr *expr) {
// Recursively walk an expression and run the constant-index bounds check on
// every array subscript (and OpenMP array-section lower bound) it contains.
12921 int AllowOnePastEnd = 0;
12923 expr = expr->IgnoreParenImpCasts();
12924 switch (expr->getStmtClass()) {
12925 case Stmt::ArraySubscriptExprClass: {
12926 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
12927 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
12928 AllowOnePastEnd > 0);
// Keep descending into the base, e.g. for a[1][2].
12929 expr = ASE->getBase();
12932 case Stmt::MemberExprClass: {
// Check the base of a member access, e.g. a[4].x.
12933 expr = cast<MemberExpr>(expr)->getBase();
12936 case Stmt::OMPArraySectionExprClass: {
12937 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
12938 if (ASE->getLowerBound())
12939 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
12940 /*ASE=*/nullptr, AllowOnePastEnd > 0);
12943 case Stmt::UnaryOperatorClass: {
12944 // Only unwrap the * and & unary operators
12945 const UnaryOperator *UO = cast<UnaryOperator>(expr);
12946 expr = UO->getSubExpr();
12947 switch (UO->getOpcode()) {
// Both arms of a conditional operator are checked independently.
12959 case Stmt::ConditionalOperatorClass: {
12960 const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
12961 if (const Expr *lhs = cond->getLHS())
12962 CheckArrayAccess(lhs);
12963 if (const Expr *rhs = cond->getRHS())
12964 CheckArrayAccess(rhs);
12967 case Stmt::CXXOperatorCallExprClass: {
// Overloaded operator call: check each argument expression.
12968 const auto *OCE = cast<CXXOperatorCallExpr>(expr);
12969 for (const auto *Arg : OCE->arguments())
12970 CheckArrayAccess(Arg);
12979 //===--- CHECK: Objective-C retain cycles ----------------------------------//
12983 struct RetainCycleOwner {
// Describes the object that would be over-retained in a retain cycle: the
// strongly-owned variable plus a source location/range for diagnostics.
// Indirect is true when ownership goes through an ivar or property rather
// than the variable itself.
12984 VarDecl *Variable = nullptr;
12986 SourceLocation Loc;
12987 bool Indirect = false;
12989 RetainCycleOwner() = default;
12991 void setLocsFrom(Expr *e) {
// Point the diagnostic at the expression that names the owner.
12992 Loc = e->getExprLoc();
12993 Range = e->getSourceRange();
12999 /// Consider whether capturing the given variable can possibly lead to
13000 /// a retain cycle.
13001 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
13002 // In ARC, it's captured strongly iff the variable has __strong
13003 // lifetime. In MRR, it's captured strongly if the variable is
13004 // __block and has an appropriate type.
13005 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
// Record var as the potential cycle owner and point diagnostics at the
// referencing expression.
13008 owner.Variable = var;
13010 owner.setLocsFrom(ref);
13014 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
// Walk a receiver expression looking for something strongly owned (a
// __strong variable, an ivar chain, or a retaining property) and fill in
// `owner` accordingly.  Returns true when an owner was found.
13016 e = e->IgnoreParens();
13017 if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
13018 switch (cast->getCastKind()) {
// Look through value-preserving casts only.
13020 case CK_LValueBitCast:
13021 case CK_LValueToRValue:
13022 case CK_ARCReclaimReturnedObject:
13023 e = cast->getSubExpr();
13031 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
13032 ObjCIvarDecl *ivar = ref->getDecl();
13033 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
13036 // Try to find a retain cycle in the base.
13037 if (!findRetainCycleOwner(S, ref->getBase(), owner))
13040 if (ref->isFreeIvar()) owner.setLocsFrom(ref);
// Ownership through an ivar is indirect.
13041 owner.Indirect = true;
13045 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
13046 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
13047 if (!var) return false;
13048 return considerVariable(var, ref, owner);
13051 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
13052 if (member->isArrow()) return false;
13054 // Don't count this as an indirect ownership.
13055 e = member->getBase();
13059 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
13060 // Only pay attention to pseudo-objects on property references.
13061 ObjCPropertyRefExpr *pre
13062 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
13064 if (!pre) return false;
13065 if (pre->isImplicitProperty()) return false;
13066 ObjCPropertyDecl *property = pre->getExplicitProperty();
// Only retaining properties (or ones backed by a __strong ivar) can keep
// the receiver alive.
13067 if (!property->isRetaining() &&
13068 !(property->getPropertyIvarDecl() &&
13069 property->getPropertyIvarDecl()->getType()
13070 .getObjCLifetime() == Qualifiers::OCL_Strong))
13073 owner.Indirect = true;
13074 if (pre->isSuperReceiver()) {
// [super prop] receiver: the owner is the current method's self.
13075 owner.Variable = S.getCurMethodDecl()->getSelfDecl();
13076 if (!owner.Variable)
13078 owner.Loc = pre->getLocation();
13079 owner.Range = pre->getSourceRange();
// Otherwise recurse into the underlying receiver expression.
13082 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
13083 ->getSourceExpr());
13095 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
// Scans a block body for an expression that captures the owner variable;
// the capturing reference is remembered in `Capturer`.  VarWillBeReased is
// set when the block assigns 0 (nil) to the variable, which breaks any
// potential cycle.
// NOTE(review): the `Variable` member is initialized by the constructor
// below, but its declaration is not visible in this excerpt — confirm.
13096 ASTContext &Context;
13098 Expr *Capturer = nullptr;
13099 bool VarWillBeReased = false;
13101 FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
13102 : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
13103 Context(Context), Variable(variable) {}
13105 void VisitDeclRefExpr(DeclRefExpr *ref) {
// Remember only the first direct reference to the variable.
13106 if (ref->getDecl() == Variable && !Capturer)
13110 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
13111 if (Capturer) return;
13112 Visit(ref->getBase());
13113 if (Capturer && ref->isFreeIvar())
13117 void VisitBlockExpr(BlockExpr *block) {
13118 // Look inside nested blocks
13119 if (block->getBlockDecl()->capturesVariable(Variable))
13120 Visit(block->getBlockDecl()->getBody());
13123 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
13124 if (Capturer) return;
13125 if (OVE->getSourceExpr())
13126 Visit(OVE->getSourceExpr());
13129 void VisitBinaryOperator(BinaryOperator *BinOp) {
// Detect `Variable = 0` inside the block: reassigning the variable to nil
// means the cycle does not survive.
13130 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
13132 Expr *LHS = BinOp->getLHS();
13133 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
13134 if (DRE->getDecl() != Variable)
13136 if (Expr *RHS = BinOp->getRHS()) {
13137 RHS = RHS->IgnoreParenCasts();
13138 llvm::APSInt Value;
13140 (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0);
13148 /// Check whether the given argument is a block which captures a
13150 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
// Returns the expression inside `e` that captures owner.Variable, or null
// when `e` is not a block (or a copy of one) capturing it.
13151 assert(owner.Variable && owner.Loc.isValid());
13153 e = e->IgnoreParenCasts();
13155 // Look through [^{...} copy] and Block_copy(^{...}).
13156 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
13157 Selector Cmd = ME->getSelector();
13158 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
13159 e = ME->getInstanceReceiver();
13162 e = e->IgnoreParenCasts();
13164 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
13165 if (CE->getNumArgs() == 1) {
13166 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
13168 const IdentifierInfo *FnI = Fn->getIdentifier();
13169 if (FnI && FnI->isStr("_Block_copy")) {
13170 e = CE->getArg(0)->IgnoreParenCasts();
13176 BlockExpr *block = dyn_cast<BlockExpr>(e);
13177 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
13180 FindCaptureVisitor visitor(S.Context, owner.Variable);
13181 visitor.Visit(block->getBlockDecl()->getBody());
// A reassignment to nil inside the block suppresses the diagnostic.
13182 return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
13185 static void diagnoseRetainCycle(Sema &S, Expr *capturer,
13186 RetainCycleOwner &owner) {
// Emit the retain-cycle warning at the capturing expression, plus a note
// pointing at the owner (directly- or indirectly-retained variable).
13188 assert(owner.Variable && owner.Loc.isValid());
13190 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
13191 << owner.Variable << capturer->getSourceRange();
13192 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
13193 << owner.Indirect << owner.Range;
13196 /// Check for a keyword selector that starts with the word 'add' or
13198 static bool isSetterLikeSelector(Selector sel) {
// "Setter-like": a keyword selector whose first slot, after stripping
// leading underscores, begins with "set" or "add" followed by a
// non-lowercase character (a word boundary).
13199 if (sel.isUnarySelector()) return false;
13201 StringRef str = sel.getNameForSlot(0);
13202 while (!str.empty() && str.front() == '_') str = str.substr(1);
13203 if (str.startswith("set"))
13204 str = str.substr(3);
13205 else if (str.startswith("add")) {
13206 // Specially whitelist 'addOperationWithBlock:'.
13207 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
13209 str = str.substr(3);
// "set"/"add" alone, or followed by an uppercase letter, is setter-like.
13214 if (str.empty()) return true;
13215 return !isLowercase(str.front());
13218 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S,
13219 ObjCMessageExpr *Message) {
// For NSMutableArray mutation messages, return the index of the argument
// that is inserted into the container (None for other messages).
13220 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
13221 Message->getReceiverInterface(),
13222 NSAPI::ClassId_NSMutableArray);
13223 if (!IsMutableArray) {
13227 Selector Sel = Message->getSelector();
13229 Optional<NSAPI::NSArrayMethodKind> MKOpt =
13230 S.NSAPIObj->getNSArrayMethodKind(Sel);
13235 NSAPI::NSArrayMethodKind MK = *MKOpt;
// Mutation selectors whose inserted-object argument position is known.
13238 case NSAPI::NSMutableArr_addObject:
13239 case NSAPI::NSMutableArr_insertObjectAtIndex:
13240 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
13242 case NSAPI::NSMutableArr_replaceObjectAtIndex:
13253 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S,
13254 ObjCMessageExpr *Message) {
// Same idea as GetNSMutableArrayArgumentIndex, but for NSMutableDictionary
// setter-style messages.
13255 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
13256 Message->getReceiverInterface(),
13257 NSAPI::ClassId_NSMutableDictionary);
13258 if (!IsMutableDictionary) {
13262 Selector Sel = Message->getSelector();
13264 Optional<NSAPI::NSDictionaryMethodKind> MKOpt =
13265 S.NSAPIObj->getNSDictionaryMethodKind(Sel);
13270 NSAPI::NSDictionaryMethodKind MK = *MKOpt;
// Mutation selectors whose inserted-object argument position is known.
13273 case NSAPI::NSMutableDict_setObjectForKey:
13274 case NSAPI::NSMutableDict_setValueForKey:
13275 case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
13285 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
// Same idea as the array/dictionary helpers above, for NSMutableSet and
// NSMutableOrderedSet mutation messages.
13286 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
13287 Message->getReceiverInterface(),
13288 NSAPI::ClassId_NSMutableSet);
13290 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
13291 Message->getReceiverInterface(),
13292 NSAPI::ClassId_NSMutableOrderedSet);
13293 if (!IsMutableSet && !IsMutableOrderedSet) {
13297 Selector Sel = Message->getSelector();
13299 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel);
13304 NSAPI::NSSetMethodKind MK = *MKOpt;
// Mutation selectors whose inserted-object argument position is known.
13307 case NSAPI::NSMutableSet_addObject:
13308 case NSAPI::NSOrderedSet_setObjectAtIndex:
13309 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
13310 case NSAPI::NSOrderedSet_insertObjectAtIndex:
13312 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
13319 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
// Warn when a mutable Foundation container is inserted into itself
// (e.g. [array addObject:array]), which creates an ownership cycle.
13320 if (!Message->isInstanceMessage()) {
13324 Optional<int> ArgOpt;
// Find which argument (if any) is the object being inserted.
13326 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
13327 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
13328 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
13332 int ArgIndex = *ArgOpt;
13334 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
13335 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
13336 Arg = OE->getSourceExpr()->IgnoreImpCasts();
13339 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
// [super addObject:self] also inserts the container into itself.
13340 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
13341 if (ArgRE->isObjCSelfExpr()) {
13342 Diag(Message->getSourceRange().getBegin(),
13343 diag::warn_objc_circular_container)
13344 << ArgRE->getDecl() << StringRef("'super'");
13348 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();
13350 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
13351 Receiver = OE->getSourceExpr()->IgnoreImpCasts();
// Receiver and argument referring to the same variable or the same ivar
// both count as self-insertion.
13354 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
13355 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
13356 if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
13357 ValueDecl *Decl = ReceiverRE->getDecl();
13358 Diag(Message->getSourceRange().getBegin(),
13359 diag::warn_objc_circular_container)
13361 if (!ArgRE->isObjCSelfExpr()) {
13362 Diag(Decl->getLocation(),
13363 diag::note_objc_circular_container_declared_here)
13368 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
13369 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
13370 if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
13371 ObjCIvarDecl *Decl = IvarRE->getDecl();
13372 Diag(Message->getSourceRange().getBegin(),
13373 diag::warn_objc_circular_container)
13375 Diag(Decl->getLocation(),
13376 diag::note_objc_circular_container_declared_here)
13384 /// Check a message send to see if it's likely to cause a retain cycle.
13385 void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
13386 // Only check instance methods whose selector looks like a setter.
13387 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
13390 // Try to find a variable that the receiver is strongly owned by.
13391 RetainCycleOwner owner;
13392 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
13393 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
// Super receiver: the owner is the current method's self.
13396 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
13397 owner.Variable = getCurMethodDecl()->getSelfDecl();
13398 owner.Loc = msg->getSuperLoc();
13399 owner.Range = msg->getSuperLoc();
13402 // Check whether the receiver is captured by any of the arguments.
13403 const ObjCMethodDecl *MD = msg->getMethodDecl();
13404 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
13405 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
13406 // noescape blocks should not be retained by the method.
13407 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
13409 return diagnoseRetainCycle(*this, capturer, owner);
13414 /// Check a property assign to see if it's likely to cause a retain cycle.
13415 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
13416 RetainCycleOwner owner;
13417 if (!findRetainCycleOwner(*this, receiver, owner))
// Warn if the assigned value is a block that captures the owner.
13420 if (Expr *capturer = findCapturingExpr(*this, argument, owner))
13421 diagnoseRetainCycle(*this, capturer, owner);
13424 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
// Check a variable initializer for a retain cycle between Var and a block
// captured inside Init.
13425 RetainCycleOwner Owner;
13426 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner))
13429 // Because we don't have an expression for the variable, we have to set the
13430 // location explicitly here.
13431 Owner.Loc = Var->getLocation();
13432 Owner.Range = Var->getSourceRange();
13434 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner))
13435 diagnoseRetainCycle(*this, Capturer, Owner);
13438 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
13439 Expr *RHS, bool isProperty) {
13440 // Check if RHS is an Objective-C object literal, which also can get
13441 // immediately zapped in a weak reference. Note that we explicitly
13442 // allow ObjCStringLiterals, since those are designed to never really die.
13443 RHS = RHS->IgnoreParenImpCasts();
13445 // This enum needs to match with the 'select' in
13446 // warn_objc_arc_literal_assign (off-by-1).
13447 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS);
13448 if (Kind == Sema::LK_String || Kind == Sema::LK_None)
// Emit the warning; isProperty selects the property vs. variable wording.
13451 S.Diag(Loc, diag::warn_arc_literal_assign)
13453 << (isProperty ? 0 : 1)
13454 << RHS->getSourceRange();
13459 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc,
13460 Qualifiers::ObjCLifetime LT,
13461 Expr *RHS, bool isProperty) {
// Warn about assigning a +1 (retained) object, or a weakly-held object
// literal, into a __weak or __unsafe_unretained location where it may die
// immediately.  Returns true if a diagnostic was emitted.
13462 // Strip off any implicit cast added to get to the one ARC-specific.
13463 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
13464 if (cast->getCastKind() == CK_ARCConsumeObject) {
13465 S.Diag(Loc, diag::warn_arc_retained_assign)
13466 << (LT == Qualifiers::OCL_ExplicitNone)
13467 << (isProperty ? 0 : 1)
13468 << RHS->getSourceRange();
13471 RHS = cast->getSubExpr();
// Object literals assigned to __weak also warrant a warning.
13474 if (LT == Qualifiers::OCL_Weak &&
13475 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty))
13481 bool Sema::checkUnsafeAssigns(SourceLocation Loc,
13482 QualType LHS, Expr *RHS) {
// Entry point for checking an assignment into a __weak/__unsafe_unretained
// lvalue of type LHS.  Returns true if a diagnostic was emitted.
13483 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();
13485 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
13488 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false))
13494 void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
13495 Expr *LHS, Expr *RHS) {
// Check an assignment expression for unsafe ARC semantics, with special
// handling for Objective-C property references on the LHS.
13497 // PropertyRef on LHS type need be directly obtained from
13498 // its declaration as it has a PseudoType.
13499 ObjCPropertyRefExpr *PRE
13500 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens());
13501 if (PRE && !PRE->isImplicitProperty()) {
13502 const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
13504 LHSType = PD->getType();
13507 if (LHSType.isNull())
13508 LHSType = LHS->getType();
13510 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();
13512 if (LT == Qualifiers::OCL_Weak) {
// Writing through a weak lvalue counts as a safe use for the
// -Warc-repeated-use-of-weak analysis.
13513 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
13514 getCurFunction()->markSafeWeakUse(LHS);
13517 if (checkUnsafeAssigns(Loc, LHSType, RHS))
13520 // FIXME. Check for other life times.
13521 if (LT != Qualifiers::OCL_None)
13525 if (PRE->isImplicitProperty())
13527 const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
13531 unsigned Attributes = PD->getPropertyAttributes();
13532 if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) {
13533 // when 'assign' attribute was not explicitly specified
13534 // by user, ignore it and rely on property type itself
13535 // for lifetime info.
13536 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
13537 if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) &&
13538 LHSType->isObjCRetainableType())
// An 'assign' property receiving a +1 (retained) value will leak/dangle.
13541 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
13542 if (cast->getCastKind() == CK_ARCConsumeObject) {
13543 Diag(Loc, diag::warn_arc_retained_property_assign)
13544 << RHS->getSourceRange();
13547 RHS = cast->getSubExpr();
13550 else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) {
13551 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true))
13557 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//
13559 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
13560 SourceLocation StmtLoc,
13561 const NullStmt *Body) {
// Heuristic filter for -Wempty-body: only diagnose when the null statement
// sits on the same line as the controlling statement and was not produced
// by a macro that expands to nothing.
13562 // Do not warn if the body is a macro that expands to nothing, e.g:
13567 if (Body->hasLeadingEmptyMacro())
13570 // Get line numbers of statement and body.
13571 bool StmtLineInvalid;
13572 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc,
13574 if (StmtLineInvalid)
13577 bool BodyLineInvalid;
13578 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(),
13580 if (BodyLineInvalid)
13583 // Warn if null statement and body are on the same line.
13584 if (StmtLine != BodyLine)
13590 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
// Emit DiagID plus a note when a statement's body is an empty null
// statement on the same line as the statement itself.
13593 // Since this is a syntactic check, don't emit diagnostic for template
13594 // instantiations, this just adds noise.
13595 if (CurrentInstantiationScope)
13598 // The body should be a null statement.
13599 const NullStmt *NBody = dyn_cast<NullStmt>(Body);
13603 // Do the usual checks.
13604 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
13607 Diag(NBody->getSemiLoc(), DiagID);
13608 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
13611 void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
13612 const Stmt *PossibleBody) {
// Warn about `for(...);` / `while(...);` whose empty body is probably a
// typo, judged by the shape of the statement that follows the loop.
13613 assert(!CurrentInstantiationScope); // Ensured by caller
13615 SourceLocation StmtLoc;
13618 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
13619 StmtLoc = FS->getRParenLoc();
13620 Body = FS->getBody();
13621 DiagID = diag::warn_empty_for_body;
13622 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
13623 StmtLoc = WS->getCond()->getSourceRange().getEnd();
13624 Body = WS->getBody();
13625 DiagID = diag::warn_empty_while_body;
13627 return; // Neither `for' nor `while'.
13629 // The body should be a null statement.
13630 const NullStmt *NBody = dyn_cast<NullStmt>(Body);
13634 // Skip expensive checks if diagnostic is disabled.
13635 if (Diags.isIgnored(DiagID, NBody->getSemiLoc()))
13638 // Do the usual checks.
13639 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
13642 // `for(...);' and `while(...);' are popular idioms, so in order to keep
13643 // noise level low, emit diagnostics only if for/while is followed by a
13644 // CompoundStmt, e.g.:
13645 // for (int i = 0; i < n; i++);
13649 // or if for/while is followed by a statement with more indentation
13650 // than for/while itself:
13651 // for (int i = 0; i < n; i++);
13653 bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
13654 if (!ProbableTypo) {
13655 bool BodyColInvalid;
13656 unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
13657 PossibleBody->getBeginLoc(), &BodyColInvalid);
13658 if (BodyColInvalid)
13661 bool StmtColInvalid;
13663 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid);
13664 if (StmtColInvalid)
// Deeper indentation of the following statement suggests it was meant to
// be the loop body.
13667 if (BodyCol > StmtCol)
13668 ProbableTypo = true;
13671 if (ProbableTypo) {
13672 Diag(NBody->getSemiLoc(), DiagID);
13673 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
13677 //===--- CHECK: Warn on self move with std::move. -------------------------===//
13679 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
13680 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
13681 SourceLocation OpLoc) {
13682 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc))
13685 if (inTemplateInstantiation())
13688 // Strip parens and casts away.
13689 LHSExpr = LHSExpr->IgnoreParenImpCasts();
13690 RHSExpr = RHSExpr->IgnoreParenImpCasts();
13692 // Check for a call expression
13693 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
13694 if (!CE || CE->getNumArgs() != 1)
13697 // Check for a call to std::move
13698 if (!CE->isCallToStdMove())
13701 // Get argument from std::move
13702 RHSExpr = CE->getArg(0);
13704 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
13705 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
13707 // Two DeclRefExpr's, check that the decls are the same.
13708 if (LHSDeclRef && RHSDeclRef) {
13709 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
13711 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
13712 RHSDeclRef->getDecl()->getCanonicalDecl())
13715 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
13716 << LHSExpr->getSourceRange()
13717 << RHSExpr->getSourceRange();
13721 // Member variables require a different approach to check for self moves.
13722 // MemberExpr's are the same if every nested MemberExpr refers to the same
13723 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or
13724 // the base Expr's are CXXThisExpr's.
13725 const Expr *LHSBase = LHSExpr;
13726 const Expr *RHSBase = RHSExpr;
13727 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
13728 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
13729 if (!LHSME || !RHSME)
13732 while (LHSME && RHSME) {
13733 if (LHSME->getMemberDecl()->getCanonicalDecl() !=
13734 RHSME->getMemberDecl()->getCanonicalDecl())
13737 LHSBase = LHSME->getBase();
13738 RHSBase = RHSME->getBase();
13739 LHSME = dyn_cast<MemberExpr>(LHSBase);
13740 RHSME = dyn_cast<MemberExpr>(RHSBase);
13743 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
13744 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
13745 if (LHSDeclRef && RHSDeclRef) {
13746 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
13748 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
13749 RHSDeclRef->getDecl()->getCanonicalDecl())
13752 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
13753 << LHSExpr->getSourceRange()
13754 << RHSExpr->getSourceRange();
13758 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
13759 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
13760 << LHSExpr->getSourceRange()
13761 << RHSExpr->getSourceRange();
13764 //===--- Layout compatibility ----------------------------------------------//
13766 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
13768 /// Check if two enumeration types are layout-compatible.
13769 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
13770 // C++11 [dcl.enum] p8:
13771 // Two enumeration types are layout-compatible if they have the same
13772 // underlying type.
// Incomplete enums have no underlying type, so they never compare equal.
13773 return ED1->isComplete() && ED2->isComplete() &&
13774 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
13777 /// Check if two fields are layout-compatible.
13778 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
13779 FieldDecl *Field2) {
// Fields must have layout-compatible types, matching bit-field-ness and,
// for bit-fields, identical widths.
13780 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
13783 if (Field1->isBitField() != Field2->isBitField())
13786 if (Field1->isBitField()) {
13787 // Make sure that the bit-fields are the same length.
13788 unsigned Bits1 = Field1->getBitWidthValue(C);
13789 unsigned Bits2 = Field2->getBitWidthValue(C);
13791 if (Bits1 != Bits2)
13798 /// Check if two standard-layout structs are layout-compatible.
13799 /// (C++11 [class.mem] p17)
13800 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
13802 // If both records are C++ classes, check that base classes match.
13803 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
13804 // If one of records is a CXXRecordDecl we are in C++ mode,
13805 // thus the other one is a CXXRecordDecl, too.
13806 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
13807 // Check number of base classes.
13808 if (D1CXX->getNumBases() != D2CXX->getNumBases())
13811 // Check the base classes.
13812 for (CXXRecordDecl::base_class_const_iterator
13813 Base1 = D1CXX->bases_begin(),
13814 BaseEnd1 = D1CXX->bases_end(),
13815 Base2 = D2CXX->bases_begin();
13817 ++Base1, ++Base2) {
13818 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
13821 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
13822 // If only RD2 is a C++ class, it should have zero base classes.
13823 if (D2CXX->getNumBases() > 0)
13827 // Check the fields.
13828 RecordDecl::field_iterator Field2 = RD2->field_begin(),
13829 Field2End = RD2->field_end(),
13830 Field1 = RD1->field_begin(),
13831 Field1End = RD1->field_end();
// Fields are compared pairwise in declaration order.
13832 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
13833 if (!isLayoutCompatible(C, *Field1, *Field2))
// Both sequences must be exhausted together: same number of fields.
13836 if (Field1 != Field1End || Field2 != Field2End)
13842 /// Check if two standard-layout unions are layout-compatible.
13843 /// (C++11 [class.mem] p18)
13844 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
// Unions are compatible when their members can be matched one-to-one with
// layout-compatible partners, in any order.
13846 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
13847 for (auto *Field2 : RD2->fields())
13848 UnmatchedFields.insert(Field2);
13850 for (auto *Field1 : RD1->fields()) {
13851 llvm::SmallPtrSet<FieldDecl *, 8>::iterator
13852 I = UnmatchedFields.begin(),
13853 E = UnmatchedFields.end();
// Find a compatible, not-yet-claimed partner for Field1 in RD2.
13855 for ( ; I != E; ++I) {
13856 if (isLayoutCompatible(C, Field1, *I)) {
13857 bool Result = UnmatchedFields.erase(*I);
// All of RD2's fields must have been claimed for the unions to match.
13867 return UnmatchedFields.empty();
13870 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
// Dispatch to the union or struct flavour; union-ness must agree first.
13872 if (RD1->isUnion() != RD2->isUnion())
13875 if (RD1->isUnion())
13876 return isLayoutCompatibleUnion(C, RD1, RD2);
13878 return isLayoutCompatibleStruct(C, RD1, RD2);
13881 /// Check if two types are layout-compatible in C++11 sense.
13882 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
13883 if (T1.isNull() || T2.isNull())
13886 // C++11 [basic.types] p11:
13887 // If two types T1 and T2 are the same type, then T1 and T2 are
13888 // layout-compatible types.
13889 if (C.hasSameType(T1, T2))
// Compare canonical, unqualified types from here on.
13892 T1 = T1.getCanonicalType().getUnqualifiedType();
13893 T2 = T2.getCanonicalType().getUnqualifiedType();
13895 const Type::TypeClass TC1 = T1->getTypeClass();
13896 const Type::TypeClass TC2 = T2->getTypeClass();
13901 if (TC1 == Type::Enum) {
13902 return isLayoutCompatible(C,
13903 cast<EnumType>(T1)->getDecl(),
13904 cast<EnumType>(T2)->getDecl());
13905 } else if (TC1 == Type::Record) {
// Only standard-layout records can be layout-compatible.
13906 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
13909 return isLayoutCompatible(C,
13910 cast<RecordType>(T1)->getDecl(),
13911 cast<RecordType>(T2)->getDecl());
13917 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
13919 /// Given a type tag expression find the type tag itself.
13921 /// \param TypeExpr Type tag expression, as it appears in user's code.
13923 /// \param VD Declaration of an identifier that appears in a type tag.
13925 /// \param MagicValue Type tag magic value.
13927 /// \param isConstantEvaluated whether the evaluation should be performed in
13929 /// constant context.
13930 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
13931 const ValueDecl **VD, uint64_t *MagicValue,
13932 bool isConstantEvaluated) {
// Strip wrappers and walk through address-of/deref operators, comma
// operators and constant-foldable conditionals until the underlying
// DeclRefExpr (-> *VD) or integer literal (-> *MagicValue) is found.
13937 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();
13939 switch (TypeExpr->getStmtClass()) {
13940 case Stmt::UnaryOperatorClass: {
13941 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
13942 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
13943 TypeExpr = UO->getSubExpr();
13949 case Stmt::DeclRefExprClass: {
13950 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
13951 *VD = DRE->getDecl();
13955 case Stmt::IntegerLiteralClass: {
13956 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
13957 llvm::APInt MagicValueAPInt = IL->getValue();
// Magic values wider than 64 bits cannot be represented.
13958 if (MagicValueAPInt.getActiveBits() <= 64) {
13959 *MagicValue = MagicValueAPInt.getZExtValue();
13965 case Stmt::BinaryConditionalOperatorClass:
13966 case Stmt::ConditionalOperatorClass: {
// Follow whichever arm the constant-folded condition selects.
13967 const AbstractConditionalOperator *ACO =
13968 cast<AbstractConditionalOperator>(TypeExpr);
13970 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
13971 isConstantEvaluated)) {
13973 TypeExpr = ACO->getTrueExpr();
13975 TypeExpr = ACO->getFalseExpr();
13981 case Stmt::BinaryOperatorClass: {
13982 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
13983 if (BO->getOpcode() == BO_Comma) {
// The value of a comma expression is its right operand.
13984 TypeExpr = BO->getRHS();
13996 /// Retrieve the C type corresponding to type tag TypeExpr.
13998 /// \param TypeExpr Expression that specifies a type tag.
14000 /// \param MagicValues Registered magic values.
14002 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
14005 /// \param TypeInfo Information about the corresponding C type.
14007 /// \param isConstantEvaluated whether the evaluation should be performed in
14008 /// constant context.
14010 /// \returns true if the corresponding C type was found.
// Resolve a type-tag expression to its registered C type description.
// The tag is first located with FindTypeTagExpr. If it names a declaration
// bearing the type_tag_for_datatype attribute, that attribute supplies the
// type information (FoundWrongKind is set when the attribute's kind differs
// from ArgumentKind). Otherwise the (ArgumentKind, MagicValue) pair is looked
// up in MagicValues. Returns true when TypeInfo has been filled in.
// NOTE(review): this extract is missing some lines of the original (early
// returns and null checks on VD/MagicValues); comments describe only the
// statements shown.
14011 static bool GetMatchingCType(
14012 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
14013 const ASTContext &Ctx,
14014 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
14016 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
14017 bool isConstantEvaluated) {
14018 FoundWrongKind = false;
14020 // Variable declaration that has type_tag_for_datatype attribute.
14021 const ValueDecl *VD = nullptr;
14023 uint64_t MagicValue;
// Locate the tag: either a declaration reference (VD) or a magic value.
14025 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
// Prefer the attribute attached directly to the referenced declaration.
14029 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
14030 if (I->getArgumentKind() != ArgumentKind) {
// A tag was found, but it is registered for a different argument kind.
14031 FoundWrongKind = true;
14034 TypeInfo.Type = I->getMatchingCType();
14035 TypeInfo.LayoutCompatible = I->getLayoutCompatible();
14036 TypeInfo.MustBeNull = I->getMustBeNull();
// Otherwise fall back to the table of registered magic values.
14045 llvm::DenseMap<Sema::TypeTagMagicValue,
14046 Sema::TypeTagData>::const_iterator I =
14047 MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
14048 if (I == MagicValues->end())
14051 TypeInfo = I->second;
// Record an (ArgumentKind, MagicValue) -> type-data mapping for later use by
// CheckArgumentWithTypeTag. The backing DenseMap is created lazily on the
// first registration.
14055 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
14056 uint64_t MagicValue, QualType Type,
14057 bool LayoutCompatible,
// NOTE(review): the 'bool MustBeNull' parameter line is missing from this
// extract; it is consumed by the TypeTagData constructor below.
14059 if (!TypeTagForDatatypeMagicValues)
14060 TypeTagForDatatypeMagicValues.reset(
14061 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);
14063 TypeTagMagicValue Magic(ArgumentKind, MagicValue);
// Overwrites any previous registration for the same (kind, value) pair.
14064 (*TypeTagForDatatypeMagicValues)[Magic] =
14065 TypeTagData(Type, LayoutCompatible, MustBeNull);
// Return true when T1 and T2 are character builtin types that differ only in
// whether plain 'char' was spelled as an explicitly signed/unsigned char:
// plain char lowers to Char_S or Char_U depending on the target's char
// signedness, and is treated here as equivalent to SChar/UChar respectively.
14068 static bool IsSameCharType(QualType T1, QualType T2) {
14069 const BuiltinType *BT1 = T1->getAs<BuiltinType>();
// NOTE(review): null checks on BT1/BT2 are missing from this extract.
14073 const BuiltinType *BT2 = T2->getAs<BuiltinType>();
14077 BuiltinType::Kind T1Kind = BT1->getKind();
14078 BuiltinType::Kind T2Kind = BT2->getKind();
// All four signed/unsigned pairings of plain char vs. explicit char.
14080 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
14081 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
14082 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
14083 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
// Check a call site against an argument_with_type_tag attribute: resolve the
// type-tag argument to its registered C type, then verify that the tagged
// data argument has that type — diagnosing out-of-range indices, tags that
// require a null pointer, and type/layout mismatches.
// NOTE(review): this extract is missing lines of the original (early returns
// after each Diag, and some braces); comments describe only what is shown.
14086 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
14087 const ArrayRef<const Expr *> ExprArgs,
14088 SourceLocation CallSiteLoc) {
14089 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
14090 bool IsPointerAttr = Attr->getIsPointer();
14092 // Retrieve the argument representing the 'type_tag'.
14093 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
// The attribute's tag index points past the actual argument list.
14094 if (TypeTagIdxAST >= ExprArgs.size()) {
14095 Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
14096 << 0 << Attr->getTypeTagIdx().getSourceIndex();
14099 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
14100 bool FoundWrongKind;
14101 TypeTagData TypeInfo;
// Translate the tag expression into its registered type description.
14102 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
14103 TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
14104 TypeInfo, isConstantEvaluated())) {
14105 if (FoundWrongKind)
14106 Diag(TypeTagExpr->getExprLoc(),
14107 diag::warn_type_tag_for_datatype_wrong_kind)
14108 << TypeTagExpr->getSourceRange();
14112 // Retrieve the argument representing the 'arg_idx'.
14113 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
14114 if (ArgumentIdxAST >= ExprArgs.size()) {
14115 Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
14116 << 1 << Attr->getArgumentIdx().getSourceIndex();
14119 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
14120 if (IsPointerAttr) {
14121 // Skip implicit cast of pointer to `void *' (as a function argument).
14122 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
14123 if (ICE->getType()->isVoidPointerType() &&
14124 ICE->getCastKind() == CK_BitCast)
14125 ArgumentExpr = ICE->getSubExpr();
14127 QualType ArgumentType = ArgumentExpr->getType();
14129 // Passing a `void*' pointer shouldn't trigger a warning.
14130 if (IsPointerAttr && ArgumentType->isVoidPointerType())
14133 if (TypeInfo.MustBeNull) {
14134 // Type tag with matching void type requires a null pointer.
14135 if (!ArgumentExpr->isNullPointerConstant(Context,
14136 Expr::NPC_ValueDependentIsNotNull)) {
14137 Diag(ArgumentExpr->getExprLoc(),
14138 diag::warn_type_safety_null_pointer_required)
14139 << ArgumentKind->getName()
14140 << ArgumentExpr->getSourceRange()
14141 << TypeTagExpr->getSourceRange();
14146 QualType RequiredType = TypeInfo.Type;
// For pointer-style attributes the registered type describes the pointee.
14148 RequiredType = Context.getPointerType(RequiredType);
14150 bool mismatch = false;
// Without layout-compatibility, the types must be identical...
14151 if (!TypeInfo.LayoutCompatible) {
14152 mismatch = !Context.hasSameType(ArgumentType, RequiredType);
14154 // C++11 [basic.fundamental] p1:
14155 // Plain char, signed char, and unsigned char are three distinct types.
14157 // But we treat plain `char' as equivalent to `signed char' or `unsigned
14158 // char' depending on the current char signedness mode.
14160 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
14161 RequiredType->getPointeeType())) ||
14162 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
// ...otherwise layout compatibility (of pointees, for pointers) suffices.
14166 mismatch = !isLayoutCompatible(Context,
14167 ArgumentType->getPointeeType(),
14168 RequiredType->getPointeeType());
14170 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);
14173 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
14174 << ArgumentType << ArgumentKind
14175 << TypeInfo.LayoutCompatible << RequiredType
14176 << ArgumentExpr->getSourceRange()
14177 << TypeTagExpr->getSourceRange();
// Queue a potentially misaligned member access for later reporting by
// DiagnoseMisalignedMembers (or retraction by DiscardMisalignedMemberAddress).
14180 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
14181 CharUnits Alignment) {
14182 MisalignedMembers.emplace_back(E, RD, MD, Alignment);
// Emit a -Waddress-of-packed-member style warning for every queued member
// access, then clear the queue.
14185 void Sema::DiagnoseMisalignedMembers() {
14186 for (MisalignedMember &m : MisalignedMembers) {
14187 const NamedDecl *ND = m.RD;
// For an unnamed record, prefer the typedef that names it, if any.
14188 if (ND->getName().empty()) {
14189 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
14192 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
14193 << m.MD << ND << m.E->getSourceRange();
14195 MisalignedMembers.clear();
// Retract a queued misaligned-member warning when the taken address is
// consumed safely: converted to an integer, or to a pointer whose pointee's
// alignment requirement is incomplete or does not exceed the member's actual
// alignment.
14198 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
14199 E = E->IgnoreParens();
// Only integer and pointer destination types can neutralize the warning.
14200 if (!T->isPointerType() && !T->isIntegerType())
14202 if (isa<UnaryOperator>(E) &&
14203 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
14204 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
14205 if (isa<MemberExpr>(Op)) {
// Look up the pending entry for this exact member expression.
14206 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
14207 if (MA != MisalignedMembers.end() &&
14208 (T->isIntegerType() ||
14209 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
14210 Context.getTypeAlignInChars(
14211 T->getPointeeType()) <= MA->Alignment))))
14212 MisalignedMembers.erase(MA);
// Walk a (possibly nested) member access E such as "a.b.c.d"; if a packed
// attribute somewhere in the chain reduces the effective alignment below what
// E's type requires, invoke Action with the first packed culprit field, its
// parent record, and the reduced alignment.
// NOTE(review): this extract is missing lines of the original (the walking
// loop's header, several guards and returns); comments describe only the
// statements shown.
14217 void Sema::RefersToMemberWithReducedAlignment(
14219 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
14221 const auto *ME = dyn_cast<MemberExpr>(E);
14225 // No need to check expressions with an __unaligned-qualified type.
14226 if (E->getType().getQualifiers().hasUnaligned())
14229 // For a chain of MemberExpr like "a.b.c.d" this list
14230 // will keep FieldDecl's like [d, c, b].
14231 SmallVector<FieldDecl *, 4> ReverseMemberChain;
14232 const MemberExpr *TopME = nullptr;
14233 bool AnyIsPacked = false;
14235 QualType BaseType = ME->getBase()->getType();
// For an arrow access the record is the pointee type. (NOTE(review): the
// isArrow() check guarding this line is missing from this extract.)
14237 BaseType = BaseType->getPointeeType();
14238 RecordDecl *RD = BaseType->getAs<RecordType>()->getDecl();
14239 if (RD->isInvalidDecl())
14242 ValueDecl *MD = ME->getMemberDecl();
14243 auto *FD = dyn_cast<FieldDecl>(MD);
14244 // We do not care about non-data members.
14245 if (!FD || FD->isInvalidDecl())
// Track whether any record or member along the chain carries 'packed'.
14249 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
14250 ReverseMemberChain.push_back(FD);
// Step outward to the next enclosing member expression, if any.
14253 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
14255 assert(TopME && "We did not compute a topmost MemberExpr!");
14257 // Not the scope of this diagnostic.
14261 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
14262 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
14263 // TODO: The innermost base of the member expression may be too complicated.
14264 // For now, just disregard these cases. This is left for future
14266 if (!DRE && !isa<CXXThisExpr>(TopBase))
14269 // Alignment expected by the whole expression.
14270 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());
14272 // No need to do anything else with this case.
14273 if (ExpectedAlignment.isOne())
14276 // Synthesize offset of the whole access.
14278 for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
14280 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
14283 // Compute the CompleteObjectAlignment as the alignment of the whole chain.
14284 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
14285 ReverseMemberChain.back()->getParent()->getTypeForDecl());
14287 // The base expression of the innermost MemberExpr may give
14288 // stronger guarantees than the class containing the member.
14289 if (DRE && !TopME->isArrow()) {
14290 const ValueDecl *VD = DRE->getDecl();
// A concrete (non-reference) variable's declared alignment can exceed the
// alignment of its type.
14291 if (!VD->getType()->isReferenceType())
14292 CompleteObjectAlignment =
14293 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
14296 // Check if the synthesized offset fulfills the alignment.
14297 if (Offset % ExpectedAlignment != 0 ||
14298 // It may fulfill the offset, but the effective alignment may still be
14299 // lower than the expected expression alignment.
14300 CompleteObjectAlignment < ExpectedAlignment) {
14301 // If this happens, we want to determine a sensible culprit of this.
14302 // Intuitively, watching the chain of member expressions from right to
14303 // left, we start with the required alignment (as required by the field
14304 // type) but some packed attribute in that chain has reduced the alignment.
14305 // It may happen that another packed structure increases it again. But if
14306 // we are here such increase has not been enough. So pointing the first
14307 // FieldDecl that either is packed or else its RecordDecl is,
14308 // seems reasonable.
14309 FieldDecl *FD = nullptr;
14310 CharUnits Alignment;
14311 for (FieldDecl *FDI : ReverseMemberChain) {
14312 if (FDI->hasAttr<PackedAttr>() ||
14313 FDI->getParent()->hasAttr<PackedAttr>()) {
// The culprit's alignment is limited by both its own type and its
// enclosing record.
14315 Alignment = std::min(
14316 Context.getTypeAlignInChars(FD->getType()),
14317 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
14321 assert(FD && "We did not find a packed FieldDecl!");
14322 Action(E, FD->getParent(), FD, Alignment);
14326 void Sema::CheckAddressOfPackedMember(Expr *rhs) {
14327 using namespace std::placeholders;
14329 RefersToMemberWithReducedAlignment(
14330 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,