1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements extra semantic analysis beyond what is enforced
11 // by the C type system.
13 //===----------------------------------------------------------------------===//
15 #include "clang/AST/APValue.h"
16 #include "clang/AST/ASTContext.h"
17 #include "clang/AST/Attr.h"
18 #include "clang/AST/AttrIterator.h"
19 #include "clang/AST/CharUnits.h"
20 #include "clang/AST/Decl.h"
21 #include "clang/AST/DeclBase.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/AST/DeclarationName.h"
25 #include "clang/AST/EvaluatedExprVisitor.h"
26 #include "clang/AST/Expr.h"
27 #include "clang/AST/ExprCXX.h"
28 #include "clang/AST/ExprObjC.h"
29 #include "clang/AST/ExprOpenMP.h"
30 #include "clang/AST/NSAPI.h"
31 #include "clang/AST/NonTrivialTypeVisitor.h"
32 #include "clang/AST/OperationKinds.h"
33 #include "clang/AST/Stmt.h"
34 #include "clang/AST/TemplateBase.h"
35 #include "clang/AST/Type.h"
36 #include "clang/AST/TypeLoc.h"
37 #include "clang/AST/UnresolvedSet.h"
38 #include "clang/Analysis/Analyses/FormatString.h"
39 #include "clang/Basic/AddressSpaces.h"
40 #include "clang/Basic/CharInfo.h"
41 #include "clang/Basic/Diagnostic.h"
42 #include "clang/Basic/IdentifierTable.h"
43 #include "clang/Basic/LLVM.h"
44 #include "clang/Basic/LangOptions.h"
45 #include "clang/Basic/OpenCLOptions.h"
46 #include "clang/Basic/OperatorKinds.h"
47 #include "clang/Basic/PartialDiagnostic.h"
48 #include "clang/Basic/SourceLocation.h"
49 #include "clang/Basic/SourceManager.h"
50 #include "clang/Basic/Specifiers.h"
51 #include "clang/Basic/SyncScope.h"
52 #include "clang/Basic/TargetBuiltins.h"
53 #include "clang/Basic/TargetCXXABI.h"
54 #include "clang/Basic/TargetInfo.h"
55 #include "clang/Basic/TypeTraits.h"
56 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
57 #include "clang/Sema/Initialization.h"
58 #include "clang/Sema/Lookup.h"
59 #include "clang/Sema/Ownership.h"
60 #include "clang/Sema/Scope.h"
61 #include "clang/Sema/ScopeInfo.h"
62 #include "clang/Sema/Sema.h"
63 #include "clang/Sema/SemaInternal.h"
64 #include "llvm/ADT/APFloat.h"
65 #include "llvm/ADT/APInt.h"
66 #include "llvm/ADT/APSInt.h"
67 #include "llvm/ADT/ArrayRef.h"
68 #include "llvm/ADT/DenseMap.h"
69 #include "llvm/ADT/FoldingSet.h"
70 #include "llvm/ADT/None.h"
71 #include "llvm/ADT/Optional.h"
72 #include "llvm/ADT/STLExtras.h"
73 #include "llvm/ADT/SmallBitVector.h"
74 #include "llvm/ADT/SmallPtrSet.h"
75 #include "llvm/ADT/SmallString.h"
76 #include "llvm/ADT/SmallVector.h"
77 #include "llvm/ADT/StringRef.h"
78 #include "llvm/ADT/StringSwitch.h"
79 #include "llvm/ADT/Triple.h"
80 #include "llvm/Support/AtomicOrdering.h"
81 #include "llvm/Support/Casting.h"
82 #include "llvm/Support/Compiler.h"
83 #include "llvm/Support/ConvertUTF.h"
84 #include "llvm/Support/ErrorHandling.h"
85 #include "llvm/Support/Format.h"
86 #include "llvm/Support/Locale.h"
87 #include "llvm/Support/MathExtras.h"
88 #include "llvm/Support/raw_ostream.h"
99 using namespace clang;
100 using namespace sema;
102 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
103 unsigned ByteNo) const {
104 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
105 Context.getTargetInfo());
108 /// Checks that a call expression's argument count is the desired number.
109 /// This is useful when doing custom type-checking. Returns true on error.
110 static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
111 unsigned argCount = call->getNumArgs();
112 if (argCount == desiredArgCount) return false;
114 if (argCount < desiredArgCount)
115 return S.Diag(call->getLocEnd(), diag::err_typecheck_call_too_few_args)
116 << 0 /*function call*/ << desiredArgCount << argCount
117 << call->getSourceRange();
119 // Highlight all the excess arguments.
120 SourceRange range(call->getArg(desiredArgCount)->getLocStart(),
121 call->getArg(argCount - 1)->getLocEnd());
123 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
124 << 0 /*function call*/ << desiredArgCount << argCount
125 << call->getArg(1)->getSourceRange();
128 /// Check that the first argument to __builtin_annotation is an integer
129 /// and the second argument is a non-wide string literal.
130 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
131 if (checkArgCount(S, TheCall, 2))
134 // First argument should be an integer.
135 Expr *ValArg = TheCall->getArg(0);
136 QualType Ty = ValArg->getType();
137 if (!Ty->isIntegerType()) {
138 S.Diag(ValArg->getLocStart(), diag::err_builtin_annotation_first_arg)
139 << ValArg->getSourceRange();
143 // Second argument should be a constant string.
144 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
145 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
146 if (!Literal || !Literal->isAscii()) {
147 S.Diag(StrArg->getLocStart(), diag::err_builtin_annotation_second_arg)
148 << StrArg->getSourceRange();
152 TheCall->setType(Ty);
156 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
157 // We need at least one argument.
158 if (TheCall->getNumArgs() < 1) {
159 S.Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
160 << 0 << 1 << TheCall->getNumArgs()
161 << TheCall->getCallee()->getSourceRange();
165 // All arguments should be wide string literals.
166 for (Expr *Arg : TheCall->arguments()) {
167 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
168 if (!Literal || !Literal->isWide()) {
169 S.Diag(Arg->getLocStart(), diag::err_msvc_annotation_wide_str)
170 << Arg->getSourceRange();
178 /// Check that the argument to __builtin_addressof is a glvalue, and set the
179 /// result type to the corresponding pointer type.
180 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
181 if (checkArgCount(S, TheCall, 1))
184 ExprResult Arg(TheCall->getArg(0));
185 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getLocStart());
186 if (ResultType.isNull())
189 TheCall->setArg(0, Arg.get());
190 TheCall->setType(ResultType);
194 static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
195 if (checkArgCount(S, TheCall, 3))
198 // First two arguments should be integers.
199 for (unsigned I = 0; I < 2; ++I) {
200 ExprResult Arg = TheCall->getArg(I);
201 QualType Ty = Arg.get()->getType();
202 if (!Ty->isIntegerType()) {
203 S.Diag(Arg.get()->getLocStart(), diag::err_overflow_builtin_must_be_int)
204 << Ty << Arg.get()->getSourceRange();
207 InitializedEntity Entity = InitializedEntity::InitializeParameter(
208 S.getASTContext(), Ty, /*consume*/ false);
209 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
212 TheCall->setArg(I, Arg.get());
215 // Third argument should be a pointer to a non-const integer.
216 // IRGen correctly handles volatile, restrict, and address spaces, and
217 // the other qualifiers aren't possible.
219 ExprResult Arg = TheCall->getArg(2);
220 QualType Ty = Arg.get()->getType();
221 const auto *PtrTy = Ty->getAs<PointerType>();
222 if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
223 !PtrTy->getPointeeType().isConstQualified())) {
224 S.Diag(Arg.get()->getLocStart(),
225 diag::err_overflow_builtin_must_be_ptr_int)
226 << Ty << Arg.get()->getSourceRange();
229 InitializedEntity Entity = InitializedEntity::InitializeParameter(
230 S.getASTContext(), Ty, /*consume*/ false);
231 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
234 TheCall->setArg(2, Arg.get());
239 static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
240 CallExpr *TheCall, unsigned SizeIdx,
241 unsigned DstSizeIdx) {
242 if (TheCall->getNumArgs() <= SizeIdx ||
243 TheCall->getNumArgs() <= DstSizeIdx)
246 const Expr *SizeArg = TheCall->getArg(SizeIdx);
247 const Expr *DstSizeArg = TheCall->getArg(DstSizeIdx);
249 llvm::APSInt Size, DstSize;
251 // find out if both sizes are known at compile time
252 if (!SizeArg->EvaluateAsInt(Size, S.Context) ||
253 !DstSizeArg->EvaluateAsInt(DstSize, S.Context))
256 if (Size.ule(DstSize))
259 // confirmed overflow so generate the diagnostic.
260 IdentifierInfo *FnName = FDecl->getIdentifier();
261 SourceLocation SL = TheCall->getLocStart();
262 SourceRange SR = TheCall->getSourceRange();
264 S.Diag(SL, diag::warn_memcpy_chk_overflow) << SR << FnName;
267 static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
268 if (checkArgCount(S, BuiltinCall, 2))
271 SourceLocation BuiltinLoc = BuiltinCall->getLocStart();
272 Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
273 Expr *Call = BuiltinCall->getArg(0);
274 Expr *Chain = BuiltinCall->getArg(1);
276 if (Call->getStmtClass() != Stmt::CallExprClass) {
277 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
278 << Call->getSourceRange();
282 auto CE = cast<CallExpr>(Call);
283 if (CE->getCallee()->getType()->isBlockPointerType()) {
284 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
285 << Call->getSourceRange();
289 const Decl *TargetDecl = CE->getCalleeDecl();
290 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
291 if (FD->getBuiltinID()) {
292 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
293 << Call->getSourceRange();
297 if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
298 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
299 << Call->getSourceRange();
303 ExprResult ChainResult = S.UsualUnaryConversions(Chain);
304 if (ChainResult.isInvalid())
306 if (!ChainResult.get()->getType()->isPointerType()) {
307 S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
308 << Chain->getSourceRange();
312 QualType ReturnTy = CE->getCallReturnType(S.Context);
313 QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
314 QualType BuiltinTy = S.Context.getFunctionType(
315 ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
316 QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);
319 S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();
321 BuiltinCall->setType(CE->getType());
322 BuiltinCall->setValueKind(CE->getValueKind());
323 BuiltinCall->setObjectKind(CE->getObjectKind());
324 BuiltinCall->setCallee(Builtin);
325 BuiltinCall->setArg(1, ChainResult.get());
330 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
331 Scope::ScopeFlags NeededScopeFlags,
333 // Scopes aren't available during instantiation. Fortunately, builtin
334 // functions cannot be template args so they cannot be formed through template
335 // instantiation. Therefore checking once during the parse is sufficient.
336 if (SemaRef.inTemplateInstantiation())
339 Scope *S = SemaRef.getCurScope();
340 while (S && !S->isSEHExceptScope())
342 if (!S || !(S->getFlags() & NeededScopeFlags)) {
343 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
344 SemaRef.Diag(TheCall->getExprLoc(), DiagID)
345 << DRE->getDecl()->getIdentifier();
352 static inline bool isBlockPointer(Expr *Arg) {
353 return Arg->getType()->isBlockPointerType();
356 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
357 /// void*, which is a requirement of device side enqueue.
358 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
359 const BlockPointerType *BPT =
360 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
361 ArrayRef<QualType> Params =
362 BPT->getPointeeType()->getAs<FunctionProtoType>()->getParamTypes();
363 unsigned ArgCounter = 0;
364 bool IllegalParams = false;
365 // Iterate through the block parameters until either one is found that is not
366 // a local void*, or the block is valid.
367 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
368 I != E; ++I, ++ArgCounter) {
369 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
370 (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
371 LangAS::opencl_local) {
372 // Get the location of the error. If a block literal has been passed
373 // (BlockExpr) then we can point straight to the offending argument,
374 // else we just point to the variable reference.
375 SourceLocation ErrorLoc;
376 if (isa<BlockExpr>(BlockArg)) {
377 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
378 ErrorLoc = BD->getParamDecl(ArgCounter)->getLocStart();
379 } else if (isa<DeclRefExpr>(BlockArg)) {
380 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getLocStart();
383 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
384 IllegalParams = true;
388 return IllegalParams;
391 static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
392 if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
393 S.Diag(Call->getLocStart(), diag::err_opencl_requires_extension)
394 << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
400 static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
401 if (checkArgCount(S, TheCall, 2))
404 if (checkOpenCLSubgroupExt(S, TheCall))
407 // First argument is an ndrange_t type.
408 Expr *NDRangeArg = TheCall->getArg(0);
409 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
410 S.Diag(NDRangeArg->getLocStart(),
411 diag::err_opencl_builtin_expected_type)
412 << TheCall->getDirectCallee() << "'ndrange_t'";
416 Expr *BlockArg = TheCall->getArg(1);
417 if (!isBlockPointer(BlockArg)) {
418 S.Diag(BlockArg->getLocStart(),
419 diag::err_opencl_builtin_expected_type)
420 << TheCall->getDirectCallee() << "block";
423 return checkOpenCLBlockArgs(S, BlockArg);
426 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
427 /// get_kernel_work_group_size
428 /// and get_kernel_preferred_work_group_size_multiple builtin functions.
429 static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
430 if (checkArgCount(S, TheCall, 1))
433 Expr *BlockArg = TheCall->getArg(0);
434 if (!isBlockPointer(BlockArg)) {
435 S.Diag(BlockArg->getLocStart(),
436 diag::err_opencl_builtin_expected_type)
437 << TheCall->getDirectCallee() << "block";
440 return checkOpenCLBlockArgs(S, BlockArg);
443 /// Diagnose integer type and any valid implicit conversion to it.
444 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
445 const QualType &IntType);
447 static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
448 unsigned Start, unsigned End) {
449 bool IllegalParams = false;
450 for (unsigned I = Start; I <= End; ++I)
451 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
452 S.Context.getSizeType());
453 return IllegalParams;
456 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
457 /// 'local void*' parameter of passed block.
458 static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
460 unsigned NumNonVarArgs) {
461 const BlockPointerType *BPT =
462 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
463 unsigned NumBlockParams =
464 BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams();
465 unsigned TotalNumArgs = TheCall->getNumArgs();
467 // For each argument passed to the block, a corresponding uint needs to
468 // be passed to describe the size of the local memory.
469 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
470 S.Diag(TheCall->getLocStart(),
471 diag::err_opencl_enqueue_kernel_local_size_args);
475 // Check that the sizes of the local memory are specified by integers.
476 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
480 /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
481 /// overload formats specified in Table 6.13.17.1.
482 /// int enqueue_kernel(queue_t queue,
483 /// kernel_enqueue_flags_t flags,
484 /// const ndrange_t ndrange,
485 /// void (^block)(void))
486 /// int enqueue_kernel(queue_t queue,
487 /// kernel_enqueue_flags_t flags,
488 /// const ndrange_t ndrange,
489 /// uint num_events_in_wait_list,
490 /// clk_event_t *event_wait_list,
491 /// clk_event_t *event_ret,
492 /// void (^block)(void))
493 /// int enqueue_kernel(queue_t queue,
494 /// kernel_enqueue_flags_t flags,
495 /// const ndrange_t ndrange,
496 /// void (^block)(local void*, ...),
498 /// int enqueue_kernel(queue_t queue,
499 /// kernel_enqueue_flags_t flags,
500 /// const ndrange_t ndrange,
501 /// uint num_events_in_wait_list,
502 /// clk_event_t *event_wait_list,
503 /// clk_event_t *event_ret,
504 /// void (^block)(local void*, ...),
506 static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
507 unsigned NumArgs = TheCall->getNumArgs();
510 S.Diag(TheCall->getLocStart(), diag::err_typecheck_call_too_few_args);
514 Expr *Arg0 = TheCall->getArg(0);
515 Expr *Arg1 = TheCall->getArg(1);
516 Expr *Arg2 = TheCall->getArg(2);
517 Expr *Arg3 = TheCall->getArg(3);
519 // First argument always needs to be a queue_t type.
520 if (!Arg0->getType()->isQueueT()) {
521 S.Diag(TheCall->getArg(0)->getLocStart(),
522 diag::err_opencl_builtin_expected_type)
523 << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
527 // Second argument always needs to be a kernel_enqueue_flags_t enum value.
528 if (!Arg1->getType()->isIntegerType()) {
529 S.Diag(TheCall->getArg(1)->getLocStart(),
530 diag::err_opencl_builtin_expected_type)
531 << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
535 // Third argument is always an ndrange_t type.
536 if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
537 S.Diag(TheCall->getArg(2)->getLocStart(),
538 diag::err_opencl_builtin_expected_type)
539 << TheCall->getDirectCallee() << "'ndrange_t'";
543 // With four arguments, there is only one form that the function could be
544 // called in: no events and no variable arguments.
546 // check that the last argument is the right block type.
547 if (!isBlockPointer(Arg3)) {
548 S.Diag(Arg3->getLocStart(), diag::err_opencl_builtin_expected_type)
549 << TheCall->getDirectCallee() << "block";
552 // we have a block type, check the prototype
553 const BlockPointerType *BPT =
554 cast<BlockPointerType>(Arg3->getType().getCanonicalType());
555 if (BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams() > 0) {
556 S.Diag(Arg3->getLocStart(),
557 diag::err_opencl_enqueue_kernel_blocks_no_args);
562 // we can have block + varargs.
563 if (isBlockPointer(Arg3))
564 return (checkOpenCLBlockArgs(S, Arg3) ||
565 checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
566 // last two cases with either exactly 7 args or 7 args and varargs.
568 // check common block argument.
569 Expr *Arg6 = TheCall->getArg(6);
570 if (!isBlockPointer(Arg6)) {
571 S.Diag(Arg6->getLocStart(), diag::err_opencl_builtin_expected_type)
572 << TheCall->getDirectCallee() << "block";
575 if (checkOpenCLBlockArgs(S, Arg6))
578 // Forth argument has to be any integer type.
579 if (!Arg3->getType()->isIntegerType()) {
580 S.Diag(TheCall->getArg(3)->getLocStart(),
581 diag::err_opencl_builtin_expected_type)
582 << TheCall->getDirectCallee() << "integer";
585 // check remaining common arguments.
586 Expr *Arg4 = TheCall->getArg(4);
587 Expr *Arg5 = TheCall->getArg(5);
589 // Fifth argument is always passed as a pointer to clk_event_t.
590 if (!Arg4->isNullPointerConstant(S.Context,
591 Expr::NPC_ValueDependentIsNotNull) &&
592 !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
593 S.Diag(TheCall->getArg(4)->getLocStart(),
594 diag::err_opencl_builtin_expected_type)
595 << TheCall->getDirectCallee()
596 << S.Context.getPointerType(S.Context.OCLClkEventTy);
600 // Sixth argument is always passed as a pointer to clk_event_t.
601 if (!Arg5->isNullPointerConstant(S.Context,
602 Expr::NPC_ValueDependentIsNotNull) &&
603 !(Arg5->getType()->isPointerType() &&
604 Arg5->getType()->getPointeeType()->isClkEventT())) {
605 S.Diag(TheCall->getArg(5)->getLocStart(),
606 diag::err_opencl_builtin_expected_type)
607 << TheCall->getDirectCallee()
608 << S.Context.getPointerType(S.Context.OCLClkEventTy);
615 return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
618 // None of the specific case has been detected, give generic error
619 S.Diag(TheCall->getLocStart(),
620 diag::err_opencl_enqueue_kernel_incorrect_args);
624 /// Returns OpenCL access qual.
625 static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
626 return D->getAttr<OpenCLAccessAttr>();
629 /// Returns true if pipe element type is different from the pointer.
630 static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
631 const Expr *Arg0 = Call->getArg(0);
632 // First argument type should always be pipe.
633 if (!Arg0->getType()->isPipeType()) {
634 S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_first_arg)
635 << Call->getDirectCallee() << Arg0->getSourceRange();
638 OpenCLAccessAttr *AccessQual =
639 getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
640 // Validates the access qualifier is compatible with the call.
641 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
642 // read_only and write_only, and assumed to be read_only if no qualifier is
644 switch (Call->getDirectCallee()->getBuiltinID()) {
645 case Builtin::BIread_pipe:
646 case Builtin::BIreserve_read_pipe:
647 case Builtin::BIcommit_read_pipe:
648 case Builtin::BIwork_group_reserve_read_pipe:
649 case Builtin::BIsub_group_reserve_read_pipe:
650 case Builtin::BIwork_group_commit_read_pipe:
651 case Builtin::BIsub_group_commit_read_pipe:
652 if (!(!AccessQual || AccessQual->isReadOnly())) {
653 S.Diag(Arg0->getLocStart(),
654 diag::err_opencl_builtin_pipe_invalid_access_modifier)
655 << "read_only" << Arg0->getSourceRange();
659 case Builtin::BIwrite_pipe:
660 case Builtin::BIreserve_write_pipe:
661 case Builtin::BIcommit_write_pipe:
662 case Builtin::BIwork_group_reserve_write_pipe:
663 case Builtin::BIsub_group_reserve_write_pipe:
664 case Builtin::BIwork_group_commit_write_pipe:
665 case Builtin::BIsub_group_commit_write_pipe:
666 if (!(AccessQual && AccessQual->isWriteOnly())) {
667 S.Diag(Arg0->getLocStart(),
668 diag::err_opencl_builtin_pipe_invalid_access_modifier)
669 << "write_only" << Arg0->getSourceRange();
679 /// Returns true if pipe element type is different from the pointer.
680 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
681 const Expr *Arg0 = Call->getArg(0);
682 const Expr *ArgIdx = Call->getArg(Idx);
683 const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
684 const QualType EltTy = PipeTy->getElementType();
685 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
686 // The Idx argument should be a pointer and the type of the pointer and
687 // the type of pipe element should also be the same.
689 !S.Context.hasSameType(
690 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
691 S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
692 << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
693 << ArgIdx->getType() << ArgIdx->getSourceRange();
699 // Performs semantic analysis for the read/write_pipe call.
700 // \param S Reference to the semantic analyzer.
701 // \param Call A pointer to the builtin call.
702 // \return True if a semantic error has been found, false otherwise.
703 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
704 // OpenCL v2.0 s6.13.16.2 - The built-in read/write
705 // functions have two forms.
706 switch (Call->getNumArgs()) {
708 if (checkOpenCLPipeArg(S, Call))
710 // The call with 2 arguments should be
711 // read/write_pipe(pipe T, T*).
712 // Check packet type T.
713 if (checkOpenCLPipePacketType(S, Call, 1))
718 if (checkOpenCLPipeArg(S, Call))
720 // The call with 4 arguments should be
721 // read/write_pipe(pipe T, reserve_id_t, uint, T*).
722 // Check reserve_id_t.
723 if (!Call->getArg(1)->getType()->isReserveIDT()) {
724 S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
725 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
726 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
731 const Expr *Arg2 = Call->getArg(2);
732 if (!Arg2->getType()->isIntegerType() &&
733 !Arg2->getType()->isUnsignedIntegerType()) {
734 S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
735 << Call->getDirectCallee() << S.Context.UnsignedIntTy
736 << Arg2->getType() << Arg2->getSourceRange();
740 // Check packet type T.
741 if (checkOpenCLPipePacketType(S, Call, 3))
745 S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_arg_num)
746 << Call->getDirectCallee() << Call->getSourceRange();
753 // Performs a semantic analysis on the {work_group_/sub_group_
754 // /_}reserve_{read/write}_pipe
755 // \param S Reference to the semantic analyzer.
756 // \param Call The call to the builtin function to be analyzed.
757 // \return True if a semantic error was found, false otherwise.
758 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
759 if (checkArgCount(S, Call, 2))
762 if (checkOpenCLPipeArg(S, Call))
765 // Check the reserve size.
766 if (!Call->getArg(1)->getType()->isIntegerType() &&
767 !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
768 S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
769 << Call->getDirectCallee() << S.Context.UnsignedIntTy
770 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
774 // Since return type of reserve_read/write_pipe built-in function is
775 // reserve_id_t, which is not defined in the builtin def file , we used int
776 // as return type and need to override the return type of these functions.
777 Call->setType(S.Context.OCLReserveIDTy);
782 // Performs a semantic analysis on {work_group_/sub_group_
783 // /_}commit_{read/write}_pipe
784 // \param S Reference to the semantic analyzer.
785 // \param Call The call to the builtin function to be analyzed.
786 // \return True if a semantic error was found, false otherwise.
787 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
788 if (checkArgCount(S, Call, 2))
791 if (checkOpenCLPipeArg(S, Call))
794 // Check reserve_id_t.
795 if (!Call->getArg(1)->getType()->isReserveIDT()) {
796 S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
797 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
798 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
805 // Performs a semantic analysis on the call to built-in Pipe
807 // \param S Reference to the semantic analyzer.
808 // \param Call The call to the builtin function to be analyzed.
809 // \return True if a semantic error was found, false otherwise.
810 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
811 if (checkArgCount(S, Call, 1))
814 if (!Call->getArg(0)->getType()->isPipeType()) {
815 S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_first_arg)
816 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
823 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
824 // Performs semantic analysis for the to_global/local/private call.
825 // \param S Reference to the semantic analyzer.
826 // \param BuiltinID ID of the builtin function.
827 // \param Call A pointer to the builtin call.
828 // \return True if a semantic error has been found, false otherwise.
829 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
831 if (Call->getNumArgs() != 1) {
832 S.Diag(Call->getLocStart(), diag::err_opencl_builtin_to_addr_arg_num)
833 << Call->getDirectCallee() << Call->getSourceRange();
837 auto RT = Call->getArg(0)->getType();
838 if (!RT->isPointerType() || RT->getPointeeType()
839 .getAddressSpace() == LangAS::opencl_constant) {
840 S.Diag(Call->getLocStart(), diag::err_opencl_builtin_to_addr_invalid_arg)
841 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
845 RT = RT->getPointeeType();
846 auto Qual = RT.getQualifiers();
848 case Builtin::BIto_global:
849 Qual.setAddressSpace(LangAS::opencl_global);
851 case Builtin::BIto_local:
852 Qual.setAddressSpace(LangAS::opencl_local);
854 case Builtin::BIto_private:
855 Qual.setAddressSpace(LangAS::opencl_private);
858 llvm_unreachable("Invalid builtin function");
860 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
861 RT.getUnqualifiedType(), Qual)));
866 // Emit an error and return true if the current architecture is not in the list
867 // of supported architectures.
869 CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
870 ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
871 llvm::Triple::ArchType CurArch =
872 S.getASTContext().getTargetInfo().getTriple().getArch();
873 if (llvm::is_contained(SupportedArchs, CurArch))
875 S.Diag(TheCall->getLocStart(), diag::err_builtin_target_unsupported)
876 << TheCall->getSourceRange();
881 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
883 ExprResult TheCallResult(TheCall);
885 // Find out if any arguments are required to be integer constant expressions.
886 unsigned ICEArguments = 0;
887 ASTContext::GetBuiltinTypeError Error;
888 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
889 if (Error != ASTContext::GE_None)
890 ICEArguments = 0; // Don't diagnose previously diagnosed errors.
892 // If any arguments are required to be ICE's, check and diagnose.
893 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
894 // Skip arguments not required to be ICE's.
895 if ((ICEArguments & (1 << ArgNo)) == 0) continue;
898 if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
900 ICEArguments &= ~(1 << ArgNo);
904 case Builtin::BI__builtin___CFStringMakeConstantString:
905 assert(TheCall->getNumArgs() == 1 &&
906 "Wrong # arguments to builtin CFStringMakeConstantString");
907 if (CheckObjCString(TheCall->getArg(0)))
910 case Builtin::BI__builtin_ms_va_start:
911 case Builtin::BI__builtin_stdarg_start:
912 case Builtin::BI__builtin_va_start:
913 if (SemaBuiltinVAStart(BuiltinID, TheCall))
916 case Builtin::BI__va_start: {
917 switch (Context.getTargetInfo().getTriple().getArch()) {
918 case llvm::Triple::arm:
919 case llvm::Triple::thumb:
920 if (SemaBuiltinVAStartARMMicrosoft(TheCall))
924 if (SemaBuiltinVAStart(BuiltinID, TheCall))
931 // The acquire, release, and no fence variants are ARM and AArch64 only.
932 case Builtin::BI_interlockedbittestandset_acq:
933 case Builtin::BI_interlockedbittestandset_rel:
934 case Builtin::BI_interlockedbittestandset_nf:
935 case Builtin::BI_interlockedbittestandreset_acq:
936 case Builtin::BI_interlockedbittestandreset_rel:
937 case Builtin::BI_interlockedbittestandreset_nf:
938 if (CheckBuiltinTargetSupport(
939 *this, BuiltinID, TheCall,
940 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
944 // The 64-bit bittest variants are x64, ARM, and AArch64 only.
945 case Builtin::BI_bittest64:
946 case Builtin::BI_bittestandcomplement64:
947 case Builtin::BI_bittestandreset64:
948 case Builtin::BI_bittestandset64:
949 case Builtin::BI_interlockedbittestandreset64:
950 case Builtin::BI_interlockedbittestandset64:
951 if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
952 {llvm::Triple::x86_64, llvm::Triple::arm,
953 llvm::Triple::thumb, llvm::Triple::aarch64}))
957 case Builtin::BI__builtin_isgreater:
958 case Builtin::BI__builtin_isgreaterequal:
959 case Builtin::BI__builtin_isless:
960 case Builtin::BI__builtin_islessequal:
961 case Builtin::BI__builtin_islessgreater:
962 case Builtin::BI__builtin_isunordered:
963 if (SemaBuiltinUnorderedCompare(TheCall))
966 case Builtin::BI__builtin_fpclassify:
967 if (SemaBuiltinFPClassification(TheCall, 6))
970 case Builtin::BI__builtin_isfinite:
971 case Builtin::BI__builtin_isinf:
972 case Builtin::BI__builtin_isinf_sign:
973 case Builtin::BI__builtin_isnan:
974 case Builtin::BI__builtin_isnormal:
975 case Builtin::BI__builtin_signbit:
976 case Builtin::BI__builtin_signbitf:
977 case Builtin::BI__builtin_signbitl:
978 if (SemaBuiltinFPClassification(TheCall, 1))
981 case Builtin::BI__builtin_shufflevector:
982 return SemaBuiltinShuffleVector(TheCall);
983 // TheCall will be freed by the smart pointer here, but that's fine, since
984 // SemaBuiltinShuffleVector guts it, but then doesn't release it.
985 case Builtin::BI__builtin_prefetch:
986 if (SemaBuiltinPrefetch(TheCall))
989 case Builtin::BI__builtin_alloca_with_align:
990 if (SemaBuiltinAllocaWithAlign(TheCall))
993 case Builtin::BI__assume:
994 case Builtin::BI__builtin_assume:
995 if (SemaBuiltinAssume(TheCall))
998 case Builtin::BI__builtin_assume_aligned:
999 if (SemaBuiltinAssumeAligned(TheCall))
1002 case Builtin::BI__builtin_object_size:
1003 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
1006 case Builtin::BI__builtin_longjmp:
1007 if (SemaBuiltinLongjmp(TheCall))
1010 case Builtin::BI__builtin_setjmp:
1011 if (SemaBuiltinSetjmp(TheCall))
1014 case Builtin::BI_setjmp:
1015 case Builtin::BI_setjmpex:
1016 if (checkArgCount(*this, TheCall, 1))
1019 case Builtin::BI__builtin_classify_type:
1020 if (checkArgCount(*this, TheCall, 1)) return true;
1021 TheCall->setType(Context.IntTy);
1023 case Builtin::BI__builtin_constant_p:
1024 if (checkArgCount(*this, TheCall, 1)) return true;
1025 TheCall->setType(Context.IntTy);
1027 case Builtin::BI__sync_fetch_and_add:
1028 case Builtin::BI__sync_fetch_and_add_1:
1029 case Builtin::BI__sync_fetch_and_add_2:
1030 case Builtin::BI__sync_fetch_and_add_4:
1031 case Builtin::BI__sync_fetch_and_add_8:
1032 case Builtin::BI__sync_fetch_and_add_16:
1033 case Builtin::BI__sync_fetch_and_sub:
1034 case Builtin::BI__sync_fetch_and_sub_1:
1035 case Builtin::BI__sync_fetch_and_sub_2:
1036 case Builtin::BI__sync_fetch_and_sub_4:
1037 case Builtin::BI__sync_fetch_and_sub_8:
1038 case Builtin::BI__sync_fetch_and_sub_16:
1039 case Builtin::BI__sync_fetch_and_or:
1040 case Builtin::BI__sync_fetch_and_or_1:
1041 case Builtin::BI__sync_fetch_and_or_2:
1042 case Builtin::BI__sync_fetch_and_or_4:
1043 case Builtin::BI__sync_fetch_and_or_8:
1044 case Builtin::BI__sync_fetch_and_or_16:
1045 case Builtin::BI__sync_fetch_and_and:
1046 case Builtin::BI__sync_fetch_and_and_1:
1047 case Builtin::BI__sync_fetch_and_and_2:
1048 case Builtin::BI__sync_fetch_and_and_4:
1049 case Builtin::BI__sync_fetch_and_and_8:
1050 case Builtin::BI__sync_fetch_and_and_16:
1051 case Builtin::BI__sync_fetch_and_xor:
1052 case Builtin::BI__sync_fetch_and_xor_1:
1053 case Builtin::BI__sync_fetch_and_xor_2:
1054 case Builtin::BI__sync_fetch_and_xor_4:
1055 case Builtin::BI__sync_fetch_and_xor_8:
1056 case Builtin::BI__sync_fetch_and_xor_16:
1057 case Builtin::BI__sync_fetch_and_nand:
1058 case Builtin::BI__sync_fetch_and_nand_1:
1059 case Builtin::BI__sync_fetch_and_nand_2:
1060 case Builtin::BI__sync_fetch_and_nand_4:
1061 case Builtin::BI__sync_fetch_and_nand_8:
1062 case Builtin::BI__sync_fetch_and_nand_16:
1063 case Builtin::BI__sync_add_and_fetch:
1064 case Builtin::BI__sync_add_and_fetch_1:
1065 case Builtin::BI__sync_add_and_fetch_2:
1066 case Builtin::BI__sync_add_and_fetch_4:
1067 case Builtin::BI__sync_add_and_fetch_8:
1068 case Builtin::BI__sync_add_and_fetch_16:
1069 case Builtin::BI__sync_sub_and_fetch:
1070 case Builtin::BI__sync_sub_and_fetch_1:
1071 case Builtin::BI__sync_sub_and_fetch_2:
1072 case Builtin::BI__sync_sub_and_fetch_4:
1073 case Builtin::BI__sync_sub_and_fetch_8:
1074 case Builtin::BI__sync_sub_and_fetch_16:
1075 case Builtin::BI__sync_and_and_fetch:
1076 case Builtin::BI__sync_and_and_fetch_1:
1077 case Builtin::BI__sync_and_and_fetch_2:
1078 case Builtin::BI__sync_and_and_fetch_4:
1079 case Builtin::BI__sync_and_and_fetch_8:
1080 case Builtin::BI__sync_and_and_fetch_16:
1081 case Builtin::BI__sync_or_and_fetch:
1082 case Builtin::BI__sync_or_and_fetch_1:
1083 case Builtin::BI__sync_or_and_fetch_2:
1084 case Builtin::BI__sync_or_and_fetch_4:
1085 case Builtin::BI__sync_or_and_fetch_8:
1086 case Builtin::BI__sync_or_and_fetch_16:
1087 case Builtin::BI__sync_xor_and_fetch:
1088 case Builtin::BI__sync_xor_and_fetch_1:
1089 case Builtin::BI__sync_xor_and_fetch_2:
1090 case Builtin::BI__sync_xor_and_fetch_4:
1091 case Builtin::BI__sync_xor_and_fetch_8:
1092 case Builtin::BI__sync_xor_and_fetch_16:
1093 case Builtin::BI__sync_nand_and_fetch:
1094 case Builtin::BI__sync_nand_and_fetch_1:
1095 case Builtin::BI__sync_nand_and_fetch_2:
1096 case Builtin::BI__sync_nand_and_fetch_4:
1097 case Builtin::BI__sync_nand_and_fetch_8:
1098 case Builtin::BI__sync_nand_and_fetch_16:
1099 case Builtin::BI__sync_val_compare_and_swap:
1100 case Builtin::BI__sync_val_compare_and_swap_1:
1101 case Builtin::BI__sync_val_compare_and_swap_2:
1102 case Builtin::BI__sync_val_compare_and_swap_4:
1103 case Builtin::BI__sync_val_compare_and_swap_8:
1104 case Builtin::BI__sync_val_compare_and_swap_16:
1105 case Builtin::BI__sync_bool_compare_and_swap:
1106 case Builtin::BI__sync_bool_compare_and_swap_1:
1107 case Builtin::BI__sync_bool_compare_and_swap_2:
1108 case Builtin::BI__sync_bool_compare_and_swap_4:
1109 case Builtin::BI__sync_bool_compare_and_swap_8:
1110 case Builtin::BI__sync_bool_compare_and_swap_16:
1111 case Builtin::BI__sync_lock_test_and_set:
1112 case Builtin::BI__sync_lock_test_and_set_1:
1113 case Builtin::BI__sync_lock_test_and_set_2:
1114 case Builtin::BI__sync_lock_test_and_set_4:
1115 case Builtin::BI__sync_lock_test_and_set_8:
1116 case Builtin::BI__sync_lock_test_and_set_16:
1117 case Builtin::BI__sync_lock_release:
1118 case Builtin::BI__sync_lock_release_1:
1119 case Builtin::BI__sync_lock_release_2:
1120 case Builtin::BI__sync_lock_release_4:
1121 case Builtin::BI__sync_lock_release_8:
1122 case Builtin::BI__sync_lock_release_16:
1123 case Builtin::BI__sync_swap:
1124 case Builtin::BI__sync_swap_1:
1125 case Builtin::BI__sync_swap_2:
1126 case Builtin::BI__sync_swap_4:
1127 case Builtin::BI__sync_swap_8:
1128 case Builtin::BI__sync_swap_16:
1129 return SemaBuiltinAtomicOverloaded(TheCallResult);
1130 case Builtin::BI__builtin_nontemporal_load:
1131 case Builtin::BI__builtin_nontemporal_store:
1132 return SemaBuiltinNontemporalOverloaded(TheCallResult);
1133 #define BUILTIN(ID, TYPE, ATTRS)
1134 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
1135 case Builtin::BI##ID: \
1136 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
1137 #include "clang/Basic/Builtins.def"
1138 case Builtin::BI__annotation:
1139 if (SemaBuiltinMSVCAnnotation(*this, TheCall))
1142 case Builtin::BI__builtin_annotation:
1143 if (SemaBuiltinAnnotation(*this, TheCall))
1146 case Builtin::BI__builtin_addressof:
1147 if (SemaBuiltinAddressof(*this, TheCall))
1150 case Builtin::BI__builtin_add_overflow:
1151 case Builtin::BI__builtin_sub_overflow:
1152 case Builtin::BI__builtin_mul_overflow:
1153 if (SemaBuiltinOverflow(*this, TheCall))
1156 case Builtin::BI__builtin_operator_new:
1157 case Builtin::BI__builtin_operator_delete: {
1158 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
1160 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
1161 if (Res.isInvalid())
1162 CorrectDelayedTyposInExpr(TheCallResult.get());
1165 case Builtin::BI__builtin_dump_struct: {
1166 // We first want to ensure we are called with 2 arguments
1167 if (checkArgCount(*this, TheCall, 2))
1169 // Ensure that the first argument is of type 'struct XX *'
1170 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
1171 const QualType PtrArgType = PtrArg->getType();
1172 if (!PtrArgType->isPointerType() ||
1173 !PtrArgType->getPointeeType()->isRecordType()) {
1174 Diag(PtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
1175 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
1176 << "structure pointer";
1180 // Ensure that the second argument is of type 'FunctionType'
1181 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
1182 const QualType FnPtrArgType = FnPtrArg->getType();
1183 if (!FnPtrArgType->isPointerType()) {
1184 Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
1185 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1186 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1190 const auto *FuncType =
1191 FnPtrArgType->getPointeeType()->getAs<FunctionType>();
1194 Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
1195 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1196 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1200 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
1201 if (!FT->getNumParams()) {
1202 Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
1203 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1204 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1207 QualType PT = FT->getParamType(0);
1208 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
1209 !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
1210 !PT->getPointeeType().isConstQualified()) {
1211 Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
1212 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1213 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1218 TheCall->setType(Context.IntTy);
1222 // check secure string manipulation functions where overflows
1223 // are detectable at compile time
1224 case Builtin::BI__builtin___memcpy_chk:
1225 case Builtin::BI__builtin___memmove_chk:
1226 case Builtin::BI__builtin___memset_chk:
1227 case Builtin::BI__builtin___strlcat_chk:
1228 case Builtin::BI__builtin___strlcpy_chk:
1229 case Builtin::BI__builtin___strncat_chk:
1230 case Builtin::BI__builtin___strncpy_chk:
1231 case Builtin::BI__builtin___stpncpy_chk:
1232 SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3);
1234 case Builtin::BI__builtin___memccpy_chk:
1235 SemaBuiltinMemChkCall(*this, FDecl, TheCall, 3, 4);
1237 case Builtin::BI__builtin___snprintf_chk:
1238 case Builtin::BI__builtin___vsnprintf_chk:
1239 SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3);
1241 case Builtin::BI__builtin_call_with_static_chain:
1242 if (SemaBuiltinCallWithStaticChain(*this, TheCall))
1245 case Builtin::BI__exception_code:
1246 case Builtin::BI_exception_code:
1247 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
1248 diag::err_seh___except_block))
1251 case Builtin::BI__exception_info:
1252 case Builtin::BI_exception_info:
1253 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
1254 diag::err_seh___except_filter))
1257 case Builtin::BI__GetExceptionInfo:
1258 if (checkArgCount(*this, TheCall, 1))
1261 if (CheckCXXThrowOperand(
1262 TheCall->getLocStart(),
1263 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
1267 TheCall->setType(Context.VoidPtrTy);
1269 // OpenCL v2.0, s6.13.16 - Pipe functions
1270 case Builtin::BIread_pipe:
1271 case Builtin::BIwrite_pipe:
1272 // Since those two functions are declared with var args, we need a semantic
1273 // check for the argument.
1274 if (SemaBuiltinRWPipe(*this, TheCall))
1276 TheCall->setType(Context.IntTy);
1278 case Builtin::BIreserve_read_pipe:
1279 case Builtin::BIreserve_write_pipe:
1280 case Builtin::BIwork_group_reserve_read_pipe:
1281 case Builtin::BIwork_group_reserve_write_pipe:
1282 if (SemaBuiltinReserveRWPipe(*this, TheCall))
1285 case Builtin::BIsub_group_reserve_read_pipe:
1286 case Builtin::BIsub_group_reserve_write_pipe:
1287 if (checkOpenCLSubgroupExt(*this, TheCall) ||
1288 SemaBuiltinReserveRWPipe(*this, TheCall))
1291 case Builtin::BIcommit_read_pipe:
1292 case Builtin::BIcommit_write_pipe:
1293 case Builtin::BIwork_group_commit_read_pipe:
1294 case Builtin::BIwork_group_commit_write_pipe:
1295 if (SemaBuiltinCommitRWPipe(*this, TheCall))
1298 case Builtin::BIsub_group_commit_read_pipe:
1299 case Builtin::BIsub_group_commit_write_pipe:
1300 if (checkOpenCLSubgroupExt(*this, TheCall) ||
1301 SemaBuiltinCommitRWPipe(*this, TheCall))
1304 case Builtin::BIget_pipe_num_packets:
1305 case Builtin::BIget_pipe_max_packets:
1306 if (SemaBuiltinPipePackets(*this, TheCall))
1308 TheCall->setType(Context.UnsignedIntTy);
1310 case Builtin::BIto_global:
1311 case Builtin::BIto_local:
1312 case Builtin::BIto_private:
1313 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
1316 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
1317 case Builtin::BIenqueue_kernel:
1318 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
1321 case Builtin::BIget_kernel_work_group_size:
1322 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
1323 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
1326 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
1327 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
1328 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
1331 case Builtin::BI__builtin_os_log_format:
1332 case Builtin::BI__builtin_os_log_format_buffer_size:
1333 if (SemaBuiltinOSLogFormat(TheCall))
1338 // Since the target specific builtins for each arch overlap, only check those
1339 // of the arch we are compiling for.
1340 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
1341 switch (Context.getTargetInfo().getTriple().getArch()) {
1342 case llvm::Triple::arm:
1343 case llvm::Triple::armeb:
1344 case llvm::Triple::thumb:
1345 case llvm::Triple::thumbeb:
1346 if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
1349 case llvm::Triple::aarch64:
1350 case llvm::Triple::aarch64_be:
1351 if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
1354 case llvm::Triple::hexagon:
1355 if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall))
1358 case llvm::Triple::mips:
1359 case llvm::Triple::mipsel:
1360 case llvm::Triple::mips64:
1361 case llvm::Triple::mips64el:
1362 if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
1365 case llvm::Triple::systemz:
1366 if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
1369 case llvm::Triple::x86:
1370 case llvm::Triple::x86_64:
1371 if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
1374 case llvm::Triple::ppc:
1375 case llvm::Triple::ppc64:
1376 case llvm::Triple::ppc64le:
1377 if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
1385 return TheCallResult;
1388 // Get the valid immediate range for the specified NEON type code.
1389 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
1390 NeonTypeFlags Type(t);
1391 int IsQuad = ForceQuad ? true : Type.isQuad();
1392 switch (Type.getEltType()) {
1393 case NeonTypeFlags::Int8:
1394 case NeonTypeFlags::Poly8:
1395 return shift ? 7 : (8 << IsQuad) - 1;
1396 case NeonTypeFlags::Int16:
1397 case NeonTypeFlags::Poly16:
1398 return shift ? 15 : (4 << IsQuad) - 1;
1399 case NeonTypeFlags::Int32:
1400 return shift ? 31 : (2 << IsQuad) - 1;
1401 case NeonTypeFlags::Int64:
1402 case NeonTypeFlags::Poly64:
1403 return shift ? 63 : (1 << IsQuad) - 1;
1404 case NeonTypeFlags::Poly128:
1405 return shift ? 127 : (1 << IsQuad) - 1;
1406 case NeonTypeFlags::Float16:
1407 assert(!shift && "cannot shift float types!");
1408 return (4 << IsQuad) - 1;
1409 case NeonTypeFlags::Float32:
1410 assert(!shift && "cannot shift float types!");
1411 return (2 << IsQuad) - 1;
1412 case NeonTypeFlags::Float64:
1413 assert(!shift && "cannot shift float types!");
1414 return (1 << IsQuad) - 1;
1416 llvm_unreachable("Invalid NeonTypeFlag!");
1419 /// getNeonEltType - Return the QualType corresponding to the elements of
1420 /// the vector type specified by the NeonTypeFlags. This is used to check
1421 /// the pointer arguments for Neon load/store intrinsics.
1422 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
1423 bool IsPolyUnsigned, bool IsInt64Long) {
1424 switch (Flags.getEltType()) {
1425 case NeonTypeFlags::Int8:
1426 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
1427 case NeonTypeFlags::Int16:
1428 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
1429 case NeonTypeFlags::Int32:
1430 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
1431 case NeonTypeFlags::Int64:
1433 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
1435 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
1436 : Context.LongLongTy;
1437 case NeonTypeFlags::Poly8:
1438 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
1439 case NeonTypeFlags::Poly16:
1440 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
1441 case NeonTypeFlags::Poly64:
1443 return Context.UnsignedLongTy;
1445 return Context.UnsignedLongLongTy;
1446 case NeonTypeFlags::Poly128:
1448 case NeonTypeFlags::Float16:
1449 return Context.HalfTy;
1450 case NeonTypeFlags::Float32:
1451 return Context.FloatTy;
1452 case NeonTypeFlags::Float64:
1453 return Context.DoubleTy;
1455 llvm_unreachable("Invalid NeonTypeFlag!");
// Semantic checking for overloaded NEON intrinsics: validate the trailing
// type-code immediate, check the pointer argument (for load/store builtins),
// and range-check any instruction immediates. Returns true on error.
bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  llvm::APSInt Result;
  bool HasConstPtr = false;
  // The tablegen'd GET_NEON_OVERLOAD_CHECK cases fill in the per-builtin
  // overload data (they assign to locals such as 'mask' and 'HasConstPtr'
  // by name, so those identifiers must not be renamed).
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  unsigned ImmArg = TheCall->getNumArgs()-1;
    // The variant selector must be an integer constant expression.
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))

    // The type code is at most 6 bits; reject codes outside the builtin's
    // legal set described by 'mask'.
    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code)
        << TheCall->getArg(ImmArg)->getSourceRange();

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    // Polynomial element signedness and the 64-bit integer type are both
    // target-dependent; derive them here before building the expected type.
    llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_be;
        Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
      EltTy = EltTy.withConst();
    // Diagnose the argument as if it were being assigned to a variable of
    // the expected pointer type.
    QualType LHSTy = Context.getPointerType(EltTy);
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
    if (DiagnoseAssignmentResult(ConvTy, Arg->getLocStart(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  // The generated cases set the argument index 'i' and bounds 'l'/'u'.
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_IMMEDIATE_CHECK

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
// Shared semantic checking for the ARM and AArch64 exclusive load/store
// builtins (ldrex/ldaex/strex/stlex). MaxWidth is the widest value the
// target can transfer (64 bits for ARM callers, 128 for AArch64).
// Returns true on error.
bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  // The loads take a single pointer argument; the stores take a value to
  // store plus the pointer.
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  // Reference to the builtin itself, used below as the diagnostic location.
  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
    Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
      << PointerArg->getType() << PointerArg->getSourceRange();

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getLocStart(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType()
        << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
      << PointerArg->getType() << PointerArg->getSourceRange();

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getLocStart(), diag::err_atomic_exclusive_builtin_pointer_size)
      << PointerArg->getType() << PointerArg->getSourceRange();

  // ObjC ARC ownership-qualified pointees cannot be transferred atomically
  // (see err_arc_atomic_ownership below).
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
      << ValType << PointerArg->getSourceRange();

    TheCall->setType(ValType);

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
1642 bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
1643 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
1644 BuiltinID == ARM::BI__builtin_arm_ldaex ||
1645 BuiltinID == ARM::BI__builtin_arm_strex ||
1646 BuiltinID == ARM::BI__builtin_arm_stlex) {
1647 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
1650 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
1651 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
1652 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
1655 if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
1656 BuiltinID == ARM::BI__builtin_arm_wsr64)
1657 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
1659 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
1660 BuiltinID == ARM::BI__builtin_arm_rsrp ||
1661 BuiltinID == ARM::BI__builtin_arm_wsr ||
1662 BuiltinID == ARM::BI__builtin_arm_wsrp)
1663 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
1665 if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
1668 // For intrinsics which take an immediate value as part of the instruction,
1669 // range check them here.
1670 // FIXME: VFP Intrinsics should error if VFP not present.
1671 switch (BuiltinID) {
1672 default: return false;
1673 case ARM::BI__builtin_arm_ssat:
1674 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
1675 case ARM::BI__builtin_arm_usat:
1676 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
1677 case ARM::BI__builtin_arm_ssat16:
1678 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
1679 case ARM::BI__builtin_arm_usat16:
1680 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
1681 case ARM::BI__builtin_arm_vcvtr_f:
1682 case ARM::BI__builtin_arm_vcvtr_d:
1683 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
1684 case ARM::BI__builtin_arm_dmb:
1685 case ARM::BI__builtin_arm_dsb:
1686 case ARM::BI__builtin_arm_isb:
1687 case ARM::BI__builtin_arm_dbg:
1688 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
1692 bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
1693 CallExpr *TheCall) {
1694 if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
1695 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
1696 BuiltinID == AArch64::BI__builtin_arm_strex ||
1697 BuiltinID == AArch64::BI__builtin_arm_stlex) {
1698 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
1701 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
1702 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
1703 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
1704 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
1705 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
1708 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
1709 BuiltinID == AArch64::BI__builtin_arm_wsr64)
1710 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
1712 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
1713 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
1714 BuiltinID == AArch64::BI__builtin_arm_wsr ||
1715 BuiltinID == AArch64::BI__builtin_arm_wsrp)
1716 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
1718 if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
1721 // For intrinsics which take an immediate value as part of the instruction,
1722 // range check them here.
1723 unsigned i = 0, l = 0, u = 0;
1724 switch (BuiltinID) {
1725 default: return false;
1726 case AArch64::BI__builtin_arm_dmb:
1727 case AArch64::BI__builtin_arm_dsb:
1728 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
1731 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
1734 bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
1735 static const std::map<unsigned, std::vector<StringRef>> ValidCPU = {
1736 { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, {"v65"} },
1737 { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, {"v62", "v65"} },
1738 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, {"v62", "v65"} },
1739 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, {"v62", "v65"} },
1740 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {"v60", "v62", "v65"} },
1741 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {"v60", "v62", "v65"} },
1742 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {"v60", "v62", "v65"} },
1743 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {"v60", "v62", "v65"} },
1744 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {"v60", "v62", "v65"} },
1745 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {"v60", "v62", "v65"} },
1746 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {"v60", "v62", "v65"} },
1747 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {"v60", "v62", "v65"} },
1748 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {"v60", "v62", "v65"} },
1749 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {"v60", "v62", "v65"} },
1750 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {"v60", "v62", "v65"} },
1751 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {"v60", "v62", "v65"} },
1752 { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, {"v62", "v65"} },
1753 { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, {"v62", "v65"} },
1754 { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, {"v62", "v65"} },
1757 static const std::map<unsigned, std::vector<StringRef>> ValidHVX = {
1758 { Hexagon::BI__builtin_HEXAGON_V6_extractw, {"v60", "v62", "v65"} },
1759 { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, {"v60", "v62", "v65"} },
1760 { Hexagon::BI__builtin_HEXAGON_V6_hi, {"v60", "v62", "v65"} },
1761 { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, {"v60", "v62", "v65"} },
1762 { Hexagon::BI__builtin_HEXAGON_V6_lo, {"v60", "v62", "v65"} },
1763 { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, {"v60", "v62", "v65"} },
1764 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, {"v62", "v65"} },
1765 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, {"v62", "v65"} },
1766 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, {"v62", "v65"} },
1767 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, {"v62", "v65"} },
1768 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, {"v60", "v62", "v65"} },
1769 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, {"v60", "v62", "v65"} },
1770 { Hexagon::BI__builtin_HEXAGON_V6_pred_and, {"v60", "v62", "v65"} },
1771 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, {"v60", "v62", "v65"} },
1772 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, {"v60", "v62", "v65"} },
1773 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, {"v60", "v62", "v65"} },
1774 { Hexagon::BI__builtin_HEXAGON_V6_pred_not, {"v60", "v62", "v65"} },
1775 { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, {"v60", "v62", "v65"} },
1776 { Hexagon::BI__builtin_HEXAGON_V6_pred_or, {"v60", "v62", "v65"} },
1777 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, {"v60", "v62", "v65"} },
1778 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, {"v60", "v62", "v65"} },
1779 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, {"v60", "v62", "v65"} },
1780 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, {"v60", "v62", "v65"} },
1781 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, {"v60", "v62", "v65"} },
1782 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, {"v62", "v65"} },
1783 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, {"v62", "v65"} },
1784 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, {"v60", "v62", "v65"} },
1785 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, {"v60", "v62", "v65"} },
1786 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, {"v62", "v65"} },
1787 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, {"v62", "v65"} },
1788 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, {"v62", "v65"} },
1789 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, {"v62", "v65"} },
1790 { Hexagon::BI__builtin_HEXAGON_V6_vabsb, {"v65"} },
1791 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, {"v65"} },
1792 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, {"v65"} },
1793 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, {"v65"} },
1794 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, {"v60", "v62", "v65"} },
1795 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, {"v60", "v62", "v65"} },
1796 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, {"v60", "v62", "v65"} },
1797 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, {"v60", "v62", "v65"} },
1798 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, {"v60", "v62", "v65"} },
1799 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, {"v60", "v62", "v65"} },
1800 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, {"v60", "v62", "v65"} },
1801 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, {"v60", "v62", "v65"} },
1802 { Hexagon::BI__builtin_HEXAGON_V6_vabsh, {"v60", "v62", "v65"} },
1803 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, {"v60", "v62", "v65"} },
1804 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, {"v60", "v62", "v65"} },
1805 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, {"v60", "v62", "v65"} },
1806 { Hexagon::BI__builtin_HEXAGON_V6_vabsw, {"v60", "v62", "v65"} },
1807 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, {"v60", "v62", "v65"} },
1808 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, {"v60", "v62", "v65"} },
1809 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, {"v60", "v62", "v65"} },
1810 { Hexagon::BI__builtin_HEXAGON_V6_vaddb, {"v60", "v62", "v65"} },
1811 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, {"v60", "v62", "v65"} },
1812 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, {"v60", "v62", "v65"} },
1813 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, {"v60", "v62", "v65"} },
1814 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, {"v62", "v65"} },
1815 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, {"v62", "v65"} },
1816 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, {"v62", "v65"} },
1817 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, {"v62", "v65"} },
1818 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, {"v62", "v65"} },
1819 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, {"v62", "v65"} },
1820 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, {"v62", "v65"} },
1821 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, {"v62", "v65"} },
1822 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, {"v62", "v65"} },
1823 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, {"v62", "v65"} },
1824 { Hexagon::BI__builtin_HEXAGON_V6_vaddh, {"v60", "v62", "v65"} },
1825 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, {"v60", "v62", "v65"} },
1826 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, {"v60", "v62", "v65"} },
1827 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, {"v60", "v62", "v65"} },
1828 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, {"v60", "v62", "v65"} },
1829 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, {"v60", "v62", "v65"} },
1830 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, {"v60", "v62", "v65"} },
1831 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, {"v60", "v62", "v65"} },
1832 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, {"v60", "v62", "v65"} },
1833 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, {"v60", "v62", "v65"} },
1834 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, {"v62", "v65"} },
1835 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, {"v62", "v65"} },
1836 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, {"v60", "v62", "v65"} },
1837 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, {"v60", "v62", "v65"} },
1838 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, {"v62", "v65"} },
1839 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, {"v62", "v65"} },
1840 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, {"v60", "v62", "v65"} },
1841 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, {"v60", "v62", "v65"} },
1842 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, {"v60", "v62", "v65"} },
1843 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, {"v60", "v62", "v65"} },
1844 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, {"v62", "v65"} },
1845 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, {"v62", "v65"} },
1846 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, {"v60", "v62", "v65"} },
1847 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, {"v60", "v62", "v65"} },
1848 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, {"v60", "v62", "v65"} },
1849 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, {"v60", "v62", "v65"} },
1850 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, {"v60", "v62", "v65"} },
1851 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, {"v60", "v62", "v65"} },
1852 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, {"v62", "v65"} },
1853 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, {"v62", "v65"} },
1854 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, {"v62", "v65"} },
1855 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, {"v62", "v65"} },
1856 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, {"v62", "v65"} },
1857 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, {"v62", "v65"} },
1858 { Hexagon::BI__builtin_HEXAGON_V6_vaddw, {"v60", "v62", "v65"} },
1859 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, {"v60", "v62", "v65"} },
1860 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, {"v60", "v62", "v65"} },
1861 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, {"v60", "v62", "v65"} },
1862 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, {"v60", "v62", "v65"} },
1863 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, {"v60", "v62", "v65"} },
1864 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, {"v60", "v62", "v65"} },
1865 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, {"v60", "v62", "v65"} },
1866 { Hexagon::BI__builtin_HEXAGON_V6_valignb, {"v60", "v62", "v65"} },
1867 { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, {"v60", "v62", "v65"} },
1868 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {"v60", "v62", "v65"} },
1869 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {"v60", "v62", "v65"} },
1870 { Hexagon::BI__builtin_HEXAGON_V6_vand, {"v60", "v62", "v65"} },
1871 { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, {"v60", "v62", "v65"} },
1872 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, {"v62", "v65"} },
1873 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, {"v62", "v65"} },
1874 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, {"v62", "v65"} },
1875 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, {"v62", "v65"} },
1876 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, {"v60", "v62", "v65"} },
1877 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, {"v60", "v62", "v65"} },
1878 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, {"v60", "v62", "v65"} },
1879 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, {"v60", "v62", "v65"} },
1880 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, {"v62", "v65"} },
1881 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, {"v62", "v65"} },
1882 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, {"v62", "v65"} },
1883 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, {"v62", "v65"} },
1884 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, {"v60", "v62", "v65"} },
1885 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, {"v60", "v62", "v65"} },
1886 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, {"v60", "v62", "v65"} },
1887 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, {"v60", "v62", "v65"} },
1888 { Hexagon::BI__builtin_HEXAGON_V6_vaslh, {"v60", "v62", "v65"} },
1889 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, {"v60", "v62", "v65"} },
1890 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, {"v65"} },
1891 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, {"v65"} },
1892 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, {"v60", "v62", "v65"} },
1893 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, {"v60", "v62", "v65"} },
1894 { Hexagon::BI__builtin_HEXAGON_V6_vaslw, {"v60", "v62", "v65"} },
1895 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, {"v60", "v62", "v65"} },
1896 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, {"v60", "v62", "v65"} },
1897 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, {"v60", "v62", "v65"} },
1898 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, {"v60", "v62", "v65"} },
1899 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, {"v60", "v62", "v65"} },
1900 { Hexagon::BI__builtin_HEXAGON_V6_vasrh, {"v60", "v62", "v65"} },
1901 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, {"v60", "v62", "v65"} },
1902 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, {"v65"} },
1903 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, {"v65"} },
1904 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, {"v60", "v62", "v65"} },
1905 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, {"v60", "v62", "v65"} },
1906 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, {"v62", "v65"} },
1907 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, {"v62", "v65"} },
1908 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, {"v60", "v62", "v65"} },
1909 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, {"v60", "v62", "v65"} },
1910 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, {"v60", "v62", "v65"} },
1911 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, {"v60", "v62", "v65"} },
1912 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, {"v60", "v62", "v65"} },
1913 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, {"v60", "v62", "v65"} },
1914 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, {"v65"} },
1915 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, {"v65"} },
1916 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, {"v65"} },
1917 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, {"v65"} },
1918 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, {"v62", "v65"} },
1919 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, {"v62", "v65"} },
1920 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, {"v65"} },
1921 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, {"v65"} },
1922 { Hexagon::BI__builtin_HEXAGON_V6_vasrw, {"v60", "v62", "v65"} },
1923 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, {"v60", "v62", "v65"} },
1924 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, {"v60", "v62", "v65"} },
1925 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, {"v60", "v62", "v65"} },
1926 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, {"v60", "v62", "v65"} },
1927 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, {"v60", "v62", "v65"} },
1928 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, {"v60", "v62", "v65"} },
1929 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, {"v60", "v62", "v65"} },
1930 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, {"v60", "v62", "v65"} },
1931 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, {"v60", "v62", "v65"} },
1932 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, {"v62", "v65"} },
1933 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, {"v62", "v65"} },
1934 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, {"v60", "v62", "v65"} },
1935 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, {"v60", "v62", "v65"} },
1936 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, {"v60", "v62", "v65"} },
1937 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, {"v60", "v62", "v65"} },
1938 { Hexagon::BI__builtin_HEXAGON_V6_vassign, {"v60", "v62", "v65"} },
1939 { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, {"v60", "v62", "v65"} },
1940 { Hexagon::BI__builtin_HEXAGON_V6_vassignp, {"v60", "v62", "v65"} },
1941 { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, {"v60", "v62", "v65"} },
1942 { Hexagon::BI__builtin_HEXAGON_V6_vavgb, {"v65"} },
1943 { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, {"v65"} },
1944 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, {"v65"} },
1945 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, {"v65"} },
1946 { Hexagon::BI__builtin_HEXAGON_V6_vavgh, {"v60", "v62", "v65"} },
1947 { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, {"v60", "v62", "v65"} },
1948 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, {"v60", "v62", "v65"} },
1949 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, {"v60", "v62", "v65"} },
1950 { Hexagon::BI__builtin_HEXAGON_V6_vavgub, {"v60", "v62", "v65"} },
1951 { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, {"v60", "v62", "v65"} },
1952 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, {"v60", "v62", "v65"} },
1953 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, {"v60", "v62", "v65"} },
1954 { Hexagon::BI__builtin_HEXAGON_V6_vavguh, {"v60", "v62", "v65"} },
1955 { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, {"v60", "v62", "v65"} },
1956 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, {"v60", "v62", "v65"} },
1957 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, {"v60", "v62", "v65"} },
1958 { Hexagon::BI__builtin_HEXAGON_V6_vavguw, {"v65"} },
1959 { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, {"v65"} },
1960 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, {"v65"} },
1961 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, {"v65"} },
1962 { Hexagon::BI__builtin_HEXAGON_V6_vavgw, {"v60", "v62", "v65"} },
1963 { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, {"v60", "v62", "v65"} },
1964 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, {"v60", "v62", "v65"} },
1965 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, {"v60", "v62", "v65"} },
1966 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, {"v60", "v62", "v65"} },
1967 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, {"v60", "v62", "v65"} },
1968 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, {"v60", "v62", "v65"} },
1969 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, {"v60", "v62", "v65"} },
1970 { Hexagon::BI__builtin_HEXAGON_V6_vcombine, {"v60", "v62", "v65"} },
1971 { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, {"v60", "v62", "v65"} },
1972 { Hexagon::BI__builtin_HEXAGON_V6_vd0, {"v60", "v62", "v65"} },
1973 { Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, {"v60", "v62", "v65"} },
1974 { Hexagon::BI__builtin_HEXAGON_V6_vdd0, {"v65"} },
1975 { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, {"v65"} },
1976 { Hexagon::BI__builtin_HEXAGON_V6_vdealb, {"v60", "v62", "v65"} },
1977 { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, {"v60", "v62", "v65"} },
1978 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, {"v60", "v62", "v65"} },
1979 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, {"v60", "v62", "v65"} },
1980 { Hexagon::BI__builtin_HEXAGON_V6_vdealh, {"v60", "v62", "v65"} },
1981 { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, {"v60", "v62", "v65"} },
1982 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, {"v60", "v62", "v65"} },
1983 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, {"v60", "v62", "v65"} },
1984 { Hexagon::BI__builtin_HEXAGON_V6_vdelta, {"v60", "v62", "v65"} },
1985 { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, {"v60", "v62", "v65"} },
1986 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, {"v60", "v62", "v65"} },
1987 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, {"v60", "v62", "v65"} },
1988 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, {"v60", "v62", "v65"} },
1989 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, {"v60", "v62", "v65"} },
1990 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, {"v60", "v62", "v65"} },
1991 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, {"v60", "v62", "v65"} },
1992 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, {"v60", "v62", "v65"} },
1993 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, {"v60", "v62", "v65"} },
1994 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, {"v60", "v62", "v65"} },
1995 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, {"v60", "v62", "v65"} },
1996 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, {"v60", "v62", "v65"} },
1997 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, {"v60", "v62", "v65"} },
1998 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, {"v60", "v62", "v65"} },
1999 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, {"v60", "v62", "v65"} },
2000 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, {"v60", "v62", "v65"} },
2001 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, {"v60", "v62", "v65"} },
2002 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, {"v60", "v62", "v65"} },
2003 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, {"v60", "v62", "v65"} },
2004 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, {"v60", "v62", "v65"} },
2005 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, {"v60", "v62", "v65"} },
2006 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, {"v60", "v62", "v65"} },
2007 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, {"v60", "v62", "v65"} },
2008 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, {"v60", "v62", "v65"} },
2009 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, {"v60", "v62", "v65"} },
2010 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, {"v60", "v62", "v65"} },
2011 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, {"v60", "v62", "v65"} },
2012 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, {"v60", "v62", "v65"} },
2013 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, {"v60", "v62", "v65"} },
2014 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, {"v60", "v62", "v65"} },
2015 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, {"v60", "v62", "v65"} },
2016 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, {"v60", "v62", "v65"} },
2017 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, {"v60", "v62", "v65"} },
2018 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, {"v60", "v62", "v65"} },
2019 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, {"v60", "v62", "v65"} },
2020 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, {"v60", "v62", "v65"} },
2021 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, {"v60", "v62", "v65"} },
2022 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, {"v60", "v62", "v65"} },
2023 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, {"v60", "v62", "v65"} },
2024 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, {"v60", "v62", "v65"} },
2025 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, {"v60", "v62", "v65"} },
2026 { Hexagon::BI__builtin_HEXAGON_V6_veqb, {"v60", "v62", "v65"} },
2027 { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, {"v60", "v62", "v65"} },
2028 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, {"v60", "v62", "v65"} },
2029 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, {"v60", "v62", "v65"} },
2030 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, {"v60", "v62", "v65"} },
2031 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, {"v60", "v62", "v65"} },
2032 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, {"v60", "v62", "v65"} },
2033 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, {"v60", "v62", "v65"} },
2034 { Hexagon::BI__builtin_HEXAGON_V6_veqh, {"v60", "v62", "v65"} },
2035 { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, {"v60", "v62", "v65"} },
2036 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, {"v60", "v62", "v65"} },
2037 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, {"v60", "v62", "v65"} },
2038 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, {"v60", "v62", "v65"} },
2039 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, {"v60", "v62", "v65"} },
2040 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, {"v60", "v62", "v65"} },
2041 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, {"v60", "v62", "v65"} },
2042 { Hexagon::BI__builtin_HEXAGON_V6_veqw, {"v60", "v62", "v65"} },
2043 { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, {"v60", "v62", "v65"} },
2044 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, {"v60", "v62", "v65"} },
2045 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, {"v60", "v62", "v65"} },
2046 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, {"v60", "v62", "v65"} },
2047 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, {"v60", "v62", "v65"} },
2048 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, {"v60", "v62", "v65"} },
2049 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, {"v60", "v62", "v65"} },
2050 { Hexagon::BI__builtin_HEXAGON_V6_vgtb, {"v60", "v62", "v65"} },
2051 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, {"v60", "v62", "v65"} },
2052 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, {"v60", "v62", "v65"} },
2053 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, {"v60", "v62", "v65"} },
2054 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, {"v60", "v62", "v65"} },
2055 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, {"v60", "v62", "v65"} },
2056 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, {"v60", "v62", "v65"} },
2057 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, {"v60", "v62", "v65"} },
2058 { Hexagon::BI__builtin_HEXAGON_V6_vgth, {"v60", "v62", "v65"} },
2059 { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, {"v60", "v62", "v65"} },
2060 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, {"v60", "v62", "v65"} },
2061 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, {"v60", "v62", "v65"} },
2062 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, {"v60", "v62", "v65"} },
2063 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, {"v60", "v62", "v65"} },
2064 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, {"v60", "v62", "v65"} },
2065 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, {"v60", "v62", "v65"} },
2066 { Hexagon::BI__builtin_HEXAGON_V6_vgtub, {"v60", "v62", "v65"} },
2067 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, {"v60", "v62", "v65"} },
2068 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, {"v60", "v62", "v65"} },
2069 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, {"v60", "v62", "v65"} },
2070 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, {"v60", "v62", "v65"} },
2071 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, {"v60", "v62", "v65"} },
2072 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, {"v60", "v62", "v65"} },
2073 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, {"v60", "v62", "v65"} },
2074 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, {"v60", "v62", "v65"} },
2075 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, {"v60", "v62", "v65"} },
2076 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, {"v60", "v62", "v65"} },
2077 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, {"v60", "v62", "v65"} },
2078 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, {"v60", "v62", "v65"} },
2079 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, {"v60", "v62", "v65"} },
2080 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, {"v60", "v62", "v65"} },
2081 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, {"v60", "v62", "v65"} },
2082 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, {"v60", "v62", "v65"} },
2083 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, {"v60", "v62", "v65"} },
2084 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, {"v60", "v62", "v65"} },
2085 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, {"v60", "v62", "v65"} },
2086 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, {"v60", "v62", "v65"} },
2087 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, {"v60", "v62", "v65"} },
2088 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, {"v60", "v62", "v65"} },
2089 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, {"v60", "v62", "v65"} },
2090 { Hexagon::BI__builtin_HEXAGON_V6_vgtw, {"v60", "v62", "v65"} },
2091 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, {"v60", "v62", "v65"} },
2092 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, {"v60", "v62", "v65"} },
2093 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, {"v60", "v62", "v65"} },
2094 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, {"v60", "v62", "v65"} },
2095 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, {"v60", "v62", "v65"} },
2096 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, {"v60", "v62", "v65"} },
2097 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, {"v60", "v62", "v65"} },
2098 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, {"v60", "v62", "v65"} },
2099 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, {"v60", "v62", "v65"} },
2100 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, {"v60", "v62", "v65"} },
2101 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, {"v60", "v62", "v65"} },
2102 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {"v60", "v62", "v65"} },
2103 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {"v60", "v62", "v65"} },
2104 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, {"v62", "v65"} },
2105 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, {"v62", "v65"} },
2106 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, {"v60", "v62", "v65"} },
2107 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, {"v60", "v62", "v65"} },
2108 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, {"v60", "v62", "v65"} },
2109 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, {"v60", "v62", "v65"} },
2110 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, {"v60", "v62", "v65"} },
2111 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, {"v60", "v62", "v65"} },
2112 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, {"v60", "v62", "v65"} },
2113 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, {"v60", "v62", "v65"} },
2114 { Hexagon::BI__builtin_HEXAGON_V6_vlut4, {"v65"} },
2115 { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, {"v65"} },
2116 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, {"v60", "v62", "v65"} },
2117 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, {"v60", "v62", "v65"} },
2118 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {"v62", "v65"} },
2119 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {"v62", "v65"} },
2120 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, {"v62", "v65"} },
2121 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, {"v62", "v65"} },
2122 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, {"v60", "v62", "v65"} },
2123 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, {"v60", "v62", "v65"} },
2124 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {"v62", "v65"} },
2125 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, {"v62", "v65"} },
2126 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, {"v60", "v62", "v65"} },
2127 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, {"v60", "v62", "v65"} },
2128 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, {"v62", "v65"} },
2129 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {"v62", "v65"} },
2130 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, {"v62", "v65"} },
2131 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, {"v62", "v65"} },
2132 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, {"v60", "v62", "v65"} },
2133 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, {"v60", "v62", "v65"} },
2134 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {"v62", "v65"} },
2135 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, {"v62", "v65"} },
2136 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, {"v62", "v65"} },
2137 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, {"v62", "v65"} },
2138 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, {"v60", "v62", "v65"} },
2139 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, {"v60", "v62", "v65"} },
2140 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, {"v60", "v62", "v65"} },
2141 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, {"v60", "v62", "v65"} },
2142 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, {"v60", "v62", "v65"} },
2143 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, {"v60", "v62", "v65"} },
2144 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, {"v60", "v62", "v65"} },
2145 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, {"v60", "v62", "v65"} },
2146 { Hexagon::BI__builtin_HEXAGON_V6_vminb, {"v62", "v65"} },
2147 { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, {"v62", "v65"} },
2148 { Hexagon::BI__builtin_HEXAGON_V6_vminh, {"v60", "v62", "v65"} },
2149 { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, {"v60", "v62", "v65"} },
2150 { Hexagon::BI__builtin_HEXAGON_V6_vminub, {"v60", "v62", "v65"} },
2151 { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, {"v60", "v62", "v65"} },
2152 { Hexagon::BI__builtin_HEXAGON_V6_vminuh, {"v60", "v62", "v65"} },
2153 { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, {"v60", "v62", "v65"} },
2154 { Hexagon::BI__builtin_HEXAGON_V6_vminw, {"v60", "v62", "v65"} },
2155 { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, {"v60", "v62", "v65"} },
2156 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, {"v60", "v62", "v65"} },
2157 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, {"v60", "v62", "v65"} },
2158 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, {"v60", "v62", "v65"} },
2159 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, {"v60", "v62", "v65"} },
2160 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, {"v60", "v62", "v65"} },
2161 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, {"v60", "v62", "v65"} },
2162 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, {"v65"} },
2163 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, {"v65"} },
2164 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, {"v65"} },
2165 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, {"v65"} },
2166 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, {"v60", "v62", "v65"} },
2167 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, {"v60", "v62", "v65"} },
2168 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, {"v60", "v62", "v65"} },
2169 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, {"v60", "v62", "v65"} },
2170 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, {"v60", "v62", "v65"} },
2171 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, {"v60", "v62", "v65"} },
2172 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, {"v65"} },
2173 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, {"v65"} },
2174 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, {"v62", "v65"} },
2175 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, {"v62", "v65"} },
2176 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, {"v62", "v65"} },
2177 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, {"v62", "v65"} },
2178 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, {"v65"} },
2179 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, {"v65"} },
2180 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, {"v65"} },
2181 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, {"v65"} },
2182 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, {"v60", "v62", "v65"} },
2183 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, {"v60", "v62", "v65"} },
2184 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, {"v60", "v62", "v65"} },
2185 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, {"v60", "v62", "v65"} },
2186 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, {"v60", "v62", "v65"} },
2187 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, {"v60", "v62", "v65"} },
2188 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, {"v60", "v62", "v65"} },
2189 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, {"v60", "v62", "v65"} },
2190 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, {"v60", "v62", "v65"} },
2191 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, {"v60", "v62", "v65"} },
2192 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, {"v60", "v62", "v65"} },
2193 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, {"v60", "v62", "v65"} },
2194 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, {"v60", "v62", "v65"} },
2195 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, {"v60", "v62", "v65"} },
2196 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, {"v62", "v65"} },
2197 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, {"v62", "v65"} },
2198 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, {"v60", "v62", "v65"} },
2199 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, {"v60", "v62", "v65"} },
2200 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, {"v65"} },
2201 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, {"v65"} },
2202 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, {"v60", "v62", "v65"} },
2203 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, {"v60", "v62", "v65"} },
2204 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, {"v60", "v62", "v65"} },
2205 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, {"v60", "v62", "v65"} },
2206 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, {"v60", "v62", "v65"} },
2207 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, {"v60", "v62", "v65"} },
2208 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, {"v60", "v62", "v65"} },
2209 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, {"v60", "v62", "v65"} },
2210 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, {"v60", "v62", "v65"} },
2211 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, {"v60", "v62", "v65"} },
2212 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, {"v60", "v62", "v65"} },
2213 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, {"v60", "v62", "v65"} },
2214 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, {"v60", "v62", "v65"} },
2215 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, {"v60", "v62", "v65"} },
2216 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, {"v60", "v62", "v65"} },
2217 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, {"v60", "v62", "v65"} },
2218 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, {"v60", "v62", "v65"} },
2219 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, {"v60", "v62", "v65"} },
2220 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, {"v60", "v62", "v65"} },
2221 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, {"v60", "v62", "v65"} },
2222 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, {"v60", "v62", "v65"} },
2223 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, {"v60", "v62", "v65"} },
2224 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, {"v60", "v62", "v65"} },
2225 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, {"v60", "v62", "v65"} },
2226 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, {"v60", "v62", "v65"} },
2227 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, {"v60", "v62", "v65"} },
2228 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, {"v60", "v62", "v65"} },
2229 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, {"v60", "v62", "v65"} },
2230 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, {"v60", "v62", "v65"} },
2231 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, {"v60", "v62", "v65"} },
2232 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, {"v60", "v62", "v65"} },
2233 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, {"v60", "v62", "v65"} },
2234 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, {"v60", "v62", "v65"} },
2235 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, {"v60", "v62", "v65"} },
2236 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, {"v60", "v62", "v65"} },
2237 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, {"v60", "v62", "v65"} },
2238 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, {"v60", "v62", "v65"} },
2239 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, {"v60", "v62", "v65"} },
2240 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, {"v60", "v62", "v65"} },
2241 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, {"v60", "v62", "v65"} },
2242 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, {"v60", "v62", "v65"} },
2243 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, {"v60", "v62", "v65"} },
2244 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, {"v62", "v65"} },
2245 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, {"v62", "v65"} },
2246 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, {"v62", "v65"} },
2247 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, {"v62", "v65"} },
2248 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, {"v60", "v62", "v65"} },
2249 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, {"v60", "v62", "v65"} },
2250 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, {"v62", "v65"} },
2251 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, {"v62", "v65"} },
2252 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, {"v60", "v62", "v65"} },
2253 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, {"v60", "v62", "v65"} },
2254 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, {"v60", "v62", "v65"} },
2255 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, {"v60", "v62", "v65"} },
2256 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, {"v60", "v62", "v65"} },
2257 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, {"v60", "v62", "v65"} },
2258 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, {"v60", "v62", "v65"} },
2259 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, {"v60", "v62", "v65"} },
2260 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, {"v60", "v62", "v65"} },
2261 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, {"v60", "v62", "v65"} },
2262 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, {"v60", "v62", "v65"} },
2263 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, {"v60", "v62", "v65"} },
2264 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, {"v60", "v62", "v65"} },
2265 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, {"v60", "v62", "v65"} },
2266 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, {"v60", "v62", "v65"} },
2267 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, {"v60", "v62", "v65"} },
2268 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, {"v60", "v62", "v65"} },
2269 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, {"v60", "v62", "v65"} },
2270 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, {"v65"} },
2271 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, {"v65"} },
2272 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, {"v65"} },
2273 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, {"v65"} },
2274 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, {"v60", "v62", "v65"} },
2275 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, {"v60", "v62", "v65"} },
2276 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, {"v60", "v62", "v65"} },
2277 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, {"v60", "v62", "v65"} },
2278 { Hexagon::BI__builtin_HEXAGON_V6_vmux, {"v60", "v62", "v65"} },
2279 { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, {"v60", "v62", "v65"} },
2280 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, {"v65"} },
2281 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, {"v65"} },
2282 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, {"v60", "v62", "v65"} },
2283 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, {"v60", "v62", "v65"} },
2284 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, {"v60", "v62", "v65"} },
2285 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, {"v60", "v62", "v65"} },
2286 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, {"v60", "v62", "v65"} },
2287 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, {"v60", "v62", "v65"} },
2288 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, {"v60", "v62", "v65"} },
2289 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, {"v60", "v62", "v65"} },
2290 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, {"v60", "v62", "v65"} },
2291 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, {"v60", "v62", "v65"} },
2292 { Hexagon::BI__builtin_HEXAGON_V6_vnot, {"v60", "v62", "v65"} },
2293 { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, {"v60", "v62", "v65"} },
2294 { Hexagon::BI__builtin_HEXAGON_V6_vor, {"v60", "v62", "v65"} },
2295 { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, {"v60", "v62", "v65"} },
2296 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, {"v60", "v62", "v65"} },
2297 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, {"v60", "v62", "v65"} },
2298 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, {"v60", "v62", "v65"} },
2299 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, {"v60", "v62", "v65"} },
2300 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, {"v60", "v62", "v65"} },
2301 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, {"v60", "v62", "v65"} },
2302 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, {"v60", "v62", "v65"} },
2303 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, {"v60", "v62", "v65"} },
2304 { Hexagon::BI__builtin_HEXAGON_V6_vpackob, {"v60", "v62", "v65"} },
2305 { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, {"v60", "v62", "v65"} },
2306 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, {"v60", "v62", "v65"} },
2307 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, {"v60", "v62", "v65"} },
2308 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, {"v60", "v62", "v65"} },
2309 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, {"v60", "v62", "v65"} },
2310 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, {"v60", "v62", "v65"} },
2311 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, {"v60", "v62", "v65"} },
2312 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, {"v60", "v62", "v65"} },
2313 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, {"v60", "v62", "v65"} },
2314 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, {"v65"} },
2315 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, {"v65"} },
2316 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, {"v65"} },
2317 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, {"v65"} },
2318 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, {"v65"} },
2319 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, {"v65"} },
2320 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, {"v60", "v62", "v65"} },
2321 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, {"v60", "v62", "v65"} },
2322 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, {"v65"} },
2323 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, {"v65"} },
2324 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, {"v65"} },
2325 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, {"v65"} },
2326 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, {"v60", "v62", "v65"} },
2327 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, {"v60", "v62", "v65"} },
2328 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, {"v60", "v62", "v65"} },
2329 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, {"v60", "v62", "v65"} },
2330 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {"v60", "v62", "v65"} },
2331 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {"v60", "v62", "v65"} },
2332 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {"v60", "v62", "v65"} },
2333 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, {"v60", "v62", "v65"} },
2334 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, {"v60", "v62", "v65"} },
2335 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, {"v60", "v62", "v65"} },
2336 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, {"v60", "v62", "v65"} },
2337 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, {"v60", "v62", "v65"} },
2338 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, {"v60", "v62", "v65"} },
2339 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, {"v60", "v62", "v65"} },
2340 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, {"v60", "v62", "v65"} },
2341 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, {"v60", "v62", "v65"} },
2342 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, {"v60", "v62", "v65"} },
2343 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, {"v60", "v62", "v65"} },
2344 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, {"v60", "v62", "v65"} },
2345 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, {"v60", "v62", "v65"} },
2346 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {"v60", "v62", "v65"} },
2347 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {"v60", "v62", "v65"} },
2348 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {"v60", "v62", "v65"} },
2349 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, {"v60", "v62", "v65"} },
2350 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, {"v65"} },
2351 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, {"v65"} },
2352 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, {"v65"} },
2353 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, {"v65"} },
2354 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, {"v60", "v62", "v65"} },
2355 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, {"v60", "v62", "v65"} },
2356 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, {"v60", "v62", "v65"} },
2357 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, {"v60", "v62", "v65"} },
2358 { Hexagon::BI__builtin_HEXAGON_V6_vror, {"v60", "v62", "v65"} },
2359 { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, {"v60", "v62", "v65"} },
2360 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, {"v60", "v62", "v65"} },
2361 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, {"v60", "v62", "v65"} },
2362 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, {"v60", "v62", "v65"} },
2363 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, {"v60", "v62", "v65"} },
2364 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, {"v62", "v65"} },
2365 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, {"v62", "v65"} },
2366 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, {"v62", "v65"} },
2367 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, {"v62", "v65"} },
2368 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, {"v60", "v62", "v65"} },
2369 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, {"v60", "v62", "v65"} },
2370 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, {"v60", "v62", "v65"} },
2371 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, {"v60", "v62", "v65"} },
2372 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {"v60", "v62", "v65"} },
2373 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {"v60", "v62", "v65"} },
2374 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {"v60", "v62", "v65"} },
2375 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, {"v60", "v62", "v65"} },
2376 { Hexagon::BI__builtin_HEXAGON_V6_vsathub, {"v60", "v62", "v65"} },
2377 { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, {"v60", "v62", "v65"} },
2378 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, {"v62", "v65"} },
2379 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, {"v62", "v65"} },
2380 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, {"v60", "v62", "v65"} },
2381 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, {"v60", "v62", "v65"} },
2382 { Hexagon::BI__builtin_HEXAGON_V6_vsb, {"v60", "v62", "v65"} },
2383 { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, {"v60", "v62", "v65"} },
2384 { Hexagon::BI__builtin_HEXAGON_V6_vsh, {"v60", "v62", "v65"} },
2385 { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, {"v60", "v62", "v65"} },
2386 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, {"v60", "v62", "v65"} },
2387 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, {"v60", "v62", "v65"} },
2388 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, {"v60", "v62", "v65"} },
2389 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, {"v60", "v62", "v65"} },
2390 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, {"v60", "v62", "v65"} },
2391 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, {"v60", "v62", "v65"} },
2392 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, {"v60", "v62", "v65"} },
2393 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, {"v60", "v62", "v65"} },
2394 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, {"v60", "v62", "v65"} },
2395 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, {"v60", "v62", "v65"} },
2396 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, {"v60", "v62", "v65"} },
2397 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, {"v60", "v62", "v65"} },
2398 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, {"v60", "v62", "v65"} },
2399 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, {"v60", "v62", "v65"} },
2400 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, {"v60", "v62", "v65"} },
2401 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, {"v60", "v62", "v65"} },
2402 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, {"v60", "v62", "v65"} },
2403 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, {"v60", "v62", "v65"} },
2404 { Hexagon::BI__builtin_HEXAGON_V6_vsubb, {"v60", "v62", "v65"} },
2405 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, {"v60", "v62", "v65"} },
2406 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, {"v60", "v62", "v65"} },
2407 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, {"v60", "v62", "v65"} },
2408 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, {"v62", "v65"} },
2409 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, {"v62", "v65"} },
2410 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, {"v62", "v65"} },
2411 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, {"v62", "v65"} },
2412 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, {"v62", "v65"} },
2413 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, {"v62", "v65"} },
2414 { Hexagon::BI__builtin_HEXAGON_V6_vsubh, {"v60", "v62", "v65"} },
2415 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, {"v60", "v62", "v65"} },
2416 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, {"v60", "v62", "v65"} },
2417 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, {"v60", "v62", "v65"} },
2418 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, {"v60", "v62", "v65"} },
2419 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, {"v60", "v62", "v65"} },
2420 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, {"v60", "v62", "v65"} },
2421 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, {"v60", "v62", "v65"} },
2422 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, {"v60", "v62", "v65"} },
2423 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, {"v60", "v62", "v65"} },
2424 { Hexagon::BI__builtin_HEXAGON_V6_vsububh, {"v60", "v62", "v65"} },
2425 { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, {"v60", "v62", "v65"} },
2426 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, {"v60", "v62", "v65"} },
2427 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, {"v60", "v62", "v65"} },
2428 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, {"v60", "v62", "v65"} },
2429 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, {"v60", "v62", "v65"} },
2430 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, {"v62", "v65"} },
2431 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, {"v62", "v65"} },
2432 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, {"v60", "v62", "v65"} },
2433 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, {"v60", "v62", "v65"} },
2434 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, {"v60", "v62", "v65"} },
2435 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, {"v60", "v62", "v65"} },
2436 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, {"v60", "v62", "v65"} },
2437 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, {"v60", "v62", "v65"} },
2438 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, {"v62", "v65"} },
2439 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, {"v62", "v65"} },
2440 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, {"v62", "v65"} },
2441 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, {"v62", "v65"} },
2442 { Hexagon::BI__builtin_HEXAGON_V6_vsubw, {"v60", "v62", "v65"} },
2443 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, {"v60", "v62", "v65"} },
2444 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, {"v60", "v62", "v65"} },
2445 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, {"v60", "v62", "v65"} },
2446 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, {"v60", "v62", "v65"} },
2447 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, {"v60", "v62", "v65"} },
2448 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, {"v60", "v62", "v65"} },
2449 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, {"v60", "v62", "v65"} },
2450 { Hexagon::BI__builtin_HEXAGON_V6_vswap, {"v60", "v62", "v65"} },
2451 { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, {"v60", "v62", "v65"} },
2452 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, {"v60", "v62", "v65"} },
2453 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, {"v60", "v62", "v65"} },
2454 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, {"v60", "v62", "v65"} },
2455 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, {"v60", "v62", "v65"} },
2456 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, {"v60", "v62", "v65"} },
2457 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, {"v60", "v62", "v65"} },
2458 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, {"v60", "v62", "v65"} },
2459 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, {"v60", "v62", "v65"} },
2460 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, {"v60", "v62", "v65"} },
2461 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, {"v60", "v62", "v65"} },
2462 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, {"v60", "v62", "v65"} },
2463 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, {"v60", "v62", "v65"} },
2464 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, {"v60", "v62", "v65"} },
2465 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, {"v60", "v62", "v65"} },
2466 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, {"v60", "v62", "v65"} },
2467 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, {"v60", "v62", "v65"} },
2468 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, {"v60", "v62", "v65"} },
2469 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, {"v60", "v62", "v65"} },
2470 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, {"v60", "v62", "v65"} },
2471 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, {"v60", "v62", "v65"} },
2472 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, {"v60", "v62", "v65"} },
2473 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, {"v60", "v62", "v65"} },
2474 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, {"v60", "v62", "v65"} },
2475 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, {"v60", "v62", "v65"} },
2476 { Hexagon::BI__builtin_HEXAGON_V6_vxor, {"v60", "v62", "v65"} },
2477 { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, {"v60", "v62", "v65"} },
2478 { Hexagon::BI__builtin_HEXAGON_V6_vzb, {"v60", "v62", "v65"} },
2479 { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, {"v60", "v62", "v65"} },
2480 { Hexagon::BI__builtin_HEXAGON_V6_vzh, {"v60", "v62", "v65"} },
2481 { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, {"v60", "v62", "v65"} },
2484 const TargetInfo &TI = Context.getTargetInfo();
2486 auto FC = ValidCPU.find(BuiltinID);
2487 if (FC != ValidCPU.end()) {
2488 const TargetOptions &Opts = TI.getTargetOpts();
2489 StringRef CPU = Opts.CPU;
2491 assert(CPU.startswith("hexagon") && "Unexpected CPU name");
2492 CPU.consume_front("hexagon");
2493 if (llvm::none_of(FC->second, [CPU](StringRef S) { return S == CPU; }))
2494 return Diag(TheCall->getLocStart(),
2495 diag::err_hexagon_builtin_unsupported_cpu);
2499 auto FH = ValidHVX.find(BuiltinID);
2500 if (FH != ValidHVX.end()) {
2501 if (!TI.hasFeature("hvx"))
2502 return Diag(TheCall->getLocStart(),
2503 diag::err_hexagon_builtin_requires_hvx);
2505 bool IsValid = llvm::any_of(FH->second,
2506 [&TI] (StringRef V) {
2507 std::string F = "hvx" + V.str();
2508 return TI.hasFeature(F);
2511 return Diag(TheCall->getLocStart(),
2512 diag::err_hexagon_builtin_unsupported_hvx);
// CheckHexagonBuiltinArgument - Validate the immediate (constant) operands of
// a Hexagon builtin call.  Builtins listed in the table below carry one or
// more ArgInfo constraints; each constraint names the operand index, whether
// the immediate is signed, its bit-width, and (as log2) a power-of-two the
// value must be a multiple of.  Returns true if a diagnostic was emitted.
2518 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
// ArgInfo constructor: O = operand index, S = signed?, W = bit-width,
// A = log2 of the required alignment (0 means no alignment constraint).
// NOTE(review): the struct's opening line and some member declarations are
// elided in this extract.
2520 ArgInfo(unsigned O, bool S, unsigned W, unsigned A)
2521 : OpNum(O), IsSigned(S), BitWidth(W), Align(A) {}
2523 bool IsSigned = false;
2524 unsigned BitWidth = 0;
// Table mapping a builtin ID to the constraints on its immediate operands.
// Builtins absent from this table have no immediate-operand restrictions.
2528 static const std::map<unsigned, std::vector<ArgInfo>> Infos = {
// Circular addressing load/store builtins: operand 3 is a signed 4-bit
// immediate, aligned to the access size (log2: 3=8B, 2=4B, 1=2B, 0=1B).
2529 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
2530 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
2531 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
2532 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 0 }} },
2533 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
2534 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
2535 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
2536 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
2537 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
2538 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
2539 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },
// Post-increment circular load/store intrinsics: operand 1 is the signed
// 4-bit increment, aligned to the access size.
2541 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
2542 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
2543 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
2544 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
2545 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
2546 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
2547 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
2548 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
2549 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
2550 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
2551 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },
// Scalar (A/C/F/M/S-unit) intrinsics with immediate operands.
2553 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
2554 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
2555 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
2556 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
2557 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
2558 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
2559 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
2560 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
2561 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
2562 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
2563 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
2564 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
2565 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
2566 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
2567 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
2568 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
2569 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
2570 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
2571 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
2572 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
2573 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
2574 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
2575 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
2576 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
2577 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
2578 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
2579 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
2580 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
2581 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
2582 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
2583 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
2584 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
2585 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
2586 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
2587 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
2588 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
2589 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
2590 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
2591 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
2592 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
2593 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
2594 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
2595 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
2596 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
2597 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
2598 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
2599 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
2600 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
2601 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
2602 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
2603 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
2604 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
2605 {{ 1, false, 6, 0 }} },
2606 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
2607 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
2608 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
2609 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
2610 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
2611 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
2612 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
2613 {{ 1, false, 5, 0 }} },
2614 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
2615 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
2616 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
2617 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
2618 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
// Extract/insert intrinsics constrain two immediates (width and offset).
2619 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
2620 { 2, false, 5, 0 }} },
2621 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
2622 { 2, false, 6, 0 }} },
2623 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
2624 { 3, false, 5, 0 }} },
2625 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
2626 { 3, false, 6, 0 }} },
2627 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
2628 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
2629 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
2630 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
2631 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
2632 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
2633 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
2634 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
2635 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
2636 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
2637 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
2638 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
2639 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
2640 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
2641 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
2642 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
2643 {{ 2, false, 4, 0 },
2644 { 3, false, 5, 0 }} },
2645 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
2646 {{ 2, false, 4, 0 },
2647 { 3, false, 5, 0 }} },
2648 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
2649 {{ 2, false, 4, 0 },
2650 { 3, false, 5, 0 }} },
2651 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
2652 {{ 2, false, 4, 0 },
2653 { 3, false, 5, 0 }} },
2654 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
2655 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
2656 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
2657 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
2658 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
2659 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
2660 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
2661 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
2662 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} },
2663 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
2664 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 },
2665 { 2, false, 5, 0 }} },
2666 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 },
2667 { 2, false, 6, 0 }} },
2668 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
2669 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
2670 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
2671 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
2672 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
2673 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
2674 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
2675 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
2676 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
2677 {{ 1, false, 4, 0 }} },
2678 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
2679 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
2680 {{ 1, false, 4, 0 }} },
2681 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
2682 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
2683 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
2684 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
2685 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
2686 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
2687 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
2688 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
2689 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
2690 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
2691 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
2692 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
// HVX vector intrinsics with small immediate operands (64B and 128B modes).
2693 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
2694 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
2695 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
2696 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
2697 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
2698 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
2699 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
2700 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
2701 {{ 3, false, 1, 0 }} },
2702 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
2703 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
2704 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
2705 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
2706 {{ 3, false, 1, 0 }} },
2707 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
2708 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
2709 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
2710 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
2711 {{ 3, false, 1, 0 }} },
// Builtins without a table entry have no constrained immediates: succeed.
2714 auto F = Infos.find(BuiltinID);
2715 if (F == Infos.end())
// Check every constrained operand of this builtin.  For signed immediates
// the representable range is [-2^(W-1), 2^(W-1)-1]; for unsigned it is
// [0, 2^W - 1].  (Safe here: the largest BitWidth in the table is 16, so
// the shifts below cannot overflow int32_t.)
2720 for (const ArgInfo &A : F->second) {
2721 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth-1)) : 0;
2722 int32_t Max = (1 << (A.IsSigned ? A.BitWidth-1 : A.BitWidth)) - 1;
// No alignment requirement: plain range check.
2724 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
// Alignment requirement: the immediate must be a multiple of M = 2^Align,
// and the accepted range is scaled by M (presumably Min/Max are multiplied
// by M on lines elided from this extract — TODO confirm against the full
// source) before the combined range + multiple-of check below.
2726 unsigned M = 1 << A.Align;
2729 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
2730 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
2736 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
2737 CallExpr *TheCall) {
2738 return CheckHexagonBuiltinCpu(BuiltinID, TheCall) ||
2739 CheckHexagonBuiltinArgument(BuiltinID, TheCall);
2743 // CheckMipsBuiltinFunctionCall - Checks the constant value passed to the
2744 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The
2745 // ordering for DSP is unspecified. MSA is ordered by the data format used
2746 // by the underlying instruction i.e., df/m, df/n and then by size.
2748 // FIXME: The size tests here should instead be tablegen'd along with the
2749 // definitions from include/clang/Basic/BuiltinsMips.def.
2750 // FIXME: GCC is strict on signedness for some of these intrinsics, we should
2751 //        be too.
2752 bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
2753 unsigned i = 0, l = 0, u = 0, m = 0;
2754 switch (BuiltinID) {
2755 default: return false;
2756 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
2757 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
2758 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
2759 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
2760 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
2761 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
2762 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
2763 // MSA instrinsics. Instructions (which the intrinsics maps to) which use the
2765 // These intrinsics take an unsigned 3 bit immediate.
2766 case Mips::BI__builtin_msa_bclri_b:
2767 case Mips::BI__builtin_msa_bnegi_b:
2768 case Mips::BI__builtin_msa_bseti_b:
2769 case Mips::BI__builtin_msa_sat_s_b:
2770 case Mips::BI__builtin_msa_sat_u_b:
2771 case Mips::BI__builtin_msa_slli_b:
2772 case Mips::BI__builtin_msa_srai_b:
2773 case Mips::BI__builtin_msa_srari_b:
2774 case Mips::BI__builtin_msa_srli_b:
2775 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
2776 case Mips::BI__builtin_msa_binsli_b:
2777 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
2778 // These intrinsics take an unsigned 4 bit immediate.
2779 case Mips::BI__builtin_msa_bclri_h:
2780 case Mips::BI__builtin_msa_bnegi_h:
2781 case Mips::BI__builtin_msa_bseti_h:
2782 case Mips::BI__builtin_msa_sat_s_h:
2783 case Mips::BI__builtin_msa_sat_u_h:
2784 case Mips::BI__builtin_msa_slli_h:
2785 case Mips::BI__builtin_msa_srai_h:
2786 case Mips::BI__builtin_msa_srari_h:
2787 case Mips::BI__builtin_msa_srli_h:
2788 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
2789 case Mips::BI__builtin_msa_binsli_h:
2790 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
2791 // These intrinsics take an unsigned 5 bit immediate.
2792 // The first block of intrinsics actually have an unsigned 5 bit field,
2793 // not a df/n field.
2794 case Mips::BI__builtin_msa_clei_u_b:
2795 case Mips::BI__builtin_msa_clei_u_h:
2796 case Mips::BI__builtin_msa_clei_u_w:
2797 case Mips::BI__builtin_msa_clei_u_d:
2798 case Mips::BI__builtin_msa_clti_u_b:
2799 case Mips::BI__builtin_msa_clti_u_h:
2800 case Mips::BI__builtin_msa_clti_u_w:
2801 case Mips::BI__builtin_msa_clti_u_d:
2802 case Mips::BI__builtin_msa_maxi_u_b:
2803 case Mips::BI__builtin_msa_maxi_u_h:
2804 case Mips::BI__builtin_msa_maxi_u_w:
2805 case Mips::BI__builtin_msa_maxi_u_d:
2806 case Mips::BI__builtin_msa_mini_u_b:
2807 case Mips::BI__builtin_msa_mini_u_h:
2808 case Mips::BI__builtin_msa_mini_u_w:
2809 case Mips::BI__builtin_msa_mini_u_d:
2810 case Mips::BI__builtin_msa_addvi_b:
2811 case Mips::BI__builtin_msa_addvi_h:
2812 case Mips::BI__builtin_msa_addvi_w:
2813 case Mips::BI__builtin_msa_addvi_d:
2814 case Mips::BI__builtin_msa_bclri_w:
2815 case Mips::BI__builtin_msa_bnegi_w:
2816 case Mips::BI__builtin_msa_bseti_w:
2817 case Mips::BI__builtin_msa_sat_s_w:
2818 case Mips::BI__builtin_msa_sat_u_w:
2819 case Mips::BI__builtin_msa_slli_w:
2820 case Mips::BI__builtin_msa_srai_w:
2821 case Mips::BI__builtin_msa_srari_w:
2822 case Mips::BI__builtin_msa_srli_w:
2823 case Mips::BI__builtin_msa_srlri_w:
2824 case Mips::BI__builtin_msa_subvi_b:
2825 case Mips::BI__builtin_msa_subvi_h:
2826 case Mips::BI__builtin_msa_subvi_w:
2827 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
2828 case Mips::BI__builtin_msa_binsli_w:
2829 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
2830 // These intrinsics take an unsigned 6 bit immediate.
2831 case Mips::BI__builtin_msa_bclri_d:
2832 case Mips::BI__builtin_msa_bnegi_d:
2833 case Mips::BI__builtin_msa_bseti_d:
2834 case Mips::BI__builtin_msa_sat_s_d:
2835 case Mips::BI__builtin_msa_sat_u_d:
2836 case Mips::BI__builtin_msa_slli_d:
2837 case Mips::BI__builtin_msa_srai_d:
2838 case Mips::BI__builtin_msa_srari_d:
2839 case Mips::BI__builtin_msa_srli_d:
2840 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
2841 case Mips::BI__builtin_msa_binsli_d:
2842 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
2843 // These intrinsics take a signed 5 bit immediate.
2844 case Mips::BI__builtin_msa_ceqi_b:
2845 case Mips::BI__builtin_msa_ceqi_h:
2846 case Mips::BI__builtin_msa_ceqi_w:
2847 case Mips::BI__builtin_msa_ceqi_d:
2848 case Mips::BI__builtin_msa_clti_s_b:
2849 case Mips::BI__builtin_msa_clti_s_h:
2850 case Mips::BI__builtin_msa_clti_s_w:
2851 case Mips::BI__builtin_msa_clti_s_d:
2852 case Mips::BI__builtin_msa_clei_s_b:
2853 case Mips::BI__builtin_msa_clei_s_h:
2854 case Mips::BI__builtin_msa_clei_s_w:
2855 case Mips::BI__builtin_msa_clei_s_d:
2856 case Mips::BI__builtin_msa_maxi_s_b:
2857 case Mips::BI__builtin_msa_maxi_s_h:
2858 case Mips::BI__builtin_msa_maxi_s_w:
2859 case Mips::BI__builtin_msa_maxi_s_d:
2860 case Mips::BI__builtin_msa_mini_s_b:
2861 case Mips::BI__builtin_msa_mini_s_h:
2862 case Mips::BI__builtin_msa_mini_s_w:
2863 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
2864 // These intrinsics take an unsigned 8 bit immediate.
2865 case Mips::BI__builtin_msa_andi_b:
2866 case Mips::BI__builtin_msa_nori_b:
2867 case Mips::BI__builtin_msa_ori_b:
2868 case Mips::BI__builtin_msa_shf_b:
2869 case Mips::BI__builtin_msa_shf_h:
2870 case Mips::BI__builtin_msa_shf_w:
2871 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
2872 case Mips::BI__builtin_msa_bseli_b:
2873 case Mips::BI__builtin_msa_bmnzi_b:
2874 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
2876 // These intrinsics take an unsigned 4 bit immediate.
2877 case Mips::BI__builtin_msa_copy_s_b:
2878 case Mips::BI__builtin_msa_copy_u_b:
2879 case Mips::BI__builtin_msa_insve_b:
2880 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
2881 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
2882 // These intrinsics take an unsigned 3 bit immediate.
2883 case Mips::BI__builtin_msa_copy_s_h:
2884 case Mips::BI__builtin_msa_copy_u_h:
2885 case Mips::BI__builtin_msa_insve_h:
2886 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
2887 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
2888 // These intrinsics take an unsigned 2 bit immediate.
2889 case Mips::BI__builtin_msa_copy_s_w:
2890 case Mips::BI__builtin_msa_copy_u_w:
2891 case Mips::BI__builtin_msa_insve_w:
2892 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
2893 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
2894 // These intrinsics take an unsigned 1 bit immediate.
2895 case Mips::BI__builtin_msa_copy_s_d:
2896 case Mips::BI__builtin_msa_copy_u_d:
2897 case Mips::BI__builtin_msa_insve_d:
2898 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
2899 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
2900 // Memory offsets and immediate loads.
2901 // These intrinsics take a signed 10 bit immediate.
2902 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
2903 case Mips::BI__builtin_msa_ldi_h:
2904 case Mips::BI__builtin_msa_ldi_w:
2905 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
2906 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 16; break;
2907 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 16; break;
2908 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 16; break;
2909 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 16; break;
2910 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 16; break;
2911 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 16; break;
2912 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 16; break;
2913 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 16; break;
2917 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
2919 return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
2920 SemaBuiltinConstantArgMultiple(TheCall, i, m);
2923 bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
2924 unsigned i = 0, l = 0, u = 0;
2925 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde ||
2926 BuiltinID == PPC::BI__builtin_divdeu ||
2927 BuiltinID == PPC::BI__builtin_bpermd;
2928 bool IsTarget64Bit = Context.getTargetInfo()
2929 .getTypeWidth(Context
2931 .getIntPtrType()) == 64;
2932 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe ||
2933 BuiltinID == PPC::BI__builtin_divweu ||
2934 BuiltinID == PPC::BI__builtin_divde ||
2935 BuiltinID == PPC::BI__builtin_divdeu;
2937 if (Is64BitBltin && !IsTarget64Bit)
2938 return Diag(TheCall->getLocStart(), diag::err_64_bit_builtin_32_bit_tgt)
2939 << TheCall->getSourceRange();
2941 if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) ||
2942 (BuiltinID == PPC::BI__builtin_bpermd &&
2943 !Context.getTargetInfo().hasFeature("bpermd")))
2944 return Diag(TheCall->getLocStart(), diag::err_ppc_builtin_only_on_pwr7)
2945 << TheCall->getSourceRange();
2947 switch (BuiltinID) {
2948 default: return false;
2949 case PPC::BI__builtin_altivec_crypto_vshasigmaw:
2950 case PPC::BI__builtin_altivec_crypto_vshasigmad:
2951 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
2952 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
2953 case PPC::BI__builtin_tbegin:
2954 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break;
2955 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break;
2956 case PPC::BI__builtin_tabortwc:
2957 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break;
2958 case PPC::BI__builtin_tabortwci:
2959 case PPC::BI__builtin_tabortdci:
2960 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
2961 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
2962 case PPC::BI__builtin_vsx_xxpermdi:
2963 case PPC::BI__builtin_vsx_xxsldwi:
2964 return SemaBuiltinVSX(TheCall);
2966 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
2969 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
2970 CallExpr *TheCall) {
2971 if (BuiltinID == SystemZ::BI__builtin_tabort) {
2972 Expr *Arg = TheCall->getArg(0);
2973 llvm::APSInt AbortCode(32);
2974 if (Arg->isIntegerConstantExpr(AbortCode, Context) &&
2975 AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256)
2976 return Diag(Arg->getLocStart(), diag::err_systemz_invalid_tabort_code)
2977 << Arg->getSourceRange();
2980 // For intrinsics which take an immediate value as part of the instruction,
2981 // range check them here.
2982 unsigned i = 0, l = 0, u = 0;
2983 switch (BuiltinID) {
2984 default: return false;
2985 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
2986 case SystemZ::BI__builtin_s390_verimb:
2987 case SystemZ::BI__builtin_s390_verimh:
2988 case SystemZ::BI__builtin_s390_verimf:
2989 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
2990 case SystemZ::BI__builtin_s390_vfaeb:
2991 case SystemZ::BI__builtin_s390_vfaeh:
2992 case SystemZ::BI__builtin_s390_vfaef:
2993 case SystemZ::BI__builtin_s390_vfaebs:
2994 case SystemZ::BI__builtin_s390_vfaehs:
2995 case SystemZ::BI__builtin_s390_vfaefs:
2996 case SystemZ::BI__builtin_s390_vfaezb:
2997 case SystemZ::BI__builtin_s390_vfaezh:
2998 case SystemZ::BI__builtin_s390_vfaezf:
2999 case SystemZ::BI__builtin_s390_vfaezbs:
3000 case SystemZ::BI__builtin_s390_vfaezhs:
3001 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
3002 case SystemZ::BI__builtin_s390_vfisb:
3003 case SystemZ::BI__builtin_s390_vfidb:
3004 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
3005 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
3006 case SystemZ::BI__builtin_s390_vftcisb:
3007 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
3008 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
3009 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
3010 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
3011 case SystemZ::BI__builtin_s390_vstrcb:
3012 case SystemZ::BI__builtin_s390_vstrch:
3013 case SystemZ::BI__builtin_s390_vstrcf:
3014 case SystemZ::BI__builtin_s390_vstrczb:
3015 case SystemZ::BI__builtin_s390_vstrczh:
3016 case SystemZ::BI__builtin_s390_vstrczf:
3017 case SystemZ::BI__builtin_s390_vstrcbs:
3018 case SystemZ::BI__builtin_s390_vstrchs:
3019 case SystemZ::BI__builtin_s390_vstrcfs:
3020 case SystemZ::BI__builtin_s390_vstrczbs:
3021 case SystemZ::BI__builtin_s390_vstrczhs:
3022 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
3023 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
3024 case SystemZ::BI__builtin_s390_vfminsb:
3025 case SystemZ::BI__builtin_s390_vfmaxsb:
3026 case SystemZ::BI__builtin_s390_vfmindb:
3027 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
3029 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3032 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
3033 /// This checks that the target supports __builtin_cpu_supports and
3034 /// that the string argument is constant and valid.
3035 static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
3036 Expr *Arg = TheCall->getArg(0);
3038 // Check if the argument is a string literal.
3039 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3040 return S.Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
3041 << Arg->getSourceRange();
3043 // Check the contents of the string.
3045 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3046 if (!S.Context.getTargetInfo().validateCpuSupports(Feature))
3047 return S.Diag(TheCall->getLocStart(), diag::err_invalid_cpu_supports)
3048 << Arg->getSourceRange();
3052 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
3053 /// This checks that the target supports __builtin_cpu_is and
3054 /// that the string argument is constant and valid.
3055 static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) {
3056 Expr *Arg = TheCall->getArg(0);
3058 // Check if the argument is a string literal.
3059 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3060 return S.Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
3061 << Arg->getSourceRange();
3063 // Check the contents of the string.
3065 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3066 if (!S.Context.getTargetInfo().validateCpuIs(Feature))
3067 return S.Diag(TheCall->getLocStart(), diag::err_invalid_cpu_is)
3068 << Arg->getSourceRange();
3072 // Check if the rounding mode is legal.
3073 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
3074 // Indicates if this instruction has rounding control or just SAE.
3077 unsigned ArgNum = 0;
3078 switch (BuiltinID) {
3081 case X86::BI__builtin_ia32_vcvttsd2si32:
3082 case X86::BI__builtin_ia32_vcvttsd2si64:
3083 case X86::BI__builtin_ia32_vcvttsd2usi32:
3084 case X86::BI__builtin_ia32_vcvttsd2usi64:
3085 case X86::BI__builtin_ia32_vcvttss2si32:
3086 case X86::BI__builtin_ia32_vcvttss2si64:
3087 case X86::BI__builtin_ia32_vcvttss2usi32:
3088 case X86::BI__builtin_ia32_vcvttss2usi64:
3091 case X86::BI__builtin_ia32_maxpd512:
3092 case X86::BI__builtin_ia32_maxps512:
3093 case X86::BI__builtin_ia32_minpd512:
3094 case X86::BI__builtin_ia32_minps512:
3097 case X86::BI__builtin_ia32_cvtps2pd512_mask:
3098 case X86::BI__builtin_ia32_cvttpd2dq512_mask:
3099 case X86::BI__builtin_ia32_cvttpd2qq512_mask:
3100 case X86::BI__builtin_ia32_cvttpd2udq512_mask:
3101 case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
3102 case X86::BI__builtin_ia32_cvttps2dq512_mask:
3103 case X86::BI__builtin_ia32_cvttps2qq512_mask:
3104 case X86::BI__builtin_ia32_cvttps2udq512_mask:
3105 case X86::BI__builtin_ia32_cvttps2uqq512_mask:
3106 case X86::BI__builtin_ia32_exp2pd_mask:
3107 case X86::BI__builtin_ia32_exp2ps_mask:
3108 case X86::BI__builtin_ia32_getexppd512_mask:
3109 case X86::BI__builtin_ia32_getexpps512_mask:
3110 case X86::BI__builtin_ia32_rcp28pd_mask:
3111 case X86::BI__builtin_ia32_rcp28ps_mask:
3112 case X86::BI__builtin_ia32_rsqrt28pd_mask:
3113 case X86::BI__builtin_ia32_rsqrt28ps_mask:
3114 case X86::BI__builtin_ia32_vcomisd:
3115 case X86::BI__builtin_ia32_vcomiss:
3116 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
3119 case X86::BI__builtin_ia32_cmppd512_mask:
3120 case X86::BI__builtin_ia32_cmpps512_mask:
3121 case X86::BI__builtin_ia32_cmpsd_mask:
3122 case X86::BI__builtin_ia32_cmpss_mask:
3123 case X86::BI__builtin_ia32_cvtss2sd_round_mask:
3124 case X86::BI__builtin_ia32_getexpsd128_round_mask:
3125 case X86::BI__builtin_ia32_getexpss128_round_mask:
3126 case X86::BI__builtin_ia32_maxsd_round_mask:
3127 case X86::BI__builtin_ia32_maxss_round_mask:
3128 case X86::BI__builtin_ia32_minsd_round_mask:
3129 case X86::BI__builtin_ia32_minss_round_mask:
3130 case X86::BI__builtin_ia32_rcp28sd_round_mask:
3131 case X86::BI__builtin_ia32_rcp28ss_round_mask:
3132 case X86::BI__builtin_ia32_reducepd512_mask:
3133 case X86::BI__builtin_ia32_reduceps512_mask:
3134 case X86::BI__builtin_ia32_rndscalepd_mask:
3135 case X86::BI__builtin_ia32_rndscaleps_mask:
3136 case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
3137 case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
3140 case X86::BI__builtin_ia32_fixupimmpd512_mask:
3141 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
3142 case X86::BI__builtin_ia32_fixupimmps512_mask:
3143 case X86::BI__builtin_ia32_fixupimmps512_maskz:
3144 case X86::BI__builtin_ia32_fixupimmsd_mask:
3145 case X86::BI__builtin_ia32_fixupimmsd_maskz:
3146 case X86::BI__builtin_ia32_fixupimmss_mask:
3147 case X86::BI__builtin_ia32_fixupimmss_maskz:
3148 case X86::BI__builtin_ia32_rangepd512_mask:
3149 case X86::BI__builtin_ia32_rangeps512_mask:
3150 case X86::BI__builtin_ia32_rangesd128_round_mask:
3151 case X86::BI__builtin_ia32_rangess128_round_mask:
3152 case X86::BI__builtin_ia32_reducesd_mask:
3153 case X86::BI__builtin_ia32_reducess_mask:
3154 case X86::BI__builtin_ia32_rndscalesd_round_mask:
3155 case X86::BI__builtin_ia32_rndscaless_round_mask:
3158 case X86::BI__builtin_ia32_vcvtsd2si64:
3159 case X86::BI__builtin_ia32_vcvtsd2si32:
3160 case X86::BI__builtin_ia32_vcvtsd2usi32:
3161 case X86::BI__builtin_ia32_vcvtsd2usi64:
3162 case X86::BI__builtin_ia32_vcvtss2si32:
3163 case X86::BI__builtin_ia32_vcvtss2si64:
3164 case X86::BI__builtin_ia32_vcvtss2usi32:
3165 case X86::BI__builtin_ia32_vcvtss2usi64:
3166 case X86::BI__builtin_ia32_sqrtpd512:
3167 case X86::BI__builtin_ia32_sqrtps512:
3171 case X86::BI__builtin_ia32_addpd512:
3172 case X86::BI__builtin_ia32_addps512:
3173 case X86::BI__builtin_ia32_divpd512:
3174 case X86::BI__builtin_ia32_divps512:
3175 case X86::BI__builtin_ia32_mulpd512:
3176 case X86::BI__builtin_ia32_mulps512:
3177 case X86::BI__builtin_ia32_subpd512:
3178 case X86::BI__builtin_ia32_subps512:
3179 case X86::BI__builtin_ia32_cvtsi2sd64:
3180 case X86::BI__builtin_ia32_cvtsi2ss32:
3181 case X86::BI__builtin_ia32_cvtsi2ss64:
3182 case X86::BI__builtin_ia32_cvtusi2sd64:
3183 case X86::BI__builtin_ia32_cvtusi2ss32:
3184 case X86::BI__builtin_ia32_cvtusi2ss64:
3188 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
3189 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
3190 case X86::BI__builtin_ia32_cvtpd2ps512_mask:
3191 case X86::BI__builtin_ia32_cvtpd2qq512_mask:
3192 case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
3193 case X86::BI__builtin_ia32_cvtps2qq512_mask:
3194 case X86::BI__builtin_ia32_cvtps2uqq512_mask:
3195 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
3196 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
3197 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
3198 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
3202 case X86::BI__builtin_ia32_addss_round_mask:
3203 case X86::BI__builtin_ia32_addsd_round_mask:
3204 case X86::BI__builtin_ia32_divss_round_mask:
3205 case X86::BI__builtin_ia32_divsd_round_mask:
3206 case X86::BI__builtin_ia32_mulss_round_mask:
3207 case X86::BI__builtin_ia32_mulsd_round_mask:
3208 case X86::BI__builtin_ia32_subss_round_mask:
3209 case X86::BI__builtin_ia32_subsd_round_mask:
3210 case X86::BI__builtin_ia32_scalefpd512_mask:
3211 case X86::BI__builtin_ia32_scalefps512_mask:
3212 case X86::BI__builtin_ia32_scalefsd_round_mask:
3213 case X86::BI__builtin_ia32_scalefss_round_mask:
3214 case X86::BI__builtin_ia32_getmantpd512_mask:
3215 case X86::BI__builtin_ia32_getmantps512_mask:
3216 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
3217 case X86::BI__builtin_ia32_sqrtsd_round_mask:
3218 case X86::BI__builtin_ia32_sqrtss_round_mask:
3219 case X86::BI__builtin_ia32_vfmaddsd3_mask:
3220 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
3221 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
3222 case X86::BI__builtin_ia32_vfmaddss3_mask:
3223 case X86::BI__builtin_ia32_vfmaddss3_maskz:
3224 case X86::BI__builtin_ia32_vfmaddss3_mask3:
3225 case X86::BI__builtin_ia32_vfmaddpd512_mask:
3226 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
3227 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
3228 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
3229 case X86::BI__builtin_ia32_vfmaddps512_mask:
3230 case X86::BI__builtin_ia32_vfmaddps512_maskz:
3231 case X86::BI__builtin_ia32_vfmaddps512_mask3:
3232 case X86::BI__builtin_ia32_vfmsubps512_mask3:
3233 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
3234 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
3235 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
3236 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
3237 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
3238 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
3239 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
3240 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
3244 case X86::BI__builtin_ia32_getmantsd_round_mask:
3245 case X86::BI__builtin_ia32_getmantss_round_mask:
3251 llvm::APSInt Result;
3253 // We can't check the value of a dependent argument.
3254 Expr *Arg = TheCall->getArg(ArgNum);
3255 if (Arg->isTypeDependent() || Arg->isValueDependent())
3258 // Check constant-ness first.
3259 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3262 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit
3263 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only
3264 // combined with ROUND_NO_EXC.
3265 if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
3266 Result == 8/*ROUND_NO_EXC*/ ||
3267 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
3270 return Diag(TheCall->getLocStart(), diag::err_x86_builtin_invalid_rounding)
3271 << Arg->getSourceRange();
3274 // Check if the gather/scatter scale is legal.
3275 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
3276 CallExpr *TheCall) {
3277 unsigned ArgNum = 0;
3278 switch (BuiltinID) {
3281 case X86::BI__builtin_ia32_gatherpfdpd:
3282 case X86::BI__builtin_ia32_gatherpfdps:
3283 case X86::BI__builtin_ia32_gatherpfqpd:
3284 case X86::BI__builtin_ia32_gatherpfqps:
3285 case X86::BI__builtin_ia32_scatterpfdpd:
3286 case X86::BI__builtin_ia32_scatterpfdps:
3287 case X86::BI__builtin_ia32_scatterpfqpd:
3288 case X86::BI__builtin_ia32_scatterpfqps:
3291 case X86::BI__builtin_ia32_gatherd_pd:
3292 case X86::BI__builtin_ia32_gatherd_pd256:
3293 case X86::BI__builtin_ia32_gatherq_pd:
3294 case X86::BI__builtin_ia32_gatherq_pd256:
3295 case X86::BI__builtin_ia32_gatherd_ps:
3296 case X86::BI__builtin_ia32_gatherd_ps256:
3297 case X86::BI__builtin_ia32_gatherq_ps:
3298 case X86::BI__builtin_ia32_gatherq_ps256:
3299 case X86::BI__builtin_ia32_gatherd_q:
3300 case X86::BI__builtin_ia32_gatherd_q256:
3301 case X86::BI__builtin_ia32_gatherq_q:
3302 case X86::BI__builtin_ia32_gatherq_q256:
3303 case X86::BI__builtin_ia32_gatherd_d:
3304 case X86::BI__builtin_ia32_gatherd_d256:
3305 case X86::BI__builtin_ia32_gatherq_d:
3306 case X86::BI__builtin_ia32_gatherq_d256:
3307 case X86::BI__builtin_ia32_gather3div2df:
3308 case X86::BI__builtin_ia32_gather3div2di:
3309 case X86::BI__builtin_ia32_gather3div4df:
3310 case X86::BI__builtin_ia32_gather3div4di:
3311 case X86::BI__builtin_ia32_gather3div4sf:
3312 case X86::BI__builtin_ia32_gather3div4si:
3313 case X86::BI__builtin_ia32_gather3div8sf:
3314 case X86::BI__builtin_ia32_gather3div8si:
3315 case X86::BI__builtin_ia32_gather3siv2df:
3316 case X86::BI__builtin_ia32_gather3siv2di:
3317 case X86::BI__builtin_ia32_gather3siv4df:
3318 case X86::BI__builtin_ia32_gather3siv4di:
3319 case X86::BI__builtin_ia32_gather3siv4sf:
3320 case X86::BI__builtin_ia32_gather3siv4si:
3321 case X86::BI__builtin_ia32_gather3siv8sf:
3322 case X86::BI__builtin_ia32_gather3siv8si:
3323 case X86::BI__builtin_ia32_gathersiv8df:
3324 case X86::BI__builtin_ia32_gathersiv16sf:
3325 case X86::BI__builtin_ia32_gatherdiv8df:
3326 case X86::BI__builtin_ia32_gatherdiv16sf:
3327 case X86::BI__builtin_ia32_gathersiv8di:
3328 case X86::BI__builtin_ia32_gathersiv16si:
3329 case X86::BI__builtin_ia32_gatherdiv8di:
3330 case X86::BI__builtin_ia32_gatherdiv16si:
3331 case X86::BI__builtin_ia32_scatterdiv2df:
3332 case X86::BI__builtin_ia32_scatterdiv2di:
3333 case X86::BI__builtin_ia32_scatterdiv4df:
3334 case X86::BI__builtin_ia32_scatterdiv4di:
3335 case X86::BI__builtin_ia32_scatterdiv4sf:
3336 case X86::BI__builtin_ia32_scatterdiv4si:
3337 case X86::BI__builtin_ia32_scatterdiv8sf:
3338 case X86::BI__builtin_ia32_scatterdiv8si:
3339 case X86::BI__builtin_ia32_scattersiv2df:
3340 case X86::BI__builtin_ia32_scattersiv2di:
3341 case X86::BI__builtin_ia32_scattersiv4df:
3342 case X86::BI__builtin_ia32_scattersiv4di:
3343 case X86::BI__builtin_ia32_scattersiv4sf:
3344 case X86::BI__builtin_ia32_scattersiv4si:
3345 case X86::BI__builtin_ia32_scattersiv8sf:
3346 case X86::BI__builtin_ia32_scattersiv8si:
3347 case X86::BI__builtin_ia32_scattersiv8df:
3348 case X86::BI__builtin_ia32_scattersiv16sf:
3349 case X86::BI__builtin_ia32_scatterdiv8df:
3350 case X86::BI__builtin_ia32_scatterdiv16sf:
3351 case X86::BI__builtin_ia32_scattersiv8di:
3352 case X86::BI__builtin_ia32_scattersiv16si:
3353 case X86::BI__builtin_ia32_scatterdiv8di:
3354 case X86::BI__builtin_ia32_scatterdiv16si:
3359 llvm::APSInt Result;
3361 // We can't check the value of a dependent argument.
3362 Expr *Arg = TheCall->getArg(ArgNum);
3363 if (Arg->isTypeDependent() || Arg->isValueDependent())
3366 // Check constant-ness first.
3367 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3370 if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
3373 return Diag(TheCall->getLocStart(), diag::err_x86_builtin_invalid_scale)
3374 << Arg->getSourceRange();
3377 static bool isX86_32Builtin(unsigned BuiltinID) {
3378 // These builtins only work on x86-32 targets.
3379 switch (BuiltinID) {
3380 case X86::BI__builtin_ia32_readeflags_u32:
3381 case X86::BI__builtin_ia32_writeeflags_u32:
3388 bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
3389 if (BuiltinID == X86::BI__builtin_cpu_supports)
3390 return SemaBuiltinCpuSupports(*this, TheCall);
3392 if (BuiltinID == X86::BI__builtin_cpu_is)
3393 return SemaBuiltinCpuIs(*this, TheCall);
3395 // Check for 32-bit only builtins on a 64-bit target.
3396 const llvm::Triple &TT = Context.getTargetInfo().getTriple();
3397 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
3398 return Diag(TheCall->getCallee()->getLocStart(),
3399 diag::err_32_bit_builtin_64_bit_tgt);
3401 // If the intrinsic has rounding or SAE make sure its valid.
3402 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
3405 // If the intrinsic has a gather/scatter scale immediate make sure its valid.
3406 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
3409 // For intrinsics which take an immediate value as part of the instruction,
3410 // range check them here.
3411 int i = 0, l = 0, u = 0;
3412 switch (BuiltinID) {
3415 case X86::BI__builtin_ia32_vec_ext_v2si:
3416 case X86::BI__builtin_ia32_vec_ext_v2di:
3417 case X86::BI__builtin_ia32_vextractf128_pd256:
3418 case X86::BI__builtin_ia32_vextractf128_ps256:
3419 case X86::BI__builtin_ia32_vextractf128_si256:
3420 case X86::BI__builtin_ia32_extract128i256:
3421 case X86::BI__builtin_ia32_extractf64x4_mask:
3422 case X86::BI__builtin_ia32_extracti64x4_mask:
3423 case X86::BI__builtin_ia32_extractf32x8_mask:
3424 case X86::BI__builtin_ia32_extracti32x8_mask:
3425 case X86::BI__builtin_ia32_extractf64x2_256_mask:
3426 case X86::BI__builtin_ia32_extracti64x2_256_mask:
3427 case X86::BI__builtin_ia32_extractf32x4_256_mask:
3428 case X86::BI__builtin_ia32_extracti32x4_256_mask:
3429 i = 1; l = 0; u = 1;
3431 case X86::BI__builtin_ia32_vec_set_v2di:
3432 case X86::BI__builtin_ia32_vinsertf128_pd256:
3433 case X86::BI__builtin_ia32_vinsertf128_ps256:
3434 case X86::BI__builtin_ia32_vinsertf128_si256:
3435 case X86::BI__builtin_ia32_insert128i256:
3436 case X86::BI__builtin_ia32_insertf32x8:
3437 case X86::BI__builtin_ia32_inserti32x8:
3438 case X86::BI__builtin_ia32_insertf64x4:
3439 case X86::BI__builtin_ia32_inserti64x4:
3440 case X86::BI__builtin_ia32_insertf64x2_256:
3441 case X86::BI__builtin_ia32_inserti64x2_256:
3442 case X86::BI__builtin_ia32_insertf32x4_256:
3443 case X86::BI__builtin_ia32_inserti32x4_256:
3444 i = 2; l = 0; u = 1;
3446 case X86::BI__builtin_ia32_vpermilpd:
3447 case X86::BI__builtin_ia32_vec_ext_v4hi:
3448 case X86::BI__builtin_ia32_vec_ext_v4si:
3449 case X86::BI__builtin_ia32_vec_ext_v4sf:
3450 case X86::BI__builtin_ia32_vec_ext_v4di:
3451 case X86::BI__builtin_ia32_extractf32x4_mask:
3452 case X86::BI__builtin_ia32_extracti32x4_mask:
3453 case X86::BI__builtin_ia32_extractf64x2_512_mask:
3454 case X86::BI__builtin_ia32_extracti64x2_512_mask:
3455 i = 1; l = 0; u = 3;
3457 case X86::BI_mm_prefetch:
3458 case X86::BI__builtin_ia32_vec_ext_v8hi:
3459 case X86::BI__builtin_ia32_vec_ext_v8si:
3460 i = 1; l = 0; u = 7;
3462 case X86::BI__builtin_ia32_sha1rnds4:
3463 case X86::BI__builtin_ia32_blendpd:
3464 case X86::BI__builtin_ia32_shufpd:
3465 case X86::BI__builtin_ia32_vec_set_v4hi:
3466 case X86::BI__builtin_ia32_vec_set_v4si:
3467 case X86::BI__builtin_ia32_vec_set_v4di:
3468 case X86::BI__builtin_ia32_shuf_f32x4_256:
3469 case X86::BI__builtin_ia32_shuf_f64x2_256:
3470 case X86::BI__builtin_ia32_shuf_i32x4_256:
3471 case X86::BI__builtin_ia32_shuf_i64x2_256:
3472 case X86::BI__builtin_ia32_insertf64x2_512:
3473 case X86::BI__builtin_ia32_inserti64x2_512:
3474 case X86::BI__builtin_ia32_insertf32x4:
3475 case X86::BI__builtin_ia32_inserti32x4:
3476 i = 2; l = 0; u = 3;
3478 case X86::BI__builtin_ia32_vpermil2pd:
3479 case X86::BI__builtin_ia32_vpermil2pd256:
3480 case X86::BI__builtin_ia32_vpermil2ps:
3481 case X86::BI__builtin_ia32_vpermil2ps256:
3482 i = 3; l = 0; u = 3;
3484 case X86::BI__builtin_ia32_cmpb128_mask:
3485 case X86::BI__builtin_ia32_cmpw128_mask:
3486 case X86::BI__builtin_ia32_cmpd128_mask:
3487 case X86::BI__builtin_ia32_cmpq128_mask:
3488 case X86::BI__builtin_ia32_cmpb256_mask:
3489 case X86::BI__builtin_ia32_cmpw256_mask:
3490 case X86::BI__builtin_ia32_cmpd256_mask:
3491 case X86::BI__builtin_ia32_cmpq256_mask:
3492 case X86::BI__builtin_ia32_cmpb512_mask:
3493 case X86::BI__builtin_ia32_cmpw512_mask:
3494 case X86::BI__builtin_ia32_cmpd512_mask:
3495 case X86::BI__builtin_ia32_cmpq512_mask:
3496 case X86::BI__builtin_ia32_ucmpb128_mask:
3497 case X86::BI__builtin_ia32_ucmpw128_mask:
3498 case X86::BI__builtin_ia32_ucmpd128_mask:
3499 case X86::BI__builtin_ia32_ucmpq128_mask:
3500 case X86::BI__builtin_ia32_ucmpb256_mask:
3501 case X86::BI__builtin_ia32_ucmpw256_mask:
3502 case X86::BI__builtin_ia32_ucmpd256_mask:
3503 case X86::BI__builtin_ia32_ucmpq256_mask:
3504 case X86::BI__builtin_ia32_ucmpb512_mask:
3505 case X86::BI__builtin_ia32_ucmpw512_mask:
3506 case X86::BI__builtin_ia32_ucmpd512_mask:
3507 case X86::BI__builtin_ia32_ucmpq512_mask:
3508 case X86::BI__builtin_ia32_vpcomub:
3509 case X86::BI__builtin_ia32_vpcomuw:
3510 case X86::BI__builtin_ia32_vpcomud:
3511 case X86::BI__builtin_ia32_vpcomuq:
3512 case X86::BI__builtin_ia32_vpcomb:
3513 case X86::BI__builtin_ia32_vpcomw:
3514 case X86::BI__builtin_ia32_vpcomd:
3515 case X86::BI__builtin_ia32_vpcomq:
3516 case X86::BI__builtin_ia32_vec_set_v8hi:
3517 case X86::BI__builtin_ia32_vec_set_v8si:
3518 i = 2; l = 0; u = 7;
3520 case X86::BI__builtin_ia32_vpermilpd256:
3521 case X86::BI__builtin_ia32_roundps:
3522 case X86::BI__builtin_ia32_roundpd:
3523 case X86::BI__builtin_ia32_roundps256:
3524 case X86::BI__builtin_ia32_roundpd256:
3525 case X86::BI__builtin_ia32_getmantpd128_mask:
3526 case X86::BI__builtin_ia32_getmantpd256_mask:
3527 case X86::BI__builtin_ia32_getmantps128_mask:
3528 case X86::BI__builtin_ia32_getmantps256_mask:
3529 case X86::BI__builtin_ia32_getmantpd512_mask:
3530 case X86::BI__builtin_ia32_getmantps512_mask:
3531 case X86::BI__builtin_ia32_vec_ext_v16qi:
3532 case X86::BI__builtin_ia32_vec_ext_v16hi:
3533 i = 1; l = 0; u = 15;
3535 case X86::BI__builtin_ia32_pblendd128:
3536 case X86::BI__builtin_ia32_blendps:
3537 case X86::BI__builtin_ia32_blendpd256:
3538 case X86::BI__builtin_ia32_shufpd256:
3539 case X86::BI__builtin_ia32_roundss:
3540 case X86::BI__builtin_ia32_roundsd:
3541 case X86::BI__builtin_ia32_rangepd128_mask:
3542 case X86::BI__builtin_ia32_rangepd256_mask:
3543 case X86::BI__builtin_ia32_rangepd512_mask:
3544 case X86::BI__builtin_ia32_rangeps128_mask:
3545 case X86::BI__builtin_ia32_rangeps256_mask:
3546 case X86::BI__builtin_ia32_rangeps512_mask:
3547 case X86::BI__builtin_ia32_getmantsd_round_mask:
3548 case X86::BI__builtin_ia32_getmantss_round_mask:
3549 case X86::BI__builtin_ia32_vec_set_v16qi:
3550 case X86::BI__builtin_ia32_vec_set_v16hi:
3551 i = 2; l = 0; u = 15;
3553 case X86::BI__builtin_ia32_vec_ext_v32qi:
3554 i = 1; l = 0; u = 31;
3556 case X86::BI__builtin_ia32_cmpps:
3557 case X86::BI__builtin_ia32_cmpss:
3558 case X86::BI__builtin_ia32_cmppd:
3559 case X86::BI__builtin_ia32_cmpsd:
3560 case X86::BI__builtin_ia32_cmpps256:
3561 case X86::BI__builtin_ia32_cmppd256:
3562 case X86::BI__builtin_ia32_cmpps128_mask:
3563 case X86::BI__builtin_ia32_cmppd128_mask:
3564 case X86::BI__builtin_ia32_cmpps256_mask:
3565 case X86::BI__builtin_ia32_cmppd256_mask:
3566 case X86::BI__builtin_ia32_cmpps512_mask:
3567 case X86::BI__builtin_ia32_cmppd512_mask:
3568 case X86::BI__builtin_ia32_cmpsd_mask:
3569 case X86::BI__builtin_ia32_cmpss_mask:
3570 case X86::BI__builtin_ia32_vec_set_v32qi:
3571 i = 2; l = 0; u = 31;
3573 case X86::BI__builtin_ia32_permdf256:
3574 case X86::BI__builtin_ia32_permdi256:
3575 case X86::BI__builtin_ia32_permdf512:
3576 case X86::BI__builtin_ia32_permdi512:
3577 case X86::BI__builtin_ia32_vpermilps:
3578 case X86::BI__builtin_ia32_vpermilps256:
3579 case X86::BI__builtin_ia32_vpermilpd512:
3580 case X86::BI__builtin_ia32_vpermilps512:
3581 case X86::BI__builtin_ia32_pshufd:
3582 case X86::BI__builtin_ia32_pshufd256:
3583 case X86::BI__builtin_ia32_pshufd512:
3584 case X86::BI__builtin_ia32_pshufhw:
3585 case X86::BI__builtin_ia32_pshufhw256:
3586 case X86::BI__builtin_ia32_pshufhw512:
3587 case X86::BI__builtin_ia32_pshuflw:
3588 case X86::BI__builtin_ia32_pshuflw256:
3589 case X86::BI__builtin_ia32_pshuflw512:
3590 case X86::BI__builtin_ia32_vcvtps2ph:
3591 case X86::BI__builtin_ia32_vcvtps2ph_mask:
3592 case X86::BI__builtin_ia32_vcvtps2ph256:
3593 case X86::BI__builtin_ia32_vcvtps2ph256_mask:
3594 case X86::BI__builtin_ia32_vcvtps2ph512_mask:
3595 case X86::BI__builtin_ia32_rndscaleps_128_mask:
3596 case X86::BI__builtin_ia32_rndscalepd_128_mask:
3597 case X86::BI__builtin_ia32_rndscaleps_256_mask:
3598 case X86::BI__builtin_ia32_rndscalepd_256_mask:
3599 case X86::BI__builtin_ia32_rndscaleps_mask:
3600 case X86::BI__builtin_ia32_rndscalepd_mask:
3601 case X86::BI__builtin_ia32_reducepd128_mask:
3602 case X86::BI__builtin_ia32_reducepd256_mask:
3603 case X86::BI__builtin_ia32_reducepd512_mask:
3604 case X86::BI__builtin_ia32_reduceps128_mask:
3605 case X86::BI__builtin_ia32_reduceps256_mask:
3606 case X86::BI__builtin_ia32_reduceps512_mask:
3607 case X86::BI__builtin_ia32_prold512:
3608 case X86::BI__builtin_ia32_prolq512:
3609 case X86::BI__builtin_ia32_prold128:
3610 case X86::BI__builtin_ia32_prold256:
3611 case X86::BI__builtin_ia32_prolq128:
3612 case X86::BI__builtin_ia32_prolq256:
3613 case X86::BI__builtin_ia32_prord512:
3614 case X86::BI__builtin_ia32_prorq512:
3615 case X86::BI__builtin_ia32_prord128:
3616 case X86::BI__builtin_ia32_prord256:
3617 case X86::BI__builtin_ia32_prorq128:
3618 case X86::BI__builtin_ia32_prorq256:
3619 case X86::BI__builtin_ia32_fpclasspd128_mask:
3620 case X86::BI__builtin_ia32_fpclasspd256_mask:
3621 case X86::BI__builtin_ia32_fpclassps128_mask:
3622 case X86::BI__builtin_ia32_fpclassps256_mask:
3623 case X86::BI__builtin_ia32_fpclassps512_mask:
3624 case X86::BI__builtin_ia32_fpclasspd512_mask:
3625 case X86::BI__builtin_ia32_fpclasssd_mask:
3626 case X86::BI__builtin_ia32_fpclassss_mask:
3627 case X86::BI__builtin_ia32_pslldqi128_byteshift:
3628 case X86::BI__builtin_ia32_pslldqi256_byteshift:
3629 case X86::BI__builtin_ia32_pslldqi512_byteshift:
3630 case X86::BI__builtin_ia32_psrldqi128_byteshift:
3631 case X86::BI__builtin_ia32_psrldqi256_byteshift:
3632 case X86::BI__builtin_ia32_psrldqi512_byteshift:
3633 i = 1; l = 0; u = 255;
3635 case X86::BI__builtin_ia32_vperm2f128_pd256:
3636 case X86::BI__builtin_ia32_vperm2f128_ps256:
3637 case X86::BI__builtin_ia32_vperm2f128_si256:
3638 case X86::BI__builtin_ia32_permti256:
3639 case X86::BI__builtin_ia32_pblendw128:
3640 case X86::BI__builtin_ia32_pblendw256:
3641 case X86::BI__builtin_ia32_blendps256:
3642 case X86::BI__builtin_ia32_pblendd256:
3643 case X86::BI__builtin_ia32_palignr128:
3644 case X86::BI__builtin_ia32_palignr256:
3645 case X86::BI__builtin_ia32_palignr512:
3646 case X86::BI__builtin_ia32_alignq512:
3647 case X86::BI__builtin_ia32_alignd512:
3648 case X86::BI__builtin_ia32_alignd128:
3649 case X86::BI__builtin_ia32_alignd256:
3650 case X86::BI__builtin_ia32_alignq128:
3651 case X86::BI__builtin_ia32_alignq256:
3652 case X86::BI__builtin_ia32_vcomisd:
3653 case X86::BI__builtin_ia32_vcomiss:
3654 case X86::BI__builtin_ia32_shuf_f32x4:
3655 case X86::BI__builtin_ia32_shuf_f64x2:
3656 case X86::BI__builtin_ia32_shuf_i32x4:
3657 case X86::BI__builtin_ia32_shuf_i64x2:
3658 case X86::BI__builtin_ia32_shufpd512:
3659 case X86::BI__builtin_ia32_shufps:
3660 case X86::BI__builtin_ia32_shufps256:
3661 case X86::BI__builtin_ia32_shufps512:
3662 case X86::BI__builtin_ia32_dbpsadbw128:
3663 case X86::BI__builtin_ia32_dbpsadbw256:
3664 case X86::BI__builtin_ia32_dbpsadbw512:
3665 case X86::BI__builtin_ia32_vpshldd128:
3666 case X86::BI__builtin_ia32_vpshldd256:
3667 case X86::BI__builtin_ia32_vpshldd512:
3668 case X86::BI__builtin_ia32_vpshldq128:
3669 case X86::BI__builtin_ia32_vpshldq256:
3670 case X86::BI__builtin_ia32_vpshldq512:
3671 case X86::BI__builtin_ia32_vpshldw128:
3672 case X86::BI__builtin_ia32_vpshldw256:
3673 case X86::BI__builtin_ia32_vpshldw512:
3674 case X86::BI__builtin_ia32_vpshrdd128:
3675 case X86::BI__builtin_ia32_vpshrdd256:
3676 case X86::BI__builtin_ia32_vpshrdd512:
3677 case X86::BI__builtin_ia32_vpshrdq128:
3678 case X86::BI__builtin_ia32_vpshrdq256:
3679 case X86::BI__builtin_ia32_vpshrdq512:
3680 case X86::BI__builtin_ia32_vpshrdw128:
3681 case X86::BI__builtin_ia32_vpshrdw256:
3682 case X86::BI__builtin_ia32_vpshrdw512:
3683 i = 2; l = 0; u = 255;
3685 case X86::BI__builtin_ia32_fixupimmpd512_mask:
3686 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
3687 case X86::BI__builtin_ia32_fixupimmps512_mask:
3688 case X86::BI__builtin_ia32_fixupimmps512_maskz:
3689 case X86::BI__builtin_ia32_fixupimmsd_mask:
3690 case X86::BI__builtin_ia32_fixupimmsd_maskz:
3691 case X86::BI__builtin_ia32_fixupimmss_mask:
3692 case X86::BI__builtin_ia32_fixupimmss_maskz:
3693 case X86::BI__builtin_ia32_fixupimmpd128_mask:
3694 case X86::BI__builtin_ia32_fixupimmpd128_maskz:
3695 case X86::BI__builtin_ia32_fixupimmpd256_mask:
3696 case X86::BI__builtin_ia32_fixupimmpd256_maskz:
3697 case X86::BI__builtin_ia32_fixupimmps128_mask:
3698 case X86::BI__builtin_ia32_fixupimmps128_maskz:
3699 case X86::BI__builtin_ia32_fixupimmps256_mask:
3700 case X86::BI__builtin_ia32_fixupimmps256_maskz:
3701 case X86::BI__builtin_ia32_pternlogd512_mask:
3702 case X86::BI__builtin_ia32_pternlogd512_maskz:
3703 case X86::BI__builtin_ia32_pternlogq512_mask:
3704 case X86::BI__builtin_ia32_pternlogq512_maskz:
3705 case X86::BI__builtin_ia32_pternlogd128_mask:
3706 case X86::BI__builtin_ia32_pternlogd128_maskz:
3707 case X86::BI__builtin_ia32_pternlogd256_mask:
3708 case X86::BI__builtin_ia32_pternlogd256_maskz:
3709 case X86::BI__builtin_ia32_pternlogq128_mask:
3710 case X86::BI__builtin_ia32_pternlogq128_maskz:
3711 case X86::BI__builtin_ia32_pternlogq256_mask:
3712 case X86::BI__builtin_ia32_pternlogq256_maskz:
3713 i = 3; l = 0; u = 255;
3715 case X86::BI__builtin_ia32_gatherpfdpd:
3716 case X86::BI__builtin_ia32_gatherpfdps:
3717 case X86::BI__builtin_ia32_gatherpfqpd:
3718 case X86::BI__builtin_ia32_gatherpfqps:
3719 case X86::BI__builtin_ia32_scatterpfdpd:
3720 case X86::BI__builtin_ia32_scatterpfdps:
3721 case X86::BI__builtin_ia32_scatterpfqpd:
3722 case X86::BI__builtin_ia32_scatterpfqps:
3723 i = 4; l = 2; u = 3;
3725 case X86::BI__builtin_ia32_rndscalesd_round_mask:
3726 case X86::BI__builtin_ia32_rndscaless_round_mask:
3727 i = 4; l = 0; u = 255;
3731 // Note that we don't force a hard error on the range check here, allowing
3732 // template-generated or macro-generated dead code to potentially have out-of-
3733 // range values. These need to code generate, but don't need to necessarily
3734 // make any sense. We use a warning that defaults to an error.
3735 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
3738 /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
3739 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
3740 /// Returns true when the format fits the function and the FormatStringInfo has
3742 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
3743 FormatStringInfo *FSI) {
3744 FSI->HasVAListArg = Format->getFirstArg() == 0;
3745 FSI->FormatIdx = Format->getFormatIdx() - 1;
3746 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
3748 // The way the format attribute works in GCC, the implicit this argument
3749 // of member functions is counted. However, it doesn't appear in our own
3750 // lists, so decrement format_idx in that case.
3752 if(FSI->FormatIdx == 0)
3755 if (FSI->FirstDataArg != 0)
3756 --FSI->FirstDataArg;
3761 /// Checks if a the given expression evaluates to null.
3763 /// Returns true if the value evaluates to null.
3764 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
3765 // If the expression has non-null type, it doesn't evaluate to null.
3766 if (auto nullability
3767 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
3768 if (*nullability == NullabilityKind::NonNull)
3772 // As a special case, transparent unions initialized with zero are
3773 // considered null for the purposes of the nonnull attribute.
3774 if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
3775 if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
3776 if (const CompoundLiteralExpr *CLE =
3777 dyn_cast<CompoundLiteralExpr>(Expr))
3778 if (const InitListExpr *ILE =
3779 dyn_cast<InitListExpr>(CLE->getInitializer()))
3780 Expr = ILE->getInit(0);
3784 return (!Expr->isValueDependent() &&
3785 Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
3789 static void CheckNonNullArgument(Sema &S,
3790 const Expr *ArgExpr,
3791 SourceLocation CallSiteLoc) {
3792 if (CheckNonNullExpr(S, ArgExpr))
3793 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
3794 S.PDiag(diag::warn_null_arg) << ArgExpr->getSourceRange());
3797 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
3798 FormatStringInfo FSI;
3799 if ((GetFormatStringType(Format) == FST_NSString) &&
3800 getFormatStringInfo(Format, false, &FSI)) {
3801 Idx = FSI.FormatIdx;
3807 /// Diagnose use of %s directive in an NSString which is being passed
3808 /// as formatting string to formatting method.
3810 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
3811 const NamedDecl *FDecl,
3815 bool Format = false;
3816 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
3817 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
3822 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
3823 if (S.GetFormatNSStringIdx(I, Idx)) {
3828 if (!Format || NumArgs <= Idx)
3830 const Expr *FormatExpr = Args[Idx];
3831 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
3832 FormatExpr = CSCE->getSubExpr();
3833 const StringLiteral *FormatString;
3834 if (const ObjCStringLiteral *OSL =
3835 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
3836 FormatString = OSL->getString();
3838 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
3841 if (S.FormatStringHasSArg(FormatString)) {
3842 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
3844 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
3845 << FDecl->getDeclName();
3849 /// Determine whether the given type has a non-null nullability annotation.
3850 static bool isNonNullType(ASTContext &ctx, QualType type) {
3851 if (auto nullability = type->getNullability(ctx))
3852 return *nullability == NullabilityKind::NonNull;
3857 static void CheckNonNullArguments(Sema &S,
3858 const NamedDecl *FDecl,
3859 const FunctionProtoType *Proto,
3860 ArrayRef<const Expr *> Args,
3861 SourceLocation CallSiteLoc) {
3862 assert((FDecl || Proto) && "Need a function declaration or prototype");
3864 // Check the attributes attached to the method/function itself.
3865 llvm::SmallBitVector NonNullArgs;
3867 // Handle the nonnull attribute on the function/method declaration itself.
3868 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
3869 if (!NonNull->args_size()) {
3870 // Easy case: all pointer arguments are nonnull.
3871 for (const auto *Arg : Args)
3872 if (S.isValidPointerAttrType(Arg->getType()))
3873 CheckNonNullArgument(S, Arg, CallSiteLoc);
3877 for (const ParamIdx &Idx : NonNull->args()) {
3878 unsigned IdxAST = Idx.getASTIndex();
3879 if (IdxAST >= Args.size())
3881 if (NonNullArgs.empty())
3882 NonNullArgs.resize(Args.size());
3883 NonNullArgs.set(IdxAST);
3888 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
3889 // Handle the nonnull attribute on the parameters of the
3891 ArrayRef<ParmVarDecl*> parms;
3892 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
3893 parms = FD->parameters();
3895 parms = cast<ObjCMethodDecl>(FDecl)->parameters();
3897 unsigned ParamIndex = 0;
3898 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
3899 I != E; ++I, ++ParamIndex) {
3900 const ParmVarDecl *PVD = *I;
3901 if (PVD->hasAttr<NonNullAttr>() ||
3902 isNonNullType(S.Context, PVD->getType())) {
3903 if (NonNullArgs.empty())
3904 NonNullArgs.resize(Args.size());
3906 NonNullArgs.set(ParamIndex);
3910 // If we have a non-function, non-method declaration but no
3911 // function prototype, try to dig out the function prototype.
3913 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
3914 QualType type = VD->getType().getNonReferenceType();
3915 if (auto pointerType = type->getAs<PointerType>())
3916 type = pointerType->getPointeeType();
3917 else if (auto blockType = type->getAs<BlockPointerType>())
3918 type = blockType->getPointeeType();
3919 // FIXME: data member pointers?
3921 // Dig out the function prototype, if there is one.
3922 Proto = type->getAs<FunctionProtoType>();
3926 // Fill in non-null argument information from the nullability
3927 // information on the parameter types (if we have them).
3930 for (auto paramType : Proto->getParamTypes()) {
3931 if (isNonNullType(S.Context, paramType)) {
3932 if (NonNullArgs.empty())
3933 NonNullArgs.resize(Args.size());
3935 NonNullArgs.set(Index);
3943 // Check for non-null arguments.
3944 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
3945 ArgIndex != ArgIndexEnd; ++ArgIndex) {
3946 if (NonNullArgs[ArgIndex])
3947 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
3951 /// Handles the checks for format strings, non-POD arguments to vararg
3952 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
3954 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
3955 const Expr *ThisArg, ArrayRef<const Expr *> Args,
3956 bool IsMemberFunction, SourceLocation Loc,
3957 SourceRange Range, VariadicCallType CallType) {
3958 // FIXME: We should check as much as we can in the template definition.
3959 if (CurContext->isDependentContext())
3962 // Printf and scanf checking.
3963 llvm::SmallBitVector CheckedVarArgs;
3965 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
3966 // Only create vector if there are format attributes.
3967 CheckedVarArgs.resize(Args.size());
3969 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
3974 // Refuse POD arguments that weren't caught by the format string
3976 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
3977 if (CallType != VariadicDoesNotApply &&
3978 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
3979 unsigned NumParams = Proto ? Proto->getNumParams()
3980 : FDecl && isa<FunctionDecl>(FDecl)
3981 ? cast<FunctionDecl>(FDecl)->getNumParams()
3982 : FDecl && isa<ObjCMethodDecl>(FDecl)
3983 ? cast<ObjCMethodDecl>(FDecl)->param_size()
3986 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
3987 // Args[ArgIdx] can be null in malformed code.
3988 if (const Expr *Arg = Args[ArgIdx]) {
3989 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
3990 checkVariadicArgument(Arg, CallType);
3995 if (FDecl || Proto) {
3996 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);
3998 // Type safety checking.
4000 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
4001 CheckArgumentWithTypeTag(I, Args, Loc);
4006 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
4009 /// CheckConstructorCall - Check a constructor call for correctness and safety
4010 /// properties not enforced by the C type system.
4011 void Sema::CheckConstructorCall(FunctionDecl *FDecl,
4012 ArrayRef<const Expr *> Args,
4013 const FunctionProtoType *Proto,
4014 SourceLocation Loc) {
4015 VariadicCallType CallType =
4016 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
4017 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
4018 Loc, SourceRange(), CallType);
4021 /// CheckFunctionCall - Check a direct function call for various correctness
4022 /// and safety properties not strictly enforced by the C type system.
4023 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
4024 const FunctionProtoType *Proto) {
4025 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
4026 isa<CXXMethodDecl>(FDecl);
4027 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
4028 IsMemberOperatorCall;
4029 VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
4030 TheCall->getCallee());
4031 Expr** Args = TheCall->getArgs();
4032 unsigned NumArgs = TheCall->getNumArgs();
4034 Expr *ImplicitThis = nullptr;
4035 if (IsMemberOperatorCall) {
4036 // If this is a call to a member operator, hide the first argument
4038 // FIXME: Our choice of AST representation here is less than ideal.
4039 ImplicitThis = Args[0];
4042 } else if (IsMemberFunction)
4044 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();
4046 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs),
4047 IsMemberFunction, TheCall->getRParenLoc(),
4048 TheCall->getCallee()->getSourceRange(), CallType);
4050 IdentifierInfo *FnInfo = FDecl->getIdentifier();
4051 // None of the checks below are needed for functions that don't have
4052 // simple names (e.g., C++ conversion functions).
4056 CheckAbsoluteValueFunction(TheCall, FDecl);
4057 CheckMaxUnsignedZero(TheCall, FDecl);
4059 if (getLangOpts().ObjC1)
4060 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
4062 unsigned CMId = FDecl->getMemoryFunctionKind();
4066 // Handle memory setting and copying functions.
4067 if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat)
4068 CheckStrlcpycatArguments(TheCall, FnInfo);
4069 else if (CMId == Builtin::BIstrncat)
4070 CheckStrncatArguments(TheCall, FnInfo);
4072 CheckMemaccessArguments(TheCall, CMId, FnInfo);
4077 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
4078 ArrayRef<const Expr *> Args) {
4079 VariadicCallType CallType =
4080 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
4082 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
4083 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
4089 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
4090 const FunctionProtoType *Proto) {
4092 if (const auto *V = dyn_cast<VarDecl>(NDecl))
4093 Ty = V->getType().getNonReferenceType();
4094 else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
4095 Ty = F->getType().getNonReferenceType();
4099 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
4100 !Ty->isFunctionProtoType())
4103 VariadicCallType CallType;
4104 if (!Proto || !Proto->isVariadic()) {
4105 CallType = VariadicDoesNotApply;
4106 } else if (Ty->isBlockPointerType()) {
4107 CallType = VariadicBlock;
4108 } else { // Ty->isFunctionPointerType()
4109 CallType = VariadicFunction;
4112 checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
4113 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
4114 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
4115 TheCall->getCallee()->getSourceRange(), CallType);
4120 /// Checks function calls when a FunctionDecl or a NamedDecl is not available,
4121 /// such as function pointers returned from functions.
4122 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
4123 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
4124 TheCall->getCallee());
4125 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
4126 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
4127 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
4128 TheCall->getCallee()->getSourceRange(), CallType);
4133 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
4134 if (!llvm::isValidAtomicOrderingCABI(Ordering))
4137 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
4139 case AtomicExpr::AO__c11_atomic_init:
4140 case AtomicExpr::AO__opencl_atomic_init:
4141 llvm_unreachable("There is no ordering argument for an init");
4143 case AtomicExpr::AO__c11_atomic_load:
4144 case AtomicExpr::AO__opencl_atomic_load:
4145 case AtomicExpr::AO__atomic_load_n:
4146 case AtomicExpr::AO__atomic_load:
4147 return OrderingCABI != llvm::AtomicOrderingCABI::release &&
4148 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
4150 case AtomicExpr::AO__c11_atomic_store:
4151 case AtomicExpr::AO__opencl_atomic_store:
4152 case AtomicExpr::AO__atomic_store:
4153 case AtomicExpr::AO__atomic_store_n:
4154 return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
4155 OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
4156 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
4163 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
4164 AtomicExpr::AtomicOp Op) {
4165 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
4166 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
4168 // All the non-OpenCL operations take one of the following forms.
4169 // The OpenCL operations take the __c11 forms with one extra argument for
4170 // synchronization scope.
4172 // C __c11_atomic_init(A *, C)
4175 // C __c11_atomic_load(A *, int)
4178 // void __atomic_load(A *, CP, int)
4181 // void __atomic_store(A *, CP, int)
4184 // C __c11_atomic_add(A *, M, int)
4187 // C __atomic_exchange_n(A *, CP, int)
4190 // void __atomic_exchange(A *, C *, CP, int)
4193 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
4196 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
4200 const unsigned NumForm = GNUCmpXchg + 1;
4201 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
4202 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
4204 // C is an appropriate type,
4205 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
4206 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
4207 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
4208 // the int parameters are for orderings.
4210 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
4211 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
4212 "need to update code for modified forms");
4213 static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
4214 AtomicExpr::AO__c11_atomic_fetch_xor + 1 ==
4215 AtomicExpr::AO__atomic_load,
4216 "need to update code for modified C11 atomics");
4217 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
4218 Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
4219 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
4220 Op <= AtomicExpr::AO__c11_atomic_fetch_xor) ||
4222 bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
4223 Op == AtomicExpr::AO__atomic_store_n ||
4224 Op == AtomicExpr::AO__atomic_exchange_n ||
4225 Op == AtomicExpr::AO__atomic_compare_exchange_n;
4226 bool IsAddSub = false;
4227 bool IsMinMax = false;
4230 case AtomicExpr::AO__c11_atomic_init:
4231 case AtomicExpr::AO__opencl_atomic_init:
4235 case AtomicExpr::AO__c11_atomic_load:
4236 case AtomicExpr::AO__opencl_atomic_load:
4237 case AtomicExpr::AO__atomic_load_n:
4241 case AtomicExpr::AO__atomic_load:
4245 case AtomicExpr::AO__c11_atomic_store:
4246 case AtomicExpr::AO__opencl_atomic_store:
4247 case AtomicExpr::AO__atomic_store:
4248 case AtomicExpr::AO__atomic_store_n:
4252 case AtomicExpr::AO__c11_atomic_fetch_add:
4253 case AtomicExpr::AO__c11_atomic_fetch_sub:
4254 case AtomicExpr::AO__opencl_atomic_fetch_add:
4255 case AtomicExpr::AO__opencl_atomic_fetch_sub:
4256 case AtomicExpr::AO__opencl_atomic_fetch_min:
4257 case AtomicExpr::AO__opencl_atomic_fetch_max:
4258 case AtomicExpr::AO__atomic_fetch_add:
4259 case AtomicExpr::AO__atomic_fetch_sub:
4260 case AtomicExpr::AO__atomic_add_fetch:
4261 case AtomicExpr::AO__atomic_sub_fetch:
4264 case AtomicExpr::AO__c11_atomic_fetch_and:
4265 case AtomicExpr::AO__c11_atomic_fetch_or:
4266 case AtomicExpr::AO__c11_atomic_fetch_xor:
4267 case AtomicExpr::AO__opencl_atomic_fetch_and:
4268 case AtomicExpr::AO__opencl_atomic_fetch_or:
4269 case AtomicExpr::AO__opencl_atomic_fetch_xor:
4270 case AtomicExpr::AO__atomic_fetch_and:
4271 case AtomicExpr::AO__atomic_fetch_or:
4272 case AtomicExpr::AO__atomic_fetch_xor:
4273 case AtomicExpr::AO__atomic_fetch_nand:
4274 case AtomicExpr::AO__atomic_and_fetch:
4275 case AtomicExpr::AO__atomic_or_fetch:
4276 case AtomicExpr::AO__atomic_xor_fetch:
4277 case AtomicExpr::AO__atomic_nand_fetch:
4281 case AtomicExpr::AO__atomic_fetch_min:
4282 case AtomicExpr::AO__atomic_fetch_max:
4287 case AtomicExpr::AO__c11_atomic_exchange:
4288 case AtomicExpr::AO__opencl_atomic_exchange:
4289 case AtomicExpr::AO__atomic_exchange_n:
4293 case AtomicExpr::AO__atomic_exchange:
4297 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
4298 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
4299 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
4300 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
4304 case AtomicExpr::AO__atomic_compare_exchange:
4305 case AtomicExpr::AO__atomic_compare_exchange_n:
4310 unsigned AdjustedNumArgs = NumArgs[Form];
4311 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init)
4313 // Check we have the right number of arguments.
4314 if (TheCall->getNumArgs() < AdjustedNumArgs) {
4315 Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
4316 << 0 << AdjustedNumArgs << TheCall->getNumArgs()
4317 << TheCall->getCallee()->getSourceRange();
4319 } else if (TheCall->getNumArgs() > AdjustedNumArgs) {
4320 Diag(TheCall->getArg(AdjustedNumArgs)->getLocStart(),
4321 diag::err_typecheck_call_too_many_args)
4322 << 0 << AdjustedNumArgs << TheCall->getNumArgs()
4323 << TheCall->getCallee()->getSourceRange();
4327 // Inspect the first argument of the atomic operation.
4328 Expr *Ptr = TheCall->getArg(0);
4329 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr);
4330 if (ConvertedPtr.isInvalid())
4333 Ptr = ConvertedPtr.get();
4334 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
4336 Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
4337 << Ptr->getType() << Ptr->getSourceRange();
4341 // For a __c11 builtin, this should be a pointer to an _Atomic type.
4342 QualType AtomTy = pointerType->getPointeeType(); // 'A'
4343 QualType ValType = AtomTy; // 'C'
4345 if (!AtomTy->isAtomicType()) {
4346 Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
4347 << Ptr->getType() << Ptr->getSourceRange();
4350 if (AtomTy.isConstQualified() ||
4351 AtomTy.getAddressSpace() == LangAS::opencl_constant) {
4352 Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_atomic)
4353 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
4354 << Ptr->getSourceRange();
4357 ValType = AtomTy->getAs<AtomicType>()->getValueType();
4358 } else if (Form != Load && Form != LoadCopy) {
4359 if (ValType.isConstQualified()) {
4360 Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_pointer)
4361 << Ptr->getType() << Ptr->getSourceRange();
4366 // For an arithmetic operation, the implied arithmetic must be well-formed.
4367 if (Form == Arithmetic) {
4368 // gcc does not enforce these rules for GNU atomics, but we do so for sanity.
4369 if (IsAddSub && !ValType->isIntegerType()
4370 && !ValType->isPointerType()) {
4371 Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
4372 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
4376 const BuiltinType *BT = ValType->getAs<BuiltinType>();
4377 if (!BT || (BT->getKind() != BuiltinType::Int &&
4378 BT->getKind() != BuiltinType::UInt)) {
4379 Diag(DRE->getLocStart(), diag::err_atomic_op_needs_int32_or_ptr);
4383 if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) {
4384 Diag(DRE->getLocStart(), diag::err_atomic_op_bitwise_needs_atomic_int)
4385 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
4388 if (IsC11 && ValType->isPointerType() &&
4389 RequireCompleteType(Ptr->getLocStart(), ValType->getPointeeType(),
4390 diag::err_incomplete_type)) {
4393 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
4394 // For __atomic_*_n operations, the value type must be a scalar integral or
4395 // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
4396 Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
4397 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
4401 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
4402 !AtomTy->isScalarType()) {
4403 // For GNU atomics, require a trivially-copyable type. This is not part of
4404 // the GNU atomics specification, but we enforce it for sanity.
4405 Diag(DRE->getLocStart(), diag::err_atomic_op_needs_trivial_copy)
4406 << Ptr->getType() << Ptr->getSourceRange();
4410 switch (ValType.getObjCLifetime()) {
4411 case Qualifiers::OCL_None:
4412 case Qualifiers::OCL_ExplicitNone:
4416 case Qualifiers::OCL_Weak:
4417 case Qualifiers::OCL_Strong:
4418 case Qualifiers::OCL_Autoreleasing:
4419 // FIXME: Can this happen? By this point, ValType should be known
4420 // to be trivially copyable.
4421 Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
4422 << ValType << Ptr->getSourceRange();
4426 // All atomic operations have an overload which takes a pointer to a volatile
4427 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
4428 // into the result or the other operands. Similarly atomic_load takes a
4429 // pointer to a const 'A'.
4430 ValType.removeLocalVolatile();
4431 ValType.removeLocalConst();
4432 QualType ResultType = ValType;
4433 if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
4435 ResultType = Context.VoidTy;
4436 else if (Form == C11CmpXchg || Form == GNUCmpXchg)
4437 ResultType = Context.BoolTy;
4439 // The type of a parameter passed 'by value'. In the GNU atomics, such
4440 // arguments are actually passed as pointers.
4441 QualType ByValType = ValType; // 'CP'
4442 bool IsPassedByAddress = false;
4443 if (!IsC11 && !IsN) {
4444 ByValType = Ptr->getType();
4445 IsPassedByAddress = true;
4448 // The first argument's non-CV pointer type is used to deduce the type of
4449 // subsequent arguments, except for:
4450 // - weak flag (always converted to bool)
4451 // - memory order (always converted to int)
4452 // - scope (always converted to int)
4453 for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
4455 if (i < NumVals[Form] + 1) {
4458 // The first argument is always a pointer. It has a fixed type.
4459 // It is always dereferenced, a nullptr is undefined.
4460 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getLocStart());
4461 // Nothing else to do: we already know all we want about this pointer.
4464 // The second argument is the non-atomic operand. For arithmetic, this
4465 // is always passed by value, and for a compare_exchange it is always
4466 // passed by address. For the rest, GNU uses by-address and C11 uses
4468 assert(Form != Load);
4469 if (Form == Init || (Form == Arithmetic && ValType->isIntegerType()))
4471 else if (Form == Copy || Form == Xchg) {
4472 if (IsPassedByAddress)
4473 // The value pointer is always dereferenced, a nullptr is undefined.
4474 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getLocStart());
4476 } else if (Form == Arithmetic)
4477 Ty = Context.getPointerDiffType();
4479 Expr *ValArg = TheCall->getArg(i);
4480 // The value pointer is always dereferenced, a nullptr is undefined.
4481 CheckNonNullArgument(*this, ValArg, DRE->getLocStart());
4482 LangAS AS = LangAS::Default;
4483 // Keep address space of non-atomic pointer type.
4484 if (const PointerType *PtrTy =
4485 ValArg->getType()->getAs<PointerType>()) {
4486 AS = PtrTy->getPointeeType().getAddressSpace();
4488 Ty = Context.getPointerType(
4489 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
4493 // The third argument to compare_exchange / GNU exchange is the desired
4494 // value, either by-value (for the C11 and *_n variant) or as a pointer.
4495 if (IsPassedByAddress)
4496 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getLocStart());
4500 // The fourth argument to GNU compare_exchange is a 'weak' flag.
4501 Ty = Context.BoolTy;
4505 // The order(s) and scope are always converted to int.
4509 InitializedEntity Entity =
4510 InitializedEntity::InitializeParameter(Context, Ty, false);
4511 ExprResult Arg = TheCall->getArg(i);
4512 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
4513 if (Arg.isInvalid())
4515 TheCall->setArg(i, Arg.get());
4518 // Permute the arguments into a 'consistent' order.
4519 SmallVector<Expr*, 5> SubExprs;
4520 SubExprs.push_back(Ptr);
4523 // Note, AtomicExpr::getVal1() has a special case for this atomic.
4524 SubExprs.push_back(TheCall->getArg(1)); // Val1
4527 SubExprs.push_back(TheCall->getArg(1)); // Order
4533 SubExprs.push_back(TheCall->getArg(2)); // Order
4534 SubExprs.push_back(TheCall->getArg(1)); // Val1
4537 // Note, AtomicExpr::getVal2() has a special case for this atomic.
4538 SubExprs.push_back(TheCall->getArg(3)); // Order
4539 SubExprs.push_back(TheCall->getArg(1)); // Val1
4540 SubExprs.push_back(TheCall->getArg(2)); // Val2
4543 SubExprs.push_back(TheCall->getArg(3)); // Order
4544 SubExprs.push_back(TheCall->getArg(1)); // Val1
4545 SubExprs.push_back(TheCall->getArg(4)); // OrderFail
4546 SubExprs.push_back(TheCall->getArg(2)); // Val2
4549 SubExprs.push_back(TheCall->getArg(4)); // Order
4550 SubExprs.push_back(TheCall->getArg(1)); // Val1
4551 SubExprs.push_back(TheCall->getArg(5)); // OrderFail
4552 SubExprs.push_back(TheCall->getArg(2)); // Val2
4553 SubExprs.push_back(TheCall->getArg(3)); // Weak
4557 if (SubExprs.size() >= 2 && Form != Init) {
4558 llvm::APSInt Result(32);
4559 if (SubExprs[1]->isIntegerConstantExpr(Result, Context) &&
4560 !isValidOrderingForOp(Result.getSExtValue(), Op))
4561 Diag(SubExprs[1]->getLocStart(),
4562 diag::warn_atomic_op_has_invalid_memory_order)
4563 << SubExprs[1]->getSourceRange();
4566 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
4567 auto *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);
4568 llvm::APSInt Result(32);
4569 if (Scope->isIntegerConstantExpr(Result, Context) &&
4570 !ScopeModel->isValid(Result.getZExtValue())) {
4571 Diag(Scope->getLocStart(), diag::err_atomic_op_has_invalid_synch_scope)
4572 << Scope->getSourceRange();
4574 SubExprs.push_back(Scope);
4577 AtomicExpr *AE = new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
4578 SubExprs, ResultType, Op,
4579 TheCall->getRParenLoc());
4581 if ((Op == AtomicExpr::AO__c11_atomic_load ||
4582 Op == AtomicExpr::AO__c11_atomic_store ||
4583 Op == AtomicExpr::AO__opencl_atomic_load ||
4584 Op == AtomicExpr::AO__opencl_atomic_store ) &&
4585 Context.AtomicUsesUnsupportedLibcall(AE))
4586 Diag(AE->getLocStart(), diag::err_atomic_load_store_uses_lib)
4587 << ((Op == AtomicExpr::AO__c11_atomic_load ||
4588 Op == AtomicExpr::AO__opencl_atomic_load)
4594 /// checkBuiltinArgument - Given a call to a builtin function, perform
4595 /// normal type-checking on the given argument, updating the call in
4596 /// place. This is useful when a builtin function requires custom
4597 /// type-checking for some of its arguments but not necessarily all of
4600 /// Returns true on error.
4601 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
4602 FunctionDecl *Fn = E->getDirectCallee();
4603 assert(Fn && "builtin call without direct callee!");
4605 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
4606 InitializedEntity Entity =
4607 InitializedEntity::InitializeParameter(S.Context, Param);
4609 ExprResult Arg = E->getArg(0);
4610 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
4611 if (Arg.isInvalid())
4614 E->setArg(ArgIndex, Arg.get());
4618 /// SemaBuiltinAtomicOverloaded - We have a call to a function like
4619 /// __sync_fetch_and_add, which is an overloaded function based on the pointer
4620 /// type of its first argument. The main ActOnCallExpr routines have already
4621 /// promoted the types of arguments because all of these calls are prototyped as
4624 /// This function goes through and does final semantic checking for these
4627 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
4628 CallExpr *TheCall = (CallExpr *)TheCallResult.get();
4629 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
4630 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
4632 // Ensure that we have at least one argument to do type inference from.
4633 if (TheCall->getNumArgs() < 1) {
4634 Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
4635 << 0 << 1 << TheCall->getNumArgs()
4636 << TheCall->getCallee()->getSourceRange();
4640 // Inspect the first argument of the atomic builtin. This should always be
4641 // a pointer type, whose element is an integral scalar or pointer type.
4642 // Because it is a pointer type, we don't have to worry about any implicit
4644 // FIXME: We don't allow floating point scalars as input.
4645 Expr *FirstArg = TheCall->getArg(0);
4646 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
4647 if (FirstArgResult.isInvalid())
4649 FirstArg = FirstArgResult.get();
4650 TheCall->setArg(0, FirstArg);
4652 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
4654 Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
4655 << FirstArg->getType() << FirstArg->getSourceRange();
4659 QualType ValType = pointerType->getPointeeType();
4660 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
4661 !ValType->isBlockPointerType()) {
4662 Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intptr)
4663 << FirstArg->getType() << FirstArg->getSourceRange();
4667 if (ValType.isConstQualified()) {
4668 Diag(DRE->getLocStart(), diag::err_atomic_builtin_cannot_be_const)
4669 << FirstArg->getType() << FirstArg->getSourceRange();
4673 switch (ValType.getObjCLifetime()) {
4674 case Qualifiers::OCL_None:
4675 case Qualifiers::OCL_ExplicitNone:
4679 case Qualifiers::OCL_Weak:
4680 case Qualifiers::OCL_Strong:
4681 case Qualifiers::OCL_Autoreleasing:
4682 Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
4683 << ValType << FirstArg->getSourceRange();
4687 // Strip any qualifiers off ValType.
4688 ValType = ValType.getUnqualifiedType();
4690 // The majority of builtins return a value, but a few have special return
4691 // types, so allow them to override appropriately below.
4692 QualType ResultType = ValType;
4694 // We need to figure out which concrete builtin this maps onto. For example,
4695 // __sync_fetch_and_add with a 2 byte object turns into
4696 // __sync_fetch_and_add_2.
4697 #define BUILTIN_ROW(x) \
4698 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
4699 Builtin::BI##x##_8, Builtin::BI##x##_16 }
4701 static const unsigned BuiltinIndices[][5] = {
4702 BUILTIN_ROW(__sync_fetch_and_add),
4703 BUILTIN_ROW(__sync_fetch_and_sub),
4704 BUILTIN_ROW(__sync_fetch_and_or),
4705 BUILTIN_ROW(__sync_fetch_and_and),
4706 BUILTIN_ROW(__sync_fetch_and_xor),
4707 BUILTIN_ROW(__sync_fetch_and_nand),
4709 BUILTIN_ROW(__sync_add_and_fetch),
4710 BUILTIN_ROW(__sync_sub_and_fetch),
4711 BUILTIN_ROW(__sync_and_and_fetch),
4712 BUILTIN_ROW(__sync_or_and_fetch),
4713 BUILTIN_ROW(__sync_xor_and_fetch),
4714 BUILTIN_ROW(__sync_nand_and_fetch),
4716 BUILTIN_ROW(__sync_val_compare_and_swap),
4717 BUILTIN_ROW(__sync_bool_compare_and_swap),
4718 BUILTIN_ROW(__sync_lock_test_and_set),
4719 BUILTIN_ROW(__sync_lock_release),
4720 BUILTIN_ROW(__sync_swap)
4724 // Determine the index of the size.
4726 switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
4727 case 1: SizeIndex = 0; break;
4728 case 2: SizeIndex = 1; break;
4729 case 4: SizeIndex = 2; break;
4730 case 8: SizeIndex = 3; break;
4731 case 16: SizeIndex = 4; break;
4733 Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size)
4734 << FirstArg->getType() << FirstArg->getSourceRange();
4738 // Each of these builtins has one pointer argument, followed by some number of
4739 // values (0, 1 or 2) followed by a potentially empty varags list of stuff
4740 // that we ignore. Find out which row of BuiltinIndices to read from as well
4741 // as the number of fixed args.
4742 unsigned BuiltinID = FDecl->getBuiltinID();
4743 unsigned BuiltinIndex, NumFixed = 1;
4744 bool WarnAboutSemanticsChange = false;
4745 switch (BuiltinID) {
4746 default: llvm_unreachable("Unknown overloaded atomic builtin!");
4747 case Builtin::BI__sync_fetch_and_add:
4748 case Builtin::BI__sync_fetch_and_add_1:
4749 case Builtin::BI__sync_fetch_and_add_2:
4750 case Builtin::BI__sync_fetch_and_add_4:
4751 case Builtin::BI__sync_fetch_and_add_8:
4752 case Builtin::BI__sync_fetch_and_add_16:
4756 case Builtin::BI__sync_fetch_and_sub:
4757 case Builtin::BI__sync_fetch_and_sub_1:
4758 case Builtin::BI__sync_fetch_and_sub_2:
4759 case Builtin::BI__sync_fetch_and_sub_4:
4760 case Builtin::BI__sync_fetch_and_sub_8:
4761 case Builtin::BI__sync_fetch_and_sub_16:
4765 case Builtin::BI__sync_fetch_and_or:
4766 case Builtin::BI__sync_fetch_and_or_1:
4767 case Builtin::BI__sync_fetch_and_or_2:
4768 case Builtin::BI__sync_fetch_and_or_4:
4769 case Builtin::BI__sync_fetch_and_or_8:
4770 case Builtin::BI__sync_fetch_and_or_16:
4774 case Builtin::BI__sync_fetch_and_and:
4775 case Builtin::BI__sync_fetch_and_and_1:
4776 case Builtin::BI__sync_fetch_and_and_2:
4777 case Builtin::BI__sync_fetch_and_and_4:
4778 case Builtin::BI__sync_fetch_and_and_8:
4779 case Builtin::BI__sync_fetch_and_and_16:
4783 case Builtin::BI__sync_fetch_and_xor:
4784 case Builtin::BI__sync_fetch_and_xor_1:
4785 case Builtin::BI__sync_fetch_and_xor_2:
4786 case Builtin::BI__sync_fetch_and_xor_4:
4787 case Builtin::BI__sync_fetch_and_xor_8:
4788 case Builtin::BI__sync_fetch_and_xor_16:
4792 case Builtin::BI__sync_fetch_and_nand:
4793 case Builtin::BI__sync_fetch_and_nand_1:
4794 case Builtin::BI__sync_fetch_and_nand_2:
4795 case Builtin::BI__sync_fetch_and_nand_4:
4796 case Builtin::BI__sync_fetch_and_nand_8:
4797 case Builtin::BI__sync_fetch_and_nand_16:
4799 WarnAboutSemanticsChange = true;
4802 case Builtin::BI__sync_add_and_fetch:
4803 case Builtin::BI__sync_add_and_fetch_1:
4804 case Builtin::BI__sync_add_and_fetch_2:
4805 case Builtin::BI__sync_add_and_fetch_4:
4806 case Builtin::BI__sync_add_and_fetch_8:
4807 case Builtin::BI__sync_add_and_fetch_16:
4811 case Builtin::BI__sync_sub_and_fetch:
4812 case Builtin::BI__sync_sub_and_fetch_1:
4813 case Builtin::BI__sync_sub_and_fetch_2:
4814 case Builtin::BI__sync_sub_and_fetch_4:
4815 case Builtin::BI__sync_sub_and_fetch_8:
4816 case Builtin::BI__sync_sub_and_fetch_16:
4820 case Builtin::BI__sync_and_and_fetch:
4821 case Builtin::BI__sync_and_and_fetch_1:
4822 case Builtin::BI__sync_and_and_fetch_2:
4823 case Builtin::BI__sync_and_and_fetch_4:
4824 case Builtin::BI__sync_and_and_fetch_8:
4825 case Builtin::BI__sync_and_and_fetch_16:
4829 case Builtin::BI__sync_or_and_fetch:
4830 case Builtin::BI__sync_or_and_fetch_1:
4831 case Builtin::BI__sync_or_and_fetch_2:
4832 case Builtin::BI__sync_or_and_fetch_4:
4833 case Builtin::BI__sync_or_and_fetch_8:
4834 case Builtin::BI__sync_or_and_fetch_16:
4838 case Builtin::BI__sync_xor_and_fetch:
4839 case Builtin::BI__sync_xor_and_fetch_1:
4840 case Builtin::BI__sync_xor_and_fetch_2:
4841 case Builtin::BI__sync_xor_and_fetch_4:
4842 case Builtin::BI__sync_xor_and_fetch_8:
4843 case Builtin::BI__sync_xor_and_fetch_16:
4847 case Builtin::BI__sync_nand_and_fetch:
4848 case Builtin::BI__sync_nand_and_fetch_1:
4849 case Builtin::BI__sync_nand_and_fetch_2:
4850 case Builtin::BI__sync_nand_and_fetch_4:
4851 case Builtin::BI__sync_nand_and_fetch_8:
4852 case Builtin::BI__sync_nand_and_fetch_16:
4854 WarnAboutSemanticsChange = true;
4857 case Builtin::BI__sync_val_compare_and_swap:
4858 case Builtin::BI__sync_val_compare_and_swap_1:
4859 case Builtin::BI__sync_val_compare_and_swap_2:
4860 case Builtin::BI__sync_val_compare_and_swap_4:
4861 case Builtin::BI__sync_val_compare_and_swap_8:
4862 case Builtin::BI__sync_val_compare_and_swap_16:
4867 case Builtin::BI__sync_bool_compare_and_swap:
4868 case Builtin::BI__sync_bool_compare_and_swap_1:
4869 case Builtin::BI__sync_bool_compare_and_swap_2:
4870 case Builtin::BI__sync_bool_compare_and_swap_4:
4871 case Builtin::BI__sync_bool_compare_and_swap_8:
4872 case Builtin::BI__sync_bool_compare_and_swap_16:
4875 ResultType = Context.BoolTy;
4878 case Builtin::BI__sync_lock_test_and_set:
4879 case Builtin::BI__sync_lock_test_and_set_1:
4880 case Builtin::BI__sync_lock_test_and_set_2:
4881 case Builtin::BI__sync_lock_test_and_set_4:
4882 case Builtin::BI__sync_lock_test_and_set_8:
4883 case Builtin::BI__sync_lock_test_and_set_16:
4887 case Builtin::BI__sync_lock_release:
4888 case Builtin::BI__sync_lock_release_1:
4889 case Builtin::BI__sync_lock_release_2:
4890 case Builtin::BI__sync_lock_release_4:
4891 case Builtin::BI__sync_lock_release_8:
4892 case Builtin::BI__sync_lock_release_16:
4895 ResultType = Context.VoidTy;
4898 case Builtin::BI__sync_swap:
4899 case Builtin::BI__sync_swap_1:
4900 case Builtin::BI__sync_swap_2:
4901 case Builtin::BI__sync_swap_4:
4902 case Builtin::BI__sync_swap_8:
4903 case Builtin::BI__sync_swap_16:
4908 // Now that we know how many fixed arguments we expect, first check that we
4909 // have at least that many.
4910 if (TheCall->getNumArgs() < 1+NumFixed) {
4911 Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
4912 << 0 << 1+NumFixed << TheCall->getNumArgs()
4913 << TheCall->getCallee()->getSourceRange();
4917 if (WarnAboutSemanticsChange) {
4918 Diag(TheCall->getLocEnd(), diag::warn_sync_fetch_and_nand_semantics_change)
4919 << TheCall->getCallee()->getSourceRange();
4922 // Get the decl for the concrete builtin from this, we can tell what the
4923 // concrete integer type we should convert to is.
4924 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
4925 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
4926 FunctionDecl *NewBuiltinDecl;
4927 if (NewBuiltinID == BuiltinID)
4928 NewBuiltinDecl = FDecl;
4930 // Perform builtin lookup to avoid redeclaring it.
4931 DeclarationName DN(&Context.Idents.get(NewBuiltinName));
4932 LookupResult Res(*this, DN, DRE->getLocStart(), LookupOrdinaryName);
4933 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
4934 assert(Res.getFoundDecl());
4935 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
4936 if (!NewBuiltinDecl)
4940 // The first argument --- the pointer --- has a fixed type; we
4941 // deduce the types of the rest of the arguments accordingly. Walk
4942 // the remaining arguments, converting them to the deduced value type.
4943 for (unsigned i = 0; i != NumFixed; ++i) {
4944 ExprResult Arg = TheCall->getArg(i+1);
4946 // GCC does an implicit conversion to the pointer or integer ValType. This
4947 // can fail in some cases (1i -> int**), check for this error case now.
4948 // Initialize the argument.
4949 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
4950 ValType, /*consume*/ false);
4951 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
4952 if (Arg.isInvalid())
4955 // Okay, we have something that *can* be converted to the right type. Check
4956 // to see if there is a potentially weird extension going on here. This can
4957 // happen when you do an atomic operation on something like an char* and
4958 // pass in 42. The 42 gets converted to char. This is even more strange
4959 // for things like 45.123 -> char, etc.
4960 // FIXME: Do this check.
4961 TheCall->setArg(i+1, Arg.get());
4964 ASTContext& Context = this->getASTContext();
4966 // Create a new DeclRefExpr to refer to the new decl.
4967 DeclRefExpr* NewDRE = DeclRefExpr::Create(
4969 DRE->getQualifierLoc(),
4972 /*enclosing*/ false,
4974 Context.BuiltinFnTy,
4975 DRE->getValueKind());
4977 // Set the callee in the CallExpr.
4978 // FIXME: This loses syntactic information.
4979 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
4980 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
4981 CK_BuiltinFnToFnPtr);
4982 TheCall->setCallee(PromotedCall.get());
4984 // Change the result type of the call to match the original value type. This
4985 // is arbitrary, but the codegen for these builtins ins design to handle it
4987 TheCall->setType(ResultType);
4989 return TheCallResult;
4992 /// SemaBuiltinNontemporalOverloaded - We have a call to
4993 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
4994 /// overloaded function based on the pointer type of its last argument.
4996 /// This function goes through and does final semantic checking for these
4998 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
4999 CallExpr *TheCall = (CallExpr *)TheCallResult.get();
5001 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
5002 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
5003 unsigned BuiltinID = FDecl->getBuiltinID();
5004 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
5005 BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
5006 "Unexpected nontemporal load/store builtin!");
5007 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
5008 unsigned numArgs = isStore ? 2 : 1;
5010 // Ensure that we have the proper number of arguments.
5011 if (checkArgCount(*this, TheCall, numArgs))
5014 // Inspect the last argument of the nontemporal builtin. This should always
5015 // be a pointer type, from which we imply the type of the memory access.
5016 // Because it is a pointer type, we don't have to worry about any implicit
5018 Expr *PointerArg = TheCall->getArg(numArgs - 1);
5019 ExprResult PointerArgResult =
5020 DefaultFunctionArrayLvalueConversion(PointerArg);
5022 if (PointerArgResult.isInvalid())
5024 PointerArg = PointerArgResult.get();
5025 TheCall->setArg(numArgs - 1, PointerArg);
5027 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
5029 Diag(DRE->getLocStart(), diag::err_nontemporal_builtin_must_be_pointer)
5030 << PointerArg->getType() << PointerArg->getSourceRange();
5034 QualType ValType = pointerType->getPointeeType();
5036 // Strip any qualifiers off ValType.
5037 ValType = ValType.getUnqualifiedType();
5038 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
5039 !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
5040 !ValType->isVectorType()) {
5041 Diag(DRE->getLocStart(),
5042 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
5043 << PointerArg->getType() << PointerArg->getSourceRange();
5048 TheCall->setType(ValType);
5049 return TheCallResult;
5052 ExprResult ValArg = TheCall->getArg(0);
5053 InitializedEntity Entity = InitializedEntity::InitializeParameter(
5054 Context, ValType, /*consume*/ false);
5055 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
5056 if (ValArg.isInvalid())
5059 TheCall->setArg(0, ValArg.get());
5060 TheCall->setType(Context.VoidTy);
5061 return TheCallResult;
5064 /// CheckObjCString - Checks that the argument to the builtin
5065 /// CFString constructor is correct
5066 /// Note: It might also make sense to do the UTF-16 conversion here (would
5067 /// simplify the backend).
5068 bool Sema::CheckObjCString(Expr *Arg) {
5069 Arg = Arg->IgnoreParenCasts();
5070 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
5072 if (!Literal || !Literal->isAscii()) {
5073 Diag(Arg->getLocStart(), diag::err_cfstring_literal_not_string_constant)
5074 << Arg->getSourceRange();
5078 if (Literal->containsNonAsciiOrNull()) {
5079 StringRef String = Literal->getString();
5080 unsigned NumBytes = String.size();
5081 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
5082 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
5083 llvm::UTF16 *ToPtr = &ToBuf[0];
5085 llvm::ConversionResult Result =
5086 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
5087 ToPtr + NumBytes, llvm::strictConversion);
5088 // Check for conversion failure.
5089 if (Result != llvm::conversionOK)
5090 Diag(Arg->getLocStart(),
5091 diag::warn_cfstring_truncated) << Arg->getSourceRange();
5096 /// CheckObjCString - Checks that the format string argument to the os_log()
5097 /// and os_trace() functions is correct, and converts it to const char *.
5098 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
5099 Arg = Arg->IgnoreParenCasts();
5100 auto *Literal = dyn_cast<StringLiteral>(Arg);
5102 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
5103 Literal = ObjcLiteral->getString();
5107 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
5109 Diag(Arg->getLocStart(), diag::err_os_log_format_not_string_constant)
5110 << Arg->getSourceRange());
5113 ExprResult Result(Literal);
5114 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
5115 InitializedEntity Entity =
5116 InitializedEntity::InitializeParameter(Context, ResultTy, false);
5117 Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
5121 /// Check that the user is calling the appropriate va_start builtin for the
5122 /// target and calling convention.
5123 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
5124 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
5125 bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
5126 bool IsAArch64 = TT.getArch() == llvm::Triple::aarch64;
5127 bool IsWindows = TT.isOSWindows();
5128 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
5129 if (IsX64 || IsAArch64) {
5130 CallingConv CC = CC_C;
5131 if (const FunctionDecl *FD = S.getCurFunctionDecl())
5132 CC = FD->getType()->getAs<FunctionType>()->getCallConv();
5134 // Don't allow this in System V ABI functions.
5135 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
5136 return S.Diag(Fn->getLocStart(),
5137 diag::err_ms_va_start_used_in_sysv_function);
5139 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
5140 // On x64 Windows, don't allow this in System V ABI functions.
5141 // (Yes, that means there's no corresponding way to support variadic
5142 // System V ABI functions on Windows.)
5143 if ((IsWindows && CC == CC_X86_64SysV) ||
5144 (!IsWindows && CC == CC_Win64))
5145 return S.Diag(Fn->getLocStart(),
5146 diag::err_va_start_used_in_wrong_abi_function)
5153 return S.Diag(Fn->getLocStart(), diag::err_builtin_x64_aarch64_only);
5157 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
5158 ParmVarDecl **LastParam = nullptr) {
5159 // Determine whether the current function, block, or obj-c method is variadic
5160 // and get its parameter list.
5161 bool IsVariadic = false;
5162 ArrayRef<ParmVarDecl *> Params;
5163 DeclContext *Caller = S.CurContext;
5164 if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
5165 IsVariadic = Block->isVariadic();
5166 Params = Block->parameters();
5167 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
5168 IsVariadic = FD->isVariadic();
5169 Params = FD->parameters();
5170 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
5171 IsVariadic = MD->isVariadic();
5172 // FIXME: This isn't correct for methods (results in bogus warning).
5173 Params = MD->parameters();
5174 } else if (isa<CapturedDecl>(Caller)) {
5175 // We don't support va_start in a CapturedDecl.
5176 S.Diag(Fn->getLocStart(), diag::err_va_start_captured_stmt);
5179 // This must be some other declcontext that parses exprs.
5180 S.Diag(Fn->getLocStart(), diag::err_va_start_outside_function);
5185 S.Diag(Fn->getLocStart(), diag::err_va_start_fixed_function);
5190 *LastParam = Params.empty() ? nullptr : Params.back();
5195 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
5196 /// for validity. Emit an error and return true on failure; return false
5198 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
5199 Expr *Fn = TheCall->getCallee();
5201 if (checkVAStartABI(*this, BuiltinID, Fn))
5204 if (TheCall->getNumArgs() > 2) {
5205 Diag(TheCall->getArg(2)->getLocStart(),
5206 diag::err_typecheck_call_too_many_args)
5207 << 0 /*function call*/ << 2 << TheCall->getNumArgs()
5208 << Fn->getSourceRange()
5209 << SourceRange(TheCall->getArg(2)->getLocStart(),
5210 (*(TheCall->arg_end()-1))->getLocEnd());
5214 if (TheCall->getNumArgs() < 2) {
5215 return Diag(TheCall->getLocEnd(),
5216 diag::err_typecheck_call_too_few_args_at_least)
5217 << 0 /*function call*/ << 2 << TheCall->getNumArgs();
5220 // Type-check the first argument normally.
5221 if (checkBuiltinArgument(*this, TheCall, 0))
5224 // Check that the current function is variadic, and get its last parameter.
5225 ParmVarDecl *LastParam;
5226 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
5229 // Verify that the second argument to the builtin is the last argument of the
5230 // current function or method.
5231 bool SecondArgIsLastNamedArgument = false;
5232 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
5234 // These are valid if SecondArgIsLastNamedArgument is false after the next
5237 SourceLocation ParamLoc;
5238 bool IsCRegister = false;
5240 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
5241 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
5242 SecondArgIsLastNamedArgument = PV == LastParam;
5244 Type = PV->getType();
5245 ParamLoc = PV->getLocation();
5247 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
5251 if (!SecondArgIsLastNamedArgument)
5252 Diag(TheCall->getArg(1)->getLocStart(),
5253 diag::warn_second_arg_of_va_start_not_last_named_param);
5254 else if (IsCRegister || Type->isReferenceType() ||
5255 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
5256 // Promotable integers are UB, but enumerations need a bit of
5257 // extra checking to see what their promotable type actually is.
5258 if (!Type->isPromotableIntegerType())
5260 if (!Type->isEnumeralType())
5262 const EnumDecl *ED = Type->getAs<EnumType>()->getDecl();
5264 Context.typesAreCompatible(ED->getPromotionType(), Type));
5266 unsigned Reason = 0;
5267 if (Type->isReferenceType()) Reason = 1;
5268 else if (IsCRegister) Reason = 2;
5269 Diag(Arg->getLocStart(), diag::warn_va_start_type_is_undefined) << Reason;
5270 Diag(ParamLoc, diag::note_parameter_type) << Type;
5273 TheCall->setType(Context.VoidTy);
// Check a call to the Windows-on-ARM '__va_start' builtin. Emits diagnostics
// for arity and argument-type problems; returns true if an error diagnostic
// was issued (per the Sema convention visible in the sibling checkers).
// NOTE(review): this listing elides several original lines (the embedded line
// numbers jump); the bodies of the two guard 'if's below and the final return
// are among them — verify against the full file.
5277 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
5278 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
5279 // const char *named_addr);
5281 Expr *Func = Call->getCallee();
// At least three arguments are required before the per-argument checks below.
5283 if (Call->getNumArgs() < 3)
5284 return Diag(Call->getLocEnd(),
5285 diag::err_typecheck_call_too_few_args_at_least)
5286 << 0 /*function call*/ << 3 << Call->getNumArgs();
5288 // Type-check the first argument normally.
5289 if (checkBuiltinArgument(*this, Call, 0))
5292 // Check that the current function is variadic.
5293 if (checkVAStartIsInVariadicFunction(*this, Func))
5296 // __va_start on Windows does not validate the parameter qualifiers
// Compare canonical types so typedef sugar does not affect the checks below.
5298 const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
5299 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();
5301 const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
5302 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();
// The expected type of the 'named_addr' arguments: 'const char *'.
5304 const QualType &ConstCharPtrTy =
5305 Context.getPointerType(Context.CharTy.withConst());
// Argument 1 must be a pointer whose pointee is 'char' (fast qualifiers
// stripped before comparison).
5306 if (!Arg1Ty->isPointerType() ||
5307 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy)
5308 Diag(Arg1->getLocStart(), diag::err_typecheck_convert_incompatible)
5309 << Arg1->getType() << ConstCharPtrTy
5310 << 1 /* different class */
5311 << 0 /* qualifier difference */
5312 << 3 /* parameter mismatch */
5313 << 2 << Arg1->getType() << ConstCharPtrTy;
// Argument 2 (slot_size) must have type 'size_t'.
5315 const QualType SizeTy = Context.getSizeType();
5316 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
5317 Diag(Arg2->getLocStart(), diag::err_typecheck_convert_incompatible)
5318 << Arg2->getType() << SizeTy
5319 << 1 /* different class */
5320 << 0 /* qualifier difference */
5321 << 3 /* parameter mismatch */
5322 << 3 << Arg2->getType() << SizeTy;
5327 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
5328 /// friends. This is declared to take (...), so we have to check everything.
// Returns true if an error diagnostic was emitted.
// NOTE(review): this listing elides a few original lines (line numbers jump),
// presumably the early 'return true;'/'return false;' after the invalid- and
// dependent-argument checks — verify against the full file.
5329 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
// Exactly two arguments are required; diagnose too-few and too-many
// separately so the too-many case can point at the first extra argument.
5330 if (TheCall->getNumArgs() < 2)
5331 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
5332 << 0 << 2 << TheCall->getNumArgs()/*function call*/;
5333 if (TheCall->getNumArgs() > 2)
5334 return Diag(TheCall->getArg(2)->getLocStart(),
5335 diag::err_typecheck_call_too_many_args)
5336 << 0 /*function call*/ << 2 << TheCall->getNumArgs()
5337 << SourceRange(TheCall->getArg(2)->getLocStart(),
5338 (*(TheCall->arg_end()-1))->getLocEnd());
5340 ExprResult OrigArg0 = TheCall->getArg(0);
5341 ExprResult OrigArg1 = TheCall->getArg(1);
5343 // Do standard promotions between the two arguments, returning their common
5345 QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
5346 if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
5349 // Make sure any conversions are pushed back into the call; this is
5350 // type safe since unordered compare builtins are declared as "_Bool
5352 TheCall->setArg(0, OrigArg0.get());
5353 TheCall->setArg(1, OrigArg1.get());
// Dependent operands cannot be checked until instantiation.
5355 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
5358 // If the common type isn't a real floating type, then the arguments were
5359 // invalid for this operation.
5360 if (Res.isNull() || !Res->isRealFloatingType())
5361 return Diag(OrigArg0.get()->getLocStart(),
5362 diag::err_typecheck_call_invalid_ordered_compare)
5363 << OrigArg0.get()->getType() << OrigArg1.get()->getType()
5364 << SourceRange(OrigArg0.get()->getLocStart(), OrigArg1.get()->getLocEnd());
5369 /// SemaBuiltinSemaBuiltinFPClassification - Handle functions like
5370 /// __builtin_isnan and friends. This is declared to take (...), so we have
5371 /// to check everything. We expect the last argument to be a floating point
// value. NumArgs is the expected argument count. Returns true on error.
// NOTE(review): this listing elides some original lines (line numbers jump),
// including what is presumably the 'assert(' opening the static check at
// 5402 and the early returns — verify against the full file.
5373 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
// Enforce the exact arity first.
5374 if (TheCall->getNumArgs() < NumArgs)
5375 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
5376 << 0 << NumArgs << TheCall->getNumArgs()/*function call*/;
5377 if (TheCall->getNumArgs() > NumArgs)
5378 return Diag(TheCall->getArg(NumArgs)->getLocStart(),
5379 diag::err_typecheck_call_too_many_args)
5380 << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
5381 << SourceRange(TheCall->getArg(NumArgs)->getLocStart(),
5382 (*(TheCall->arg_end()-1))->getLocEnd());
// The classified operand is the last argument.
5384 Expr *OrigArg = TheCall->getArg(NumArgs-1);
5386 if (OrigArg->isTypeDependent())
5389 // This operation requires a non-_Complex floating-point number.
5390 if (!OrigArg->getType()->isRealFloatingType())
5391 return Diag(OrigArg->getLocStart(),
5392 diag::err_typecheck_call_invalid_unary_fp)
5393 << OrigArg->getType() << OrigArg->getSourceRange();
5395 // If this is an implicit conversion from float -> float, double, or
5396 // long double, remove it.
5397 if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
5398 // Only remove standard FloatCasts, leaving other casts inplace
5399 if (Cast->getCastKind() == CK_FloatingCast) {
5400 Expr *CastArg = Cast->getSubExpr();
5401 if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
5403 (Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) ||
5404 Cast->getType()->isSpecificBuiltinType(BuiltinType::Float) ||
5405 Cast->getType()->isSpecificBuiltinType(BuiltinType::LongDouble)) &&
5406 "promotion from float to either float, double, or long double is "
5407 "the only expected cast here");
// Detach the sub-expression before re-using it as the call argument so the
// cast node no longer owns it.
5408 Cast->setSubExpr(nullptr);
5409 TheCall->setArg(NumArgs-1, CastArg);
5417 // Customized Sema Checking for VSX builtins that have the following signature:
5418 // vector [...] builtinName(vector [...], vector [...], const int);
5419 // Which takes the same type of vectors (any legal vector type) for the first
5420 // two arguments and takes compile time constant for the third argument.
5421 // Example builtins are :
5422 // vector double vec_xxpermdi(vector double, vector double, int);
5423 // vector short vec_xxsldwi(vector short, vector short, int);
// Returns true if an error diagnostic was emitted.
// NOTE(review): this listing elides a few original lines (line numbers jump),
// including the declaration of 'Value' (presumably 'llvm::APSInt Value;' at
// the elided line 5439) — verify against the full file.
5424 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
// Exactly three arguments are expected.
5425 unsigned ExpectedNumArgs = 3;
5426 if (TheCall->getNumArgs() < ExpectedNumArgs)
5427 return Diag(TheCall->getLocEnd(),
5428 diag::err_typecheck_call_too_few_args_at_least)
5429 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
5430 << TheCall->getSourceRange();
5432 if (TheCall->getNumArgs() > ExpectedNumArgs)
5433 return Diag(TheCall->getLocEnd(),
5434 diag::err_typecheck_call_too_many_args_at_most)
5435 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
5436 << TheCall->getSourceRange();
5438 // Check the third argument is a compile time constant
5440 if(!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context))
5441 return Diag(TheCall->getLocStart(),
5442 diag::err_vsx_builtin_nonconstant_argument)
5443 << 3 /* argument index */ << TheCall->getDirectCallee()
5444 << SourceRange(TheCall->getArg(2)->getLocStart(),
5445 TheCall->getArg(2)->getLocEnd());
5447 QualType Arg1Ty = TheCall->getArg(0)->getType();
5448 QualType Arg2Ty = TheCall->getArg(1)->getType();
5450 // Check the type of argument 1 and argument 2 are vectors.
// Dependent types are exempt: they can only be checked at instantiation.
5451 SourceLocation BuiltinLoc = TheCall->getLocStart();
5452 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
5453 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
5454 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
5455 << TheCall->getDirectCallee()
5456 << SourceRange(TheCall->getArg(0)->getLocStart(),
5457 TheCall->getArg(1)->getLocEnd());
5460 // Check the first two arguments are the same type.
5461 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
5462 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
5463 << TheCall->getDirectCallee()
5464 << SourceRange(TheCall->getArg(0)->getLocStart(),
5465 TheCall->getArg(1)->getLocEnd());
5468 // When default clang type checking is turned off and the customized type
5469 // checking is used, the returning type of the function must be explicitly
5470 // set. Otherwise it is _Bool by default.
5471 TheCall->setType(Arg1Ty);
5476 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
5477 // This is declared to take (...), so we have to check everything.
// On success, rewrites the call into a ShuffleVectorExpr owning the original
// argument expressions; on error, returns ExprError after diagnosing.
// NOTE(review): this listing elides some original lines (line numbers jump,
// e.g. loop 'continue's and closing braces) — verify against the full file.
5478 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
5479 if (TheCall->getNumArgs() < 2)
5480 return ExprError(Diag(TheCall->getLocEnd(),
5481 diag::err_typecheck_call_too_few_args_at_least)
5482 << 0 /*function call*/ << 2 << TheCall->getNumArgs()
5483 << TheCall->getSourceRange());
5485 // Determine which of the following types of shufflevector we're checking:
5486 // 1) unary, vector mask: (lhs, mask)
5487 // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
5488 QualType resType = TheCall->getArg(0)->getType();
5489 unsigned numElements = 0;
// Only non-dependent operands can be type-checked here.
5491 if (!TheCall->getArg(0)->isTypeDependent() &&
5492 !TheCall->getArg(1)->isTypeDependent()) {
5493 QualType LHSType = TheCall->getArg(0)->getType();
5494 QualType RHSType = TheCall->getArg(1)->getType();
5496 if (!LHSType->isVectorType() || !RHSType->isVectorType())
5497 return ExprError(Diag(TheCall->getLocStart(),
5498 diag::err_vec_builtin_non_vector)
5499 << TheCall->getDirectCallee()
5500 << SourceRange(TheCall->getArg(0)->getLocStart(),
5501 TheCall->getArg(1)->getLocEnd()));
5503 numElements = LHSType->getAs<VectorType>()->getNumElements();
// Arguments beyond the first two are scalar mask indices.
5504 unsigned numResElements = TheCall->getNumArgs() - 2;
5506 // Check to see if we have a call with 2 vector arguments, the unary shuffle
5507 // with mask. If so, verify that RHS is an integer vector type with the
5508 // same number of elts as lhs.
5509 if (TheCall->getNumArgs() == 2) {
5510 if (!RHSType->hasIntegerRepresentation() ||
5511 RHSType->getAs<VectorType>()->getNumElements() != numElements)
5512 return ExprError(Diag(TheCall->getLocStart(),
5513 diag::err_vec_builtin_incompatible_vector)
5514 << TheCall->getDirectCallee()
5515 << SourceRange(TheCall->getArg(1)->getLocStart(),
5516 TheCall->getArg(1)->getLocEnd()));
5517 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
5518 return ExprError(Diag(TheCall->getLocStart(),
5519 diag::err_vec_builtin_incompatible_vector)
5520 << TheCall->getDirectCallee()
5521 << SourceRange(TheCall->getArg(0)->getLocStart(),
5522 TheCall->getArg(1)->getLocEnd()));
5523 } else if (numElements != numResElements) {
// Mask count differs from input width: the result is a generic vector of
// the element type, sized by the number of mask indices.
5524 QualType eltType = LHSType->getAs<VectorType>()->getElementType();
5525 resType = Context.getVectorType(eltType, numResElements,
5526 VectorType::GenericVector);
// Validate each mask index: must be a constant integer, -1 (undef), or
// less than twice the input element count.
5530 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
5531 if (TheCall->getArg(i)->isTypeDependent() ||
5532 TheCall->getArg(i)->isValueDependent())
5535 llvm::APSInt Result(32);
5536 if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context))
5537 return ExprError(Diag(TheCall->getLocStart(),
5538 diag::err_shufflevector_nonconstant_argument)
5539 << TheCall->getArg(i)->getSourceRange());
5541 // Allow -1 which will be translated to undef in the IR.
5542 if (Result.isSigned() && Result.isAllOnesValue())
5545 if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2)
5546 return ExprError(Diag(TheCall->getLocStart(),
5547 diag::err_shufflevector_argument_too_large)
5548 << TheCall->getArg(i)->getSourceRange());
// Transfer ownership of all arguments from the CallExpr to the new
// ShuffleVectorExpr (args are nulled out in the call).
5551 SmallVector<Expr*, 32> exprs;
5553 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
5554 exprs.push_back(TheCall->getArg(i));
5555 TheCall->setArg(i, nullptr);
5558 return new (Context) ShuffleVectorExpr(Context, exprs, resType,
5559 TheCall->getCallee()->getLocStart(),
5560 TheCall->getRParenLoc());
5563 /// SemaConvertVectorExpr - Handle __builtin_convertvector
// Checks that the source expression and destination type are both vectors
// with the same element count, then builds a ConvertVectorExpr.
5564 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
5565 SourceLocation BuiltinLoc,
5566 SourceLocation RParenLoc) {
5567 ExprValueKind VK = VK_RValue;
5568 ExprObjectKind OK = OK_Ordinary;
5569 QualType DstTy = TInfo->getType();
5570 QualType SrcTy = E->getType();
// Dependent types are exempt from the vector checks until instantiation.
5572 if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
5573 return ExprError(Diag(BuiltinLoc,
5574 diag::err_convertvector_non_vector)
5575 << E->getSourceRange());
5576 if (!DstTy->isVectorType() && !DstTy->isDependentType())
5577 return ExprError(Diag(BuiltinLoc,
5578 diag::err_convertvector_non_vector_type));
// Element counts must match when both sides are concrete.
5580 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
5581 unsigned SrcElts = SrcTy->getAs<VectorType>()->getNumElements();
5582 unsigned DstElts = DstTy->getAs<VectorType>()->getNumElements();
5583 if (SrcElts != DstElts)
5584 return ExprError(Diag(BuiltinLoc,
5585 diag::err_convertvector_incompatible_vector)
5586 << E->getSourceRange());
5589 return new (Context)
5590 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
5593 /// SemaBuiltinPrefetch - Handle __builtin_prefetch.
5594 // This is declared to take (const void*, ...) and can take two
5595 // optional constant int args.
// Returns true if an error diagnostic was emitted.
// NOTE(review): the guard condition for the too-many-args diagnostic below
// (presumably 'if (NumArgs > 3)') is on a line elided from this listing —
// verify against the full file.
5596 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
5597 unsigned NumArgs = TheCall->getNumArgs();
5600 return Diag(TheCall->getLocEnd(),
5601 diag::err_typecheck_call_too_many_args_at_most)
5602 << 0 /*function call*/ << 3 << NumArgs
5603 << TheCall->getSourceRange();
5605 // Argument 0 is checked for us and the remaining arguments must be
5606 // constant integers.
// Argument 1 (rw) must be 0..1; argument 2 (locality) must be 0..3.
5607 for (unsigned i = 1; i != NumArgs; ++i)
5608 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
5614 /// SemaBuiltinAssume - Handle __assume (MS Extension).
5615 // __assume does not evaluate its arguments, and should warn if its argument
5616 // has side effects.
// Returns false (the warning is non-fatal); dependent arguments are skipped.
5617 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
5618 Expr *Arg = TheCall->getArg(0);
5619 if (Arg->isInstantiationDependent()) return false;
// Warn because the side effects will never happen: __assume is unevaluated.
5621 if (Arg->HasSideEffects(Context))
5622 Diag(Arg->getLocStart(), diag::warn_assume_side_effects)
5623 << Arg->getSourceRange()
5624 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
5629 /// Handle __builtin_alloca_with_align. This is declared
5630 /// as (size_t, size_t) where the second size_t must be a power of 2 greater
// than or equal to CHAR_BIT and no larger than INT32_MAX.
// Returns true if an error diagnostic was emitted.
5632 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
5633 // The alignment must be a constant integer.
5634 Expr *Arg = TheCall->getArg(1);
5636 // We can't check the value of a dependent argument.
5637 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
// Passing alignof(T) is suspicious: the user probably wanted
// __alignof__/alignment in bits-vs-bytes terms; warn but continue.
5638 if (const auto *UE =
5639 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
5640 if (UE->getKind() == UETT_AlignOf)
5641 Diag(TheCall->getLocStart(), diag::warn_alloca_align_alignof)
5642 << Arg->getSourceRange();
5644 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context);
// Alignment must be a power of two ...
5646 if (!Result.isPowerOf2())
5647 return Diag(TheCall->getLocStart(),
5648 diag::err_alignment_not_power_of_two)
5649 << Arg->getSourceRange();
// ... at least the width of a char ...
5651 if (Result < Context.getCharWidth())
5652 return Diag(TheCall->getLocStart(), diag::err_alignment_too_small)
5653 << (unsigned)Context.getCharWidth()
5654 << Arg->getSourceRange();
// ... and representable as a positive int32.
5656 if (Result > std::numeric_limits<int32_t>::max())
5657 return Diag(TheCall->getLocStart(), diag::err_alignment_too_big)
5658 << std::numeric_limits<int32_t>::max()
5659 << Arg->getSourceRange();
5665 /// Handle __builtin_assume_aligned. This is declared
5666 /// as (const void*, size_t, ...) and can take one optional constant int arg.
// Returns true if an error diagnostic was emitted.
// NOTE(review): this listing elides the guard conditions around both the
// too-many-args diagnostic and the third-argument handling (presumably
// 'if (NumArgs > 3)' and 'if (NumArgs > 2)') — verify against the full file.
5667 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
5668 unsigned NumArgs = TheCall->getNumArgs();
5671 return Diag(TheCall->getLocEnd(),
5672 diag::err_typecheck_call_too_many_args_at_most)
5673 << 0 /*function call*/ << 3 << NumArgs
5674 << TheCall->getSourceRange();
5676 // The alignment must be a constant integer.
5677 Expr *Arg = TheCall->getArg(1);
5679 // We can't check the value of a dependent argument.
5680 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
5681 llvm::APSInt Result;
5682 if (SemaBuiltinConstantArg(TheCall, 1, Result))
5685 if (!Result.isPowerOf2())
5686 return Diag(TheCall->getLocStart(),
5687 diag::err_alignment_not_power_of_two)
5688 << Arg->getSourceRange();
// The optional offset argument is converted to size_t as if passed to a
// parameter of that type.
5692 ExprResult Arg(TheCall->getArg(2));
5693 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
5694 Context.getSizeType(), false);
5695 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
5696 if (Arg.isInvalid()) return true;
5697 TheCall->setArg(2, Arg.get());
// Check __builtin_os_log_format / __builtin_os_log_format_buffer_size.
// Validates arity, converts the buffer and format arguments, promotes the
// variadic data arguments, checks format specifiers, and sets the call's
// result type (size_t for the size variant, void* otherwise).
// NOTE(review): this listing elides some original lines (line numbers jump),
// including the declaration/advancement of the running argument index 'i'
// used below — verify against the full file.
5703 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
5704 unsigned BuiltinID =
5705 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
// The buffer-size variant takes only the format string; the format variant
// additionally takes the output buffer as its first argument.
5706 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;
5708 unsigned NumArgs = TheCall->getNumArgs();
5709 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
5710 if (NumArgs < NumRequiredArgs) {
5711 return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
5712 << 0 /* function call */ << NumRequiredArgs << NumArgs
5713 << TheCall->getSourceRange();
// Cap the variadic argument count at 255 extra arguments.
5715 if (NumArgs >= NumRequiredArgs + 0x100) {
5716 return Diag(TheCall->getLocEnd(),
5717 diag::err_typecheck_call_too_many_args_at_most)
5718 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
5719 << TheCall->getSourceRange();
5723 // For formatting call, check buffer arg.
5725 ExprResult Arg(TheCall->getArg(i));
5726 InitializedEntity Entity = InitializedEntity::InitializeParameter(
5727 Context, Context.VoidPtrTy, false);
5728 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
5729 if (Arg.isInvalid())
5731 TheCall->setArg(i, Arg.get());
5735 // Check string literal arg.
5736 unsigned FormatIdx = i;
5738 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
5739 if (Arg.isInvalid())
5741 TheCall->setArg(i, Arg.get());
5745 // Make sure variadic args are scalar.
5746 unsigned FirstDataArg = i;
5747 while (i < NumArgs) {
5748 ExprResult Arg = DefaultVariadicArgumentPromotion(
5749 TheCall->getArg(i), VariadicFunction, nullptr);
5750 if (Arg.isInvalid())
// Each promoted data argument must fit in under 256 bytes.
5752 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
5753 if (ArgSize.getQuantity() >= 0x100) {
5754 return Diag(Arg.get()->getLocEnd(), diag::err_os_log_argument_too_big)
5755 << i << (int)ArgSize.getQuantity() << 0xff
5756 << TheCall->getSourceRange();
5758 TheCall->setArg(i, Arg.get());
5762 // Check formatting specifiers. NOTE: We're only doing this for the non-size
5763 // call to avoid duplicate diagnostics.
5765 llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
5766 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
5767 bool Success = CheckFormatArguments(
5768 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
5769 VariadicFunction, TheCall->getLocStart(), SourceRange(),
// Result type depends on the variant: the size call yields size_t, the
// format call yields the buffer pointer.
5776 TheCall->setType(Context.getSizeType());
5778 TheCall->setType(Context.VoidPtrTy);
5783 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
5784 /// TheCall is a constant expression.
// On success, Result holds the evaluated value. Returns true if an error
// diagnostic was emitted; dependent arguments are accepted without a value.
5785 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
5786 llvm::APSInt &Result) {
5787 Expr *Arg = TheCall->getArg(ArgNum);
// The callee is named in the diagnostic, so dig the FunctionDecl out of the
// callee expression.
5788 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
5789 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
5791 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
5793 if (!Arg->isIntegerConstantExpr(Result, Context))
5794 return Diag(TheCall->getLocStart(), diag::err_constant_integer_arg_type)
5795 << FDecl->getDeclName() << Arg->getSourceRange();
5800 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
5801 /// TheCall is a constant expression in the range [Low, High].
// If RangeIsError is true an out-of-range value is a hard error; otherwise it
// is a runtime-behavior warning (deferred so dead code can suppress it).
5802 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
5803 int Low, int High, bool RangeIsError) {
5804 llvm::APSInt Result;
5806 // We can't check the value of a dependent argument.
5807 Expr *Arg = TheCall->getArg(ArgNum);
5808 if (Arg->isTypeDependent() || Arg->isValueDependent())
5811 // Check constant-ness first.
5812 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
5815 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
5817 return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
5818 << Result.toString(10) << Low << High << Arg->getSourceRange();
5820 // Defer the warning until we know if the code will be emitted so that
5821 // dead code can ignore this.
5822 DiagRuntimeBehavior(TheCall->getLocStart(), TheCall,
5823 PDiag(diag::warn_argument_invalid_range)
5824 << Result.toString(10) << Low << High
5825 << Arg->getSourceRange());
5831 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
5832 /// TheCall is a constant expression is a multiple of Num..
// Returns true if an error diagnostic was emitted.
// NOTE(review): the line closing the parameter list (presumably 'int Num) {')
// is elided from this listing — verify against the full file.
5833 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
5835 llvm::APSInt Result;
5837 // We can't check the value of a dependent argument.
5838 Expr *Arg = TheCall->getArg(ArgNum);
5839 if (Arg->isTypeDependent() || Arg->isValueDependent())
5842 // Check constant-ness first.
5843 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
5846 if (Result.getSExtValue() % Num != 0)
5847 return Diag(TheCall->getLocStart(), diag::err_argument_not_multiple)
5848 << Num << Arg->getSourceRange();
5853 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
5854 /// TheCall is an ARM/AArch64 special register string literal.
// ExpectedFieldNum is how many colon-separated fields an encoded register
// string must have; AllowName additionally permits a bare register name.
// Returns true if an error diagnostic was emitted.
// NOTE(review): this listing elides several original lines (line numbers
// jump), including the final parameter (presumably 'bool AllowName) {') and
// assorted guards/returns — verify against the full file.
5855 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
5856 int ArgNum, unsigned ExpectedFieldNum,
// Classify which architecture's rsr/wsr family the builtin belongs to.
5858 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
5859 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
5860 BuiltinID == ARM::BI__builtin_arm_rsr ||
5861 BuiltinID == ARM::BI__builtin_arm_rsrp ||
5862 BuiltinID == ARM::BI__builtin_arm_wsr ||
5863 BuiltinID == ARM::BI__builtin_arm_wsrp;
5864 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
5865 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
5866 BuiltinID == AArch64::BI__builtin_arm_rsr ||
5867 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
5868 BuiltinID == AArch64::BI__builtin_arm_wsr ||
5869 BuiltinID == AArch64::BI__builtin_arm_wsrp;
5870 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.")
5872 // We can't check the value of a dependent argument.
5873 Expr *Arg = TheCall->getArg(ArgNum);
5874 if (Arg->isTypeDependent() || Arg->isValueDependent())
5877 // Check if the argument is a string literal.
5878 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
5879 return Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
5880 << Arg->getSourceRange();
5882 // Check the type of special register given.
5883 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
5884 SmallVector<StringRef, 6> Fields;
5885 Reg.split(Fields, ":");
5887 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
5888 return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg)
5889 << Arg->getSourceRange();
5891 // If the string is the name of a register then we cannot check that it is
5892 // valid here but if the string is of one the forms described in ACLE then we
5893 // can check that the supplied fields are integers and within the valid
// range for each field.
5895 if (Fields.size() > 1) {
5896 bool FiveFields = Fields.size() == 5;
5898 bool ValidString = true;
// Field 0 is the coprocessor designator: "cp<n>" or "p<n>".
5900 ValidString &= Fields[0].startswith_lower("cp") ||
5901 Fields[0].startswith_lower("p");
5904 Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1);
// Fields 2 and 3 are coprocessor register designators: "c<n>".
5906 ValidString &= Fields[2].startswith_lower("c");
5908 Fields[2] = Fields[2].drop_front(1);
5911 ValidString &= Fields[3].startswith_lower("c");
5913 Fields[3] = Fields[3].drop_front(1);
// Per-field maximum values; each field must parse as a decimal integer in
// [0, Ranges[i]].
5917 SmallVector<int, 5> Ranges;
5919 Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
5921 Ranges.append({15, 7, 15});
5923 for (unsigned i=0; i<Fields.size(); ++i) {
5925 ValidString &= !Fields[i].getAsInteger(10, IntField);
5926 ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
5930 return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg)
5931 << Arg->getSourceRange();
5932 } else if (IsAArch64Builtin && Fields.size() == 1) {
5933 // If the register name is one of those that appear in the condition below
5934 // and the special register builtin being used is one of the write builtins,
5935 // then we require that the argument provided for writing to the register
5936 // is an integer constant expression. This is because it will be lowered to
5937 // an MSR (immediate) instruction, so we need to know the immediate at
// compile time.
5939 if (TheCall->getNumArgs() != 2)
5942 std::string RegLower = Reg.lower();
5943 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" &&
5944 RegLower != "pan" && RegLower != "uao")
// MSR (immediate) takes a 4-bit immediate, hence the [0, 15] range.
5947 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
5953 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
5954 /// This checks that the target supports __builtin_longjmp and
5955 /// that val is a constant 1.
// Returns true if an error diagnostic was emitted.
// NOTE(review): the condition guarding the invalid-val diagnostic below
// (presumably 'if (Result != 1)') is on a line elided from this listing —
// verify against the full file.
5956 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
// Reject outright on targets without setjmp/longjmp lowering.
5957 if (!Context.getTargetInfo().hasSjLjLowering())
5958 return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_unsupported)
5959 << SourceRange(TheCall->getLocStart(), TheCall->getLocEnd());
5961 Expr *Arg = TheCall->getArg(1);
5962 llvm::APSInt Result;
5964 // TODO: This is less than ideal. Overload this to take a value.
5965 if (SemaBuiltinConstantArg(TheCall, 1, Result))
5969 return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_invalid_val)
5970 << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
5975 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
5976 /// This checks that the target supports __builtin_setjmp.
// Returns true if an error diagnostic was emitted.
5977 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
5978 if (!Context.getTargetInfo().hasSjLjLowering())
5979 return Diag(TheCall->getLocStart(), diag::err_builtin_setjmp_unsupported)
5980 << SourceRange(TheCall->getLocStart(), TheCall->getLocEnd());
// Accumulates, across all candidate format strings for one call, the highest
// data-argument index left uncovered by any format string, together with the
// format-string expressions that leave it uncovered. Used to emit a single
// -Wformat warning instead of one per candidate string.
5986 class UncoveredArgHandler {
// Sentinels for FirstUncoveredArg: no information yet, or some string
// covered every argument (which suppresses all uncovered-arg reports).
5987 enum { Unknown = -1, AllCovered = -2 };
5989 signed FirstUncoveredArg = Unknown;
5990 SmallVector<const Expr *, 4> DiagnosticExprs;
5993 UncoveredArgHandler() = default;
5995 bool hasUncoveredArg() const {
5996 return (FirstUncoveredArg >= 0);
5999 unsigned getUncoveredArg() const {
6000 assert(hasUncoveredArg() && "no uncovered argument");
6001 return FirstUncoveredArg;
6004 void setAllCovered() {
6005 // A string has been found with all arguments covered, so clear out
// the accumulated state: nothing should be reported.
6007 DiagnosticExprs.clear();
6008 FirstUncoveredArg = AllCovered;
// Record that StrExpr leaves NewFirstUncoveredArg (and everything after it)
// uncovered; keeps only the strings matching the highest such index.
6011 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
6012 assert(NewFirstUncoveredArg >= 0 && "Outside range");
6014 // Don't update if a previous string covers all arguments.
6015 if (FirstUncoveredArg == AllCovered)
6018 // UncoveredArgHandler tracks the highest uncovered argument index
6019 // and with it all the strings that match this index.
6020 if (NewFirstUncoveredArg == FirstUncoveredArg)
6021 DiagnosticExprs.push_back(StrExpr)
6022 else if (NewFirstUncoveredArg > FirstUncoveredArg) {
6023 DiagnosticExprs.clear();
6024 DiagnosticExprs.push_back(StrExpr);
6025 FirstUncoveredArg = NewFirstUncoveredArg;
// Emit the accumulated diagnostic(s); defined out of line.
6029 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
// Result of checkFormatStringExpr: how thoroughly a candidate format-string
// expression could be validated.
// NOTE(review): this listing elides some of the enumerators (line numbers
// jump; presumably SLCT_NotALiteral and a checked-literal value) — verify
// against the full file.
6032 enum StringLiteralCheckType {
6034 SLCT_UncheckedLiteral,
// Accumulate Addend into Offset (in place) for BO_Add, or subtract for BO_Sub
// with the addend on the right, widening both APSInts as needed so the
// arithmetic cannot silently wrap. On signed overflow the operation is
// retried at double the bit width via recursion.
// NOTE(review): this listing elides some original lines (line numbers jump),
// e.g. the declaration of the overflow flag 'Ov' and the non-overflow commit
// path — verify against the full file.
6040 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
6041 BinaryOperatorKind BinOpKind,
6042 bool AddendIsRight) {
6043 unsigned BitWidth = Offset.getBitWidth();
6044 unsigned AddendBitWidth = Addend.getBitWidth();
6045 // There might be negative interim results.
6046 if (Addend.isUnsigned()) {
// Widen by one bit before flipping to signed so the value is preserved.
6047 Addend = Addend.zext(++AddendBitWidth);
6048 Addend.setIsSigned(true);
6050 // Adjust the bit width of the APSInts.
6051 if (AddendBitWidth > BitWidth) {
6052 Offset = Offset.sext(AddendBitWidth);
6053 BitWidth = AddendBitWidth;
6054 } else if (BitWidth > AddendBitWidth) {
6055 Addend = Addend.sext(BitWidth);
// Perform the signed operation with overflow detection (sadd_ov/ssub_ov
// set Ov on signed overflow).
6059 llvm::APSInt ResOffset = Offset;
6060 if (BinOpKind == BO_Add)
6061 ResOffset = Offset.sadd_ov(Addend, Ov);
6063 assert(AddendIsRight && BinOpKind == BO_Sub &&
6064 "operator must be add or sub with addend on the right");
6065 ResOffset = Offset.ssub_ov(Addend, Ov);
6068 // We add an offset to a pointer here so we should support an offset as big as
// the widening below allows; guard against pathological growth.
6071 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
6072 "index (intermediate) result too big");
// On overflow: double the width and retry.
6073 Offset = Offset.sext(2 * BitWidth);
6074 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
6083 // This is a wrapper class around StringLiteral to support offsetted string
6084 // literals as format strings. It takes the offset into account when returning
6085 // the string and its length or the source locations to display notes correctly.
6086 class FormatStringLiteral {
// The wrapped literal plus a character offset into it. (Offset's declaration
// line is elided from this listing; the constructor below initializes it.)
6087 const StringLiteral *FExpr;
6091 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
6092 : FExpr(fexpr), Offset(Offset) {}
// The literal's text with the first Offset characters dropped.
6094 StringRef getString() const {
6095 return FExpr->getString().drop_front(Offset);
// Byte length, discounting the skipped prefix (scaled by char width).
6098 unsigned getByteLength() const {
6099 return FExpr->getByteLength() - getCharByteWidth() * Offset;
6102 unsigned getLength() const { return FExpr->getLength() - Offset; }
6103 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }
// The remaining accessors simply forward to the underlying StringLiteral.
6105 StringLiteral::StringKind getKind() const { return FExpr->getKind(); }
6107 QualType getType() const { return FExpr->getType(); }
6109 bool isAscii() const { return FExpr->isAscii(); }
6110 bool isWide() const { return FExpr->isWide(); }
6111 bool isUTF8() const { return FExpr->isUTF8(); }
6112 bool isUTF16() const { return FExpr->isUTF16(); }
6113 bool isUTF32() const { return FExpr->isUTF32(); }
6114 bool isPascal() const { return FExpr->isPascal(); }
// Source-location queries are shifted by Offset so diagnostics point at the
// right character of the original literal.
6116 SourceLocation getLocationOfByte(
6117 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
6118 const TargetInfo &Target, unsigned *StartToken = nullptr,
6119 unsigned *StartTokenByteOffset = nullptr) const {
6120 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
6121 StartToken, StartTokenByteOffset);
6124 SourceLocation getLocStart() const LLVM_READONLY {
6125 return FExpr->getLocStart().getLocWithOffset(Offset);
6128 SourceLocation getLocEnd() const LLVM_READONLY { return FExpr->getLocEnd(); }
// Forward declaration: validates one (possibly offset) format string literal
// against the call's arguments. Defined later in this file; used by
// checkFormatStringExpr below.
6133 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
6134 const Expr *OrigFormatExpr,
6135 ArrayRef<const Expr *> Args,
6136 bool HasVAListArg, unsigned format_idx,
6137 unsigned firstDataArg,
6138 Sema::FormatStringType Type,
6139 bool inFunctionCall,
6140 Sema::VariadicCallType CallType,
6141 llvm::SmallBitVector &CheckedVarArgs,
6142 UncoveredArgHandler &UncoveredArg);
6144 // Determine if an expression is a string literal or constant string.
6145 // If this function returns false on the arguments to a function expecting a
6146 // format string, we will usually need to emit a warning.
6147 // True string literals are then checked by CheckFormatString.
6148 static StringLiteralCheckType
6149 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
6150 bool HasVAListArg, unsigned format_idx,
6151 unsigned firstDataArg, Sema::FormatStringType Type,
6152 Sema::VariadicCallType CallType, bool InFunctionCall,
6153 llvm::SmallBitVector &CheckedVarArgs,
6154 UncoveredArgHandler &UncoveredArg,
6155 llvm::APSInt Offset) {
6157 assert(Offset.isSigned() && "invalid offset");
6159 if (E->isTypeDependent() || E->isValueDependent())
6160 return SLCT_NotALiteral;
6162 E = E->IgnoreParenCasts();
6164 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
6165 // Technically -Wformat-nonliteral does not warn about this case.
6166 // The behavior of printf and friends in this case is implementation
6167 // dependent. Ideally if the format string cannot be null then
6168 // it should have a 'nonnull' attribute in the function prototype.
6169 return SLCT_UncheckedLiteral;
6171 switch (E->getStmtClass()) {
6172 case Stmt::BinaryConditionalOperatorClass:
6173 case Stmt::ConditionalOperatorClass: {
6174 // The expression is a literal if both sub-expressions were, and it was
6175 // completely checked only if both sub-expressions were checked.
6176 const AbstractConditionalOperator *C =
6177 cast<AbstractConditionalOperator>(E);
6179 // Determine whether it is necessary to check both sub-expressions, for
6180 // example, because the condition expression is a constant that can be
6181 // evaluated at compile time.
6182 bool CheckLeft = true, CheckRight = true;
6185 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext())) {
6192 // We need to maintain the offsets for the right and the left hand side
6193 // separately to check if every possible indexed expression is a valid
6194 // string literal. They might have different offsets for different string
6195 // literals in the end.
6196 StringLiteralCheckType Left;
6198 Left = SLCT_UncheckedLiteral;
6200 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args,
6201 HasVAListArg, format_idx, firstDataArg,
6202 Type, CallType, InFunctionCall,
6203 CheckedVarArgs, UncoveredArg, Offset);
6204 if (Left == SLCT_NotALiteral || !CheckRight) {
6209 StringLiteralCheckType Right =
6210 checkFormatStringExpr(S, C->getFalseExpr(), Args,
6211 HasVAListArg, format_idx, firstDataArg,
6212 Type, CallType, InFunctionCall, CheckedVarArgs,
6213 UncoveredArg, Offset);
6215 return (CheckLeft && Left < Right) ? Left : Right;
6218 case Stmt::ImplicitCastExprClass:
6219 E = cast<ImplicitCastExpr>(E)->getSubExpr();
6222 case Stmt::OpaqueValueExprClass:
6223 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
6227 return SLCT_NotALiteral;
6229 case Stmt::PredefinedExprClass:
6230 // While __func__, etc., are technically not string literals, they
6231 // cannot contain format specifiers and thus are not a security
6233 return SLCT_UncheckedLiteral;
6235 case Stmt::DeclRefExprClass: {
6236 const DeclRefExpr *DR = cast<DeclRefExpr>(E);
6238 // As an exception, do not flag errors for variables binding to
6239 // const string literals.
6240 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
6241 bool isConstant = false;
6242 QualType T = DR->getType();
6244 if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
6245 isConstant = AT->getElementType().isConstant(S.Context);
6246 } else if (const PointerType *PT = T->getAs<PointerType>()) {
6247 isConstant = T.isConstant(S.Context) &&
6248 PT->getPointeeType().isConstant(S.Context);
6249 } else if (T->isObjCObjectPointerType()) {
6250 // In ObjC, there is usually no "const ObjectPointer" type,
6251 // so don't check if the pointee type is constant.
6252 isConstant = T.isConstant(S.Context);
6256 if (const Expr *Init = VD->getAnyInitializer()) {
6257 // Look through initializers like const char c[] = { "foo" }
6258 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
6259 if (InitList->isStringLiteralInit())
6260 Init = InitList->getInit(0)->IgnoreParenImpCasts();
6262 return checkFormatStringExpr(S, Init, Args,
6263 HasVAListArg, format_idx,
6264 firstDataArg, Type, CallType,
6265 /*InFunctionCall*/ false, CheckedVarArgs,
6266 UncoveredArg, Offset);
6270 // For vprintf* functions (i.e., HasVAListArg==true), we add a
6271 // special check to see if the format string is a function parameter
6272 // of the function calling the printf function. If the function
6273 // has an attribute indicating it is a printf-like function, then we
6274 // should suppress warnings concerning non-literals being used in a call
6275 // to a vprintf function. For example:
6278 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){
6280 // va_start(ap, fmt);
6281 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt".
6285 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) {
6286 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) {
6287 int PVIndex = PV->getFunctionScopeIndex() + 1;
6288 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) {
6289 // adjust for implicit parameter
6290 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
6291 if (MD->isInstance())
6293 // We also check if the formats are compatible.
6294 // We can't pass a 'scanf' string to a 'printf' function.
6295 if (PVIndex == PVFormat->getFormatIdx() &&
6296 Type == S.GetFormatStringType(PVFormat))
6297 return SLCT_UncheckedLiteral;
6304 return SLCT_NotALiteral;
6307 case Stmt::CallExprClass:
6308 case Stmt::CXXMemberCallExprClass: {
6309 const CallExpr *CE = cast<CallExpr>(E);
6310 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
6311 bool IsFirst = true;
6312 StringLiteralCheckType CommonResult;
6313 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
6314 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
6315 StringLiteralCheckType Result = checkFormatStringExpr(
6316 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
6317 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset);
6319 CommonResult = Result;
6324 return CommonResult;
6326 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
6327 unsigned BuiltinID = FD->getBuiltinID();
6328 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
6329 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
6330 const Expr *Arg = CE->getArg(0);
6331 return checkFormatStringExpr(S, Arg, Args,
6332 HasVAListArg, format_idx,
6333 firstDataArg, Type, CallType,
6334 InFunctionCall, CheckedVarArgs,
6335 UncoveredArg, Offset);
6340 return SLCT_NotALiteral;
6342 case Stmt::ObjCMessageExprClass: {
6343 const auto *ME = cast<ObjCMessageExpr>(E);
6344 if (const auto *ND = ME->getMethodDecl()) {
6345 if (const auto *FA = ND->getAttr<FormatArgAttr>()) {
6346 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
6347 return checkFormatStringExpr(
6348 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
6349 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset);
6353 return SLCT_NotALiteral;
6355 case Stmt::ObjCStringLiteralClass:
6356 case Stmt::StringLiteralClass: {
6357 const StringLiteral *StrE = nullptr;
6359 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
6360 StrE = ObjCFExpr->getString();
6362 StrE = cast<StringLiteral>(E);
6365 if (Offset.isNegative() || Offset > StrE->getLength()) {
6366 // TODO: It would be better to have an explicit warning for out of
6368 return SLCT_NotALiteral;
6370 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
6371 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
6372 firstDataArg, Type, InFunctionCall, CallType,
6373 CheckedVarArgs, UncoveredArg);
6374 return SLCT_CheckedLiteral;
6377 return SLCT_NotALiteral;
6379 case Stmt::BinaryOperatorClass: {
6380 llvm::APSInt LResult;
6381 llvm::APSInt RResult;
6383 const BinaryOperator *BinOp = cast<BinaryOperator>(E);
6385 // A string literal + an int offset is still a string literal.
6386 if (BinOp->isAdditiveOp()) {
6387 bool LIsInt = BinOp->getLHS()->EvaluateAsInt(LResult, S.Context);
6388 bool RIsInt = BinOp->getRHS()->EvaluateAsInt(RResult, S.Context);
6390 if (LIsInt != RIsInt) {
6391 BinaryOperatorKind BinOpKind = BinOp->getOpcode();
6394 if (BinOpKind == BO_Add) {
6395 sumOffsets(Offset, LResult, BinOpKind, RIsInt);
6396 E = BinOp->getRHS();
6400 sumOffsets(Offset, RResult, BinOpKind, RIsInt);
6401 E = BinOp->getLHS();
6407 return SLCT_NotALiteral;
6409 case Stmt::UnaryOperatorClass: {
6410 const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
6411 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
6412 if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
6413 llvm::APSInt IndexResult;
6414 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context)) {
6415 sumOffsets(Offset, IndexResult, BO_Add, /*RHS is int*/ true);
6421 return SLCT_NotALiteral;
6425 return SLCT_NotALiteral;
6429 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
6430 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
6431 .Case("scanf", FST_Scanf)
6432 .Cases("printf", "printf0", FST_Printf)
6433 .Cases("NSString", "CFString", FST_NSString)
6434 .Case("strftime", FST_Strftime)
6435 .Case("strfmon", FST_Strfmon)
6436 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
6437 .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
6438 .Case("os_trace", FST_OSLog)
6439 .Case("os_log", FST_OSLog)
6440 .Default(FST_Unknown);
6443 /// CheckFormatArguments - Check calls to printf and scanf (and similar
6444 /// functions) for correct use of format strings.
6445 /// Returns true if a format string has been fully checked.
6446 bool Sema::CheckFormatArguments(const FormatAttr *Format,
6447 ArrayRef<const Expr *> Args,
6449 VariadicCallType CallType,
6450 SourceLocation Loc, SourceRange Range,
6451 llvm::SmallBitVector &CheckedVarArgs) {
6452 FormatStringInfo FSI;
6453 if (getFormatStringInfo(Format, IsCXXMember, &FSI))
6454 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx,
6455 FSI.FirstDataArg, GetFormatStringType(Format),
6456 CallType, Loc, Range, CheckedVarArgs);
/// Check the format string and data arguments of a printf/scanf-style call.
///
/// Returns true only when a literal format string was found and completely
/// checked; otherwise the appropriate -Wformat-security /
/// -Wformat-nonliteral diagnostics are emitted.
bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
                                bool HasVAListArg, unsigned format_idx,
                                unsigned firstDataArg, FormatStringType Type,
                                VariadicCallType CallType,
                                SourceLocation Loc, SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // CHECK: printf/scanf-like function is called with no format string.
  if (format_idx >= Args.size()) {
    Diag(Loc, diag::warn_missing_format_string) << Range;

  // Look through parens and casts to the expression actually supplying the
  // format string.
  const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();

  // CHECK: format string is not a string literal.
  //
  // Dynamically generated format strings are difficult to
  // automatically vet at compile time.  Requiring that format strings
  // are string literals: (1) permits the checking of format strings by
  // the compiler and thereby (2) can practically remove the source of
  // many format string exploits.

  // Format string can be either ObjC string (e.g. @"%d") or
  // C string (e.g. "%d").
  // ObjC string uses the same format specifiers as C string, so we can use
  // the same format string checking logic for both ObjC and C strings.
  UncoveredArgHandler UncoveredArg;
  StringLiteralCheckType CT =
      checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg,
                            format_idx, firstDataArg, Type, CallType,
                            /*IsFunctionCall*/ true, CheckedVarArgs,
                            /*no string offset*/ llvm::APSInt(64, false) = 0);

  // Generate a diagnostic where an uncovered argument is detected.
  if (UncoveredArg.hasUncoveredArg()) {
    unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
    assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
    UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]);

  if (CT != SLCT_NotALiteral)
    // Literal format string found, check done!
    return CT == SLCT_CheckedLiteral;

  // Strftime is particular as it always uses a single 'time' argument,
  // so it is safe to pass a non-literal string.
  if (Type == FST_Strftime)

  // Do not emit diag when the string param is a macro expansion and the
  // format is either NSString or CFString. This is a hack to prevent
  // diag when using the NSLocalizedString and CFCopyLocalizedString macros
  // which are usually used in place of NS and CF string literals.
  SourceLocation FormatLoc = Args[format_idx]->getLocStart();
  if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))

  // If there are no arguments specified, warn with -Wformat-security,
  // otherwise warn only with -Wformat-nonliteral.
  if (Args.size() == firstDataArg) {
    Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
      << OrigFormatExpr->getSourceRange();
    // C-string formats: suggest passing the non-literal as data via "%s".
    case FST_FreeBSDKPrintf:
      Diag(FormatLoc, diag::note_format_security_fixit)
        << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
      // NSString formats use '%@' for object arguments.
      Diag(FormatLoc, diag::note_format_security_fixit)
        << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
  // Some data arguments were passed: the weaker -Wformat-nonliteral warning.
  Diag(FormatLoc, diag::warn_format_nonliteral)
    << OrigFormatExpr->getSourceRange();
/// Base class for the printf- and scanf-style format-string checkers.
///
/// Driven as a callback handler by the format-string parser
/// (analyze_format_string::FormatStringHandler); matches conversion
/// specifiers against the call's data arguments, records which arguments
/// were consumed, and reports problems through EmitFormatDiagnostic().
class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
  const FormatStringLiteral *FExpr;  // Format string being analyzed.
  const Expr *OrigFormatExpr;        // Format argument as written at the call.
  const Sema::FormatStringType FSType;
  const unsigned FirstDataArg;       // Index in Args of the first data arg.
  const unsigned NumDataArgs;        // Number of data arguments in the call.
  const char *Beg; // Start of format string.
  const bool HasVAListArg;           // True for v*printf-style callees.
  ArrayRef<const Expr *> Args;       // All arguments of the checked call.

  // Bit i is set once data argument i has been matched by some specifier.
  llvm::SmallBitVector CoveredArgs;
  bool usesPositionalArgs = false;   // Seen a '%n$'-style specifier yet?
  bool atFirstArg = true;            // No specifier processed yet.
  bool inFunctionCall;               // Format string written at the call site?
  Sema::VariadicCallType CallType;
  // Shared bitvector of variadic args that have already been type-checked.
  llvm::SmallBitVector &CheckedVarArgs;
  UncoveredArgHandler &UncoveredArg; // Collects "data arg not used" reports.

  CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, const char *beg, bool hasVAListArg,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType callType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
        FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
        HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx),
        inFunctionCall(inFunctionCall), CallType(callType),
        CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
    // Initially no data argument has been matched by a specifier.
    CoveredArgs.resize(numDataArgs);
    CoveredArgs.reset();

  void DoneProcessing();

  void HandleIncompleteSpecifier(const char *startSpecifier,
                                 unsigned specifierLen) override;

  void HandleInvalidLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen,

  void HandleNonStandardLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const char *startSpecifier, unsigned specifierLen);

  void HandleNonStandardConversionSpecifier(
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen);

  void HandlePosition(const char *startPos, unsigned posLen) override;

  void HandleInvalidPosition(const char *startSpecifier,
                             unsigned specifierLen,
                             analyze_format_string::PositionContext p) override;

  void HandleZeroPosition(const char *startPos, unsigned posLen) override;

  void HandleNullChar(const char *nullCharacter) override;

  // Shared diagnostic emitter; see the out-of-line definition for the full
  // parameter documentation.
  template <typename Range>
  EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
                       const PartialDiagnostic &PDiag, SourceLocation StringLoc,
                       bool IsStringLocation, Range StringRange,
                       ArrayRef<FixItHint> Fixit = None);

  bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
                                        const char *startSpec,
                                        unsigned specifierLen,
                                        const char *csStart, unsigned csLen);

  void HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                         const char *startSpec,
                                         unsigned specifierLen);

  // Helpers for mapping format-string bytes back to source locations.
  SourceRange getFormatStringRange();
  CharSourceRange getSpecifierRange(const char *startSpecifier,
                                    unsigned specifierLen);
  SourceLocation getLocationOfByte(const char *x);

  const Expr *getDataArg(unsigned i) const;

  bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
                    const analyze_format_string::ConversionSpecifier &CS,
                    const char *startSpecifier, unsigned specifierLen,

  template <typename Range>
  void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
                            bool IsStringLocation, Range StringRange,
                            ArrayRef<FixItHint> Fixit = None);
6650 SourceRange CheckFormatHandler::getFormatStringRange() {
6651 return OrigFormatExpr->getSourceRange();
6654 CharSourceRange CheckFormatHandler::
6655 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
6656 SourceLocation Start = getLocationOfByte(startSpecifier);
6657 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1);
6659 // Advance the end SourceLocation by one due to half-open ranges.
6660 End = End.getLocWithOffset(1);
6662 return CharSourceRange::getCharRange(Start, End);
6665 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
6666 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(),
6667 S.getLangOpts(), S.Context.getTargetInfo());
6670 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
6671 unsigned specifierLen){
6672 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
6673 getLocationOfByte(startSpecifier),
6674 /*IsStringLocation*/true,
6675 getSpecifierRange(startSpecifier, specifierLen));
/// Report a length modifier that cannot be combined with the given
/// conversion specifier; DiagID selects the exact warning flavor.
void CheckFormatHandler::HandleInvalidLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    // Suggest the known-correct replacement modifier as a note with a fix-it.
    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

  // A nonsensical length modifier can simply be removed.
  if (DiagID == diag::warn_format_nonsensical_length)
    Hint = FixItHint::CreateRemoval(LMRange);

    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen),
/// Warn (-Wformat-non-iso) about a length modifier that is accepted here
/// but is not part of standard C.
void CheckFormatHandler::HandleNonStandardLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
    // The trailing 0 selects "length modifier" in the diagnostic's %select
    // (its sibling below passes 1 for "conversion specifier").
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    // Suggest the standard equivalent with a fix-it note.
    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
/// Warn (-Wformat-non-iso) about a conversion specifier that is accepted
/// here but is not part of standard C.
void CheckFormatHandler::HandleNonStandardConversionSpecifier(
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  // See if we know how to fix this conversion specifier.
  Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    // Suggest the standard replacement with a fix-it note.
    CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
    S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
      << FixedCS->toString()
      << FixItHint::CreateReplacement(CSRange, FixedCS->toString());

    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
6769 void CheckFormatHandler::HandlePosition(const char *startPos,
6771 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
6772 getLocationOfByte(startPos),
6773 /*IsStringLocation*/true,
6774 getSpecifierRange(startPos, posLen));
6778 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
6779 analyze_format_string::PositionContext p) {
6780 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
6782 getLocationOfByte(startPos), /*IsStringLocation*/true,
6783 getSpecifierRange(startPos, posLen));
6786 void CheckFormatHandler::HandleZeroPosition(const char *startPos,
6788 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
6789 getLocationOfByte(startPos),
6790 /*IsStringLocation*/true,
6791 getSpecifierRange(startPos, posLen));
6794 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
6795 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
6796 // The presence of a null character is likely an error.
6797 EmitFormatDiagnostic(
6798 S.PDiag(diag::warn_printf_format_string_contains_null_char),
6799 getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
6800 getFormatStringRange());
6804 // Note that this may return NULL if there was an error parsing or building
6805 // one of the argument expressions.
6806 const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
6807 return Args[FirstDataArg + i];
/// Called once the whole format string has been parsed; records whether any
/// data argument was never consumed by a conversion specifier.
void CheckFormatHandler::DoneProcessing() {
  // Does the number of data arguments exceed the number of
  // format conversions in the format string?
  if (!HasVAListArg) {
    // Find any arguments that weren't covered.
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      // Remember the first unused data argument; the caller decides later
      // whether and where to diagnose it (see UncoveredArgHandler::Diagnose).
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
      // Every data argument was consumed by some conversion specifier.
      UncoveredArg.setAllCovered();
/// Emit the deferred "data argument not used by format string" warning,
/// pointing at the first unused argument and highlighting every format
/// string that failed to consume it.
void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
                                   const Expr *ArgExpr) {
  assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&

  SourceLocation Loc = ArgExpr->getLocStart();

  // Suppress warnings attributed to system macros.
  if (S.getSourceManager().isInSystemMacro(Loc))

  // Attach each recorded format-string range to the single warning.
  PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
  for (auto E : DiagnosticExprs)
    PDiag << E->getSourceRange();

  CheckFormatHandler::EmitFormatDiagnostic(
      S, IsFunctionCall, DiagnosticExprs[0],
      PDiag, Loc, /*IsStringLocation*/false,
      DiagnosticExprs[0]->getSourceRange());
/// Report an unknown/invalid conversion character; the return value tells
/// the parser whether to keep processing the rest of the format string.
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // say which one it is.
    CoveredArgs.set(argIndex);

  // If argIndex exceeds the number of data arguments we
  // don't issue a warning because that is just a cascade of warnings (and
  // they may have intended '%%' anyway). We don't want to continue processing
  // the format string after this point, however, as we will likely just get
  // gibberish when trying to match arguments.

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  std::string CodePointStr;
  if (!llvm::sys::locale::isPrint(*csStart)) {
    llvm::UTF32 CodePoint;
    const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
    const llvm::UTF8 *E =
        reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
    llvm::ConversionResult Result =
        llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);

    // Not valid UTF-8: fall back to escaping the raw first byte.
    if (Result != llvm::conversionOK) {
      unsigned char FirstChar = *csStart;
      CodePoint = (llvm::UTF32)FirstChar;

    // Render the code point as \xNN, \uNNNN or \UNNNNNNNN depending on its
    // magnitude.
    llvm::raw_string_ostream OS(CodePointStr);
    if (CodePoint < 256)
      OS << "\\x" << llvm::format("%02x", CodePoint);
    else if (CodePoint <= 0xFFFF)
      OS << "\\u" << llvm::format("%04x", CodePoint);
      OS << "\\U" << llvm::format("%08x", CodePoint);
    Specifier = CodePointStr;

  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
      /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));
6909 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
6910 const char *startSpec,
6911 unsigned specifierLen) {
6912 EmitFormatDiagnostic(
6913 S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
6914 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
/// Check that the specifier's argument index refers to a data argument that
/// was actually passed; emits a diagnostic when it does not.
CheckFormatHandler::CheckNumArgs(
  const analyze_format_string::FormatSpecifier &FS,
  const analyze_format_string::ConversionSpecifier &CS,
  const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {

  if (argIndex >= NumDataArgs) {
    // Positional specifiers name the missing argument explicitly; the
    // non-positional variant just reports insufficient arguments.
    PartialDiagnostic PDiag = FS.usesPositionalArg()
      ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
         << (argIndex+1) << NumDataArgs)
      : S.PDiag(diag::warn_printf_insufficient_data_args);
    EmitFormatDiagnostic(
      PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
      getSpecifierRange(startSpecifier, specifierLen));

    // Since more arguments than conversion tokens are given, by extension
    // all arguments are covered, so mark this as so.
    UncoveredArg.setAllCovered();
6940 template<typename Range>
6941 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
6943 bool IsStringLocation,
6945 ArrayRef<FixItHint> FixIt) {
6946 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
6947 Loc, IsStringLocation, StringRange, FixIt);
/// If the format string is not within the function call, emit a note
/// so that the function call and string are in diagnostic messages.
///
/// \param InFunctionCall if true, the format string is within the function
/// call and only one diagnostic message will be produced. Otherwise, an
/// extra note will be emitted pointing to location of the format string.
///
/// \param ArgumentExpr the expression that is passed as the format string
/// argument in the function call. Used for getting locations when two
/// diagnostics are emitted.
///
/// \param PDiag the callee should already have provided any strings for the
/// diagnostic message. This function only adds locations and fixits
/// to the diagnostic.
///
/// \param Loc primary location for diagnostic. If two diagnostics are
/// required, one will be at Loc and a new SourceLocation will be created for
/// the other one.
///
/// \param IsStringLocation if true, Loc points into the format string and
/// the argument expression supplies the location for the main diagnostic;
/// otherwise Loc points at the argument list and the format-string location
/// is used for the note.
///
/// \param StringRange some or all of the string to highlight. This is
/// templated so it can accept either a CharSourceRange or a SourceRange.
///
/// \param FixIt optional fix it hint for the format string.
template <typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(
    Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
    const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
    Range StringRange, ArrayRef<FixItHint> FixIt) {
  if (InFunctionCall) {
    // Single diagnostic, placed directly at Loc.
    const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
    // Two diagnostics: the warning goes on the call's format argument and a
    // note points at where the format string itself is defined.
    S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
      << ArgumentExpr->getSourceRange();

    const Sema::SemaDiagnosticBuilder &Note =
      S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
             diag::note_format_string_defined);

    Note << StringRange;
6999 //===--- CHECK: Printf format string checking ------------------------------===//
/// Format-string checker for the printf family (printf, NSLog, os_log,
/// kernel printf variants, ...).  Extends CheckFormatHandler with
/// printf-specific specifier, flag, and amount ('*') handling.
class CheckPrintfHandler : public CheckFormatHandler {
  CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, bool isObjC, const char *beg,
                     bool hasVAListArg, ArrayRef<const Expr *> Args,
                     unsigned formatIdx, bool inFunctionCall,
                     Sema::VariadicCallType CallType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, hasVAListArg, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,

  // True when checking an NSString-style (@"...") format.
  bool isObjCContext() const { return FSType == Sema::FST_NSString; }

  /// Returns true if '%@' specifiers are allowed in the format string.
  bool allowsObjCArg() const {
    return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
           FSType == Sema::FST_OSTrace;

  bool HandleInvalidPrintfConversionSpecifier(
      const analyze_printf::PrintfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  // Main per-specifier callback invoked by the printf parser.
  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *startSpecifier,
                             unsigned specifierLen) override;

  bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
                       const char *StartSpecifier,
                       unsigned SpecifierLen,

  // Width/precision ('*') and flag handling helpers.
  bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k,
                    const char *startSpecifier, unsigned specifierLen);
  void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
                           const analyze_printf::OptionalAmount &Amt,
                           const char *startSpecifier, unsigned specifierLen);
  void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                  const analyze_printf::OptionalFlag &flag,
                  const char *startSpecifier, unsigned specifierLen);
  void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
                         const analyze_printf::OptionalFlag &ignoredFlag,
                         const analyze_printf::OptionalFlag &flag,
                         const char *startSpecifier, unsigned specifierLen);
  bool checkForCStrMembers(const analyze_printf::ArgType &AT,

  // ObjC modifier-flag ('%[...]') callbacks.
  void HandleEmptyObjCModifierFlag(const char *startFlag,
                                   unsigned flagLen) override;

  void HandleInvalidObjCModifierFlag(const char *startFlag,
                                     unsigned flagLen) override;

  void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
                                            const char *flagsEnd,
                                            const char *conversionPosition)
7070 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
7071 const analyze_printf::PrintfSpecifier &FS,
7072 const char *startSpecifier,
7073 unsigned specifierLen) {
7074 const analyze_printf::PrintfConversionSpecifier &CS =
7075 FS.getConversionSpecifier();
7077 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
7078 getLocationOfByte(CS.getStart()),
7079 startSpecifier, specifierLen,
7080 CS.getStart(), CS.getLength());
/// Type-check the data argument consumed by a '*' width or precision
/// amount; 'k' distinguishes width from precision in the diagnostics.
bool CheckPrintfHandler::HandleAmount(
    const analyze_format_string::OptionalAmount &Amt,
    unsigned k, const char *startSpecifier,
    unsigned specifierLen) {
  if (Amt.hasDataArgument()) {
    if (!HasVAListArg) {
      unsigned argIndex = Amt.getArgIndex();
      if (argIndex >= NumDataArgs) {
        // The '*' refers to an argument that was never passed.
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking. We will just emit

      // Type check the data argument. It should be an 'int'.
      // Although not in conformance with C99, we also allow the argument to be
      // an 'unsigned int' as that is a reasonably safe case. GCC also
      // doesn't emit a warning for that case.
      CoveredArgs.set(argIndex);
      const Expr *Arg = getDataArg(argIndex);

      QualType T = Arg->getType();

      const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
      assert(AT.isValid());

      if (!AT.matchesType(S.Context, T)) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
                               << k << AT.getRepresentativeTypeName(S.Context)
                               << T << Arg->getSourceRange(),
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking. We will just emit
/// Diagnose a width/precision amount that is not valid for the given
/// conversion specifier; constant amounts get a removal fix-it.
void CheckPrintfHandler::HandleInvalidAmount(
    const analyze_printf::PrintfSpecifier &FS,
    const analyze_printf::OptionalAmount &Amt,
    const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();

    // A literal constant amount can simply be deleted; a '*' amount gets no
    // fix-it.
    Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
      ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength()))

  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
                         << type << CS.toString(),
                       getLocationOfByte(Amt.getStart()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
// Warn that a flag is pointless with this conversion specifier
// (warn_printf_nonsensical_flag) and attach a fix-it that removes the
// single flag character at its recorded position.
// NOTE(review): subsampled listing — the closing brace line is omitted here.
7154 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
7155 const analyze_printf::OptionalFlag &flag,
7156 const char *startSpecifier,
7157 unsigned specifierLen) {
7158 // Warn about pointless flag with a fixit removal.
7159 const analyze_printf::PrintfConversionSpecifier &CS =
7160 FS.getConversionSpecifier();
7161 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
7162 << flag.toString() << CS.toString(),
7163 getLocationOfByte(flag.getPosition()),
7164 /*IsStringLocation*/true,
7165 getSpecifierRange(startSpecifier, specifierLen),
// Flags are one byte wide, hence the removal range of length 1.
7166 FixItHint::CreateRemoval(
7167 getSpecifierRange(flag.getPosition(), 1)));
// Warn that `ignoredFlag` has no effect because `flag` overrides it
// (warn_printf_ignored_flag), e.g. ' ' ignored by '+', with a fix-it that
// deletes the ignored flag's single byte.
// NOTE(review): subsampled listing — the closing brace line is omitted here.
7170 void CheckPrintfHandler::HandleIgnoredFlag(
7171 const analyze_printf::PrintfSpecifier &FS,
7172 const analyze_printf::OptionalFlag &ignoredFlag,
7173 const analyze_printf::OptionalFlag &flag,
7174 const char *startSpecifier,
7175 unsigned specifierLen) {
7176 // Warn about ignored flag with a fixit removal.
7177 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
7178 << ignoredFlag.toString() << flag.toString(),
7179 getLocationOfByte(ignoredFlag.getPosition()),
7180 /*IsStringLocation*/true,
7181 getSpecifierRange(startSpecifier, specifierLen),
7182 FixItHint::CreateRemoval(
7183 getSpecifierRange(ignoredFlag.getPosition(), 1)));
// Warn about an empty ObjC modifier flag section (warn_printf_empty_objc_flag),
// caret at the flag's start, underlining `flagLen` bytes.
// NOTE(review): subsampled listing — the second parameter line (presumably
// `unsigned flagLen`, used below) is omitted; verify against the full source.
7186 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
7188 // Warn about an empty flag.
7189 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
7190 getLocationOfByte(startFlag),
7191 /*IsStringLocation*/true,
7192 getSpecifierRange(startFlag, flagLen));
// Warn about an unrecognized ObjC modifier flag (warn_printf_invalid_objc_flag),
// echoing the flag text in the diagnostic and offering to remove the whole
// flag range.
// NOTE(review): subsampled listing — the `flagLen` parameter line is omitted;
// verify against the full source.
7195 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
7197 // Warn about an invalid flag.
7198 auto Range = getSpecifierRange(startFlag, flagLen);
7199 StringRef flag(startFlag, flagLen);
7200 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
7201 getLocationOfByte(startFlag),
7202 /*IsStringLocation*/true,
7203 Range, FixItHint::CreateRemoval(Range));
// Warn when an ObjC '[...]' flag section is attached to a conversion that is
// not '@' (warn_printf_ObjCflags_without_ObjCConversion). The diagnostic
// shows the one-character conversion at `conversionPosition` and the fix-it
// removes the whole flags range (inclusive of both delimiters, hence the +1).
7206 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
7207 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
7208 // Warn about using '[...]' without a '@' conversion.
7209 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
7210 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
7211 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
7212 getLocationOfByte(conversionPosition),
7213 /*IsStringLocation*/true,
7214 Range, FixItHint::CreateRemoval(Range));
// Collect the members named `Name` of kind `MemberKind` (e.g. CXXMethodDecl)
// from the C++ class/struct type `Ty`, via a suppressed-diagnostics member
// lookup. Returns the (possibly empty) set of matches.
// NOTE(review): subsampled listing — the early-return guards after the
// RecordType/definition checks and the `Results.insert(...)` + final return
// are on omitted lines; verify against the full source.
7217 // Determines if the specified is a C++ class or struct containing
7218 // a member with the specified name and kind (e.g. a CXXMethodDecl named
7220 template<typename MemberKind>
7221 static llvm::SmallPtrSet<MemberKind*, 1>
7222 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
7223 const RecordType *RT = Ty->getAs<RecordType>();
7224 llvm::SmallPtrSet<MemberKind*, 1> Results;
// Require a CXXRecordDecl with a visible definition before looking up members.
7228 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
7229 if (!RD || !RD->getDefinition())
// Suppress diagnostics: this is a speculative lookup used only to improve
// a warning, not real name resolution.
7232 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
7233 Sema::LookupMemberName);
7234 R.suppressDiagnostics();
7236 // We just need to include all members of the right kind turned up by the
7237 // filter, at this point.
7238 if (S.LookupQualifiedName(R, RT->getDecl()))
7239 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
7240 NamedDecl *decl = (*I)->getUnderlyingDecl();
7241 if (MemberKind *FK = dyn_cast<MemberKind>(decl))
// NOTE(review): subsampled listing — the `return true/false` lines of this
// function are omitted; verify against the full source.
7247 /// Check if we could call '.c_str()' on an object.
7249 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
7250 /// allow the call, or if it would be ambiguous).
7251 bool Sema::hasCStrMethod(const Expr *E) {
7252 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
// Gather every method literally named "c_str" on E's class type …
7255 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
// … and accept if any of them is callable with zero arguments.
7256 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
7258 if ((*MI)->getMinRequiredArguments() == 0)
// NOTE(review): subsampled listing — loop header continuation and the final
// return lines are omitted; verify against the full source.
7263 // Check if a (w)string was passed when a (w)char* was needed, and offer a
7264 // better diagnostic if so. AT is assumed to be valid.
7265 // Returns true when a c_str() conversion method is found.
7266 bool CheckPrintfHandler::checkForCStrMembers(
7267 const analyze_printf::ArgType &AT, const Expr *E) {
7268 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
7271 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
7273 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
7275 const CXXMethodDecl *Method = *MI;
// Only suggest ".c_str()" if it takes no arguments AND its return type
// actually satisfies the format specifier's expected argument type.
7276 if (Method->getMinRequiredArguments() == 0 &&
7277 AT.matchesType(S.Context, Method->getReturnType())) {
7278 // FIXME: Suggest parens if the expression needs them.
7279 SourceLocation EndLoc = S.getLocForEndOfToken(E->getLocEnd());
7280 S.Diag(E->getLocStart(), diag::note_printf_c_str)
7282 << FixItHint::CreateInsertion(EndLoc, ".c_str()");
// Validate a single parsed printf conversion specifier against the call's
// data arguments: positional-argument consistency, field width / precision
// amounts, FreeBSD kernel %b/%D extensions, ObjC-only conversions, os_log /
// os_trace restrictions, flag validity and flag-shadowing, length modifiers,
// and finally the argument/specifier type match via checkFormatExpr.
// NOTE(review): subsampled listing — many interleaved lines (parameter name
// for FS, early `return` statements after diagnostics, closing braces) are
// omitted throughout; verify control flow against the full source.
7291 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
7293 const char *startSpecifier,
7294 unsigned specifierLen) {
7295 using namespace analyze_format_string;
7296 using namespace analyze_printf;
7298 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();
// '%%' and friends don't consume an argument; only consuming specifiers
// participate in the positional-vs-nonpositional consistency check.
7300 if (FS.consumesDataArgument()) {
7303 usesPositionalArgs = FS.usesPositionalArg();
7305 else if (usesPositionalArgs != FS.usesPositionalArg()) {
7306 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
7307 startSpecifier, specifierLen);
7312 // First check if the field width, precision, and conversion specifier
7313 // have matching data arguments.
7314 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
7315 startSpecifier, specifierLen)) {
7319 if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
7320 startSpecifier, specifierLen)) {
7324 if (!CS.consumesDataArgument()) {
7325 // FIXME: Technically specifying a precision or field width here
7326 // makes no sense. Worth issuing a warning at some point.
7330 // Consume the argument.
7331 unsigned argIndex = FS.getArgIndex();
7332 if (argIndex < NumDataArgs) {
7333 // The check to see if the argIndex is valid will come later.
7334 // We set the bit here because we may exit early from this
7335 // function if we encounter some other error.
7336 CoveredArgs.set(argIndex);
7339 // FreeBSD kernel extensions.
7340 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
7341 CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
7342 // We need at least two arguments.
7343 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
7346 // Claim the second argument.
7347 CoveredArgs.set(argIndex + 1);
7349 // Type check the first argument (int for %b, pointer for %D)
7350 const Expr *Ex = getDataArg(argIndex);
7351 const analyze_printf::ArgType &AT =
7352 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
7353 ArgType(S.Context.IntTy) : ArgType::CPointerTy;
7354 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
7355 EmitFormatDiagnostic(
7356 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
7357 << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
7358 << false << Ex->getSourceRange(),
7359 Ex->getLocStart(), /*IsStringLocation*/false,
7360 getSpecifierRange(startSpecifier, specifierLen));
7362 // Type check the second argument (char * for both %b and %D)
7363 Ex = getDataArg(argIndex + 1);
7364 const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
7365 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
7366 EmitFormatDiagnostic(
7367 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
7368 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
7369 << false << Ex->getSourceRange(),
7370 Ex->getLocStart(), /*IsStringLocation*/false,
7371 getSpecifierRange(startSpecifier, specifierLen));
7376 // Check for using an Objective-C specific conversion specifier
7377 // in a non-ObjC literal.
7378 if (!allowsObjCArg() && CS.isObjCArg()) {
7379 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
7383 // %P can only be used with os_log.
7384 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
7385 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
7389 // %n is not allowed with os_log.
7390 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
7391 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
7392 getLocationOfByte(CS.getStart()),
7393 /*IsStringLocation*/ false,
7394 getSpecifierRange(startSpecifier, specifierLen));
7399 // Only scalars are allowed for os_trace.
7400 if (FSType == Sema::FST_OSTrace &&
7401 (CS.getKind() == ConversionSpecifier::PArg ||
7402 CS.getKind() == ConversionSpecifier::sArg ||
7403 CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
7404 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
7408 // Check for use of public/private annotation outside of os_log().
7409 if (FSType != Sema::FST_OSLog) {
7410 if (FS.isPublic().isSet()) {
7411 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
7413 getLocationOfByte(FS.isPublic().getPosition()),
7414 /*IsStringLocation*/ false,
7415 getSpecifierRange(startSpecifier, specifierLen));
7417 if (FS.isPrivate().isSet()) {
7418 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
7420 getLocationOfByte(FS.isPrivate().getPosition()),
7421 /*IsStringLocation*/ false,
7422 getSpecifierRange(startSpecifier, specifierLen));
7426 // Check for invalid use of field width
7427 if (!FS.hasValidFieldWidth()) {
7428 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
7429 startSpecifier, specifierLen);
7432 // Check for invalid use of precision
7433 if (!FS.hasValidPrecision()) {
7434 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
7435 startSpecifier, specifierLen);
7438 // Precision is mandatory for %P specifier.
7439 if (CS.getKind() == ConversionSpecifier::PArg &&
7440 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
7441 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
7442 getLocationOfByte(startSpecifier),
7443 /*IsStringLocation*/ false,
7444 getSpecifierRange(startSpecifier, specifierLen));
7447 // Check each flag does not conflict with any other component.
7448 if (!FS.hasValidThousandsGroupingPrefix())
7449 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
7450 if (!FS.hasValidLeadingZeros())
7451 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
7452 if (!FS.hasValidPlusPrefix())
7453 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
7454 if (!FS.hasValidSpacePrefix())
7455 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
7456 if (!FS.hasValidAlternativeForm())
7457 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
7458 if (!FS.hasValidLeftJustified())
7459 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);
7461 // Check that flags are not ignored by another flag
7462 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
7463 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
7464 startSpecifier, specifierLen);
7465 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
7466 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
7467 startSpecifier, specifierLen);
7469 // Check the length modifier is valid with the given conversion specifier.
7470 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo()))
7471 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
7472 diag::warn_format_nonsensical_length);
7473 else if (!FS.hasStandardLengthModifier())
7474 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
7475 else if (!FS.hasStandardLengthConversionCombination())
7476 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
7477 diag::warn_format_non_standard_conversion_spec);
7479 if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
7480 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
7482 // The remaining checks depend on the data arguments.
7486 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
7489 const Expr *Arg = getDataArg(argIndex);
// Delegate the actual argument-type / specifier matching.
7493 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
// Decide whether inserting a C-style cast in front of `E` needs extra
// parentheses. High-precedence expression forms (literals, refs, calls,
// subscripts, etc., enumerated below) do not.
// NOTE(review): subsampled listing — the switch's return statements and the
// default case are on omitted lines; verify against the full source.
7496 static bool requiresParensToAddCast(const Expr *E) {
7497 // FIXME: We should have a general way to reason about operator
7498 // precedence and whether parens are actually needed here.
7499 // Take care of a few common cases where they aren't.
7500 const Expr *Inside = E->IgnoreImpCasts();
// Look through pseudo-object expressions (e.g. ObjC property accesses) to
// classify the underlying syntactic form.
7501 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
7502 Inside = POE->getSyntacticForm()->IgnoreImpCasts();
7504 switch (Inside->getStmtClass()) {
7505 case Stmt::ArraySubscriptExprClass:
7506 case Stmt::CallExprClass:
7507 case Stmt::CharacterLiteralClass:
7508 case Stmt::CXXBoolLiteralExprClass:
7509 case Stmt::DeclRefExprClass:
7510 case Stmt::FloatingLiteralClass:
7511 case Stmt::IntegerLiteralClass:
7512 case Stmt::MemberExprClass:
7513 case Stmt::ObjCArrayLiteralClass:
7514 case Stmt::ObjCBoolLiteralExprClass:
7515 case Stmt::ObjCBoxedExprClass:
7516 case Stmt::ObjCDictionaryLiteralClass:
7517 case Stmt::ObjCEncodeExprClass:
7518 case Stmt::ObjCIvarRefExprClass:
7519 case Stmt::ObjCMessageExprClass:
7520 case Stmt::ObjCPropertyRefExprClass:
7521 case Stmt::ObjCStringLiteralClass:
7522 case Stmt::ObjCSubscriptRefExprClass:
7523 case Stmt::ParenExprClass:
7524 case Stmt::StringLiteralClass:
7525 case Stmt::UnaryOperatorClass:
// For Darwin platform-independence typedefs (CFIndex, NSInteger, NSUInteger,
// SInt32, UInt32), return the primitive type the value should be cast to
// before printing, plus the typedef's name; otherwise a null QualType.
// Peels typedef sugar, parenthesized expressions, and recurses into both
// arms of a conditional operator (whose result type loses the sugar).
// NOTE(review): subsampled listing — the Expr parameter declaration and some
// recursion arguments are on omitted lines; verify against the full source.
7532 static std::pair<QualType, StringRef>
7533 shouldNotPrintDirectly(const ASTContext &Context,
7534 QualType IntendedTy,
7536 // Use a 'while' to peel off layers of typedefs.
7537 QualType TyTy = IntendedTy;
7538 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
7539 StringRef Name = UserTy->getDecl()->getName();
7540 QualType CastTy = llvm::StringSwitch<QualType>(Name)
7541 .Case("CFIndex", Context.getNSIntegerType())
7542 .Case("NSInteger", Context.getNSIntegerType())
7543 .Case("NSUInteger", Context.getNSUIntegerType())
7544 .Case("SInt32", Context.IntTy)
7545 .Case("UInt32", Context.UnsignedIntTy)
7546 .Default(QualType())
7548 if (!CastTy.isNull())
7549 return std::make_pair(CastTy, Name);
// Not one of the special names at this layer; strip one typedef and retry.
7551 TyTy = UserTy->desugar();
7554 // Strip parens if necessary.
7555 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
7556 return shouldNotPrintDirectly(Context,
7557 PE->getSubExpr()->getType(),
7560 // If this is a conditional expression, then its result type is constructed
7561 // via usual arithmetic conversions and thus there might be no necessary
7562 // typedef sugar there. Recurse to operands to check for NSInteger &
7563 // Co. usage condition.
7564 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
7565 QualType TrueTy, FalseTy;
7566 StringRef TrueName, FalseName;
7568 std::tie(TrueTy, TrueName) =
7569 shouldNotPrintDirectly(Context,
7570 CO->getTrueExpr()->getType(),
7572 std::tie(FalseTy, FalseName) =
7573 shouldNotPrintDirectly(Context,
7574 CO->getFalseExpr()->getType(),
7575 CO->getFalseExpr());
// Agree when both arms agree; otherwise prefer whichever arm matched.
7577 if (TrueTy == FalseTy)
7578 return std::make_pair(TrueTy, TrueName);
7579 else if (TrueTy.isNull())
7580 return std::make_pair(FalseTy, FalseName);
7581 else if (FalseTy.isNull())
7582 return std::make_pair(TrueTy, TrueName);
// No special typedef found anywhere.
7585 return std::make_pair(QualType(), StringRef());
// Core printf argument/specifier type check. Computes the specifier's
// expected ArgType, peels typeof/promotions/char-literals/enums from the
// argument's type, special-cases ObjC %C (unichar) and Darwin typedefs, then
// emits the appropriate mismatch diagnostic — with a fix-it that either
// corrects the specifier or inserts a cast — and finally handles non-POD
// variadic-argument errors. Marks the argument checked in CheckedVarArgs.
// NOTE(review): subsampled listing — parameter `E`, many early returns,
// closing braces, and several diagnostic arguments are on omitted lines;
// verify control flow against the full source before acting on comments here.
7589 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
7590 const char *StartSpecifier,
7591 unsigned SpecifierLen,
7593 using namespace analyze_format_string;
7594 using namespace analyze_printf;
7596 // Now type check the data expression that matches the
7597 // format specifier.
7598 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext());
// Look through typeof(expr) sugar to the underlying expression type.
7602 QualType ExprTy = E->getType();
7603 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) {
7604 ExprTy = TET->getUnderlyingExpr()->getType();
7607 const analyze_printf::ArgType::MatchKind Match =
7608 AT.matchesType(S.Context, ExprTy);
7609 bool Pedantic = Match == analyze_printf::ArgType::NoMatchPedantic;
7610 if (Match == analyze_printf::ArgType::Match)
7613 // Look through argument promotions for our error message's reported type.
7614 // This includes the integral and floating promotions, but excludes array
7615 // and function pointer decay; seeing that an argument intended to be a
7616 // string has type 'char [6]' is probably more confusing than 'char *'.
7617 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
7618 if (ICE->getCastKind() == CK_IntegralCast ||
7619 ICE->getCastKind() == CK_FloatingCast) {
7620 E = ICE->getSubExpr();
7621 ExprTy = E->getType();
7623 // Check if we didn't match because of an implicit cast from a 'char'
7624 // or 'short' to an 'int'. This is done because printf is a varargs
7626 if (ICE->getType() == S.Context.IntTy ||
7627 ICE->getType() == S.Context.UnsignedIntTy) {
7628 // All further checking is done on the subexpression.
7629 if (AT.matchesType(S.Context, ExprTy))
7633 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
7634 // Special case for 'a', which has type 'int' in C.
7635 // Note, however, that we do /not/ want to treat multibyte constants like
7636 // 'MooV' as characters! This form is deprecated but still exists.
7637 if (ExprTy == S.Context.IntTy)
7638 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue()))
7639 ExprTy = S.Context.CharTy;
7642 // Look through enums to their underlying type.
7643 bool IsEnum = false;
7644 if (auto EnumTy = ExprTy->getAs<EnumType>()) {
7645 ExprTy = EnumTy->getDecl()->getIntegerType();
7649 // %C in an Objective-C context prints a unichar, not a wchar_t.
7650 // If the argument is an integer of some kind, believe the %C and suggest
7651 // a cast instead of changing the conversion specifier.
7652 QualType IntendedTy = ExprTy;
7653 if (isObjCContext() &&
7654 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
7655 if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
7656 !ExprTy->isCharType()) {
7657 // 'unichar' is defined as a typedef of unsigned short, but we should
7658 // prefer using the typedef if it is visible.
7659 IntendedTy = S.Context.UnsignedShortTy;
7661 // While we are here, check if the value is an IntegerLiteral that happens
7662 // to be within the valid range.
7663 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
7664 const llvm::APInt &V = IL->getValue();
7665 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy))
// Prefer spelling the suggested cast as 'unichar' when that typedef is
// visible and resolves to the intended unsigned short.
7669 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getLocStart(),
7670 Sema::LookupOrdinaryName);
7671 if (S.LookupName(Result, S.getCurScope())) {
7672 NamedDecl *ND = Result.getFoundDecl();
7673 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
7674 if (TD->getUnderlyingType() == IntendedTy)
7675 IntendedTy = S.Context.getTypedefType(TD);
7680 // Special-case some of Darwin's platform-independence types by suggesting
7681 // casts to primitive types that are known to be large enough.
7682 bool ShouldNotPrintDirectly = false; StringRef CastTyName;
7683 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
7685 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
7686 if (!CastTy.isNull()) {
7687 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
7688 // (long in ASTContext). Only complain to pedants.
7689 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
7690 (AT.isSizeT() || AT.isPtrdiffT()) &&
7691 AT.matchesType(S.Context, CastTy))
7693 IntendedTy = CastTy;
7694 ShouldNotPrintDirectly = true;
7698 // We may be able to offer a FixItHint if it is a supported type.
7699 PrintfSpecifier fixedFS = FS;
7701 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());
7704 // Get the fix string from the fixed format specifier
7705 SmallString<16> buf;
7706 llvm::raw_svector_ostream os(buf);
7707 fixedFS.toString(os);
7709 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);
// Case 1: argument type is the natural one — the *specifier* is wrong, so
// suggest replacing the specifier text.
7711 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) {
7714 ? diag::warn_format_conversion_argument_type_mismatch_pedantic
7715 : diag::warn_format_conversion_argument_type_mismatch;
7716 // In this case, the specifier is wrong and should be changed to match
7718 EmitFormatDiagnostic(S.PDiag(Diag)
7719 << AT.getRepresentativeTypeName(S.Context)
7720 << IntendedTy << IsEnum << E->getSourceRange(),
7722 /*IsStringLocation*/ false, SpecRange,
7723 FixItHint::CreateReplacement(SpecRange, os.str()));
// Case 2: the specifier is (probably) right — suggest casting the argument.
7725 // The canonical type for formatting this value is different from the
7726 // actual type of the expression. (This occurs, for example, with Darwin's
7727 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
7728 // should be printed as 'long' for 64-bit compatibility.)
7729 // Rather than emitting a normal format/argument mismatch, we want to
7730 // add a cast to the recommended type (and correct the format string
7732 SmallString<16> CastBuf;
7733 llvm::raw_svector_ostream CastFix(CastBuf);
7735 IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
7738 SmallVector<FixItHint,4> Hints;
7739 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly)
7740 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));
7742 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
7743 // If there's already a cast present, just replace it.
7744 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
7745 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));
7747 } else if (!requiresParensToAddCast(E)) {
7748 // If the expression has high enough precedence,
7749 // just write the C-style cast.
7750 Hints.push_back(FixItHint::CreateInsertion(E->getLocStart(),
7753 // Otherwise, add parens around the expression as well as the cast.
7755 Hints.push_back(FixItHint::CreateInsertion(E->getLocStart(),
7758 SourceLocation After = S.getLocForEndOfToken(E->getLocEnd());
7759 Hints.push_back(FixItHint::CreateInsertion(After, ")"));
7762 if (ShouldNotPrintDirectly) {
7763 // The expression has a type that should not be printed directly.
7764 // We extract the name from the typedef because we don't want to show
7765 // the underlying type in the diagnostic.
7767 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy))
7768 Name = TypedefTy->getDecl()->getName();
7771 unsigned Diag = Pedantic
7772 ? diag::warn_format_argument_needs_cast_pedantic
7773 : diag::warn_format_argument_needs_cast;
7774 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
7775 << E->getSourceRange(),
7776 E->getLocStart(), /*IsStringLocation=*/false,
7779 // In this case, the expression could be printed using a different
7780 // specifier, but we've decided that the specifier is probably correct
7781 // and we should cast instead. Just use the normal warning message.
7782 EmitFormatDiagnostic(
7783 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
7784 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
7785 << E->getSourceRange(),
7786 E->getLocStart(), /*IsStringLocation*/false,
// Case 3 (below): no fix-it is possible; classify the argument per the
// variadic-call rules and emit the matching mismatch / non-POD diagnostic.
7791 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
7793 // Since the warning for passing non-POD types to variadic functions
7794 // was deferred until now, we emit a warning for non-POD
7796 switch (S.isValidVarArgType(ExprTy)) {
7797 case Sema::VAK_Valid:
7798 case Sema::VAK_ValidInCXX11: {
7801 ? diag::warn_format_conversion_argument_type_mismatch_pedantic
7802 : diag::warn_format_conversion_argument_type_mismatch;
7804 EmitFormatDiagnostic(
7805 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
7806 << IsEnum << CSR << E->getSourceRange(),
7807 E->getLocStart(), /*IsStringLocation*/ false, CSR);
7810 case Sema::VAK_Undefined:
7811 case Sema::VAK_MSVCUndefined:
7812 EmitFormatDiagnostic(
7813 S.PDiag(diag::warn_non_pod_vararg_with_format_string)
7814 << S.getLangOpts().CPlusPlus11
7817 << AT.getRepresentativeTypeName(S.Context)
7819 << E->getSourceRange(),
7820 E->getLocStart(), /*IsStringLocation*/false, CSR);
// Offer ".c_str()" note when a std::string-like object was passed for %s.
7821 checkForCStrMembers(AT, E);
7824 case Sema::VAK_Invalid:
7825 if (ExprTy->isObjCObjectType())
7826 EmitFormatDiagnostic(
7827 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
7828 << S.getLangOpts().CPlusPlus11
7831 << AT.getRepresentativeTypeName(S.Context)
7833 << E->getSourceRange(),
7834 E->getLocStart(), /*IsStringLocation*/false, CSR);
7836 // FIXME: If this is an initializer list, suggest removing the braces
7837 // or inserting a cast to the target type.
7838 S.Diag(E->getLocStart(), diag::err_cannot_pass_to_vararg_format)
7839 << isa<InitListExpr>(E) << ExprTy << CallType
7840 << AT.getRepresentativeTypeName(S.Context)
7841 << E->getSourceRange();
// Record that this variadic argument has been checked so the generic
// non-POD vararg check doesn't warn a second time.
7845 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
7846 "format string specifier index out of range");
7847 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
7853 //===--- CHECK: Scanf format string checking ------------------------------===//
// Format-string checker specialization for scanf-family functions. Reuses
// the shared CheckFormatHandler machinery and overrides the scanf-specific
// callbacks invoked by the format-string parser.
// NOTE(review): subsampled listing — the access specifiers and the class's
// closing '};' are on omitted lines; verify against the full source.
7857 class CheckScanfHandler : public CheckFormatHandler {
7859 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
7860 const Expr *origFormatExpr, Sema::FormatStringType type,
7861 unsigned firstDataArg, unsigned numDataArgs,
7862 const char *beg, bool hasVAListArg,
7863 ArrayRef<const Expr *> Args, unsigned formatIdx,
7864 bool inFunctionCall, Sema::VariadicCallType CallType,
7865 llvm::SmallBitVector &CheckedVarArgs,
7866 UncoveredArgHandler &UncoveredArg)
7867 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
7868 numDataArgs, beg, hasVAListArg, Args, formatIdx,
7869 inFunctionCall, CallType, CheckedVarArgs,
// Per-specifier validation (width, length modifier, argument type).
7872 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
7873 const char *startSpecifier,
7874 unsigned specifierLen) override;
// Diagnose an unrecognized scanf conversion character.
7876 bool HandleInvalidScanfConversionSpecifier(
7877 const analyze_scanf::ScanfSpecifier &FS,
7878 const char *startSpecifier,
7879 unsigned specifierLen) override;
// Diagnose a '[' scanlist with no closing ']'.
7881 void HandleIncompleteScanList(const char *start, const char *end) override;
// Emit warn_scanf_scanlist_incomplete for a '[' scanlist that reaches the
// end of the format string without a closing ']'; caret at `end`, range
// covering the open scanlist.
// NOTE(review): subsampled listing — the `end` parameter line and closing
// brace are omitted here.
7886 void CheckScanfHandler::HandleIncompleteScanList(const char *start,
7888 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
7889 getLocationOfByte(end), /*IsStringLocation*/true,
7890 getSpecifierRange(start, end - start));
// Forward an unrecognized scanf conversion character to the shared
// HandleInvalidConversionSpecifier, which emits the diagnostic; the return
// value tells the parser whether to keep scanning.
7893 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
7894 const analyze_scanf::ScanfSpecifier &FS,
7895 const char *startSpecifier,
7896 unsigned specifierLen) {
7897 const analyze_scanf::ScanfConversionSpecifier &CS =
7898 FS.getConversionSpecifier();
7900 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
7901 getLocationOfByte(CS.getStart()),
7902 startSpecifier, specifierLen,
7903 CS.getStart(), CS.getLength());
// Validate a single parsed scanf conversion specifier: positional-argument
// consistency, zero field width, length-modifier validity, and finally the
// argument-type match (with a fix-it when fixType can repair the specifier).
// NOTE(review): subsampled listing — early `return` lines, closing braces,
// and the diagnostic's location argument are omitted in places; verify
// control flow against the full source.
7906 bool CheckScanfHandler::HandleScanfSpecifier(
7907 const analyze_scanf::ScanfSpecifier &FS,
7908 const char *startSpecifier,
7909 unsigned specifierLen) {
7910 using namespace analyze_scanf;
7911 using namespace analyze_format_string;
7913 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
7915 // Handle case where '%' and '*' don't consume an argument. These shouldn't
7916 // be used to decide if we are using positional arguments consistently.
7917 if (FS.consumesDataArgument()) {
7920 usesPositionalArgs = FS.usesPositionalArg();
7922 else if (usesPositionalArgs != FS.usesPositionalArg()) {
7923 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
7924 startSpecifier, specifierLen);
7929 // Check if the field with is non-zero.
7930 const OptionalAmount &Amt = FS.getFieldWidth();
7931 if (Amt.getHowSpecified() == OptionalAmount::Constant) {
// A zero field width is meaningless for scanf; warn and offer removal.
7932 if (Amt.getConstantAmount() == 0) {
7933 const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
7934 Amt.getConstantLength());
7935 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
7936 getLocationOfByte(Amt.getStart()),
7937 /*IsStringLocation*/true, R,
7938 FixItHint::CreateRemoval(R));
7942 if (!FS.consumesDataArgument()) {
7943 // FIXME: Technically specifying a precision or field width here
7944 // makes no sense. Worth issuing a warning at some point.
7948 // Consume the argument.
7949 unsigned argIndex = FS.getArgIndex();
7950 if (argIndex < NumDataArgs) {
7951 // The check to see if the argIndex is valid will come later.
7952 // We set the bit here because we may exit early from this
7953 // function if we encounter some other error.
7954 CoveredArgs.set(argIndex);
7957 // Check the length modifier is valid with the given conversion specifier.
7958 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo()))
7959 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
7960 diag::warn_format_nonsensical_length);
7961 else if (!FS.hasStandardLengthModifier())
7962 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
7963 else if (!FS.hasStandardLengthConversionCombination())
7964 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
7965 diag::warn_format_non_standard_conversion_spec);
7967 if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
7968 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
7970 // The remaining checks depend on the data arguments.
7974 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
7977 // Check that the argument type matches the format specifier.
7978 const Expr *Ex = getDataArg(argIndex);
7982 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);
7984 if (!AT.isValid()) {
7988 analyze_format_string::ArgType::MatchKind Match =
7989 AT.matchesType(S.Context, Ex->getType());
7990 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
7991 if (Match == analyze_format_string::ArgType::Match)
// Mismatch: try to repair the specifier to match the argument's type so we
// can attach a replacement fix-it; otherwise emit the plain diagnostic.
7994 ScanfSpecifier fixedFS = FS;
7995 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
7996 S.getLangOpts(), S.Context);
7999 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
8000 : diag::warn_format_conversion_argument_type_mismatch;
8003 // Get the fix string from the fixed format specifier.
8004 SmallString<128> buf;
8005 llvm::raw_svector_ostream os(buf);
8006 fixedFS.toString(os);
8008 EmitFormatDiagnostic(
8009 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
8010 << Ex->getType() << false << Ex->getSourceRange(),
8012 /*IsStringLocation*/ false,
8013 getSpecifierRange(startSpecifier, specifierLen),
8014 FixItHint::CreateReplacement(
8015 getSpecifierRange(startSpecifier, specifierLen), os.str()));
8017 EmitFormatDiagnostic(S.PDiag(Diag)
8018 << AT.getRepresentativeTypeName(S.Context)
8019 << Ex->getType() << false << Ex->getSourceRange(),
8021 /*IsStringLocation*/ false,
8022 getSpecifierRange(startSpecifier, specifierLen));
// Entry point for checking one resolved format-string literal: rejects wide
// literals, extracts the (possibly truncated) string contents, warns on
// non-null-terminated or empty format strings, then dispatches to the
// printf-family or scanf parser with the matching handler.
// NOTE(review): subsampled listing — the early `return` after each
// diagnostic and the H.DoneProcessing() calls are on omitted lines; verify
// against the full source.
8028 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
8029 const Expr *OrigFormatExpr,
8030 ArrayRef<const Expr *> Args,
8031 bool HasVAListArg, unsigned format_idx,
8032 unsigned firstDataArg,
8033 Sema::FormatStringType Type,
8034 bool inFunctionCall,
8035 Sema::VariadicCallType CallType,
8036 llvm::SmallBitVector &CheckedVarArgs,
8037 UncoveredArgHandler &UncoveredArg) {
8038 // CHECK: is the format string a wide literal?
8039 if (!FExpr->isAscii() && !FExpr->isUTF8()) {
8040 CheckFormatHandler::EmitFormatDiagnostic(
8041 S, inFunctionCall, Args[format_idx],
8042 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getLocStart(),
8043 /*IsStringLocation*/true, OrigFormatExpr->getSourceRange());
8047 // Str - The format string. NOTE: this is NOT null-terminated!
8048 StringRef StrRef = FExpr->getString();
8049 const char *Str = StrRef.data();
8050 // Account for cases where the string literal is truncated in a declaration.
8051 const ConstantArrayType *T =
8052 S.Context.getAsConstantArrayType(FExpr->getType());
8053 assert(T && "String literal not of constant array type!");
8054 size_t TypeSize = T->getSize().getZExtValue();
// Effective length: the array size minus the implicit terminator, capped by
// the literal's actual length (handles e.g. char fmt[3] = "%s%d").
8055 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
8056 const unsigned numDataArgs = Args.size() - firstDataArg;
8058 // Emit a warning if the string literal is truncated and does not contain an
8059 // embedded null character.
8060 if (TypeSize <= StrRef.size() &&
8061 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) {
8062 CheckFormatHandler::EmitFormatDiagnostic(
8063 S, inFunctionCall, Args[format_idx],
8064 S.PDiag(diag::warn_printf_format_string_not_null_terminated),
8065 FExpr->getLocStart(),
8066 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
8070 // CHECK: empty format string?
8071 if (StrLen == 0 && numDataArgs > 0) {
8072 CheckFormatHandler::EmitFormatDiagnostic(
8073 S, inFunctionCall, Args[format_idx],
8074 S.PDiag(diag::warn_empty_format_string), FExpr->getLocStart(),
8075 /*IsStringLocation*/true, OrigFormatExpr->getSourceRange());
// NSString, os_log, os_trace, and FreeBSD kprintf all use the printf parser
// with handler-level tweaks (ObjC allowance, FreeBSD extensions flag).
8079 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
8080 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
8081 Type == Sema::FST_OSTrace) {
8082 CheckPrintfHandler H(
8083 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
8084 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
8085 HasVAListArg, Args, format_idx, inFunctionCall, CallType,
8086 CheckedVarArgs, UncoveredArg);
8088 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
8090 S.Context.getTargetInfo(),
8091 Type == Sema::FST_FreeBSDKPrintf))
8093 } else if (Type == Sema::FST_Scanf) {
8094 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
8095 numDataArgs, Str, HasVAListArg, Args, format_idx,
8096 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
8098 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
8100 S.Context.getTargetInfo()))
8102 } // TODO: handle other formats
// Returns true if the given format-string literal contains a specifier
// that consumes a string argument (delegates to the shared format-string
// analyzer). Length handling mirrors CheckFormatString above.
8105 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
8106 // Str - The format string. NOTE: this is NOT null-terminated!
8107 StringRef StrRef = FExpr->getString();
8108 const char *Str = StrRef.data();
8109 // Account for cases where the string literal is truncated in a declaration.
8110 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
8111 assert(T && "String literal not of constant array type!");
8112 size_t TypeSize = T->getSize().getZExtValue();
// Usable length: array size minus terminator, clamped to literal length.
8113 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
8114 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
8116 Context.getTargetInfo());
8119 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
8121 // Returns the related absolute value function that is larger, or 0 if one
// Maps an abs-family builtin to the next-larger function in the same
// family (abs->labs->llabs, fabsf->fabs->fabsl, cabsf->cabs->cabsl),
// for both __builtin_* and library forms. Used to walk up a family when
// searching for a variant whose parameter can hold the argument.
// NOTE(review): the terminal cases' return statements are elided in this
// listing; visible code shows only the promotion steps.
8123 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
8124 switch (AbsFunction) {
8128 case Builtin::BI__builtin_abs:
8129 return Builtin::BI__builtin_labs;
8130 case Builtin::BI__builtin_labs:
8131 return Builtin::BI__builtin_llabs;
8132 case Builtin::BI__builtin_llabs:
8135 case Builtin::BI__builtin_fabsf:
8136 return Builtin::BI__builtin_fabs;
8137 case Builtin::BI__builtin_fabs:
8138 return Builtin::BI__builtin_fabsl;
8139 case Builtin::BI__builtin_fabsl:
8142 case Builtin::BI__builtin_cabsf:
8143 return Builtin::BI__builtin_cabs;
8144 case Builtin::BI__builtin_cabs:
8145 return Builtin::BI__builtin_cabsl;
8146 case Builtin::BI__builtin_cabsl:
8149 case Builtin::BIabs:
8150 return Builtin::BIlabs;
8151 case Builtin::BIlabs:
8152 return Builtin::BIllabs;
8153 case Builtin::BIllabs:
8156 case Builtin::BIfabsf:
8157 return Builtin::BIfabs;
8158 case Builtin::BIfabs:
8159 return Builtin::BIfabsl;
8160 case Builtin::BIfabsl:
8163 case Builtin::BIcabsf:
8164 return Builtin::BIcabs;
8165 case Builtin::BIcabs:
8166 return Builtin::BIcabsl;
8167 case Builtin::BIcabsl:
8172 // Returns the argument type of the absolute value function.
// Looks up the builtin's type via ASTContext and extracts the single
// parameter type from its prototype; the error paths (GetBuiltinType
// failure, non-prototype, wrong arity) return an invalid QualType in the
// elided lines of this listing.
8173 static QualType getAbsoluteValueArgumentType(ASTContext &Context,
8178 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
8179 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
8180 if (Error != ASTContext::GE_None)
8183 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
// Abs-family functions are unary; anything else is not a candidate.
8187 if (FT->getNumParams() != 1)
8190 return FT->getParamType(0);
8193 // Returns the best absolute value function, or zero, based on type and
8194 // current absolute value function.
// Walks up the function family (via getLargerAbsoluteValueFunction) and
// picks a variant whose parameter is at least as wide as ArgType,
// preferring an exact type match.
8195 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
8196 unsigned AbsFunctionKind) {
8197 unsigned BestKind = 0;
8198 uint64_t ArgSize = Context.getTypeSize(ArgType);
8199 for (unsigned Kind = AbsFunctionKind; Kind != 0;
8200 Kind = getLargerAbsoluteValueFunction(Kind)) {
8201 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
8202 if (Context.getTypeSize(ParamType) >= ArgSize) {
// An exact type match beats a merely wide-enough parameter.
8205 else if (Context.hasSameType(ParamType, ArgType)) {
// Kind of value an abs function operates on; enumerators (AVK_Integer,
// AVK_Floating, AVK_Complex) are elided in this listing.
8214 enum AbsoluteValueKind {
// Classifies a type into the abs-function family it belongs to.
8220 static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
8221 if (T->isIntegralOrEnumerationType())
8223 if (T->isRealFloatingType())
8224 return AVK_Floating;
8225 if (T->isAnyComplexType())
// Callers only pass types from the three families above.
8228 llvm_unreachable("Type not integer, floating, or complex");
8231 // Changes the absolute value function to a different type. Preserves whether
8232 // the function is a builtin.
// Converts to the *smallest* member of the target family (e.g. any
// float/complex abs -> (__builtin_)abs for AVK_Integer); callers then
// widen via getBestAbsFunction. The inner switch headers and the
// AVK_* case labels of the outer switch are elided in this listing.
8233 static unsigned changeAbsFunction(unsigned AbsKind,
8234 AbsoluteValueKind ValueKind) {
8235 switch (ValueKind) {
8240 case Builtin::BI__builtin_fabsf:
8241 case Builtin::BI__builtin_fabs:
8242 case Builtin::BI__builtin_fabsl:
8243 case Builtin::BI__builtin_cabsf:
8244 case Builtin::BI__builtin_cabs:
8245 case Builtin::BI__builtin_cabsl:
8246 return Builtin::BI__builtin_abs;
8247 case Builtin::BIfabsf:
8248 case Builtin::BIfabs:
8249 case Builtin::BIfabsl:
8250 case Builtin::BIcabsf:
8251 case Builtin::BIcabs:
8252 case Builtin::BIcabsl:
8253 return Builtin::BIabs;
8259 case Builtin::BI__builtin_abs:
8260 case Builtin::BI__builtin_labs:
8261 case Builtin::BI__builtin_llabs:
8262 case Builtin::BI__builtin_cabsf:
8263 case Builtin::BI__builtin_cabs:
8264 case Builtin::BI__builtin_cabsl:
8265 return Builtin::BI__builtin_fabsf;
8266 case Builtin::BIabs:
8267 case Builtin::BIlabs:
8268 case Builtin::BIllabs:
8269 case Builtin::BIcabsf:
8270 case Builtin::BIcabs:
8271 case Builtin::BIcabsl:
8272 return Builtin::BIfabsf;
8278 case Builtin::BI__builtin_abs:
8279 case Builtin::BI__builtin_labs:
8280 case Builtin::BI__builtin_llabs:
8281 case Builtin::BI__builtin_fabsf:
8282 case Builtin::BI__builtin_fabs:
8283 case Builtin::BI__builtin_fabsl:
8284 return Builtin::BI__builtin_cabsf;
8285 case Builtin::BIabs:
8286 case Builtin::BIlabs:
8287 case Builtin::BIllabs:
8288 case Builtin::BIfabsf:
8289 case Builtin::BIfabs:
8290 case Builtin::BIfabsl:
8291 return Builtin::BIcabsf;
8294 llvm_unreachable("Unable to convert function");
// Returns the builtin ID of FDecl if it is one of the recognized
// abs-family functions (builtin or library form), otherwise falls into
// elided paths of this listing. The identifier check guards against
// builtins reached through a different name.
8297 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
8298 const IdentifierInfo *FnInfo = FDecl->getIdentifier();
8302 switch (FDecl->getBuiltinID()) {
8305 case Builtin::BI__builtin_abs:
8306 case Builtin::BI__builtin_fabs:
8307 case Builtin::BI__builtin_fabsf:
8308 case Builtin::BI__builtin_fabsl:
8309 case Builtin::BI__builtin_labs:
8310 case Builtin::BI__builtin_llabs:
8311 case Builtin::BI__builtin_cabs:
8312 case Builtin::BI__builtin_cabsf:
8313 case Builtin::BI__builtin_cabsl:
8314 case Builtin::BIabs:
8315 case Builtin::BIlabs:
8316 case Builtin::BIllabs:
8317 case Builtin::BIfabs:
8318 case Builtin::BIfabsf:
8319 case Builtin::BIfabsl:
8320 case Builtin::BIcabs:
8321 case Builtin::BIcabsf:
8322 case Builtin::BIcabsl:
// All abs-family kinds are represented directly by their builtin ID.
8323 return FDecl->getBuiltinID();
8325 llvm_unreachable("Unknown Builtin type");
8328 // If the replacement is valid, emit a note with replacement function.
8329 // Additionally, suggest including the proper header if not already included.
// In C++ (non-complex argument) the suggestion is std::abs from
// <cstdlib>/<cmath>; otherwise the builtin's own name/header are used.
// The header hint is suppressed when a suitable declaration is already
// visible in scope.
8330 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
8331 unsigned AbsKind, QualType ArgType) {
8332 bool EmitHeaderHint = true;
8333 const char *HeaderName = nullptr;
8334 const char *FunctionName = nullptr;
8335 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
8336 FunctionName = "std::abs";
8337 if (ArgType->isIntegralOrEnumerationType()) {
8338 HeaderName = "cstdlib";
8339 } else if (ArgType->isRealFloatingType()) {
8340 HeaderName = "cmath";
8342 llvm_unreachable("Invalid Type");
8345 // Lookup all std::abs
8346 if (NamespaceDecl *Std = S.getStdNamespace()) {
8347 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
8348 R.suppressDiagnostics();
8349 S.LookupQualifiedName(R, Std);
8351 for (const auto *I : R) {
8352 const FunctionDecl *FDecl = nullptr;
// std::abs may be visible directly or through a using-declaration.
8353 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
8354 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
8356 FDecl = dyn_cast<FunctionDecl>(I);
8361 // Found std::abs(), check that they are the right ones.
8362 if (FDecl->getNumParams() != 1)
8365 // Check that the parameter type can handle the argument.
8366 QualType ParamType = FDecl->getParamDecl(0)->getType();
8367 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
8368 S.Context.getTypeSize(ArgType) <=
8369 S.Context.getTypeSize(ParamType)) {
8370 // Found a function, don't need the header hint.
8371 EmitHeaderHint = false;
// Non-C++ (or complex) path: suggest the builtin's own library form.
8377 FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
8378 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);
8381 DeclarationName DN(&S.Context.Idents.get(FunctionName));
8382 LookupResult R(S, DN, Loc, Sema::LookupAnyName);
8383 R.suppressDiagnostics();
8384 S.LookupName(R, S.getCurScope());
8386 if (R.isSingleResult()) {
8387 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
// Only skip the header hint if the visible decl is the right builtin.
8388 if (FD && FD->getBuiltinID() == AbsKind) {
8389 EmitHeaderHint = false;
8393 } else if (!R.empty()) {
8399 S.Diag(Loc, diag::note_replace_abs_function)
8400 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);
8405 if (!EmitHeaderHint)
8408 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
// Returns whether FDecl is the function named Str declared directly in
// namespace std (e.g. std::abs, std::max). StrLen is deduced from the
// string literal, allowing an allocation-free identifier comparison.
8412 template <std::size_t StrLen>
8413 static bool IsStdFunction(const FunctionDecl *FDecl,
8414 const char (&Str)[StrLen]) {
8417 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
8419 if (!FDecl->isInStdNamespace())
8425 // Warn when using the wrong abs() function.
// Diagnoses: abs of an unsigned value (no-op), abs of a pointer/array/
// function (suspicious), an abs variant too small for its argument, and
// an abs variant of the wrong family (int/float/complex) — suggesting a
// replacement via emitReplacement where one exists.
8426 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
8427 const FunctionDecl *FDecl) {
8428 if (Call->getNumArgs() != 1)
8431 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
8432 bool IsStdAbs = IsStdFunction(FDecl, "abs");
// Not an abs-family builtin and not std::abs: nothing to check.
8433 if (AbsKind == 0 && !IsStdAbs)
8436 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
8437 QualType ParamType = Call->getArg(0)->getType();
8439 // Unsigned types cannot be negative. Suggest removing the absolute value
8441 if (ArgType->isUnsignedIntegerType()) {
8442 const char *FunctionName =
8443 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
8444 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
8445 Diag(Call->getExprLoc(), diag::note_remove_abs)
8447 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
8451 // Taking the absolute value of a pointer is very suspicious, they probably
8452 // wanted to index into an array, dereference a pointer, call a function, etc.
8453 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
8454 unsigned DiagType = 0;
8455 if (ArgType->isFunctionType())
8457 else if (ArgType->isArrayType())
8460 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
8464 // std::abs has overloads which prevent most of the absolute value problems
8469 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
8470 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
8472 // The argument and parameter are the same kind. Check if they are the right
8474 if (ArgValueKind == ParamValueKind) {
// Parameter wide enough for the argument: call is fine as written.
8475 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
8478 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
8479 Diag(Call->getExprLoc(), diag::warn_abs_too_small)
8480 << FDecl << ArgType << ParamType;
8482 if (NewAbsKind == 0)
8485 emitReplacement(*this, Call->getExprLoc(),
8486 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
8490 // ArgValueKind != ParamValueKind
8491 // The wrong type of absolute value function was used. Attempt to find the
8493 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
8494 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
8495 if (NewAbsKind == 0)
8498 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
8499 << FDecl << ParamValueKind << ArgValueKind;
8501 emitReplacement(*this, Call->getExprLoc(),
8502 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
8505 //===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
// Warns on std::max<unsigned-type>(x, 0) / std::max(0, x): the literal
// zero makes the call a no-op for unsigned types (and likely masks an
// intended signed comparison). Emits a fixit removing the call so that
// "std::max(0u, foo)" becomes "(foo)".
8506 void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
8507 const FunctionDecl *FDecl) {
8508 if (!Call || !FDecl) return;
8510 // Ignore template specializations and macros.
8511 if (inTemplateInstantiation()) return;
8512 if (Call->getExprLoc().isMacroID()) return;
8514 // Only care about the one template argument, two function parameter std::max
8515 if (Call->getNumArgs() != 2) return;
8516 if (!IsStdFunction(FDecl, "max")) return;
8517 const auto * ArgList = FDecl->getTemplateSpecializationArgs();
8518 if (!ArgList) return;
8519 if (ArgList->size() != 1) return;
8521 // Check that template type argument is unsigned integer.
8522 const auto& TA = ArgList->get(0);
8523 if (TA.getKind() != TemplateArgument::Type) return;
8524 QualType ArgType = TA.getAsType();
8525 if (!ArgType->isUnsignedIntegerType()) return;
8527 // See if either argument is a literal zero.
8528 auto IsLiteralZeroArg = [](const Expr* E) -> bool {
// std::max takes const T&, so literal arguments arrive wrapped in a
// MaterializeTemporaryExpr.
8529 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
8530 if (!MTE) return false;
8531 const auto *Num = dyn_cast<IntegerLiteral>(MTE->GetTemporaryExpr());
8532 if (!Num) return false;
8533 if (Num->getValue() != 0) return false;
8537 const Expr *FirstArg = Call->getArg(0);
8538 const Expr *SecondArg = Call->getArg(1);
8539 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
8540 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);
8542 // Only warn when exactly one argument is zero.
8543 if (IsFirstArgZero == IsSecondArgZero) return;
8545 SourceRange FirstRange = FirstArg->getSourceRange();
8546 SourceRange SecondRange = SecondArg->getSourceRange();
8548 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;
8550 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
8551 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;
8553 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
8554 SourceRange RemovalRange;
8555 if (IsFirstArgZero) {
// Remove from the zero through the comma (up to the char before arg 2).
8556 RemovalRange = SourceRange(FirstRange.getBegin(),
8557 SecondRange.getBegin().getLocWithOffset(-1));
// Otherwise remove from just past arg 1 through the trailing zero.
8559 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
8560 SecondRange.getEnd());
8563 Diag(Call->getExprLoc(), diag::note_remove_max_call)
8564 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
8565 << FixItHint::CreateRemoval(RemovalRange);
8568 //===--- CHECK: Standard memory functions ---------------------------------===//
8570 /// Takes the expression passed to the size_t parameter of functions
8571 /// such as memcmp, strncat, etc and warns if it's a comparison.
8573 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
/// Emits the warning plus two fixit notes: one moving the ')' so the
/// comparison applies to the call result, one casting to size_t to
/// silence the warning if the comparison was intended.
8574 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
8575 IdentifierInfo *FnName,
8576 SourceLocation FnLoc,
8577 SourceLocation RParenLoc) {
8578 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
8582 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
8583 if (!Size->isComparisonOp() && !Size->isLogicalOp())
8586 SourceRange SizeRange = Size->getSourceRange();
8587 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
8588 << SizeRange << FnName;
8589 S.Diag(FnLoc, diag::note_memsize_comparison_paren)
8590 << FnName << FixItHint::CreateInsertion(
8591 S.getLocForEndOfToken(Size->getLHS()->getLocEnd()), ")")
8592 << FixItHint::CreateRemoval(RParenLoc)
8593 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
8594 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
8595 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
8601 /// Determine whether the given type is or contains a dynamic class type
8602 /// (e.g., whether it has a vtable).
/// Returns the (possibly nested) dynamic class, setting \p IsContained
/// when it was found inside a field rather than being T itself.
8603 static const CXXRecordDecl *getContainedDynamicClass(QualType T,
8604 bool &IsContained) {
8605 // Look through array types while ignoring qualifiers.
8606 const Type *Ty = T->getBaseElementTypeUnsafe();
8607 IsContained = false;
8609 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
8610 RD = RD ? RD->getDefinition() : nullptr;
8611 if (!RD || RD->isInvalidDecl())
8614 if (RD->isDynamicClass())
8617 // Check all the fields. If any bases were dynamic, the class is dynamic.
8618 // It's impossible for a class to transitively contain itself by value, so
8619 // infinite recursion is impossible.
8620 for (auto *FD : RD->fields()) {
8622 if (const CXXRecordDecl *ContainedRD =
8623 getContainedDynamicClass(FD->getType(), SubContained)) {
// Returns E as a sizeof expression if it is one (the matching return and
// the null fallback are elided in this listing).
8632 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
8633 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E))
8634 if (Unary->getKind() == UETT_SizeOf)
8639 /// If E is a sizeof expression, returns its argument expression,
8640 /// otherwise returns NULL.
/// Only expression-form sizeof qualifies; sizeof(type) yields no Expr.
8641 static const Expr *getSizeOfExprArg(const Expr *E) {
8642 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
8643 if (!SizeOf->isArgumentType())
8644 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
8648 /// If E is a sizeof expression, returns its argument type.
/// Works for both sizeof(type) and sizeof(expr) via getTypeOfArgument;
/// the non-sizeof fallback is elided in this listing.
8649 static QualType getSizeOfArgType(const Expr *E) {
8650 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
8651 return SizeOf->getTypeOfArgument();
// Visitor that walks a C struct's fields and emits a note at each field
// whose default-initialization is non-trivial (e.g. ARC __strong/__weak
// pointers); used after warning about memset/bzero of such a struct.
8657 struct SearchNonTrivialToInitializeField
8658 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
8660 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;
8662 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}
// Route array types to visitArray so the note points at the element type.
8664 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
8665 SourceLocation SL) {
8666 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
8667 asDerived().visitArray(PDIK, AT, SL);
8671 Super::visitWithKind(PDIK, FT, SL);
8674 void visitARCStrong(QualType FT, SourceLocation SL) {
8675 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
8677 void visitARCWeak(QualType FT, SourceLocation SL) {
8678 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
8680 void visitStruct(QualType FT, SourceLocation SL) {
8681 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
8682 visit(FD->getType(), FD->getLocation());
8684 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
8685 const ArrayType *AT, SourceLocation SL) {
8686 visit(getContext().getBaseElementType(AT), SL);
8688 void visitTrivial(QualType FT, SourceLocation SL) {}
// Convenience entry point: visit every field of record type RT.
8690 static void diag(QualType RT, const Expr *E, Sema &S) {
8691 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
8694 ASTContext &getContext() { return S.getASTContext(); }
// Visitor that walks a C struct's fields and emits a note at each field
// that is non-trivial to copy (e.g. ARC __strong/__weak pointers); used
// after warning about memcpy/memmove of such a struct. Mirrors
// SearchNonTrivialToInitializeField but for the copy operation.
8700 struct SearchNonTrivialToCopyField
8701 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
8702 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;
8704 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}
// Route array types to visitArray so the note points at the element type.
8706 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
8707 SourceLocation SL) {
8708 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
8709 asDerived().visitArray(PCK, AT, SL);
8713 Super::visitWithKind(PCK, FT, SL);
8716 void visitARCStrong(QualType FT, SourceLocation SL) {
8717 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
8719 void visitARCWeak(QualType FT, SourceLocation SL) {
8720 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
8722 void visitStruct(QualType FT, SourceLocation SL) {
8723 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
8724 visit(FD->getType(), FD->getLocation());
8726 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
8727 SourceLocation SL) {
8728 visit(getContext().getBaseElementType(AT), SL);
8730 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
8731 SourceLocation SL) {}
8732 void visitTrivial(QualType FT, SourceLocation SL) {}
8733 void visitVolatileTrivial(QualType FT, SourceLocation SL) {}
// Convenience entry point: visit every field of record type RT.
8735 static void diag(QualType RT, const Expr *E, Sema &S) {
8736 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
8739 ASTContext &getContext() { return S.getASTContext(); }
8747 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
/// True for a sizeof expression, or (recursively) for a sum/product with
/// a sizeof on either side — e.g. `sizeof(T) * n` or `sizeof(a)+sizeof(b)`.
8748 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
8749 SizeofExpr = SizeofExpr->IgnoreParenImpCasts();
8751 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
8752 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
8755 return doesExprLikelyComputeSize(BO->getLHS()) ||
8756 doesExprLikelyComputeSize(BO->getRHS());
8759 return getAsSizeOfExpr(SizeofExpr) != nullptr;
8762 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
8770 /// This should return true for the first call to foo, but not for the second
8771 /// (regardless of whether foo is a macro or function).
// Compares the FileIDs of the two locations (one macro level up when the
// call itself is inside a macro expansion) — differing FileIDs mean the
// argument text came from a different expansion than the call.
8772 static bool isArgumentExpandedFromMacro(SourceManager &SM,
8773 SourceLocation CallLoc,
8774 SourceLocation ArgLoc) {
8775 if (!CallLoc.isMacroID())
8776 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);
8778 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
8779 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
8782 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
8783 /// last two arguments transposed.
/// Two patterns are flagged: a literal-zero length (likely swapped args,
/// or bzero of 0 bytes), and a sizeof-like *value* argument paired with a
/// non-sizeof length. Only memset and bzero calls are examined.
8784 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
8785 if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
// Length is the 3rd arg for memset, 2nd for bzero.
8788 const Expr *SizeArg =
8789 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();
8791 auto isLiteralZero = [](const Expr *E) {
8792 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0;
8795 // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
8796 SourceLocation CallLoc = Call->getRParenLoc();
8797 SourceManager &SM = S.getSourceManager();
// Skip when the zero came in via a macro argument — the macro's other
// expansions may pass nonzero sizes.
8798 if (isLiteralZero(SizeArg) &&
8799 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {
8801 SourceLocation DiagLoc = SizeArg->getExprLoc();
8803 // Some platforms #define bzero to __builtin_memset. See if this is the
8804 // case, and if so, emit a better diagnostic.
8805 if (BId == Builtin::BIbzero ||
8806 (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
8807 CallLoc, SM, S.getLangOpts()) == "bzero")) {
8808 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
8809 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
8810 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
8811 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
8812 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
8817 // If the second argument to a memset is a sizeof expression and the third
8818 // isn't, this is also likely an error. This should catch
8819 // 'memset(buf, sizeof(buf), 0xff)'.
8820 if (BId == Builtin::BImemset &&
8821 doesExprLikelyComputeSize(Call->getArg(1)) &&
8822 !doesExprLikelyComputeSize(Call->getArg(2))) {
8823 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
8824 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
8825 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
8830 /// Check for dangerous or invalid arguments to memset().
8832 /// This issues warnings on known problematic, dangerous or unspecified
8833 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
8836 /// \param Call The call expression to diagnose.
/// Checks performed on each pointer argument: length expression that is
/// itself a comparison, transposed memset args, sizeof(ptr) used as the
/// length, sizeof of a whole record passed with a pointer of that type,
/// memaccess into dynamic (vtable-bearing) classes, ARC-managed memory,
/// and C structs that are non-trivial to zero/copy.
8837 void Sema::CheckMemaccessArguments(const CallExpr *Call,
8839 IdentifierInfo *FnName) {
8842 // It is possible to have a non-standard definition of memset. Validate
8843 // we have enough arguments, and if not, abort further checking.
8844 unsigned ExpectedNumArgs =
8845 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
8846 if (Call->getNumArgs() < ExpectedNumArgs)
// LastArg: how many leading pointer args to inspect; LenArg: index of
// the size argument — both depend on the builtin's signature.
8849 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
8850 BId == Builtin::BIstrndup ? 1 : 2);
8852 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
8853 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();
8855 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
8856 Call->getLocStart(), Call->getRParenLoc()))
8859 // Catch cases like 'memset(buf, sizeof(buf), 0)'.
8860 CheckMemaccessSize(*this, BId, Call);
8862 // We have special checking when the length is a sizeof expression.
8863 QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
8864 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
8865 llvm::FoldingSetNodeID SizeOfArgID;
8867 // Although widely used, 'bzero' is not a standard function. Be more strict
8868 // with the argument types before allowing diagnostics and only allow the
8869 // form bzero(ptr, sizeof(...)).
8870 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
8871 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
8874 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
8875 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
8876 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();
8878 QualType DestTy = Dest->getType();
8880 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
8881 PointeeTy = DestPtrTy->getPointeeType();
8883 // Never warn about void type pointers. This can be used to suppress
8885 if (PointeeTy->isVoidType())
8888 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
8889 // actually comparing the expressions for equality. Because computing the
8890 // expression IDs can be expensive, we only do this if the diagnostic is
8893 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
8894 SizeOfArg->getExprLoc())) {
8895 // We only compute IDs for expressions if the warning is enabled, and
8896 // cache the sizeof arg's ID.
8897 if (SizeOfArgID == llvm::FoldingSetNodeID())
8898 SizeOfArg->Profile(SizeOfArgID, Context, true);
8899 llvm::FoldingSetNodeID DestID;
8900 Dest->Profile(DestID, Context, true);
8901 if (DestID == SizeOfArgID) {
8902 // TODO: For strncpy() and friends, this could suggest sizeof(dst)
8903 // over sizeof(src) as well.
8904 unsigned ActionIdx = 0; // Default is to suggest dereferencing.
8905 StringRef ReadableName = FnName->getName();
8907 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
8908 if (UnaryOp->getOpcode() == UO_AddrOf)
8909 ActionIdx = 1; // If its an address-of operator, just remove it.
8910 if (!PointeeTy->isIncompleteType() &&
8911 (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
8912 ActionIdx = 2; // If the pointee's size is sizeof(char),
8913 // suggest an explicit length.
8915 // If the function is defined as a builtin macro, do not show macro
8917 SourceLocation SL = SizeOfArg->getExprLoc();
8918 SourceRange DSR = Dest->getSourceRange();
8919 SourceRange SSR = SizeOfArg->getSourceRange();
8920 SourceManager &SM = getSourceManager();
// Point at the spelling locations when the call came from a macro
// argument so the diagnostic shows the user's code, not the macro.
8922 if (SM.isMacroArgExpansion(SL)) {
8923 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
8924 SL = SM.getSpellingLoc(SL);
8925 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
8926 SM.getSpellingLoc(DSR.getEnd()));
8927 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
8928 SM.getSpellingLoc(SSR.getEnd()));
8931 DiagRuntimeBehavior(SL, SizeOfArg,
8932 PDiag(diag::warn_sizeof_pointer_expr_memaccess)
8938 DiagRuntimeBehavior(SL, SizeOfArg,
8939 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
8947 // Also check for cases where the sizeof argument is the exact same
8948 // type as the memory argument, and where it points to a user-defined
8950 if (SizeOfArgTy != QualType()) {
8951 if (PointeeTy->isRecordType() &&
8952 Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
8953 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
8954 PDiag(diag::warn_sizeof_pointer_type_memaccess)
8955 << FnName << SizeOfArgTy << ArgIdx
8956 << PointeeTy << Dest->getSourceRange()
8957 << LenExpr->getSourceRange());
8961 } else if (DestTy->isArrayType()) {
8965 if (PointeeTy == QualType())
8968 // Always complain about dynamic classes.
8970 if (const CXXRecordDecl *ContainedRD =
8971 getContainedDynamicClass(PointeeTy, IsContained)) {
8973 unsigned OperationType = 0;
8974 // "overwritten" if we're warning about the destination for any call
8975 // but memcmp; otherwise a verb appropriate to the call.
8976 if (ArgIdx != 0 || BId == Builtin::BImemcmp) {
8977 if (BId == Builtin::BImemcpy)
8979 else if(BId == Builtin::BImemmove)
8981 else if (BId == Builtin::BImemcmp)
8985 DiagRuntimeBehavior(
8986 Dest->getExprLoc(), Dest,
8987 PDiag(diag::warn_dyn_class_memaccess)
8988 << (BId == Builtin::BImemcmp ? ArgIdx + 2 : ArgIdx)
8989 << FnName << IsContained << ContainedRD << OperationType
8990 << Call->getCallee()->getSourceRange());
8991 } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
8992 BId != Builtin::BImemset)
8993 DiagRuntimeBehavior(
8994 Dest->getExprLoc(), Dest,
8995 PDiag(diag::warn_arc_object_memaccess)
8996 << ArgIdx << FnName << PointeeTy
8997 << Call->getCallee()->getSourceRange());
8998 else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
// Non-trivial C structs: zeroing (memset/bzero) and copying
// (memcpy/memmove) get distinct diagnostics plus field notes.
8999 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
9000 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
9001 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
9002 PDiag(diag::warn_cstruct_memaccess)
9003 << ArgIdx << FnName << PointeeTy << 0);
9004 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
9005 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
9006 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
9007 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
9008 PDiag(diag::warn_cstruct_memaccess)
9009 << ArgIdx << FnName << PointeeTy << 1);
9010 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
// A (void*) cast on the argument silences the class/ARC warnings.
9017 DiagRuntimeBehavior(
9018 Dest->getExprLoc(), Dest,
9019 PDiag(diag::note_bad_memaccess_silence)
9020 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
9025 // A little helper routine: ignore addition and subtraction of integer literals.
9026 // This intentionally does not ignore all integer constant expressions because
9027 // we don't want to remove sizeof().
// Strips one level of `expr + literal` / `literal + expr` (and paren
// casts), returning the non-literal side; the recursive re-application
// and the fallthrough return are elided in this listing.
9028 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
9029 Ex = Ex->IgnoreParenCasts();
9032 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex);
9033 if (!BO || !BO->isAdditiveOp())
9036 const Expr *RHS = BO->getRHS()->IgnoreParenCasts();
9037 const Expr *LHS = BO->getLHS()->IgnoreParenCasts();
9039 if (isa<IntegerLiteral>(RHS))
9041 else if (isa<IntegerLiteral>(LHS))
// Returns whether Ty is a constant-size array of more than one element
// (or a VLA) — the precondition for suggesting a sizeof(dst)-style fixit
// in CheckStrlcpycatArguments below.
9050 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
9051 ASTContext &Context) {
9052 // Only handle constant-sized or VLAs, but not flexible members.
9053 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) {
9054 // Only issue the FIXIT for arrays of size > 1.
9055 if (CAT->getSize().getSExtValue() <= 1)
9057 } else if (!Ty->isVariableArrayType()) {
9063 // Warn if the user has made the 'size' argument to strlcpy or strlcat
9064 // be the size of the source, instead of the destination.
// Matches 'strlcpy(dst, x, sizeof(x))' and 'strlcpy(dst, x, strlen(x))'
// by comparing the DeclRefs of the source argument and the size
// argument's operand; suggests 'sizeof(dst)' when dst is a real array.
9065 void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
9066 IdentifierInfo *FnName) {
9068 // Don't crash if the user has the wrong number of arguments
9069 unsigned NumArgs = Call->getNumArgs();
9070 if ((NumArgs != 3) && (NumArgs != 4))
9073 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
9074 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context);
9075 const Expr *CompareWithSrc = nullptr;
9077 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName,
9078 Call->getLocStart(), Call->getRParenLoc()))
9081 // Look for 'strlcpy(dst, x, sizeof(x))'
9082 if (const Expr *Ex = getSizeOfExprArg(SizeArg))
9083 CompareWithSrc = Ex;
9085 // Look for 'strlcpy(dst, x, strlen(x))'
9086 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
9087 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
9088 SizeCall->getNumArgs() == 1)
9089 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
9093 if (!CompareWithSrc)
9096 // Determine if the argument to sizeof/strlen is equal to the source
9097 // argument. In principle there's all kinds of things you could do
9098 // here, for instance creating an == expression and evaluating it with
9099 // EvaluateAsBooleanCondition, but this uses a more direct technique:
9100 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg);
9104 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc);
9105 if (!CompareWithSrcDRE ||
9106 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
9109 const Expr *OriginalSizeArg = Call->getArg(2);
9110 Diag(CompareWithSrcDRE->getLocStart(), diag::warn_strlcpycat_wrong_size)
9111 << OriginalSizeArg->getSourceRange() << FnName;
9113 // Output a FIXIT hint if the destination is an array (rather than a
9114 // pointer to an array). This could be enhanced to handle some
9115 // pointers if we know the actual size, like if DstArg is 'array+2'
9116 // we could say 'sizeof(array)-2'.
9117 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
9118 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
9121 SmallString<128> sizeString;
9122 llvm::raw_svector_ostream OS(sizeString);
// Render "sizeof(<dst as written>)" for the replacement text.
9124 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
9127 Diag(OriginalSizeArg->getLocStart(), diag::note_strlcpycat_wrong_size)
9128 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
9132 /// Check if two expressions refer to the same declaration.
9133 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
9134 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
9135 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
9136 return D1->getDecl() == D2->getDecl();
9140 static const Expr *getStrlenExprArg(const Expr *E) {
9141 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
9142 const FunctionDecl *FD = CE->getDirectCallee();
9143 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
9145 return CE->getArg(0)->IgnoreParenCasts();
// Warn on anti-patterns as the 'size' argument to strncat.
// The correct size argument should look like following:
//   strncat(dst, src, sizeof(dst) - strlen(dest) - 1);
void Sema::CheckStrncatArguments(const CallExpr *CE,
                                 IdentifierInfo *FnName) {
  // Don't crash if the user has the wrong number of arguments.
  if (CE->getNumArgs() < 3)

  const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
  const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
  const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();

  // Skip if the size argument already triggered the sizeof-comparison
  // diagnostic elsewhere.
  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getLocStart(),
                                     CE->getRParenLoc()))

  // Identify common expressions, which are wrongly used as the size argument
  // to strncat and may lead to buffer overflows.
  unsigned PatternType = 0;
  if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
    // - sizeof(dst)
    if (referToTheSameDecl(SizeOfArg, DstArg))
    // - sizeof(src)
    else if (referToTheSameDecl(SizeOfArg, SrcArg))
  } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
    if (BE->getOpcode() == BO_Sub) {
      const Expr *L = BE->getLHS()->IgnoreParenCasts();
      const Expr *R = BE->getRHS()->IgnoreParenCasts();

      // - sizeof(dst) - strlen(dst)
      if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
          referToTheSameDecl(DstArg, getStrlenExprArg(R)))

      // - sizeof(src) - (anything)
      else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))

  if (PatternType == 0)

  // Generate the diagnostic.
  SourceLocation SL = LenArg->getLocStart();
  SourceRange SR = LenArg->getSourceRange();
  SourceManager &SM = getSourceManager();

  // If the function is defined as a builtin macro, do not show macro expansion.
  if (SM.isMacroArgExpansion(SL)) {
    SL = SM.getSpellingLoc(SL);
    SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
                     SM.getSpellingLoc(SR.getEnd()));

  // Check if the destination is an array (rather than a pointer to an array).
  QualType DstTy = DstArg->getType();
  bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
  if (!isKnownSizeArray) {
    // No FIXIT is possible without a known destination size; emit the plain
    // warning matching the detected pattern.
    if (PatternType == 1)
      Diag(SL, diag::warn_strncat_wrong_size) << SR;
    Diag(SL, diag::warn_strncat_src_size) << SR;

  if (PatternType == 1)
    Diag(SL, diag::warn_strncat_large_size) << SR;
  Diag(SL, diag::warn_strncat_src_size) << SR;

  // Build the suggested replacement text from the destination expression.
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());

  Diag(SL, diag::note_strncat_wrong_size)
      << FixItHint::CreateReplacement(SR, OS.str());
/// Checks for suspicious return values: returning null where the return is
/// declared non-null (via attribute or inferred type), and returning null
/// from an allocation operator that is not declared non-throwing.
Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                         SourceLocation ReturnLoc,
                         const AttrVec *Attrs,
                         const FunctionDecl *FD) {
  // Check if the return value is null but should not be.
  if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
       (!isObjCMethod && isNonNullType(Context, lhsType))) &&
      CheckNonNullExpr(*this, RetValExp))
    Diag(ReturnLoc, diag::warn_null_ret)
        << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();

  // C++11 [basic.stc.dynamic.allocation]p4:
  //   If an allocation function declared with a non-throwing
  //   exception-specification fails to allocate storage, it shall return
  //   a null pointer. Any other allocation function that fails to allocate
  //   storage shall indicate failure only by throwing an exception [...]
  // So only a nothrow operator new/new[] may legitimately return null.
  OverloadedOperatorKind Op = FD->getOverloadedOperator();
  if (Op == OO_New || Op == OO_Array_New) {
    const FunctionProtoType *Proto
      = FD->getType()->castAs<FunctionProtoType>();
    if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
        CheckNonNullExpr(*this, RetValExp))
      Diag(ReturnLoc, diag::warn_operator_new_returns_null)
          << FD << getLangOpts().CPlusPlus11;
9266 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
9268 /// Check for comparisons of floating point operands using != and ==.
9269 /// Issue a warning if these are no self-comparisons, as they are not likely
9270 /// to do what the programmer intended.
9271 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
9272 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
9273 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
9275 // Special case: check for x == x (which is OK).
9276 // Do not emit warnings for such cases.
9277 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
9278 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
9279 if (DRL->getDecl() == DRR->getDecl())
9282 // Special case: check for comparisons against literals that can be exactly
9283 // represented by APFloat. In such cases, do not emit a warning. This
9284 // is a heuristic: often comparison against such literals are used to
9285 // detect if a value in a variable has not changed. This clearly can
9286 // lead to false negatives.
9287 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
9291 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
9295 // Check for comparisons with builtin types.
9296 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
9297 if (CL->getBuiltinCallee())
9300 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
9301 if (CR->getBuiltinCallee())
9304 // Emit the diagnostic.
9305 Diag(Loc, diag::warn_floatingpoint_eq)
9306 << LHS->getSourceRange() << RHS->getSourceRange();
9309 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
9310 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
9314 /// Structure recording the 'active' range of an integer-valued
9317 /// The number of bits active in the int.
9320 /// True if the int is known not to have negative values.
9323 IntRange(unsigned Width, bool NonNegative)
9324 : Width(Width), NonNegative(NonNegative) {}
9326 /// Returns the range of the bool type.
9327 static IntRange forBoolType() {
9328 return IntRange(1, true);
9331 /// Returns the range of an opaque value of the given integral type.
9332 static IntRange forValueOfType(ASTContext &C, QualType T) {
9333 return forValueOfCanonicalType(C,
9334 T->getCanonicalTypeInternal().getTypePtr());
9337 /// Returns the range of an opaque value of a canonical integral type.
9338 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
9339 assert(T->isCanonicalUnqualified());
9341 if (const VectorType *VT = dyn_cast<VectorType>(T))
9342 T = VT->getElementType().getTypePtr();
9343 if (const ComplexType *CT = dyn_cast<ComplexType>(T))
9344 T = CT->getElementType().getTypePtr();
9345 if (const AtomicType *AT = dyn_cast<AtomicType>(T))
9346 T = AT->getValueType().getTypePtr();
9348 if (!C.getLangOpts().CPlusPlus) {
9349 // For enum types in C code, use the underlying datatype.
9350 if (const EnumType *ET = dyn_cast<EnumType>(T))
9351 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
9352 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) {
9353 // For enum types in C++, use the known bit width of the enumerators.
9354 EnumDecl *Enum = ET->getDecl();
9355 // In C++11, enums can have a fixed underlying type. Use this type to
9356 // compute the range.
9357 if (Enum->isFixed()) {
9358 return IntRange(C.getIntWidth(QualType(T, 0)),
9359 !ET->isSignedIntegerOrEnumerationType());
9362 unsigned NumPositive = Enum->getNumPositiveBits();
9363 unsigned NumNegative = Enum->getNumNegativeBits();
9365 if (NumNegative == 0)
9366 return IntRange(NumPositive, true/*NonNegative*/);
9368 return IntRange(std::max(NumPositive + 1, NumNegative),
9369 false/*NonNegative*/);
9372 const BuiltinType *BT = cast<BuiltinType>(T);
9373 assert(BT->isInteger());
9375 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
9378 /// Returns the "target" range of a canonical integral type, i.e.
9379 /// the range of values expressible in the type.
9381 /// This matches forValueOfCanonicalType except that enums have the
9382 /// full range of their type, not the range of their enumerators.
9383 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
9384 assert(T->isCanonicalUnqualified());
9386 if (const VectorType *VT = dyn_cast<VectorType>(T))
9387 T = VT->getElementType().getTypePtr();
9388 if (const ComplexType *CT = dyn_cast<ComplexType>(T))
9389 T = CT->getElementType().getTypePtr();
9390 if (const AtomicType *AT = dyn_cast<AtomicType>(T))
9391 T = AT->getValueType().getTypePtr();
9392 if (const EnumType *ET = dyn_cast<EnumType>(T))
9393 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
9395 const BuiltinType *BT = cast<BuiltinType>(T);
9396 assert(BT->isInteger());
9398 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
9401 /// Returns the supremum of two ranges: i.e. their conservative merge.
9402 static IntRange join(IntRange L, IntRange R) {
9403 return IntRange(std::max(L.Width, R.Width),
9404 L.NonNegative && R.NonNegative);
9407 /// Returns the infinum of two ranges: i.e. their aggressive merge.
9408 static IntRange meet(IntRange L, IntRange R) {
9409 return IntRange(std::min(L.Width, R.Width),
9410 L.NonNegative || R.NonNegative);
9416 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
9417 unsigned MaxWidth) {
9418 if (value.isSigned() && value.isNegative())
9419 return IntRange(value.getMinSignedBits(), false);
9421 if (value.getBitWidth() > MaxWidth)
9422 value = value.trunc(MaxWidth);
9424 // isNonNegative() just checks the sign bit without considering
9426 return IntRange(value.getActiveBits(), true);
9429 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
9430 unsigned MaxWidth) {
9432 return GetValueRange(C, result.getInt(), MaxWidth);
9434 if (result.isVector()) {
9435 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
9436 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
9437 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
9438 R = IntRange::join(R, El);
9443 if (result.isComplexInt()) {
9444 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
9445 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
9446 return IntRange::join(R, I);
9449 // This can happen with lossless casts to intptr_t of "based" lvalues.
9450 // Assume it might use arbitrary bits.
9451 // FIXME: The only reason we need to pass the type in here is to get
9452 // the sign right on this one case. It would be nice if APValue
9454 assert(result.isLValue() || result.isAddrLabelDiff());
9455 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
9458 static QualType GetExprType(const Expr *E) {
9459 QualType Ty = E->getType();
9460 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
9461 Ty = AtomicRHS->getValueType();
/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
///
/// \param MaxWidth - the width to which the value will be truncated
static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth) {
  E = E->IgnoreParens();

  // Try a full evaluation first.
  Expr::EvalResult result;
  if (E->EvaluateAsRValue(result, C))
    return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);

  // I think we only want to look through implicit casts here; if the
  // user has an explicit widening cast, we should treat the value as
  // being of the new, wider type.
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
    if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
      return GetExprRange(C, CE->getSubExpr(), MaxWidth);

    IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));

    bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
                         CE->getCastKind() == CK_BooleanToSignedIntegral;

    // Assume that non-integer casts can span the full range of the type.
      return OutputTypeRange;

      = GetExprRange(C, CE->getSubExpr(),
                     std::min(MaxWidth, OutputTypeRange.Width));

    // Bail out if the subexpr's range is as wide as the cast type.
    if (SubRange.Width >= OutputTypeRange.Width)
      return OutputTypeRange;

    // Otherwise, we take the smaller width, and we're non-negative if
    // either the output type or the subexpr is.
    return IntRange(SubRange.Width,
                    SubRange.NonNegative || OutputTypeRange.NonNegative);

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    // If we can fold the condition, just take that operand.
    if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
      return GetExprRange(C, CondResult ? CO->getTrueExpr()
                                        : CO->getFalseExpr(),

    // Otherwise, conservatively merge.
    IntRange L = GetExprRange(C, CO->getTrueExpr(), MaxWidth);
    IntRange R = GetExprRange(C, CO->getFalseExpr(), MaxWidth);
    return IntRange::join(L, R);

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    switch (BO->getOpcode()) {
      llvm_unreachable("builtin <=> should have class type");

    // Boolean-valued operations are single-bit and positive.
      return IntRange::forBoolType();

    // The type of the assignments is the type of the LHS, so the RHS
    // is not necessarily the same type.
      return IntRange::forValueOfType(C, GetExprType(E));

    // Simple assignments just pass through the RHS, which will have
    // been coerced to the LHS type.
      return GetExprRange(C, BO->getRHS(), MaxWidth);

    // Operations with opaque sources are black-listed.
      return IntRange::forValueOfType(C, GetExprType(E));

    // Bitwise-and uses the *infinum* of the two source ranges.
      return IntRange::meet(GetExprRange(C, BO->getLHS(), MaxWidth),
                            GetExprRange(C, BO->getRHS(), MaxWidth));

    // Left shift gets black-listed based on a judgement call.
      // ...except that we want to treat '1 << (blah)' as logically
      // positive.  It's an important idiom.
      if (IntegerLiteral *I
            = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
        if (I->getValue() == 1) {
          IntRange R = IntRange::forValueOfType(C, GetExprType(E));
          return IntRange(R.Width, /*NonNegative*/ true);

      return IntRange::forValueOfType(C, GetExprType(E));

    // Right shift by a constant can narrow its left argument.
    case BO_ShrAssign: {
      IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);

      // If the shift amount is a positive constant, drop the width by
      // that amount.
      if (BO->getRHS()->isIntegerConstantExpr(shift, C) &&
          shift.isNonNegative()) {
        unsigned zext = shift.getZExtValue();
        if (zext >= L.Width)
          L.Width = (L.NonNegative ? 0 : 1);

    // Comma acts as its right operand.
      return GetExprRange(C, BO->getRHS(), MaxWidth);

    // Black-list pointer subtractions.
      if (BO->getLHS()->getType()->isPointerType())
        return IntRange::forValueOfType(C, GetExprType(E));

    // The width of a division result is mostly determined by the size
    // of the LHS.
      // Don't 'pre-truncate' the operands.
      unsigned opWidth = C.getIntWidth(GetExprType(E));
      IntRange L = GetExprRange(C, BO->getLHS(), opWidth);

      // If the divisor is constant, use that.
      llvm::APSInt divisor;
      if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) {
        unsigned log2 = divisor.logBase2(); // floor(log_2(divisor))
        if (log2 >= L.Width)
          L.Width = (L.NonNegative ? 0 : 1);
          L.Width = std::min(L.Width - log2, MaxWidth);

      // Otherwise, just use the LHS's width.
      IntRange R = GetExprRange(C, BO->getRHS(), opWidth);
      return IntRange(L.Width, L.NonNegative && R.NonNegative);

    // The result of a remainder can't be larger than the result of
    // either side.
      // Don't 'pre-truncate' the operands.
      unsigned opWidth = C.getIntWidth(GetExprType(E));
      IntRange L = GetExprRange(C, BO->getLHS(), opWidth);
      IntRange R = GetExprRange(C, BO->getRHS(), opWidth);

      IntRange meet = IntRange::meet(L, R);
      meet.Width = std::min(meet.Width, MaxWidth);

    // The default behavior is okay for these.

    // The default case is to treat the operation as if it were closed
    // on the narrowest type that encompasses both operands.
    IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
    IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth);
    return IntRange::join(L, R);

  if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
    switch (UO->getOpcode()) {
    // Boolean-valued operations are white-listed.
      return IntRange::forBoolType();

    // Operations with opaque sources are black-listed.
    case UO_AddrOf: // should be impossible
      return IntRange::forValueOfType(C, GetExprType(E));

      return GetExprRange(C, UO->getSubExpr(), MaxWidth);

  // Opaque values: recurse into the expression they were built from.
  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return GetExprRange(C, OVE->getSourceExpr(), MaxWidth);

  // A bit-field's range is its declared width and signedness.
  if (const auto *BitField = E->getSourceBitField())
    return IntRange(BitField->getBitWidthValue(C),
        BitField->getType()->isUnsignedIntegerOrEnumerationType());

  // Fallback: assume the expression may take any value of its type.
  return IntRange::forValueOfType(C, GetExprType(E));
9689 static IntRange GetExprRange(ASTContext &C, const Expr *E) {
9690 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)));
9693 /// Checks whether the given value, which currently has the given
9694 /// source semantics, has the same value when coerced through the
9695 /// target semantics.
9696 static bool IsSameFloatAfterCast(const llvm::APFloat &value,
9697 const llvm::fltSemantics &Src,
9698 const llvm::fltSemantics &Tgt) {
9699 llvm::APFloat truncated = value;
9702 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
9703 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);
9705 return truncated.bitwiseIsEqual(value);
9708 /// Checks whether the given value, which currently has the given
9709 /// source semantics, has the same value when coerced through the
9710 /// target semantics.
9712 /// The value might be a vector of floats (or a complex number).
9713 static bool IsSameFloatAfterCast(const APValue &value,
9714 const llvm::fltSemantics &Src,
9715 const llvm::fltSemantics &Tgt) {
9716 if (value.isFloat())
9717 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);
9719 if (value.isVector()) {
9720 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
9721 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
9726 assert(value.isComplexFloat());
9727 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
9728 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
9731 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC);
9733 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
9734 // Suppress cases where we are comparing against an enum constant.
9735 if (const DeclRefExpr *DR =
9736 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
9737 if (isa<EnumConstantDecl>(DR->getDecl()))
9740 // Suppress cases where the '0' value is expanded from a macro.
9741 if (E->getLocStart().isMacroID())
9747 static bool isKnownToHaveUnsignedValue(Expr *E) {
9748 return E->getType()->isIntegerType() &&
9749 (!E->getType()->isSignedIntegerType() ||
9750 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
/// The promoted range of values of a type. In general this has the
/// following structure:
///
///     |-----------| . . . |-----------|
///     ^           ^       ^           ^
///     Min       HoleMin  HoleMax      Max
///
/// ... where there is only a hole if a signed type is promoted to unsigned
/// (in which case Min and Max are the smallest and largest representable
/// values).
struct PromotedRange {
  // Min, or HoleMax if there is a hole.
  llvm::APSInt PromotedMin;
  // Max, or HoleMin if there is a hole.
  llvm::APSInt PromotedMax;

  // Build the range of values a quantity with range R takes after promotion
  // to a type of the given bit width and signedness.
  PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
      PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
    else if (R.Width >= BitWidth && !Unsigned) {
      // Promotion made the type *narrower*. This happens when promoting
      // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
      // Treat all values of 'signed int' as being in range for now.
      PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned);
      PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned);
      PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMin.setIsUnsigned(Unsigned);

      PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMax.setIsUnsigned(Unsigned);

  // Determine whether this range is contiguous (has no hole).
  bool isContiguous() const { return PromotedMin <= PromotedMax; }

  // Where a constant value is within the range.
  enum ComparisonResult {
    Less = LE | LT | NE,
    Min = LE | InRangeFlag,
    InRange = InRangeFlag,
    Max = GE | InRangeFlag,
    Greater = GE | GT | NE,
    OnlyValue = LE | GE | EQ | InRangeFlag,

  // Classify Value relative to this range. Value must already have the same
  // width and signedness as the promoted bounds (asserted below).
  ComparisonResult compare(const llvm::APSInt &Value) const {
    assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&
           Value.isUnsigned() == PromotedMin.isUnsigned());
    if (!isContiguous()) {
      assert(Value.isUnsigned() && "discontiguous range for signed compare");
      if (Value.isMinValue()) return Min;
      if (Value.isMaxValue()) return Max;
      if (Value >= PromotedMin) return InRange;
      if (Value <= PromotedMax) return InRange;

    switch (llvm::APSInt::compareValues(Value, PromotedMin)) {
    case -1: return Less;
    case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
      switch (llvm::APSInt::compareValues(Value, PromotedMax)) {
      case -1: return InRange;
      case 1: return Greater;

    llvm_unreachable("impossible compare result");

  // If comparing with Op against a constant classified as R always folds to
  // the same value, return that value's spelling for the diagnostic.
  static llvm::Optional<StringRef>
  constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
    // The LT/GT flags describe the constant's side; flip them when the
    // constant is on the RHS so they describe the comparison's result.
    ComparisonResult LTFlag = LT, GTFlag = GT;
    if (ConstantOnRHS) std::swap(LTFlag, GTFlag);

      if (R & EQ) return StringRef("'std::strong_ordering::equal'");
      if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
      if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");

    ComparisonResult TrueFlag, FalseFlag;
    } else if (Op == BO_NE) {
      if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
      if (Op == BO_GE || Op == BO_LE)
        std::swap(TrueFlag, FalseFlag);
      return StringRef("true");
      return StringRef("false");
9878 static bool HasEnumType(Expr *E) {
9879 // Strip off implicit integral promotions.
9880 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
9881 if (ICE->getCastKind() != CK_IntegralCast &&
9882 ICE->getCastKind() != CK_NoOp)
9884 E = ICE->getSubExpr();
9887 return E->getType()->isEnumeralType();
9890 static int classifyConstantValue(Expr *Constant) {
9891 // The values of this enumeration are used in the diagnostics
9892 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
9893 enum ConstantValueKind {
9898 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
9899 return BL->getValue() ? ConstantValueKind::LiteralTrue
9900 : ConstantValueKind::LiteralFalse;
9901 return ConstantValueKind::Miscellaneous;
// Diagnose comparisons whose result is fixed by the ranges of the operand
// types ('Constant' is the constant operand with value 'Value'; 'Other' is
// the non-constant side). Judging by the caller (AnalyzeComparison), the
// return value indicates whether a diagnostic was handled here.
static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
  // Don't warn inside template instantiations; the pattern was already
  // analyzed as written.
  if (S.inTemplateInstantiation())

  // Keep the unstripped expression: the unsigned-always-true diagnostics
  // below classify the operand as the user wrote it.
  Expr *OriginalOther = Other;

  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();

  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  //  - If the constant is outside the range of representable values of
  //    the enumeration. In such a case, we should warn about the cast
  //    to enumeration type, not about the comparison.
  //  - If the constant is the maximum / minimum in-range value. For an
  //    enumeratin type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))

  // TODO: Investigate using GetExprRange() to get tighter bounds
  // on the bit ranges.
  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType)
    OtherRange = IntRange::forBoolType();

  // Determine the promoted range of the other type and see if a comparison of
  // the constant against that range is tautological.
  PromotedRange OtherPromotedRange(OtherRange, Value.getBitWidth(),
                                   Value.isUnsigned());
  auto Cmp = OtherPromotedRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator. We don't want to diagnose
  //
  //   some_long_value <= INT_MAX
  //
  // when sizeof(int) == sizeof(long).
  bool InRange = Cmp & PromotedRange::InRangeFlag;
  if (InRange && IsEnumConstOrFromMacro(S, Constant))

  // If this is a comparison to an enum constant, include that
  // constant in the diagnostic.
  const EnumConstantDecl *ED = nullptr;
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
    ED = dyn_cast<EnumConstantDecl>(DR->getDecl());

  // Should be enough for uint128 (39 decimal digits)
  SmallString<64> PrettySourceValue;
  llvm::raw_svector_ostream OS(PrettySourceValue);
    OS << '\'' << *ED << "' (" << Value << ")";

  // FIXME: We use a somewhat different formatting for the in-range cases and
  // cases involving boolean values for historical reasons. We should pick a
  // consistent way of presenting these diagnostics.
  if (!InRange || Other->isKnownToHaveBooleanValue()) {
    S.DiagRuntimeBehavior(
        E->getOperatorLoc(), E,
        S.PDiag(!InRange ? diag::warn_out_of_range_compare
                         : diag::warn_tautological_bool_compare)
            << OS.str() << classifyConstantValue(Constant)
            << OtherT << OtherIsBooleanDespiteType << *Result
            << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
    // In-range, non-boolean case: pick the diagnostic based on whether the
    // non-constant operand is known unsigned and compared against zero.
    unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
                        ? (HasEnumType(OriginalOther)
                               ? diag::warn_unsigned_enum_always_true_comparison
                               : diag::warn_unsigned_always_true_comparison)
                        : diag::warn_tautological_constant_compare;

    S.Diag(E->getOperatorLoc(), Diag)
        << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
10000 /// Analyze the operands of the given comparison. Implements the
10001 /// fallback case from AnalyzeComparison.
10002 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
10003 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
10004 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
10007 /// Implements -Wsign-compare.
10009 /// \param E the binary operator to check for warnings
10010 static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
10011 // The type the comparison is being performed in.
10012 QualType T = E->getLHS()->getType();
10014 // Only analyze comparison operators where both sides have been converted to
10016 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()))
10017 return AnalyzeImpConvsInComparison(S, E);
10019 // Don't analyze value-dependent comparisons directly.
10020 if (E->isValueDependent())
10021 return AnalyzeImpConvsInComparison(S, E);
10023 Expr *LHS = E->getLHS();
10024 Expr *RHS = E->getRHS();
10026 if (T->isIntegralType(S.Context)) {
10027 llvm::APSInt RHSValue;
10028 llvm::APSInt LHSValue;
10030 bool IsRHSIntegralLiteral = RHS->isIntegerConstantExpr(RHSValue, S.Context);
10031 bool IsLHSIntegralLiteral = LHS->isIntegerConstantExpr(LHSValue, S.Context);
10033 // We don't care about expressions whose result is a constant.
10034 if (IsRHSIntegralLiteral && IsLHSIntegralLiteral)
10035 return AnalyzeImpConvsInComparison(S, E);
10037 // We only care about expressions where just one side is literal
10038 if (IsRHSIntegralLiteral ^ IsLHSIntegralLiteral) {
10039 // Is the constant on the RHS or LHS?
10040 const bool RhsConstant = IsRHSIntegralLiteral;
10041 Expr *Const = RhsConstant ? RHS : LHS;
10042 Expr *Other = RhsConstant ? LHS : RHS;
10043 const llvm::APSInt &Value = RhsConstant ? RHSValue : LHSValue;
10045 // Check whether an integer constant comparison results in a value
10046 // of 'true' or 'false'.
10047 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant))
10048 return AnalyzeImpConvsInComparison(S, E);
10052 if (!T->hasUnsignedIntegerRepresentation()) {
10053 // We don't do anything special if this isn't an unsigned integral
10054 // comparison: we're only interested in integral comparisons, and
10055 // signed comparisons only happen in cases we don't care to warn about.
10056 return AnalyzeImpConvsInComparison(S, E);
10059 LHS = LHS->IgnoreParenImpCasts();
10060 RHS = RHS->IgnoreParenImpCasts();
10062 if (!S.getLangOpts().CPlusPlus) {
10063 // Avoid warning about comparison of integers with different signs when
10064 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
10065 // the type of `E`.
10066 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
10067 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
10068 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
10069 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
10072 // Check to see if one of the (unmodified) operands is of different
10074 Expr *signedOperand, *unsignedOperand;
10075 if (LHS->getType()->hasSignedIntegerRepresentation()) {
10076 assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
10077 "unsigned comparison between two signed integer expressions?");
10078 signedOperand = LHS;
10079 unsignedOperand = RHS;
10080 } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
10081 signedOperand = RHS;
10082 unsignedOperand = LHS;
10084 return AnalyzeImpConvsInComparison(S, E);
10087 // Otherwise, calculate the effective range of the signed operand.
10088 IntRange signedRange = GetExprRange(S.Context, signedOperand);
10090 // Go ahead and analyze implicit conversions in the operands. Note
10091 // that we skip the implicit conversions on both sides.
10092 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
10093 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
10095 // If the signed range is non-negative, -Wsign-compare won't fire.
10096 if (signedRange.NonNegative)
10099 // For (in)equality comparisons, if the unsigned operand is a
10100 // constant which cannot collide with a overflowed signed operand,
10101 // then reinterpreting the signed operand as unsigned will not
10102 // change the result of the comparison.
10103 if (E->isEqualityOp()) {
10104 unsigned comparisonWidth = S.Context.getIntWidth(T);
10105 IntRange unsignedRange = GetExprRange(S.Context, unsignedOperand);
10107 // We should never be unable to prove that the unsigned operand is
10109 assert(unsignedRange.NonNegative && "unsigned range includes negative?");
10111 if (unsignedRange.Width < comparisonWidth)
10115 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
10116 S.PDiag(diag::warn_mixed_sign_comparison)
10117 << LHS->getType() << RHS->getType()
10118 << LHS->getSourceRange() << RHS->getSourceRange());
10121 /// Analyzes an attempt to assign the given value to a bitfield.
10123 /// Returns true if there was something fishy about the attempt.
// "Fishy" covers two distinct classes of problems, handled in order below:
// enum-signedness surprises (bitfield too narrow or of opposite signedness
// for an enum-typed RHS) and constant initializers that do not survive
// truncation to the bitfield's declared width.
10124 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
10125 SourceLocation InitLoc) {
10126 assert(Bitfield->isBitField());
// An invalid declaration has already been diagnosed; don't pile on.
10127 if (Bitfield->isInvalidDecl())
10130 // White-list bool bitfields.
10131 QualType BitfieldType = Bitfield->getType();
10132 if (BitfieldType->isBooleanType())
10135 if (BitfieldType->isEnumeralType()) {
10136 EnumDecl *BitfieldEnumDecl = BitfieldType->getAs<EnumType>()->getDecl();
10137 // If the underlying enum type was not explicitly specified as an unsigned
10138 // type and the enum contain only positive values, MSVC++ will cause an
10139 // inconsistency by storing this as a signed type.
10140 if (S.getLangOpts().CPlusPlus11 &&
10141 !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
10142 BitfieldEnumDecl->getNumPositiveBits() > 0 &&
10143 BitfieldEnumDecl->getNumNegativeBits() == 0) {
10144 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
10145 << BitfieldEnumDecl->getNameAsString();
// NOTE(review): bool is already white-listed above (10131); this second
// check looks redundant — confirm against the unelided source.
10149 if (Bitfield->getType()->isBooleanType())
10152 // Ignore value- or type-dependent expressions.
10153 if (Bitfield->getBitWidth()->isValueDependent() ||
10154 Bitfield->getBitWidth()->isTypeDependent() ||
10155 Init->isValueDependent() ||
10156 Init->isTypeDependent())
10159 Expr *OriginalInit = Init->IgnoreParenImpCasts()_;
10160 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
// Try to fold the initializer to a constant; the non-constant path below
// only analyzes enum-typed right-hand sides.
10162 llvm::APSInt Value;
10163 if (!OriginalInit->EvaluateAsInt(Value, S.Context,
10164 Expr::SE_AllowSideEffects)) {
10165 // The RHS is not constant. If the RHS has an enum type, make sure the
10166 // bitfield is wide enough to hold all the values of the enum without
10168 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
10169 EnumDecl *ED = EnumTy->getDecl();
10170 bool SignedBitfield = BitfieldType->isSignedIntegerType();
10172 // Enum types are implicitly signed on Windows, so check if there are any
10173 // negative enumerators to see if the enum was intended to be signed or
10175 bool SignedEnum = ED->getNumNegativeBits() > 0;
10177 // Check for surprising sign changes when assigning enum values to a
10178 // bitfield of different signedness. If the bitfield is signed and we
10179 // have exactly the right number of bits to store this unsigned enum,
10180 // suggest changing the enum to an unsigned type. This typically happens
10181 // on Windows where unfixed enums always use an underlying type of 'int'.
10182 unsigned DiagID = 0;
10183 if (SignedEnum && !SignedBitfield) {
10184 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
10185 } else if (SignedBitfield && !SignedEnum &&
10186 ED->getNumPositiveBits() == FieldWidth) {
10187 DiagID = diag::warn_signed_bitfield_enum_conversion;
10191 S.Diag(InitLoc, DiagID) << Bitfield << ED;
// Point the note at the bitfield's declared type, with a source range
// when type-location info is available.
10192 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
10193 SourceRange TypeRange =
10194 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
10195 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
10196 << SignedEnum << TypeRange;
10199 // Compute the required bitwidth. If the enum has negative values, we need
10200 // one more bit than the normal number of positive bits to represent the
10202 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1,
10203 ED->getNumNegativeBits())
10204 : ED->getNumPositiveBits();
10206 // Check the bitwidth.
10207 if (BitsNeeded > FieldWidth) {
10208 Expr *WidthExpr = Bitfield->getBitWidth();
10209 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
10211 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
10212 << BitsNeeded << ED << WidthExpr->getSourceRange();
10219 unsigned OriginalWidth = Value.getBitWidth();
// A negated or complemented literal (e.g. -1, ~0) is conventionally used
// to set all bits; measure its width as a signed quantity so such idioms
// are not flagged as truncating.
10221 if (!Value.isSigned() || Value.isNegative())
10222 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
10223 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
10224 OriginalWidth = Value.getMinSignedBits();
10226 if (OriginalWidth <= FieldWidth)
10229 // Compute the value which the bitfield will contain.
10230 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
10231 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());
10233 // Check whether the stored value is equal to the original value.
10234 TruncatedValue = TruncatedValue.extend(OriginalWidth);
10235 if (llvm::APSInt::isSameValue(Value, TruncatedValue))
10238 // Special-case bitfields of width 1: booleans are naturally 0/1, and
10239 // therefore don't strictly fit into a signed bitfield of width 1.
10240 if (FieldWidth == 1 && Value == 1)
10243 std::string PrettyValue = Value.toString(10);
10244 std::string PrettyTrunc = TruncatedValue.toString(10);
10246 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant)
10247 << PrettyValue << PrettyTrunc << OriginalInit->getType()
10248 << Init->getSourceRange();
10253 /// Analyze the given simple or compound assignment for warning-worthy
10255 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
10256 // Just recurse on the LHS.
10257 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc())_;
10259 // We want to recurse on the RHS as normal unless we're assigning to
10261 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
10262 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
10263 E->getOperatorLoc())) {
10264 // Recurse, ignoring any implicit conversions on the RHS.
// The bitfield store was already diagnosed above; stripping the
// outermost implicit casts here avoids warning about the same
// conversion twice.
10265 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
10266 E->getOperatorLoc());
// Not a (diagnosed) bitfield assignment: analyze the RHS as usual.
10270 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
10273 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
/// \param SourceType the type the diagnostic reports as the source (may
///        differ from E's type, e.g. for enum constants in C).
/// \param diag the diagnostic ID to emit.
/// \param pruneControlFlow when true, route through DiagRuntimeBehavior so
///        the warning can be suppressed on provably-unreachable paths
///        (used during template instantiation).
10274 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
10275 SourceLocation CContext, unsigned diag,
10276 bool pruneControlFlow = false) {
10277 if (pruneControlFlow) {
10278 S.DiagRuntimeBehavior(E->getExprLoc(), E,
10280 << SourceType << T << E->getSourceRange()
10281 << SourceRange(CContext));
10284 S.Diag(E->getExprLoc(), diag)
10285 << SourceType << T << E->getSourceRange() << SourceRange(CContext);
10288 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
/// Convenience overload: the reported source type is taken from the
/// expression itself.
10289 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
10290 SourceLocation CContext,
10291 unsigned diag, bool pruneControlFlow = false) {
10292 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
10295 /// Analyze the given compound assignment for the possible losing of
10296 /// floating-point precision.
10297 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
10298 assert(isa<CompoundAssignOperator>(E) &&
10299 "Must be compound assignment operation");
10300 // Recurse on the LHS and RHS in here
10301 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
10302 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
10304 // Now check the outermost expression
// For a compound assignment like `f += d`, the computation happens in the
// computation-result type and is then converted back to the LHS type;
// compare those two types for a drop in floating-point rank.
10305 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
10306 const auto *RBT = cast<CompoundAssignOperator>(E)
10307 ->getComputationResultType()
10308 ->getAs<BuiltinType>();
10310 // If both source and target are floating points.
10311 if (ResultBT && ResultBT->isFloatingPoint() && RBT && RBT->isFloatingPoint())
10312 // Builtin FP kinds are ordered by increasing FP rank.
10313 if (ResultBT->getKind() < RBT->getKind())
10314 // We don't want to warn for system macro.
10315 if (!S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
10316 // warn about dropping FP rank.
10317 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(),
10318 E->getOperatorLoc(),
10319 diag::warn_impcast_float_result_precision);
10322 /// Diagnose an implicit cast from a floating point value to an integer value.
/// Chooses among several diagnostics depending on whether the source is a
/// literal, whether the value is exactly representable in T, whether the
/// conversion is out of range (UB for non-bool targets), and whether a
/// non-zero value truncates to zero.
10323 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
10324 SourceLocation CContext) {
10325 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
// Inside a template instantiation, prune diagnostics on unreachable paths.
10326 const bool PruneWarnings = S.inTemplateInstantiation();
10328 Expr *InnerE = E->IgnoreParenImpCasts();
10329 // We also want to warn on, e.g., "int i = -1.234"
10330 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
10331 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
10332 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();
10334 const bool IsLiteral =
10335 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);
// Try to fold the expression to a constant float; the non-constant path
// falls back to the generic float-to-integer warning.
10337 llvm::APFloat Value(0.0);
10339 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
10341 return DiagnoseImpCast(S, E, T, CContext,
10342 diag::warn_impcast_float_integer, PruneWarnings);
10345 bool isExact = false;
// Convert toward zero (C truncation semantics); isExact tells us whether
// any fractional part or range was lost.
10347 llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
10348 T->hasUnsignedIntegerRepresentation());
10349 llvm::APFloat::opStatus Result = Value.convertToInteger(
10350 IntegerValue, llvm::APFloat::rmTowardZero, &isExact);
10352 if (Result == llvm::APFloat::opOK && isExact) {
10353 if (IsLiteral) return;
10354 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
10358 // Conversion of a floating-point value to a non-bool integer where the
10359 // integral part cannot be represented by the integer type is undefined.
10360 if (!IsBool && Result == llvm::APFloat::opInvalidOp)
10361 return DiagnoseImpCast(
10363 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
10364 : diag::warn_impcast_float_to_integer_out_of_range,
10367 unsigned DiagID = 0;
10369 // Warn on floating point literal to integer.
10370 DiagID = diag::warn_impcast_literal_float_to_integer;
10371 } else if (IntegerValue == 0) {
10372 if (Value.isZero()) { // Skip -0.0 to 0 conversion.
10373 return DiagnoseImpCast(S, E, T, CContext,
10374 diag::warn_impcast_float_integer, PruneWarnings);
10376 // Warn on non-zero to zero conversion.
10377 DiagID = diag::warn_impcast_float_to_integer_zero;
// Saturation check: convertToInteger clamps out-of-range values to the
// extreme of the target type, so a non-extreme result means only the
// fractional part was lost — use the milder generic warning.
10379 if (IntegerValue.isUnsigned()) {
10380 if (!IntegerValue.isMaxValue()) {
10381 return DiagnoseImpCast(S, E, T, CContext,
10382 diag::warn_impcast_float_integer, PruneWarnings);
10384 } else { // IntegerValue.isSigned()
10385 if (!IntegerValue.isMaxSignedValue() &&
10386 !IntegerValue.isMinSignedValue()) {
10387 return DiagnoseImpCast(S, E, T, CContext,
10388 diag::warn_impcast_float_integer, PruneWarnings);
10391 // Warn on evaluatable floating point expression to integer conversion.
10392 DiagID = diag::warn_impcast_float_to_integer;
10395 // FIXME: Force the precision of the source value down so we don't print
10396 // digits which are usually useless (we don't really care here if we
10397 // truncate a digit by accident in edge cases). Ideally, APFloat::toString
10398 // would automatically print the shortest representation, but it's a bit
10399 // tricky to implement.
10400 SmallString<16> PrettySourceValue;
10401 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
// 59/196 approximates log10(2): converts the bit precision of the
// format into an equivalent number of decimal digits.
10402 precision = (precision * 59 + 195) / 196;
10403 Value.toString(PrettySourceValue, precision);
10405 SmallString<16> PrettyTargetValue;
10407 PrettyTargetValue = Value.isZero() ? "false" : "true";
10409 IntegerValue.toString(PrettyTargetValue);
10411 if (PruneWarnings) {
10412 S.DiagRuntimeBehavior(E->getExprLoc(), E,
10414 << E->getType() << T.getUnqualifiedType()
10415 << PrettySourceValue << PrettyTargetValue
10416 << E->getSourceRange() << SourceRange(CContext));
10418 S.Diag(E->getExprLoc(), DiagID)
10419 << E->getType() << T.getUnqualifiedType() << PrettySourceValue
10420 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
// Render Value as it would appear after being truncated to the given
// range's width and signedness — i.e. what the target type will actually
// store. A zero-width range can only hold 0.
10424 static std::string PrettyPrintInRange(const llvm::APSInt &Value,
10426 if (!Range.Width) return "0";
10428 llvm::APSInt ValueInRange = Value;
10429 ValueInRange.setIsSigned(!Range.NonNegative);
10430 ValueInRange = ValueInRange.trunc(Range.Width);
10431 return ValueInRange.toString(10);
// Returns true if Ex is an implicit conversion between bool and a
// floating-point builtin type. ToBool selects the direction: true checks
// float -> bool, false checks bool -> float.
10434 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
10435 if (!isa<ImplicitCastExpr>(Ex))
10438 Expr *InnerE = Ex->IgnoreParenImpCasts();
10439 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
10440 const Type *Source =
10441 S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
10442 if (Target->isDependentType())
10445 const BuiltinType *FloatCandidateBT =
10446 dyn_cast<BuiltinType>(ToBool ? Source : Target);
10447 const Type *BoolCandidateType = ToBool ? Target : Source;
10449 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
10450 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
// Scan a call's arguments for float -> bool implicit conversions, which
// often indicate swapped or misplaced arguments (the IsSwapped probe checks
// whether an adjacent argument converts the opposite way, bool -> float).
10453 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
10454 SourceLocation CC) {
10455 unsigned NumArgs = TheCall->getNumArgs();
10456 for (unsigned i = 0; i < NumArgs; ++i) {
10457 Expr *CurrA = TheCall->getArg(i);
10458 if (!IsImplicitBoolFloatConversion(S, CurrA, true))
// Does a neighbouring argument convert bool -> float? That pattern
// suggests the two arguments were swapped at the call site.
10461 bool IsSwapped = ((i > 0) &&
10462 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
10463 IsSwapped |= ((i < (NumArgs - 1)) &&
10464 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
10466 // Warn on this floating-point to bool conversion.
10467 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
10468 CurrA->getType(), CC,
10469 diag::warn_impcast_floating_point_to_bool);
// Warn when NULL (GNU __null) or nullptr is implicitly converted to a
// non-pointer scalar type, and offer a fix-it replacing it with a zero
// literal of the appropriate kind.
10474 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
10475 SourceLocation CC) {
// Fast path: skip all the work if the diagnostic is disabled here.
10476 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
10480 // Don't warn on functions which have return type nullptr_t.
10481 if (isa<CallExpr>(E))
10484 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
10485 const Expr::NullPointerConstantKind NullKind =
10486 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull);
10487 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr)
10490 // Return if target type is a safe conversion.
10491 if (T->isAnyPointerType() || T->isBlockPointerType() ||
10492 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
10495 SourceLocation Loc = E->getSourceRange().getBegin();
10497 // Venture through the macro stacks to get to the source of macro arguments.
10498 // The new location is a better location than the complete location that was
10500 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
10501 CC = S.SourceMgr.getTopMacroCallerLoc(CC);
10503 // __null is usually wrapped in a macro. Go up a macro if that is the case.
10504 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) {
10505 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
10506 Loc, S.SourceMgr, S.getLangOpts());
10507 if (MacroName == "NULL")
10508 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
10511 // Only warn if the null and context location are in the same macro expansion.
10512 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
10515 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
10516 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC)
10517 << FixItHint::CreateReplacement(Loc,
10518 S.getFixItZeroLiteralForType(T, Loc));
// Forward declarations: the collection-literal checks are mutually
// recursive with checkObjCCollectionLiteralElement below.
10521 static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
10522 ObjCArrayLiteral *ArrayLiteral);
10525 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
10526 ObjCDictionaryLiteral *DictionaryLiteral);
10528 /// Check a single element within a collection literal against the
10529 /// target element type.
/// \param ElementKind selects the diagnostic wording (e.g. array element
///        vs. dictionary key vs. dictionary value — see the callers).
10530 static void checkObjCCollectionLiteralElement(Sema &S,
10531 QualType TargetElementType,
10533 unsigned ElementKind) {
10534 // Skip a bitcast to 'id' or qualified 'id'.
10535 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
10536 if (ICE->getCastKind() == CK_BitCast &&
10537 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
10538 Element = ICE->getSubExpr();
// Warn if the element's Objective-C pointer type is not assignable to
// the target element type.
10541 QualType ElementType = Element->getType();
10542 ExprResult ElementResult(Element);
10543 if (ElementType->getAs<ObjCObjectPointerType>() &&
10544 S.CheckSingleAssignmentConstraints(TargetElementType,
10547 != Sema::Compatible) {
10548 S.Diag(Element->getLocStart(),
10549 diag::warn_objc_collection_literal_element)
10550 << ElementType << ElementKind << TargetElementType
10551 << Element->getSourceRange();
// Recurse into nested collection literals.
10554 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
10555 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
10556 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
10557 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
10560 /// Check an Objective-C array literal being converted to the given
/// target type. Only fires when the target is a specialized NSArray with
/// exactly one type argument; each literal element is then checked against
/// that element type.
10562 static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
10563 ObjCArrayLiteral *ArrayLiteral) {
10564 if (!S.NSArrayDecl)
10567 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
10568 if (!TargetObjCPtr)
// The target must be a specialized NSArray (e.g. NSArray<NSString *> *).
10571 if (TargetObjCPtr->isUnspecialized() ||
10572 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
10573 != S.NSArrayDecl->getCanonicalDecl())
10576 auto TypeArgs = TargetObjCPtr->getTypeArgs();
10577 if (TypeArgs.size() != 1)
10580 QualType TargetElementType = TypeArgs[0];
10581 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) {
10582 checkObjCCollectionLiteralElement(S, TargetElementType,
10583 ArrayLiteral->getElement(I),
10588 /// Check an Objective-C dictionary literal being converted to the given
/// target type. Only fires when the target is a specialized NSDictionary
/// with exactly two type arguments (key, value); each key/value pair in
/// the literal is checked against those types.
10591 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
10592 ObjCDictionaryLiteral *DictionaryLiteral) {
10593 if (!S.NSDictionaryDecl)
10596 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
10597 if (!TargetObjCPtr)
// The target must be a specialized NSDictionary.
10600 if (TargetObjCPtr->isUnspecialized() ||
10601 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
10602 != S.NSDictionaryDecl->getCanonicalDecl())
10605 auto TypeArgs = TargetObjCPtr->getTypeArgs();
10606 if (TypeArgs.size() != 2)
10609 QualType TargetKeyType = TypeArgs[0];
10610 QualType TargetObjectType = TypeArgs[1];
10611 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
10612 auto Element = DictionaryLiteral->getKeyValueElement(I);
// ElementKind 1 = key, 2 = value (diagnostic wording selector).
10613 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
10614 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
10618 // Helper function to filter out cases for constant width constant conversion.
10619 // Don't warn on char array initialization or for non-decimal values.
10620 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
10621 SourceLocation CC) {
10622 // If initializing from a constant, and the constant starts with '0',
10623 // then it is a binary, octal, or hexadecimal. Allow these constants
10624 // to fill all the bits, even if there is a sign change.
// Peeks at the first spelled character of the literal: '0' prefixes
// (0x.., 0b.., 0..) indicate a bit-pattern constant, not a decimal value.
10625 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
10626 const char FirstLiteralCharacter =
10627 S.getSourceManager().getCharacterData(IntLit->getLocStart())[0];
10628 if (FirstLiteralCharacter == '0')
10632 // If the CC location points to a '{', and the type is char, then assume
10633 // assume it is an array initialization.
10634 if (CC.isValid() && T->isCharType()) {
10635 const char FirstContextCharacter =
10636 S.getSourceManager().getCharacterData(CC)[0];
10637 if (FirstContextCharacter == '{')
// Central dispatcher for implicit-conversion diagnostics (-Wconversion,
// -Wsign-conversion, -Wfloat-conversion, -Wbool-conversion, ...). Checks
// the implicit conversion of E to type T at context location CC. ICContext,
// when non-null, is set by the sign-conversion path so callers (the
// conditional-operator analysis) can tell a warning would have fired.
10645 CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
10646 bool *ICContext = nullptr) {
10647 if (E->isTypeDependent() || E->isValueDependent()) return;
10649 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
10650 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
10651 if (Source == Target) return;
10652 if (Target->isDependentType()) return;
10654 // If the conversion context location is invalid don't complain. We also
10655 // don't want to emit a warning if the issue occurs from the expansion of
10656 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
10657 // delay this check as long as possible. Once we detect we are in that
10658 // scenario, we just return.
10659 if (CC.isInvalid())
10662 // Diagnose implicit casts to bool.
10663 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
10664 if (isa<StringLiteral>(E))
10665 // Warn on string literal to bool. Checks for string literals in logical
10666 // and expressions, for instance, assert(0 && "error here"), are
10667 // prevented by a check in AnalyzeImplicitConversions().
10668 return DiagnoseImpCast(S, E, T, CC,
10669 diag::warn_impcast_string_literal_to_bool);
10670 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
10671 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
10672 // This covers the literal expressions that evaluate to Objective-C
10674 return DiagnoseImpCast(S, E, T, CC,
10675 diag::warn_impcast_objective_c_literal_to_bool);
10677 if (Source->isPointerType() || Source->canDecayToPointerType()) {
10678 // Warn on pointer to bool conversion that is always true.
10679 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
10684 // Check implicit casts from Objective-C collection literals to specialized
10685 // collection types, e.g., NSArray<NSString *> *.
10686 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E))
10687 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral);
10688 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E))
10689 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);
10691 // Strip vector types.
10692 if (isa<VectorType>(Source)) {
10693 if (!isa<VectorType>(Target)) {
10694 if (S.SourceMgr.isInSystemMacro(CC))
10696 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
10699 // If the vector cast is cast between two vectors of the same size, it is
10700 // a bitcast, not a conversion.
10701 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
// Both sides are vectors: continue the analysis on the element types.
10704 Source = cast<VectorType>(Source)->getElementType().getTypePtr();
10705 Target = cast<VectorType>(Target)->getElementType().getTypePtr();
10707 if (auto VecTy = dyn_cast<VectorType>(Target))
10708 Target = VecTy->getElementType().getTypePtr();
10710 // Strip complex types.
10711 if (isa<ComplexType>(Source)) {
10712 if (!isa<ComplexType>(Target)) {
10713 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType())
// complex -> scalar discards the imaginary part: a hard error in
// C++, a warning in C.
10716 return DiagnoseImpCast(S, E, T, CC,
10717 S.getLangOpts().CPlusPlus
10718 ? diag::err_impcast_complex_scalar
10719 : diag::warn_impcast_complex_scalar);
10722 Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
10723 Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
10726 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
10727 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);
10729 // If the source is floating point...
10730 if (SourceBT && SourceBT->isFloatingPoint()) {
10731 // ...and the target is floating point...
10732 if (TargetBT && TargetBT->isFloatingPoint()) {
10733 // ...then warn if we're dropping FP rank.
10735 // Builtin FP kinds are ordered by increasing FP rank.
10736 if (SourceBT->getKind() > TargetBT->getKind()) {
10737 // Don't warn about float constants that are precisely
10738 // representable in the target type.
10739 Expr::EvalResult result;
10740 if (E->EvaluateAsRValue(result, S.Context)) {
10741 // Value might be a float, a float vector, or a float complex.
10742 if (IsSameFloatAfterCast(result.Val,
10743 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
10744 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
10748 if (S.SourceMgr.isInSystemMacro(CC))
10751 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
10753 // ... or possibly if we're increasing rank, too
10754 else if (TargetBT->getKind() > SourceBT->getKind()) {
10755 if (S.SourceMgr.isInSystemMacro(CC))
10758 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
10763 // If the target is integral, always warn.
10764 if (TargetBT && TargetBT->isInteger()) {
10765 if (S.SourceMgr.isInSystemMacro(CC))
10768 DiagnoseFloatingImpCast(S, E, T, CC);
10771 // Detect the case where a call result is converted from floating-point to
10772 // to bool, and the final argument to the call is converted from bool, to
10773 // discover this typo:
10775 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
10777 // FIXME: This is an incredibly special case; is there some more general
10778 // way to detect this class of misplaced-parentheses bug?
10779 if (Target->isBooleanType() && isa<CallExpr>(E)) {
10780 // Check last argument of function call to see if it is an
10781 // implicit cast from a type matching the type the result
10782 // is being cast to.
10783 CallExpr *CEx = cast<CallExpr>(E);
10784 if (unsigned NumArgs = CEx->getNumArgs()) {
10785 Expr *LastA = CEx->getArg(NumArgs - 1);
10786 Expr *InnerE = LastA->IgnoreParenImpCasts();
10787 if (isa<ImplicitCastExpr>(LastA) &&
10788 InnerE->getType()->isBooleanType()) {
10789 // Warn on this floating-point to bool conversion
10790 DiagnoseImpCast(S, E, T, CC,
10791 diag::warn_impcast_floating_point_to_bool);
10798 DiagnoseNullConversion(S, E, T, CC);
10800 S.DiscardMisalignedMemberAddress(Target, E);
// Everything below is integer <-> integer analysis.
10802 if (!Source->isIntegerType() || !Target->isIntegerType())
10805 // TODO: remove this early return once the false positives for constant->bool
10806 // in templates, macros, etc, are reduced or removed.
10807 if (Target->isSpecificBuiltinType(BuiltinType::Bool))
10810 IntRange SourceRange = GetExprRange(S.Context, E);
10811 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);
// Case 1: narrowing — the source's effective range is wider than the
// target can hold.
10813 if (SourceRange.Width > TargetRange.Width) {
10814 // If the source is a constant, use a default-on diagnostic.
10815 // TODO: this should happen for bitfield stores, too.
10816 llvm::APSInt Value(32);
10817 if (E->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects)) {
10818 if (S.SourceMgr.isInSystemMacro(CC))
10821 std::string PrettySourceValue = Value.toString(10);
10822 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
10824 S.DiagRuntimeBehavior(E->getExprLoc(), E,
10825 S.PDiag(diag::warn_impcast_integer_precision_constant)
10826 << PrettySourceValue << PrettyTargetValue
10827 << E->getType() << T << E->getSourceRange()
10828 << clang::SourceRange(CC));
10832 // People want to build with -Wshorten-64-to-32 and not -Wconversion.
10833 if (S.SourceMgr.isInSystemMacro(CC))
10836 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
10837 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
10838 /* pruneControlFlow */ true);
10839 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
// Case 2: same width, but a non-negative signed constant whose top bit
// lands in the target's sign bit would be stored as a negative value.
10842 if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative &&
10843 SourceRange.NonNegative && Source->isSignedIntegerType()) {
10844 // Warn when doing a signed to signed conversion, warn if the positive
10845 // source value is exactly the width of the target type, which will
10846 // cause a negative value to be stored.
10848 llvm::APSInt Value;
10849 if (E->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects) &&
10850 !S.SourceMgr.isInSystemMacro(CC)) {
10851 if (isSameWidthConstantConversion(S, E, T, CC)) {
10852 std::string PrettySourceValue = Value.toString(10);
10853 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
10855 S.DiagRuntimeBehavior(
10856 E->getExprLoc(), E,
10857 S.PDiag(diag::warn_impcast_integer_precision_constant)
10858 << PrettySourceValue << PrettyTargetValue << E->getType() << T
10859 << E->getSourceRange() << clang::SourceRange(CC));
10864 // Fall through for non-constants to give a sign conversion warning.
// Case 3: signedness change that can alter the value.
10867 if ((TargetRange.NonNegative && !SourceRange.NonNegative) ||
10868 (!TargetRange.NonNegative && SourceRange.NonNegative &&
10869 SourceRange.Width == TargetRange.Width)) {
10870 if (S.SourceMgr.isInSystemMacro(CC))
10873 unsigned DiagID = diag::warn_impcast_integer_sign;
10875 // Traditionally, gcc has warned about this under -Wsign-compare.
10876 // We also want to warn about it in -Wconversion.
10877 // So if -Wconversion is off, use a completely identical diagnostic
10878 // in the sign-compare group.
10879 // The conditional-checking code will
10881 DiagID = diag::warn_impcast_integer_sign_conditional;
10885 return DiagnoseImpCast(S, E, T, CC, DiagID);
10888 // Diagnose conversions between different enumeration types.
10889 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
10890 // type, to give us better diagnostics.
10891 QualType SourceType = E->getType();
10892 if (!S.getLangOpts().CPlusPlus) {
10893 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
10894 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
10895 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
10896 SourceType = S.Context.getTypeDeclType(Enum);
10897 Source = S.Context.getCanonicalType(SourceType).getTypePtr();
// Only warn for conversions between distinct named (linkage-visible) enums.
10901 if (const EnumType *SourceEnum = Source->getAs<EnumType>())
10902 if (const EnumType *TargetEnum = Target->getAs<EnumType>())
10903 if (SourceEnum->getDecl()->hasNameForLinkage() &&
10904 TargetEnum->getDecl()->hasNameForLinkage() &&
10905 SourceEnum != TargetEnum) {
10906 if (S.SourceMgr.isInSystemMacro(CC))
10909 return DiagnoseImpCast(S, E, SourceType, T, CC,
10910 diag::warn_impcast_different_enum_types);
// Forward declaration: mutually recursive with CheckConditionalOperand.
10914 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
10915 SourceLocation CC, QualType T);
// Check one arm of a conditional operator against the context type T.
// ICContext is set if a sign-conversion warning would apply to this arm
// (consumed by CheckConditionalOperator below).
10917 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
10918 SourceLocation CC, bool &ICContext) {
10919 E = E->IgnoreParenImpCasts();
// Nested conditionals are analyzed arm-by-arm rather than as a whole.
10921 if (isa<ConditionalOperator>(E))
10922 return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T);
10924 AnalyzeImplicitConversions(S, E, CC);
10925 if (E->getType() != T)
10926 return CheckImplicitConversion(S, E, T, CC, &ICContext);
10929 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
10930 SourceLocation CC, QualType T) {
10931 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());
10933 bool Suspicious = false;
10934 CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious);
10935 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);
10937 // If -Wconversion would have warned about either of the candidates
10938 // for a signedness conversion to the context type...
10939 if (!Suspicious) return;
10941 // ...but it's currently ignored...
10942 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
10945 // ...then check whether it would have warned about either of the
10946 // candidates for a signedness conversion to the condition type.
10947 if (E->getType() == T) return;
10949 Suspicious = false;
10950 CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(),
10951 E->getType(), CC, &Suspicious);
10953 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
10954 E->getType(), CC, &Suspicious);
10957 /// CheckBoolLikeConversion - Check conversion of given expression to boolean.
10958 /// Input argument E is a logical expression.
10959 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
10960 if (S.getLangOpts().Bool)
10962 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
10965 /// AnalyzeImplicitConversions - Find and report any interesting
10966 /// implicit conversions in the given expression. There are a couple
10967 /// of competing diagnostics here, -Wconversion and -Wsign-compare.
// NOTE(review): this listing elides several lines relative to upstream
// clang (early 'return;' statements and closing braces, e.g. after the
// dependence check and inside the conditional-operator branch) — the
// remaining tokens are preserved verbatim; confirm against upstream
// before compiling.
10968 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE,
10969 SourceLocation CC) {
// The conversion of interest is from the unwrapped expression's type to
// the full expression's type T.
10970 QualType T = OrigE->getType();
10971 Expr *E = OrigE->IgnoreParenImpCasts();
// Nothing can be diagnosed for dependent expressions.
10973 if (E->isTypeDependent() || E->isValueDependent())
10976 // For conditional operators, we analyze the arguments as if they
10977 // were being fed directly into the output.
10978 if (isa<ConditionalOperator>(E)) {
10979 ConditionalOperator *CO = cast<ConditionalOperator>(E);
10980 CheckConditionalOperator(S, CO, CC, T);
10984 // Check implicit argument conversions for function calls.
10985 if (CallExpr *Call = dyn_cast<CallExpr>(E))
10986 CheckImplicitArgumentConversions(S, Call, CC);
10988 // Go ahead and check any implicit conversions we might have skipped.
10989 // The non-canonical typecheck is just an optimization;
10990 // CheckImplicitConversion will filter out dead implicit conversions.
10991 if (E->getType() != T)
10992 CheckImplicitConversion(S, E, T, CC);
10994 // Now continue drilling into this expression.
10996 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
10997 // The bound subexpressions in a PseudoObjectExpr are not reachable
10998 // as transitive children.
10999 // FIXME: Use a more uniform representation for this.
11000 for (auto *SE : POE->semantics())
11001 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
11002 AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC)
11005 // Skip past explicit casts.
11006 if (isa<ExplicitCastExpr>(E)) {
11007 E = cast<ExplicitCastExpr>(E)->getSubExpr()->IgnoreParenImpCasts();
11008 return AnalyzeImplicitConversions(S, E, CC);
11011 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
11012 // Do a somewhat different check with comparison operators.
11013 if (BO->isComparisonOp())
11014 return AnalyzeComparison(S, BO);
11016 // And with simple assignments.
11017 if (BO->getOpcode() == BO_Assign)
11018 return AnalyzeAssignment(S, BO);
11019 // And with compound assignments.
11020 if (BO->isAssignmentOp())
11021 return AnalyzeCompoundAssignment(S, BO);
11024 // These break the otherwise-useful invariant below. Fortunately,
11025 // we don't really need to recurse into them, because any internal
11026 // expressions should have been analyzed already when they were
11027 // built into statements.
11028 if (isa<StmtExpr>(E)) return;
11030 // Don't descend into unevaluated contexts.
11031 if (isa<UnaryExprOrTypeTraitExpr>(E)) return;
11033 // Now just recurse over the expression's children.
// From here on, conversions found in children are reported against this
// expression's own location rather than the original context.
11034 CC = E->getExprLoc();
11035 BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
11036 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
11037 for (Stmt *SubStmt : E->children()) {
// Children may be null (e.g. unfilled holes in some expressions); a
// null-check/continue appears to have been elided from this listing.
11038 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
11042 if (IsLogicalAndOperator &&
11043 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
11044 // Ignore checking string literals that are in logical and operators.
11045 // This is a common pattern for asserts.
11047 AnalyzeImplicitConversions(S, ChildExpr, CC);
// For logical operators, each operand also undergoes a bool-like
// conversion check (string literals in '&&' remain exempt).
11050 if (BO && BO->isLogicalOp()) {
11051 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
11052 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
11053 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
11055 SubExpr = BO->getRHS()->IgnoreParenImpCasts();
11056 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
11057 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
// Logical negation forces a bool-like conversion of its operand.
11060 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E))
11061 if (U->getOpcode() == UO_LNot)
11062 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
11065 /// Diagnose integer type and any valid implicit conversion to it.
11066 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
11067 // Taking into account implicit conversions,
11068 // allow any integer.
11069 if (!E->getType()->isIntegerType()) {
11070 S.Diag(E->getLocStart(),
11071 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
11074 // Potentially emit standard warnings for implicit conversions if enabled
11075 // using -Wconversion.
11076 CheckImplicitConversion(S, E, IntT, E->getLocStart());
11080 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
11081 // Returns true when emitting a warning about taking the address of a reference.
11082 static bool CheckForReference(Sema &SemaRef, const Expr *E,
11083 const PartialDiagnostic &PD) {
11084 E = E->IgnoreParenImpCasts();
11086 const FunctionDecl *FD = nullptr;
11088 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
11089 if (!DRE->getDecl()->getType()->isReferenceType())
11091 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
11092 if (!M->getMemberDecl()->getType()->isReferenceType())
11094 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
11095 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
11097 FD = Call->getDirectCallee();
11102 SemaRef.Diag(E->getExprLoc(), PD);
11104 // If possible, point to location of function.
11106 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
11112 // Returns true if the SourceLocation is expanded from any macro body.
11113 // Returns false if the SourceLocation is invalid, is from not in a macro
11114 // expansion, or is from expanded from a top-level macro argument.
11115 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
11116 if (Loc.isInvalid())
11119 while (Loc.isMacroID()) {
11120 if (SM.isMacroBodyExpansion(Loc))
11122 Loc = SM.getImmediateMacroCallerLoc(Loc);
11128 /// Diagnose pointers that are always non-null.
11129 /// \param E the expression containing the pointer
11130 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
11131 /// compared to a null pointer
11132 /// \param IsEqual True when the comparison is equal to a null pointer
11133 /// \param Range Extra SourceRange to highlight in the diagnostic
// NOTE(review): this listing elides a number of lines relative to upstream
// clang (early 'return;' statements, closing braces, the declaration of
// the DiagType enum/variable, and several control-flow lines) — the tokens
// below are preserved verbatim; confirm against upstream before compiling.
11134 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
11135 Expr::NullPointerConstantKind NullKind,
11136 bool IsEqual, SourceRange Range) {
11140 // Don't warn inside macros.
11141 if (E->getExprLoc().isMacroID()) {
11142 const SourceManager &SM = getSourceManager();
11143 if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
11144 IsInAnyMacroBody(SM, Range.getBegin()))
11147 E = E->IgnoreImpCasts();
// NPCK_NotNull means E is being converted to bool; anything else means E
// is being compared against a null pointer constant.
11149 const bool IsCompare = NullKind != Expr::NPCK_NotNull;
// 'this' can never be null, so warn unconditionally.
11151 if (isa<CXXThisExpr>(E)) {
11152 unsigned DiagID = IsCompare ? diag::warn_this_null_compare
11153 : diag::warn_this_bool_conversion;
11154 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
// Peel off a leading '&' so "&x" is analyzed in terms of x.
11158 bool IsAddressOf = false;
11160 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
11161 if (UO->getOpcode() != UO_AddrOf)
11163 IsAddressOf = true;
11164 E = UO->getSubExpr();
// Taking the address of a reference can never yield null either.
11168 unsigned DiagID = IsCompare
11169 ? diag::warn_address_of_reference_null_compare
11170 : diag::warn_address_of_reference_bool_conversion;
11171 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
11173 if (CheckForReference(*this, E, PD)) {
// Shared diagnostic emitter for both nonnull parameters and
// returns_nonnull calls; IsParam selects the wording.
11178 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
11179 bool IsParam = isa<NonNullAttr>(NonnullAttr);
11181 llvm::raw_string_ostream S(Str);
11182 E->printPretty(S, nullptr, getPrintingPolicy());
11183 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
11184 : diag::warn_cast_nonnull_to_bool;
11185 Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
11186 << E->getSourceRange() << Range << IsEqual;
11187 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
11190 // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
11191 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
11192 if (auto *Callee = Call->getDirectCallee()) {
11193 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
11194 ComplainAboutNonnullParamOrCall(A);
11200 // Expect to find a single Decl. Skip anything more complicated.
11201 ValueDecl *D = nullptr;
11202 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
11204 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
11205 D = M->getMemberDecl();
11208 // Weak Decls can be null.
11209 if (!D || D->isWeak())
11212 // Check for parameter decl with nonnull attribute
11213 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
// Only warn if the parameter has not been reassigned in this function,
// since a reassigned parameter may legitimately be null.
11214 if (getCurFunction() &&
11215 !getCurFunction()->ModifiedNonNullParams.count(PV)) {
11216 if (const Attr *A = PV->getAttr<NonNullAttr>()) {
11217 ComplainAboutNonnullParamOrCall(A);
// The nonnull attribute may also be spelled on the function with an
// explicit (or empty, meaning "all pointers") parameter index list.
11221 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
11222 auto ParamIter = llvm::find(FD->parameters(), PV);
11223 assert(ParamIter != FD->param_end());
11224 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
11226 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
11227 if (!NonNull->args_size()) {
11228 ComplainAboutNonnullParamOrCall(NonNull);
11232 for (const ParamIdx &ArgNo : NonNull->args()) {
11233 if (ArgNo.getASTIndex() == ParamNo) {
11234 ComplainAboutNonnullParamOrCall(NonNull);
// Beyond attributes: arrays, functions, and address-of expressions also
// always yield non-null values.
11243 QualType T = D->getType();
11244 const bool IsArray = T->isArrayType();
11245 const bool IsFunction = T->isFunctionType();
11247 // Address of function is used to silence the function warning.
11248 if (IsAddressOf && IsFunction) {
11253 if (!IsAddressOf && !IsFunction && !IsArray)
11256 // Pretty print the expression for the diagnostic.
11258 llvm::raw_string_ostream S(Str);
11259 E->printPretty(S, nullptr, getPrintingPolicy());
11261 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
11262 : diag::warn_impcast_pointer_to_bool;
// NOTE(review): the declaration of DiagType and the enumerators
// (AddressOf/FunctionPointer/ArrayPointer) plus the leading 'if' of this
// chain are elided from this listing.
11269 DiagType = AddressOf;
11270 else if (IsFunction)
11271 DiagType = FunctionPointer;
11273 DiagType = ArrayPointer;
11275 llvm_unreachable("Could not determine diagnostic.");
11276 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
11277 << Range << IsEqual;
11282 // Suggest '&' to silence the function warning.
11283 Diag(E->getExprLoc(), diag::note_function_warning_silence)
11284 << FixItHint::CreateInsertion(E->getLocStart(), "&");
11286 // Check to see if '()' fixit should be emitted.
11287 QualType ReturnType;
11288 UnresolvedSet<4> NonTemplateOverloads;
11289 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
11290 if (ReturnType.isNull())
11294 // There are two cases here. If there is null constant, the only suggest
11295 // for a pointer return type. If the null is 0, then suggest if the return
11296 // type is a pointer or an integer type.
11297 if (!ReturnType->isPointerType()) {
11298 if (NullKind == Expr::NPCK_ZeroExpression ||
11299 NullKind == Expr::NPCK_ZeroLiteral) {
11300 if (!ReturnType->isIntegerType())
11306 } else { // !IsCompare
11307 // For function to bool, only suggest if the function pointer has bool
11309 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
11312 Diag(E->getExprLoc(), diag::note_function_to_function_call)
11313 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getLocEnd()), "()");
11316 /// Diagnoses "dangerous" implicit conversions within the given
11317 /// expression (which is a full expression). Implements -Wconversion
11318 /// and -Wsign-compare.
11320 /// \param CC the "context" location of the implicit conversion, i.e.
11321 /// the most location of the syntactic entity requiring the implicit
11323 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
11324 // Don't diagnose in unevaluated contexts.
11325 if (isUnevaluatedContext())
11328 // Don't diagnose for value- or type-dependent expressions.
11329 if (E->isTypeDependent() || E->isValueDependent())
11332 // Check for array bounds violations in cases where the check isn't triggered
11333 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
11334 // ArraySubscriptExpr is on the RHS of a variable initialization.
11335 CheckArrayAccess(E);
11337 // This is not the right CC for (e.g.) a variable initialization.
11338 AnalyzeImplicitConversions(*this, E, CC);
11341 /// CheckBoolLikeConversion - Check conversion of given expression to boolean.
11342 /// Input argument E is a logical expression.
11343 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
11344 ::CheckBoolLikeConversion(*this, E, CC);
11347 /// Diagnose when expression is an integer constant expression and its evaluation
11348 /// results in integer overflow
11349 void Sema::CheckForIntOverflow (Expr *E) {
11350 // Use a work list to deal with nested struct initializers.
11351 SmallVector<Expr *, 2> Exprs(1, E);
11354 Expr *OriginalE = Exprs.pop_back_val();
11355 Expr *E = OriginalE->IgnoreParenCasts();
11357 if (isa<BinaryOperator>(E)) {
11358 E->EvaluateForOverflow(Context);
11362 if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
11363 Exprs.append(InitList->inits().begin(), InitList->inits().end());
11364 else if (isa<ObjCBoxedExpr>(OriginalE))
11365 E->EvaluateForOverflow(Context);
11366 else if (auto Call = dyn_cast<CallExpr>(E))
11367 Exprs.append(Call->arg_begin(), Call->arg_end());
11368 else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
11369 Exprs.append(Message->arg_begin(), Message->arg_end());
11370 } while (!Exprs.empty());
11375 /// Visitor for expressions which looks for unsequenced operations on the
// NOTE(review): this listing elides many lines relative to upstream clang
// (closing braces, 'return' statements, 'public:'/'private:' labels, parts
// of struct definitions, and the enclosing anonymous namespace) — the
// tokens below are preserved verbatim; confirm against upstream before
// compiling.
11377 class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
11378 using Base = EvaluatedExprVisitor<SequenceChecker>;
11380 /// A tree of sequenced regions within an expression. Two regions are
11381 /// unsequenced if one is an ancestor or a descendent of the other. When we
11382 /// finish processing an expression with sequencing, such as a comma
11383 /// expression, we fold its tree nodes into its parent, since they are
11384 /// unsequenced with respect to nodes we will visit later.
11385 class SequenceTree {
// Each node stores its parent index plus a 'Merged' flag, packed into
// one 32-bit word.
11387 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
11388 unsigned Parent : 31;
11389 unsigned Merged : 1;
11391 SmallVector<Value, 8> Values;
11394 /// A region within an expression which may be sequenced with respect
11395 /// to some other region.
11397 friend class SequenceTree;
11399 unsigned Index = 0;
11401 explicit Seq(unsigned N) : Index(N) {}
// The root region (index 0) represents the whole expression.
11407 SequenceTree() { Values.push_back(Value(0)); }
11408 Seq root() const { return Seq(0); }
11410 /// Create a new sequence of operations, which is an unsequenced
11411 /// subset of \p Parent. This sequence of operations is sequenced with
11412 /// respect to other children of \p Parent.
11413 Seq allocate(Seq Parent) {
11414 Values.push_back(Value(Parent.Index));
11415 return Seq(Values.size() - 1);
11418 /// Merge a sequence of operations into its parent.
11419 void merge(Seq S) {
11420 Values[S.Index].Merged = true;
11423 /// Determine whether two operations are unsequenced. This operation
11424 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
11425 /// should have been merged into its parent as appropriate.
11426 bool isUnsequenced(Seq Cur, Seq Old) {
11427 unsigned C = representative(Cur.Index);
11428 unsigned Target = representative(Old.Index);
// Walk ancestors of Cur; they are unsequenced iff Old is on that path.
11429 while (C >= Target) {
11432 C = Values[C].Parent;
11438 /// Pick a representative for a sequence.
11439 unsigned representative(unsigned K) {
11440 if (Values[K].Merged)
11441 // Perform path compression as we go.
11442 return Values[K].Parent = representative(Values[K].Parent);
11447 /// An object for which we can track unsequenced uses.
11448 using Object = NamedDecl *;
11450 /// Different flavors of object usage which we track. We only track the
11451 /// least-sequenced usage of each kind.
11453 /// A read of an object. Multiple unsequenced reads are OK.
11456 /// A modification of an object which is sequenced before the value
11457 /// computation of the expression, such as ++n in C++.
11460 /// A modification of an object which is not sequenced before the value
11461 /// computation of the expression, such as n++.
11462 UK_ModAsSideEffect,
11464 UK_Count = UK_ModAsSideEffect + 1
// A single recorded usage: the expression performing it and the region
// in which it occurred.
11468 Expr *Use = nullptr;
11469 SequenceTree::Seq Seq;
// Per-object state: the least-sequenced usage of each kind, plus a flag
// to avoid emitting more than one diagnostic per object.
11475 Usage Uses[UK_Count];
11477 /// Have we issued a diagnostic for this variable already?
11478 bool Diagnosed = false;
11480 UsageInfo() = default;
11482 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;
11486 /// Sequenced regions within the expression.
11489 /// Declaration modifications and references which we have seen.
11490 UsageInfoMap UsageMap;
11492 /// The region we are currently within.
11493 SequenceTree::Seq Region;
11495 /// Filled in with declarations which were modified as a side-effect
11496 /// (that is, post-increment operations).
11497 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;
11499 /// Expressions to check later. We defer checking these to reduce
11501 SmallVectorImpl<Expr *> &WorkList;
11503 /// RAII object wrapping the visitation of a sequenced subexpression of an
11504 /// expression. At the end of this process, the side-effects of the evaluation
11505 /// become sequenced with respect to the value computation of the result, so
11506 /// we downgrade any UK_ModAsSideEffect within the evaluation to
11508 struct SequencedSubexpression {
11509 SequencedSubexpression(SequenceChecker &Self)
11510 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
11511 Self.ModAsSideEffect = &ModAsSideEffect;
11514 ~SequencedSubexpression() {
// On exit, re-record each side-effect modification as a value
// modification (it is now sequenced), restoring the prior usage slot.
11515 for (auto &M : llvm::reverse(ModAsSideEffect)) {
11516 UsageInfo &U = Self.UsageMap[M.first];
11517 auto &SideEffectUsage = U.Uses[UK_ModAsSideEffect];
11518 Self.addUsage(U, M.first, SideEffectUsage.Use, UK_ModAsValue);
11519 SideEffectUsage = M.second;
11521 Self.ModAsSideEffect = OldModAsSideEffect;
11524 SequenceChecker &Self;
11525 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
11526 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
11529 /// RAII object wrapping the visitation of a subexpression which we might
11530 /// choose to evaluate as a constant. If any subexpression is evaluated and
11531 /// found to be non-constant, this allows us to suppress the evaluation of
11532 /// the outer expression.
11533 class EvaluationTracker {
11535 EvaluationTracker(SequenceChecker &Self)
11536 : Self(Self), Prev(Self.EvalTracker) {
11537 Self.EvalTracker = this;
11540 ~EvaluationTracker() {
11541 Self.EvalTracker = Prev;
// Propagate evaluation failure to the enclosing tracker, if any.
11543 Prev->EvalOK &= EvalOK;
11546 bool evaluate(const Expr *E, bool &Result) {
11547 if (!EvalOK || E->isValueDependent())
11549 EvalOK = E->EvaluateAsBooleanCondition(Result, Self.SemaRef.Context);
11554 SequenceChecker &Self;
11555 EvaluationTracker *Prev;
11556 bool EvalOK = true;
11557 } *EvalTracker = nullptr;
11559 /// Find the object which is produced by the specified expression,
11561 Object getObject(Expr *E, bool Mod) const {
11562 E = E->IgnoreParenCasts();
11563 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
// Pre-inc/dec yield their operand as the modified object.
11564 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
11565 return getObject(UO->getSubExpr(), Mod);
11566 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
11567 if (BO->getOpcode() == BO_Comma)
11568 return getObject(BO->getRHS(), Mod);
11569 if (Mod && BO->isAssignmentOp())
11570 return getObject(BO->getLHS(), Mod);
11571 } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
11572 // FIXME: Check for more interesting cases, like "x.n = ++x.n".
11573 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
11574 return ME->getMemberDecl();
11575 } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
11576 // FIXME: If this is a reference, map through to its value.
11577 return DRE->getDecl();
11581 /// Note that an object was modified or used by an expression.
11582 void addUsage(UsageInfo &UI, Object O, Expr *Ref, UsageKind UK) {
11583 Usage &U = UI.Uses[UK];
// Only keep the least-sequenced usage of each kind; if we are replacing
// a side-effect modification, remember it for downgrade-on-sequencing.
11584 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) {
11585 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
11586 ModAsSideEffect->push_back(std::make_pair(O, U));
11592 /// Check whether a modification or use conflicts with a prior usage.
11593 void checkUsage(Object O, UsageInfo &UI, Expr *Ref, UsageKind OtherKind,
11598 const Usage &U = UI.Uses[OtherKind];
11599 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq))
// Report the modification first and the conflicting mod-or-use second.
11603 Expr *ModOrUse = Ref;
11604 if (OtherKind == UK_Use)
11605 std::swap(Mod, ModOrUse);
11607 SemaRef.Diag(Mod->getExprLoc(),
11608 IsModMod ? diag::warn_unsequenced_mod_mod
11609 : diag::warn_unsequenced_mod_use)
11610 << O << SourceRange(ModOrUse->getExprLoc());
11611 UI.Diagnosed = true;
11614 void notePreUse(Object O, Expr *Use) {
11615 UsageInfo &U = UsageMap[O];
11616 // Uses conflict with other modifications.
11617 checkUsage(O, U, Use, UK_ModAsValue, false);
11620 void notePostUse(Object O, Expr *Use) {
11621 UsageInfo &U = UsageMap[O];
11622 checkUsage(O, U, Use, UK_ModAsSideEffect, false);
11623 addUsage(U, O, Use, UK_Use);
11626 void notePreMod(Object O, Expr *Mod) {
11627 UsageInfo &U = UsageMap[O];
11628 // Modifications conflict with other modifications and with uses.
11629 checkUsage(O, U, Mod, UK_ModAsValue, true);
11630 checkUsage(O, U, Mod, UK_Use, false);
11633 void notePostMod(Object O, Expr *Use, UsageKind UK) {
11634 UsageInfo &U = UsageMap[O];
11635 checkUsage(O, U, Use, UK_ModAsSideEffect, true);
11636 addUsage(U, O, Use, UK);
11640 SequenceChecker(Sema &S, Expr *E, SmallVectorImpl<Expr *> &WorkList)
11641 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
11645 void VisitStmt(Stmt *S) {
11646 // Skip all statements which aren't expressions for now.
11649 void VisitExpr(Expr *E) {
11650 // By default, just recurse to evaluated subexpressions.
11651 Base::VisitStmt(E);
11654 void VisitCastExpr(CastExpr *E) {
11655 Object O = Object();
// An lvalue-to-rvalue conversion is a read of the underlying object.
11656 if (E->getCastKind() == CK_LValueToRValue)
11657 O = getObject(E->getSubExpr(), false);
11666 void VisitBinComma(BinaryOperator *BO) {
11667 // C++11 [expr.comma]p1:
11668 // Every value computation and side effect associated with the left
11669 // expression is sequenced before every value computation and side
11670 // effect associated with the right expression.
11671 SequenceTree::Seq LHS = Tree.allocate(Region);
11672 SequenceTree::Seq RHS = Tree.allocate(Region);
11673 SequenceTree::Seq OldRegion = Region;
11676 SequencedSubexpression SeqLHS(*this);
11678 Visit(BO->getLHS());
11682 Visit(BO->getRHS());
11684 Region = OldRegion;
11686 // Forget that LHS and RHS are sequenced. They are both unsequenced
11687 // with respect to other stuff.
11692 void VisitBinAssign(BinaryOperator *BO) {
11693 // The modification is sequenced after the value computation of the LHS
11694 // and RHS, so check it before inspecting the operands and update the
11696 Object O = getObject(BO->getLHS(), true);
11698 return VisitExpr(BO);
11702 // C++11 [expr.ass]p7:
11703 // E1 op= E2 is equivalent to E1 = E1 op E2, except that E1 is evaluated
11706 // Therefore, for a compound assignment operator, O is considered used
11707 // everywhere except within the evaluation of E1 itself.
11708 if (isa<CompoundAssignOperator>(BO))
11711 Visit(BO->getLHS());
11713 if (isa<CompoundAssignOperator>(BO))
11714 notePostUse(O, BO);
11716 Visit(BO->getRHS());
11718 // C++11 [expr.ass]p1:
11719 // the assignment is sequenced [...] before the value computation of the
11720 // assignment expression.
11721 // C11 6.5.16/3 has no such rule.
11722 notePostMod(O, BO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
11723 : UK_ModAsSideEffect);
// Compound assignments reuse the plain-assignment logic above.
11726 void VisitCompoundAssignOperator(CompoundAssignOperator *CAO) {
11727 VisitBinAssign(CAO);
11730 void VisitUnaryPreInc(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
11731 void VisitUnaryPreDec(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
11732 void VisitUnaryPreIncDec(UnaryOperator *UO) {
11733 Object O = getObject(UO->getSubExpr(), true);
11735 return VisitExpr(UO);
11738 Visit(UO->getSubExpr());
11739 // C++11 [expr.pre.incr]p1:
11740 // the expression ++x is equivalent to x+=1
11741 notePostMod(O, UO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
11742 : UK_ModAsSideEffect);
11745 void VisitUnaryPostInc(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
11746 void VisitUnaryPostDec(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
11747 void VisitUnaryPostIncDec(UnaryOperator *UO) {
11748 Object O = getObject(UO->getSubExpr(), true);
11750 return VisitExpr(UO);
11753 Visit(UO->getSubExpr());
// Post-inc/dec side effects are not sequenced before the value
// computation in either C or C++.
11754 notePostMod(O, UO, UK_ModAsSideEffect);
11757 /// Don't visit the RHS of '&&' or '||' if it might not be evaluated.
11758 void VisitBinLOr(BinaryOperator *BO) {
11759 // The side-effects of the LHS of an '&&' are sequenced before the
11760 // value computation of the RHS, and hence before the value computation
11761 // of the '&&' itself, unless the LHS evaluates to zero. We treat them
11762 // as if they were unconditionally sequenced.
11763 EvaluationTracker Eval(*this);
11765 SequencedSubexpression Sequenced(*this);
11766 Visit(BO->getLHS());
// Only visit the RHS inline when the LHS provably selects it; otherwise
// defer it to the work list as a separate evaluation.
11770 if (Eval.evaluate(BO->getLHS(), Result)) {
11772 Visit(BO->getRHS());
11774 // Check for unsequenced operations in the RHS, treating it as an
11775 // entirely separate evaluation.
11777 // FIXME: If there are operations in the RHS which are unsequenced
11778 // with respect to operations outside the RHS, and those operations
11779 // are unconditionally evaluated, diagnose them.
11780 WorkList.push_back(BO->getRHS());
11783 void VisitBinLAnd(BinaryOperator *BO) {
11784 EvaluationTracker Eval(*this);
11786 SequencedSubexpression Sequenced(*this);
11787 Visit(BO->getLHS());
11791 if (Eval.evaluate(BO->getLHS(), Result)) {
11793 Visit(BO->getRHS());
11795 WorkList.push_back(BO->getRHS());
11799 // Only visit the condition, unless we can be sure which subexpression will
11801 void VisitAbstractConditionalOperator(AbstractConditionalOperator *CO) {
11802 EvaluationTracker Eval(*this);
11804 SequencedSubexpression Sequenced(*this);
11805 Visit(CO->getCond());
11809 if (Eval.evaluate(CO->getCond(), Result))
11810 Visit(Result ? CO->getTrueExpr() : CO->getFalseExpr());
11812 WorkList.push_back(CO->getTrueExpr());
11813 WorkList.push_back(CO->getFalseExpr());
11817 void VisitCallExpr(CallExpr *CE) {
11818 // C++11 [intro.execution]p15:
11819 // When calling a function [...], every value computation and side effect
11820 // associated with any argument expression, or with the postfix expression
11821 // designating the called function, is sequenced before execution of every
11822 // expression or statement in the body of the function [and thus before
11823 // the value computation of its result].
11824 SequencedSubexpression Sequenced(*this);
11825 Base::VisitCallExpr(CE);
11827 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
11830 void VisitCXXConstructExpr(CXXConstructExpr *CCE) {
11831 // This is a call, so all subexpressions are sequenced before the result.
11832 SequencedSubexpression Sequenced(*this);
11834 if (!CCE->isListInitialization())
11835 return VisitExpr(CCE);
11837 // In C++11, list initializations are sequenced.
11838 SmallVector<SequenceTree::Seq, 32> Elts;
11839 SequenceTree::Seq Parent = Region;
11840 for (CXXConstructExpr::arg_iterator I = CCE->arg_begin(),
11841 E = CCE->arg_end();
11843 Region = Tree.allocate(Parent);
11844 Elts.push_back(Region);
11848 // Forget that the initializers are sequenced.
11850 for (unsigned I = 0; I < Elts.size(); ++I)
11851 Tree.merge(Elts[I]);
11854 void VisitInitListExpr(InitListExpr *ILE) {
11855 if (!SemaRef.getLangOpts().CPlusPlus11)
11856 return VisitExpr(ILE);
11858 // In C++11, list initializations are sequenced.
11859 SmallVector<SequenceTree::Seq, 32> Elts;
11860 SequenceTree::Seq Parent = Region;
11861 for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
11862 Expr *E = ILE->getInit(I);
11864 Region = Tree.allocate(Parent);
11865 Elts.push_back(Region);
11869 // Forget that the initializers are sequenced.
11871 for (unsigned I = 0; I < Elts.size(); ++I)
11872 Tree.merge(Elts[I]);
11878 void Sema::CheckUnsequencedOperations(Expr *E) {
11879 SmallVector<Expr *, 8> WorkList;
11880 WorkList.push_back(E);
11881 while (!WorkList.empty()) {
11882 Expr *Item = WorkList.pop_back_val();
11883 SequenceChecker(*this, Item, WorkList);
11887 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
11888 bool IsConstexpr) {
11889 CheckImplicitConversions(E, CheckLoc);
11890 if (!E->isInstantiationDependent())
11891 CheckUnsequencedOperations(E);
11892 if (!IsConstexpr && !E->isValueDependent())
11893 CheckForIntOverflow(E);
11894 DiagnoseMisalignedMembers();
11897 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
11898 FieldDecl *BitField,
11900 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
11903 static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
11904 SourceLocation Loc) {
11905 if (!PType->isVariablyModifiedType())
11907 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) {
11908 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc);
11911 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) {
11912 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc);
11915 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) {
11916 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc);
11920 const ArrayType *AT = S.Context.getAsArrayType(PType);
11924 if (AT->getSizeModifier() != ArrayType::Star) {
11925 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
11929 S.Diag(Loc, diag::err_array_star_in_function_definition);
11932 /// CheckParmsForFunctionDef - Check that the parameters of the given
11933 /// function are appropriate for the definition of a function. This
11934 /// takes care of any checks that cannot be performed on the
11935 /// declaration itself, e.g., that the types of each of the function
11936 /// parameters are complete.
11937 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
11938 bool CheckParameterNames) {
11939 bool HasInvalidParm = false;
11940 for (ParmVarDecl *Param : Parameters) {
11941 // C99 6.7.5.3p4: the parameters in a parameter type list in a
11942 // function declarator that is part of a function definition of
11943 // that function shall not have incomplete type.
11945 // This is also C++ [dcl.fct]p6.
11946 if (!Param->isInvalidDecl() &&
11947 RequireCompleteType(Param->getLocation(), Param->getType(),
11948 diag::err_typecheck_decl_incomplete_type)) {
11949 Param->setInvalidDecl();
11950 HasInvalidParm = true;
11953 // C99 6.9.1p5: If the declarator includes a parameter type list, the
11954 // declaration of each parameter shall include an identifier.
11955 if (CheckParameterNames &&
11956 Param->getIdentifier() == nullptr &&
11957 !Param->isImplicit() &&
11958 !getLangOpts().CPlusPlus)
11959 Diag(Param->getLocation(), diag::err_parameter_name_omitted);
11962 // If the function declarator is not part of a definition of that
11963 // function, parameters may have incomplete type and may use the [*]
11964 // notation in their sequences of declarator specifiers to specify
11965 // variable length array types.
11966 QualType PType = Param->getOriginalType();
11967 // FIXME: This diagnostic should point the '[*]' if source-location
11968 // information is added for it.
11969 diagnoseArrayStarInParamType(*this, PType, Param->getLocation());
11971 // If the parameter is a c++ class type and it has to be destructed in the
11972 // callee function, declare the destructor so that it can be called by the
11973 // callee function. Do not perform any direct access check on the dtor here.
11974 if (!Param->isInvalidDecl()) {
11975 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
11976 if (!ClassDecl->isInvalidDecl() &&
11977 !ClassDecl->hasIrrelevantDestructor() &&
11978 !ClassDecl->isDependentContext() &&
11979 ClassDecl->isParamDestroyedInCallee()) {
11980 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
11981 MarkFunctionReferenced(Param->getLocation(), Destructor);
11982 DiagnoseUseOfDecl(Destructor, Param->getLocation());
11987 // Parameters with the pass_object_size attribute only need to be marked
11988 // constant at function definitions. Because we lack information about
11989 // whether we're on a declaration or definition when we're instantiating the
11990 // attribute, we need to check for constness here.
11991 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
11992 if (!Param->getType().isConstQualified())
11993 Diag(Param->getLocation(), diag::err_attribute_pointers_only)
11994 << Attr->getSpelling() << 1;
11997 return HasInvalidParm;
12000 /// A helper function to get the alignment of a Decl referred to by DeclRefExpr
12002 static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign,
12003 ASTContext &Context) {
12004 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
12005 return Context.getDeclAlign(DRE->getDecl());
12007 if (const auto *ME = dyn_cast<MemberExpr>(E))
12008 return Context.getDeclAlign(ME->getMemberDecl());
12013 /// CheckCastAlign - Implements -Wcast-align, which warns when a
12014 /// pointer cast increases the alignment requirements.
12015 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
12016 // This is actually a lot of work to potentially be doing on every
12017 // cast; don't do it if we're ignoring -Wcast_align (as is the default).
12018 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
12021 // Ignore dependent types.
12022 if (T->isDependentType() || Op->getType()->isDependentType())
12025 // Require that the destination be a pointer type.
12026 const PointerType *DestPtr = T->getAs<PointerType>();
12027 if (!DestPtr) return;
12029 // If the destination has alignment 1, we're done.
12030 QualType DestPointee = DestPtr->getPointeeType();
12031 if (DestPointee->isIncompleteType()) return;
12032 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
12033 if (DestAlign.isOne()) return;
12035 // Require that the source be a pointer type.
12036 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
12037 if (!SrcPtr) return;
12038 QualType SrcPointee = SrcPtr->getPointeeType();
12040 // Whitelist casts from cv void*. We already implicitly
12041 // whitelisted casts to cv void*, since they have alignment 1.
12042 // Also whitelist casts involving incomplete types, which implicitly
12043 // includes 'void'.
12044 if (SrcPointee->isIncompleteType()) return;
12046 CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee);
12048 if (auto *CE = dyn_cast<CastExpr>(Op)) {
12049 if (CE->getCastKind() == CK_ArrayToPointerDecay)
12050 SrcAlign = getDeclAlign(CE->getSubExpr(), SrcAlign, Context);
12051 } else if (auto *UO = dyn_cast<UnaryOperator>(Op)) {
12052 if (UO->getOpcode() == UO_AddrOf)
12053 SrcAlign = getDeclAlign(UO->getSubExpr(), SrcAlign, Context);
12056 if (SrcAlign >= DestAlign) return;
12058 Diag(TRange.getBegin(), diag::warn_cast_align)
12059 << Op->getType() << T
12060 << static_cast<unsigned>(SrcAlign.getQuantity())
12061 << static_cast<unsigned>(DestAlign.getQuantity())
12062 << TRange << Op->getSourceRange();
12065 /// Check whether this array fits the idiom of a size-one tail padded
12066 /// array member of a struct.
12068 /// We avoid emitting out-of-bounds access warnings for such arrays as they are
12069 /// commonly used to emulate flexible arrays in C89 code.
12070 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size,
12071 const NamedDecl *ND) {
12072 if (Size != 1 || !ND) return false;
12074 const FieldDecl *FD = dyn_cast<FieldDecl>(ND);
12075 if (!FD) return false;
12077 // Don't consider sizes resulting from macro expansions or template argument
12078 // substitution to form C89 tail-padded arrays.
12080 TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
12082 TypeLoc TL = TInfo->getTypeLoc();
12083 // Look through typedefs.
12084 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) {
12085 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
12086 TInfo = TDL->getTypeSourceInfo();
12089 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) {
12090 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr());
12091 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
12097 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext());
12098 if (!RD) return false;
12099 if (RD->isUnion()) return false;
12100 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
12101 if (!CRD->isStandardLayout()) return false;
12104 // See if this is the last field decl in the record.
12105 const Decl *D = FD;
12106 while ((D = D->getNextDeclInContext()))
12107 if (isa<FieldDecl>(D))
12112 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
12113 const ArraySubscriptExpr *ASE,
12114 bool AllowOnePastEnd, bool IndexNegated) {
12115 IndexExpr = IndexExpr->IgnoreParenImpCasts();
12116 if (IndexExpr->isValueDependent())
12119 const Type *EffectiveType =
12120 BaseExpr->getType()->getPointeeOrArrayElementType();
12121 BaseExpr = BaseExpr->IgnoreParenCasts();
12122 const ConstantArrayType *ArrayTy =
12123 Context.getAsConstantArrayType(BaseExpr->getType());
12127 llvm::APSInt index;
12128 if (!IndexExpr->EvaluateAsInt(index, Context, Expr::SE_AllowSideEffects))
12133 const NamedDecl *ND = nullptr;
12134 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
12135 ND = DRE->getDecl();
12136 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
12137 ND = ME->getMemberDecl();
12139 if (index.isUnsigned() || !index.isNegative()) {
12140 llvm::APInt size = ArrayTy->getSize();
12141 if (!size.isStrictlyPositive())
12144 const Type *BaseType = BaseExpr->getType()->getPointeeOrArrayElementType();
12145 if (BaseType != EffectiveType) {
12146 // Make sure we're comparing apples to apples when comparing index to size
12147 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
12148 uint64_t array_typesize = Context.getTypeSize(BaseType);
12149 // Handle ptrarith_typesize being zero, such as when casting to void*
12150 if (!ptrarith_typesize) ptrarith_typesize = 1;
12151 if (ptrarith_typesize != array_typesize) {
12152 // There's a cast to a different size type involved
12153 uint64_t ratio = array_typesize / ptrarith_typesize;
12154 // TODO: Be smarter about handling cases where array_typesize is not a
12155 // multiple of ptrarith_typesize
12156 if (ptrarith_typesize * ratio == array_typesize)
12157 size *= llvm::APInt(size.getBitWidth(), ratio);
12161 if (size.getBitWidth() > index.getBitWidth())
12162 index = index.zext(size.getBitWidth());
12163 else if (size.getBitWidth() < index.getBitWidth())
12164 size = size.zext(index.getBitWidth());
12166 // For array subscripting the index must be less than size, but for pointer
12167 // arithmetic also allow the index (offset) to be equal to size since
12168 // computing the next address after the end of the array is legal and
12169 // commonly done e.g. in C++ iterators and range-based for loops.
12170 if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
12173 // Also don't warn for arrays of size 1 which are members of some
12174 // structure. These are often used to approximate flexible arrays in C89
12176 if (IsTailPaddedMemberArray(*this, size, ND))
12179 // Suppress the warning if the subscript expression (as identified by the
12180 // ']' location) and the index expression are both from macro expansions
12181 // within a system header.
12183 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
12184 ASE->getRBracketLoc());
12185 if (SourceMgr.isInSystemHeader(RBracketLoc)) {
12186 SourceLocation IndexLoc = SourceMgr.getSpellingLoc(
12187 IndexExpr->getLocStart());
12188 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
12193 unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds;
12195 DiagID = diag::warn_array_index_exceeds_bounds;
12197 DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr,
12198 PDiag(DiagID) << index.toString(10, true)
12199 << size.toString(10, true)
12200 << (unsigned)size.getLimitedValue(~0U)
12201 << IndexExpr->getSourceRange());
12203 unsigned DiagID = diag::warn_array_index_precedes_bounds;
12205 DiagID = diag::warn_ptr_arith_precedes_bounds;
12206 if (index.isNegative()) index = -index;
12209 DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr,
12210 PDiag(DiagID) << index.toString(10, true)
12211 << IndexExpr->getSourceRange());
12215 // Try harder to find a NamedDecl to point at in the note.
12216 while (const ArraySubscriptExpr *ASE =
12217 dyn_cast<ArraySubscriptExpr>(BaseExpr))
12218 BaseExpr = ASE->getBase()->IgnoreParenCasts();
12219 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
12220 ND = DRE->getDecl();
12221 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
12222 ND = ME->getMemberDecl();
12226 DiagRuntimeBehavior(ND->getLocStart(), BaseExpr,
12227 PDiag(diag::note_array_index_out_of_bounds)
12228 << ND->getDeclName());
12231 void Sema::CheckArrayAccess(const Expr *expr) {
12232 int AllowOnePastEnd = 0;
12234 expr = expr->IgnoreParenImpCasts();
12235 switch (expr->getStmtClass()) {
12236 case Stmt::ArraySubscriptExprClass: {
12237 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
12238 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
12239 AllowOnePastEnd > 0);
12240 expr = ASE->getBase();
12243 case Stmt::MemberExprClass: {
12244 expr = cast<MemberExpr>(expr)->getBase();
12247 case Stmt::OMPArraySectionExprClass: {
12248 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
12249 if (ASE->getLowerBound())
12250 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
12251 /*ASE=*/nullptr, AllowOnePastEnd > 0);
12254 case Stmt::UnaryOperatorClass: {
12255 // Only unwrap the * and & unary operators
12256 const UnaryOperator *UO = cast<UnaryOperator>(expr);
12257 expr = UO->getSubExpr();
12258 switch (UO->getOpcode()) {
12270 case Stmt::ConditionalOperatorClass: {
12271 const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
12272 if (const Expr *lhs = cond->getLHS())
12273 CheckArrayAccess(lhs);
12274 if (const Expr *rhs = cond->getRHS())
12275 CheckArrayAccess(rhs);
12278 case Stmt::CXXOperatorCallExprClass: {
12279 const auto *OCE = cast<CXXOperatorCallExpr>(expr);
12280 for (const auto *Arg : OCE->arguments())
12281 CheckArrayAccess(Arg);
12290 //===--- CHECK: Objective-C retain cycles ----------------------------------//
12294 struct RetainCycleOwner {
12295 VarDecl *Variable = nullptr;
12297 SourceLocation Loc;
12298 bool Indirect = false;
12300 RetainCycleOwner() = default;
12302 void setLocsFrom(Expr *e) {
12303 Loc = e->getExprLoc();
12304 Range = e->getSourceRange();
12310 /// Consider whether capturing the given variable can possibly lead to
12311 /// a retain cycle.
12312 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
12313 // In ARC, it's captured strongly iff the variable has __strong
12314 // lifetime. In MRR, it's captured strongly if the variable is
12315 // __block and has an appropriate type.
12316 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
12319 owner.Variable = var;
12321 owner.setLocsFrom(ref);
12325 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
12327 e = e->IgnoreParens();
12328 if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
12329 switch (cast->getCastKind()) {
12331 case CK_LValueBitCast:
12332 case CK_LValueToRValue:
12333 case CK_ARCReclaimReturnedObject:
12334 e = cast->getSubExpr();
12342 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
12343 ObjCIvarDecl *ivar = ref->getDecl();
12344 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
12347 // Try to find a retain cycle in the base.
12348 if (!findRetainCycleOwner(S, ref->getBase(), owner))
12351 if (ref->isFreeIvar()) owner.setLocsFrom(ref);
12352 owner.Indirect = true;
12356 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
12357 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
12358 if (!var) return false;
12359 return considerVariable(var, ref, owner);
12362 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
12363 if (member->isArrow()) return false;
12365 // Don't count this as an indirect ownership.
12366 e = member->getBase();
12370 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
12371 // Only pay attention to pseudo-objects on property references.
12372 ObjCPropertyRefExpr *pre
12373 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
12375 if (!pre) return false;
12376 if (pre->isImplicitProperty()) return false;
12377 ObjCPropertyDecl *property = pre->getExplicitProperty();
12378 if (!property->isRetaining() &&
12379 !(property->getPropertyIvarDecl() &&
12380 property->getPropertyIvarDecl()->getType()
12381 .getObjCLifetime() == Qualifiers::OCL_Strong))
12384 owner.Indirect = true;
12385 if (pre->isSuperReceiver()) {
12386 owner.Variable = S.getCurMethodDecl()->getSelfDecl();
12387 if (!owner.Variable)
12389 owner.Loc = pre->getLocation();
12390 owner.Range = pre->getSourceRange();
12393 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
12394 ->getSourceExpr());
12406 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
12407 ASTContext &Context;
12409 Expr *Capturer = nullptr;
12410 bool VarWillBeReased = false;
12412 FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
12413 : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
12414 Context(Context), Variable(variable) {}
12416 void VisitDeclRefExpr(DeclRefExpr *ref) {
12417 if (ref->getDecl() == Variable && !Capturer)
12421 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
12422 if (Capturer) return;
12423 Visit(ref->getBase());
12424 if (Capturer && ref->isFreeIvar())
12428 void VisitBlockExpr(BlockExpr *block) {
12429 // Look inside nested blocks
12430 if (block->getBlockDecl()->capturesVariable(Variable))
12431 Visit(block->getBlockDecl()->getBody());
12434 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
12435 if (Capturer) return;
12436 if (OVE->getSourceExpr())
12437 Visit(OVE->getSourceExpr());
12440 void VisitBinaryOperator(BinaryOperator *BinOp) {
12441 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
12443 Expr *LHS = BinOp->getLHS();
12444 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
12445 if (DRE->getDecl() != Variable)
12447 if (Expr *RHS = BinOp->getRHS()) {
12448 RHS = RHS->IgnoreParenCasts();
12449 llvm::APSInt Value;
12451 (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0);
12459 /// Check whether the given argument is a block which captures a
12461 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
12462 assert(owner.Variable && owner.Loc.isValid());
12464 e = e->IgnoreParenCasts();
12466 // Look through [^{...} copy] and Block_copy(^{...}).
12467 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
12468 Selector Cmd = ME->getSelector();
12469 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
12470 e = ME->getInstanceReceiver();
12473 e = e->IgnoreParenCasts();
12475 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
12476 if (CE->getNumArgs() == 1) {
12477 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
12479 const IdentifierInfo *FnI = Fn->getIdentifier();
12480 if (FnI && FnI->isStr("_Block_copy")) {
12481 e = CE->getArg(0)->IgnoreParenCasts();
12487 BlockExpr *block = dyn_cast<BlockExpr>(e);
12488 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
12491 FindCaptureVisitor visitor(S.Context, owner.Variable);
12492 visitor.Visit(block->getBlockDecl()->getBody());
12493 return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
12496 static void diagnoseRetainCycle(Sema &S, Expr *capturer,
12497 RetainCycleOwner &owner) {
12499 assert(owner.Variable && owner.Loc.isValid());
12501 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
12502 << owner.Variable << capturer->getSourceRange();
12503 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
12504 << owner.Indirect << owner.Range;
12507 /// Check for a keyword selector that starts with the word 'add' or
12509 static bool isSetterLikeSelector(Selector sel) {
12510 if (sel.isUnarySelector()) return false;
12512 StringRef str = sel.getNameForSlot(0);
12513 while (!str.empty() && str.front() == '_') str = str.substr(1);
12514 if (str.startswith("set"))
12515 str = str.substr(3);
12516 else if (str.startswith("add")) {
12517 // Specially whitelist 'addOperationWithBlock:'.
12518 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
12520 str = str.substr(3);
12525 if (str.empty()) return true;
12526 return !isLowercase(str.front());
12529 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S,
12530 ObjCMessageExpr *Message) {
12531 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
12532 Message->getReceiverInterface(),
12533 NSAPI::ClassId_NSMutableArray);
12534 if (!IsMutableArray) {
12538 Selector Sel = Message->getSelector();
12540 Optional<NSAPI::NSArrayMethodKind> MKOpt =
12541 S.NSAPIObj->getNSArrayMethodKind(Sel);
12546 NSAPI::NSArrayMethodKind MK = *MKOpt;
12549 case NSAPI::NSMutableArr_addObject:
12550 case NSAPI::NSMutableArr_insertObjectAtIndex:
12551 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
12553 case NSAPI::NSMutableArr_replaceObjectAtIndex:
12564 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S,
12565 ObjCMessageExpr *Message) {
12566 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
12567 Message->getReceiverInterface(),
12568 NSAPI::ClassId_NSMutableDictionary);
12569 if (!IsMutableDictionary) {
12573 Selector Sel = Message->getSelector();
12575 Optional<NSAPI::NSDictionaryMethodKind> MKOpt =
12576 S.NSAPIObj->getNSDictionaryMethodKind(Sel);
12581 NSAPI::NSDictionaryMethodKind MK = *MKOpt;
12584 case NSAPI::NSMutableDict_setObjectForKey:
12585 case NSAPI::NSMutableDict_setValueForKey:
12586 case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
12596 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
12597 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
12598 Message->getReceiverInterface(),
12599 NSAPI::ClassId_NSMutableSet);
12601 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
12602 Message->getReceiverInterface(),
12603 NSAPI::ClassId_NSMutableOrderedSet);
12604 if (!IsMutableSet && !IsMutableOrderedSet) {
12608 Selector Sel = Message->getSelector();
12610 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel);
12615 NSAPI::NSSetMethodKind MK = *MKOpt;
12618 case NSAPI::NSMutableSet_addObject:
12619 case NSAPI::NSOrderedSet_setObjectAtIndex:
12620 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
12621 case NSAPI::NSOrderedSet_insertObjectAtIndex:
12623 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
12630 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
12631 if (!Message->isInstanceMessage()) {
12635 Optional<int> ArgOpt;
12637 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
12638 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
12639 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
12643 int ArgIndex = *ArgOpt;
12645 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
12646 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
12647 Arg = OE->getSourceExpr()->IgnoreImpCasts();
12650 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
12651 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
12652 if (ArgRE->isObjCSelfExpr()) {
12653 Diag(Message->getSourceRange().getBegin(),
12654 diag::warn_objc_circular_container)
12655 << ArgRE->getDecl() << StringRef("'super'");
12659 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();
12661 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
12662 Receiver = OE->getSourceExpr()->IgnoreImpCasts();
12665 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
12666 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
12667 if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
12668 ValueDecl *Decl = ReceiverRE->getDecl();
12669 Diag(Message->getSourceRange().getBegin(),
12670 diag::warn_objc_circular_container)
12672 if (!ArgRE->isObjCSelfExpr()) {
12673 Diag(Decl->getLocation(),
12674 diag::note_objc_circular_container_declared_here)
12679 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
12680 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
12681 if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
12682 ObjCIvarDecl *Decl = IvarRE->getDecl();
12683 Diag(Message->getSourceRange().getBegin(),
12684 diag::warn_objc_circular_container)
12686 Diag(Decl->getLocation(),
12687 diag::note_objc_circular_container_declared_here)
12695 /// Check a message send to see if it's likely to cause a retain cycle.
12696 void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
12697 // Only check instance methods whose selector looks like a setter.
12698 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
12701 // Try to find a variable that the receiver is strongly owned by.
12702 RetainCycleOwner owner;
12703 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
12704 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
12707 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
12708 owner.Variable = getCurMethodDecl()->getSelfDecl();
12709 owner.Loc = msg->getSuperLoc();
12710 owner.Range = msg->getSuperLoc();
12713 // Check whether the receiver is captured by any of the arguments.
12714 const ObjCMethodDecl *MD = msg->getMethodDecl();
12715 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
12716 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
12717 // noescape blocks should not be retained by the method.
12718 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
12720 return diagnoseRetainCycle(*this, capturer, owner);
12725 /// Check a property assign to see if it's likely to cause a retain cycle.
12726 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
12727 RetainCycleOwner owner;
12728 if (!findRetainCycleOwner(*this, receiver, owner))
12731 if (Expr *capturer = findCapturingExpr(*this, argument, owner))
12732 diagnoseRetainCycle(*this, capturer, owner);
12735 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
12736 RetainCycleOwner Owner;
12737 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner))
12740 // Because we don't have an expression for the variable, we have to set the
12741 // location explicitly here.
12742 Owner.Loc = Var->getLocation();
12743 Owner.Range = Var->getSourceRange();
12745 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner))
12746 diagnoseRetainCycle(*this, Capturer, Owner);
12749 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
12750 Expr *RHS, bool isProperty) {
12751 // Check if RHS is an Objective-C object literal, which also can get
12752 // immediately zapped in a weak reference. Note that we explicitly
12753 // allow ObjCStringLiterals, since those are designed to never really die.
12754 RHS = RHS->IgnoreParenImpCasts();
12756 // This enum needs to match with the 'select' in
12757 // warn_objc_arc_literal_assign (off-by-1).
12758 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS);
12759 if (Kind == Sema::LK_String || Kind == Sema::LK_None)
12762 S.Diag(Loc, diag::warn_arc_literal_assign)
12764 << (isProperty ? 0 : 1)
12765 << RHS->getSourceRange();
12770 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc,
12771 Qualifiers::ObjCLifetime LT,
12772 Expr *RHS, bool isProperty) {
12773 // Strip off any implicit cast added to get to the one ARC-specific.
12774 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
12775 if (cast->getCastKind() == CK_ARCConsumeObject) {
12776 S.Diag(Loc, diag::warn_arc_retained_assign)
12777 << (LT == Qualifiers::OCL_ExplicitNone)
12778 << (isProperty ? 0 : 1)
12779 << RHS->getSourceRange();
12782 RHS = cast->getSubExpr();
12785 if (LT == Qualifiers::OCL_Weak &&
12786 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty))
12792 bool Sema::checkUnsafeAssigns(SourceLocation Loc,
12793 QualType LHS, Expr *RHS) {
12794 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();
12796 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
12799 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false))
12805 void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
12806 Expr *LHS, Expr *RHS) {
12808 // PropertyRef on LHS type need be directly obtained from
12809 // its declaration as it has a PseudoType.
12810 ObjCPropertyRefExpr *PRE
12811 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens());
12812 if (PRE && !PRE->isImplicitProperty()) {
12813 const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
12815 LHSType = PD->getType();
12818 if (LHSType.isNull())
12819 LHSType = LHS->getType();
12821 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();
12823 if (LT == Qualifiers::OCL_Weak) {
12824 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
12825 getCurFunction()->markSafeWeakUse(LHS);
12828 if (checkUnsafeAssigns(Loc, LHSType, RHS))
12831 // FIXME. Check for other life times.
12832 if (LT != Qualifiers::OCL_None)
12836 if (PRE->isImplicitProperty())
12838 const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
12842 unsigned Attributes = PD->getPropertyAttributes();
12843 if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) {
12844 // when 'assign' attribute was not explicitly specified
12845 // by user, ignore it and rely on property type itself
12846 // for lifetime info.
12847 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
12848 if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) &&
12849 LHSType->isObjCRetainableType())
12852 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
12853 if (cast->getCastKind() == CK_ARCConsumeObject) {
12854 Diag(Loc, diag::warn_arc_retained_property_assign)
12855 << RHS->getSourceRange();
12858 RHS = cast->getSubExpr();
12861 else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) {
12862 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true))
12868 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//
12870 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
12871 SourceLocation StmtLoc,
12872 const NullStmt *Body) {
12873 // Do not warn if the body is a macro that expands to nothing, e.g:
12878 if (Body->hasLeadingEmptyMacro())
12881 // Get line numbers of statement and body.
12882 bool StmtLineInvalid;
12883 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc,
12885 if (StmtLineInvalid)
12888 bool BodyLineInvalid;
12889 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(),
12891 if (BodyLineInvalid)
12894 // Warn if null statement and body are on the same line.
12895 if (StmtLine != BodyLine)
12901 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
12904 // Since this is a syntactic check, don't emit diagnostic for template
12905 // instantiations, this just adds noise.
12906 if (CurrentInstantiationScope)
12909 // The body should be a null statement.
12910 const NullStmt *NBody = dyn_cast<NullStmt>(Body);
12914 // Do the usual checks.
12915 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
12918 Diag(NBody->getSemiLoc(), DiagID);
12919 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
12922 void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
12923 const Stmt *PossibleBody) {
12924 assert(!CurrentInstantiationScope); // Ensured by caller
12926 SourceLocation StmtLoc;
12929 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
12930 StmtLoc = FS->getRParenLoc();
12931 Body = FS->getBody();
12932 DiagID = diag::warn_empty_for_body;
12933 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
12934 StmtLoc = WS->getCond()->getSourceRange().getEnd();
12935 Body = WS->getBody();
12936 DiagID = diag::warn_empty_while_body;
12938 return; // Neither `for' nor `while'.
12940 // The body should be a null statement.
12941 const NullStmt *NBody = dyn_cast<NullStmt>(Body);
12945 // Skip expensive checks if diagnostic is disabled.
12946 if (Diags.isIgnored(DiagID, NBody->getSemiLoc()))
12949 // Do the usual checks.
12950 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
12953 // `for(...);' and `while(...);' are popular idioms, so in order to keep
12954 // noise level low, emit diagnostics only if for/while is followed by a
12955 // CompoundStmt, e.g.:
12956 // for (int i = 0; i < n; i++);
12960 // or if for/while is followed by a statement with more indentation
12961 // than for/while itself:
12962 // for (int i = 0; i < n; i++);
12964 bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
12965 if (!ProbableTypo) {
12966 bool BodyColInvalid;
12967 unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
12968 PossibleBody->getLocStart(),
12970 if (BodyColInvalid)
12973 bool StmtColInvalid;
12974 unsigned StmtCol = SourceMgr.getPresumedColumnNumber(
12977 if (StmtColInvalid)
12980 if (BodyCol > StmtCol)
12981 ProbableTypo = true;
12984 if (ProbableTypo) {
12985 Diag(NBody->getSemiLoc(), DiagID);
12986 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
12990 //===--- CHECK: Warn on self move with std::move. -------------------------===//
12992 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
12993 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
12994 SourceLocation OpLoc) {
12995 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc))
12998 if (inTemplateInstantiation())
13001 // Strip parens and casts away.
13002 LHSExpr = LHSExpr->IgnoreParenImpCasts();
13003 RHSExpr = RHSExpr->IgnoreParenImpCasts();
13005 // Check for a call expression
13006 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
13007 if (!CE || CE->getNumArgs() != 1)
13010 // Check for a call to std::move
13011 if (!CE->isCallToStdMove())
13014 // Get argument from std::move
13015 RHSExpr = CE->getArg(0);
13017 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
13018 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
13020 // Two DeclRefExpr's, check that the decls are the same.
13021 if (LHSDeclRef && RHSDeclRef) {
13022 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
13024 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
13025 RHSDeclRef->getDecl()->getCanonicalDecl())
13028 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
13029 << LHSExpr->getSourceRange()
13030 << RHSExpr->getSourceRange();
13034 // Member variables require a different approach to check for self moves.
13035 // MemberExpr's are the same if every nested MemberExpr refers to the same
13036 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or
13037 // the base Expr's are CXXThisExpr's.
13038 const Expr *LHSBase = LHSExpr;
13039 const Expr *RHSBase = RHSExpr;
13040 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
13041 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
13042 if (!LHSME || !RHSME)
13045 while (LHSME && RHSME) {
13046 if (LHSME->getMemberDecl()->getCanonicalDecl() !=
13047 RHSME->getMemberDecl()->getCanonicalDecl())
13050 LHSBase = LHSME->getBase();
13051 RHSBase = RHSME->getBase();
13052 LHSME = dyn_cast<MemberExpr>(LHSBase);
13053 RHSME = dyn_cast<MemberExpr>(RHSBase);
13056 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
13057 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
13058 if (LHSDeclRef && RHSDeclRef) {
13059 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
13061 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
13062 RHSDeclRef->getDecl()->getCanonicalDecl())
13065 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
13066 << LHSExpr->getSourceRange()
13067 << RHSExpr->getSourceRange();
13071 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
13072 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
13073 << LHSExpr->getSourceRange()
13074 << RHSExpr->getSourceRange();
13077 //===--- Layout compatibility ----------------------------------------------//
13079 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
13081 /// Check if two enumeration types are layout-compatible.
13082 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
13083 // C++11 [dcl.enum] p8:
13084 // Two enumeration types are layout-compatible if they have the same
13085 // underlying type.
13086 return ED1->isComplete() && ED2->isComplete() &&
13087 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
13090 /// Check if two fields are layout-compatible.
13091 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
13092 FieldDecl *Field2) {
13093 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
13096 if (Field1->isBitField() != Field2->isBitField())
13099 if (Field1->isBitField()) {
13100 // Make sure that the bit-fields are the same length.
13101 unsigned Bits1 = Field1->getBitWidthValue(C);
13102 unsigned Bits2 = Field2->getBitWidthValue(C);
13104 if (Bits1 != Bits2)
13111 /// Check if two standard-layout structs are layout-compatible.
13112 /// (C++11 [class.mem] p17)
13113 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
13115 // If both records are C++ classes, check that base classes match.
13116 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
13117 // If one of records is a CXXRecordDecl we are in C++ mode,
13118 // thus the other one is a CXXRecordDecl, too.
13119 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
13120 // Check number of base classes.
13121 if (D1CXX->getNumBases() != D2CXX->getNumBases())
13124 // Check the base classes.
13125 for (CXXRecordDecl::base_class_const_iterator
13126 Base1 = D1CXX->bases_begin(),
13127 BaseEnd1 = D1CXX->bases_end(),
13128 Base2 = D2CXX->bases_begin();
13130 ++Base1, ++Base2) {
13131 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
13134 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
13135 // If only RD2 is a C++ class, it should have zero base classes.
13136 if (D2CXX->getNumBases() > 0)
13140 // Check the fields.
13141 RecordDecl::field_iterator Field2 = RD2->field_begin(),
13142 Field2End = RD2->field_end(),
13143 Field1 = RD1->field_begin(),
13144 Field1End = RD1->field_end();
13145 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
13146 if (!isLayoutCompatible(C, *Field1, *Field2))
13149 if (Field1 != Field1End || Field2 != Field2End)
13155 /// Check if two standard-layout unions are layout-compatible.
13156 /// (C++11 [class.mem] p18)
13157 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
13159 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
13160 for (auto *Field2 : RD2->fields())
13161 UnmatchedFields.insert(Field2);
13163 for (auto *Field1 : RD1->fields()) {
13164 llvm::SmallPtrSet<FieldDecl *, 8>::iterator
13165 I = UnmatchedFields.begin(),
13166 E = UnmatchedFields.end();
13168 for ( ; I != E; ++I) {
13169 if (isLayoutCompatible(C, Field1, *I)) {
13170 bool Result = UnmatchedFields.erase(*I);
13180 return UnmatchedFields.empty();
13183 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
13185 if (RD1->isUnion() != RD2->isUnion())
13188 if (RD1->isUnion())
13189 return isLayoutCompatibleUnion(C, RD1, RD2);
13191 return isLayoutCompatibleStruct(C, RD1, RD2);
13194 /// Check if two types are layout-compatible in C++11 sense.
13195 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
13196 if (T1.isNull() || T2.isNull())
13199 // C++11 [basic.types] p11:
13200 // If two types T1 and T2 are the same type, then T1 and T2 are
13201 // layout-compatible types.
13202 if (C.hasSameType(T1, T2))
13205 T1 = T1.getCanonicalType().getUnqualifiedType();
13206 T2 = T2.getCanonicalType().getUnqualifiedType();
13208 const Type::TypeClass TC1 = T1->getTypeClass();
13209 const Type::TypeClass TC2 = T2->getTypeClass();
13214 if (TC1 == Type::Enum) {
13215 return isLayoutCompatible(C,
13216 cast<EnumType>(T1)->getDecl(),
13217 cast<EnumType>(T2)->getDecl());
13218 } else if (TC1 == Type::Record) {
13219 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
13222 return isLayoutCompatible(C,
13223 cast<RecordType>(T1)->getDecl(),
13224 cast<RecordType>(T2)->getDecl());
13230 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
13232 /// Given a type tag expression find the type tag itself.
13234 /// \param TypeExpr Type tag expression, as it appears in user's code.
13236 /// \param VD Declaration of an identifier that appears in a type tag.
13238 /// \param MagicValue Type tag magic value.
13239 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
13240 const ValueDecl **VD, uint64_t *MagicValue) {
13245 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();
13247 switch (TypeExpr->getStmtClass()) {
13248 case Stmt::UnaryOperatorClass: {
13249 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
13250 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
13251 TypeExpr = UO->getSubExpr();
13257 case Stmt::DeclRefExprClass: {
13258 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
13259 *VD = DRE->getDecl();
13263 case Stmt::IntegerLiteralClass: {
13264 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
13265 llvm::APInt MagicValueAPInt = IL->getValue();
13266 if (MagicValueAPInt.getActiveBits() <= 64) {
13267 *MagicValue = MagicValueAPInt.getZExtValue();
13273 case Stmt::BinaryConditionalOperatorClass:
13274 case Stmt::ConditionalOperatorClass: {
13275 const AbstractConditionalOperator *ACO =
13276 cast<AbstractConditionalOperator>(TypeExpr);
13278 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx)) {
13280 TypeExpr = ACO->getTrueExpr();
13282 TypeExpr = ACO->getFalseExpr();
13288 case Stmt::BinaryOperatorClass: {
13289 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
13290 if (BO->getOpcode() == BO_Comma) {
13291 TypeExpr = BO->getRHS();
13303 /// Retrieve the C type corresponding to type tag TypeExpr.
13305 /// \param TypeExpr Expression that specifies a type tag.
13307 /// \param MagicValues Registered magic values.
13309 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
13312 /// \param TypeInfo Information about the corresponding C type.
13314 /// \returns true if the corresponding C type was found.
13315 static bool GetMatchingCType(
13316 const IdentifierInfo *ArgumentKind,
13317 const Expr *TypeExpr, const ASTContext &Ctx,
13318 const llvm::DenseMap<Sema::TypeTagMagicValue,
13319 Sema::TypeTagData> *MagicValues,
13320 bool &FoundWrongKind,
13321 Sema::TypeTagData &TypeInfo) {
13322 FoundWrongKind = false;
13324 // Variable declaration that has type_tag_for_datatype attribute.
13325 const ValueDecl *VD = nullptr;
13327 uint64_t MagicValue;
13329 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue))
13333 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
13334 if (I->getArgumentKind() != ArgumentKind) {
13335 FoundWrongKind = true;
13338 TypeInfo.Type = I->getMatchingCType();
13339 TypeInfo.LayoutCompatible = I->getLayoutCompatible();
13340 TypeInfo.MustBeNull = I->getMustBeNull();
13349 llvm::DenseMap<Sema::TypeTagMagicValue,
13350 Sema::TypeTagData>::const_iterator I =
13351 MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
13352 if (I == MagicValues->end())
13355 TypeInfo = I->second;
13359 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
13360 uint64_t MagicValue, QualType Type,
13361 bool LayoutCompatible,
13363 if (!TypeTagForDatatypeMagicValues)
13364 TypeTagForDatatypeMagicValues.reset(
13365 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);
13367 TypeTagMagicValue Magic(ArgumentKind, MagicValue);
13368 (*TypeTagForDatatypeMagicValues)[Magic] =
13369 TypeTagData(Type, LayoutCompatible, MustBeNull);
13372 static bool IsSameCharType(QualType T1, QualType T2) {
13373 const BuiltinType *BT1 = T1->getAs<BuiltinType>();
13377 const BuiltinType *BT2 = T2->getAs<BuiltinType>();
13381 BuiltinType::Kind T1Kind = BT1->getKind();
13382 BuiltinType::Kind T2Kind = BT2->getKind();
13384 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
13385 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
13386 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
13387 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
13390 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
13391 const ArrayRef<const Expr *> ExprArgs,
13392 SourceLocation CallSiteLoc) {
13393 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
13394 bool IsPointerAttr = Attr->getIsPointer();
13396 // Retrieve the argument representing the 'type_tag'.
13397 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
13398 if (TypeTagIdxAST >= ExprArgs.size()) {
13399 Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
13400 << 0 << Attr->getTypeTagIdx().getSourceIndex();
13403 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
13404 bool FoundWrongKind;
13405 TypeTagData TypeInfo;
13406 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
13407 TypeTagForDatatypeMagicValues.get(),
13408 FoundWrongKind, TypeInfo)) {
13409 if (FoundWrongKind)
13410 Diag(TypeTagExpr->getExprLoc(),
13411 diag::warn_type_tag_for_datatype_wrong_kind)
13412 << TypeTagExpr->getSourceRange();
13416 // Retrieve the argument representing the 'arg_idx'.
13417 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
13418 if (ArgumentIdxAST >= ExprArgs.size()) {
13419 Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
13420 << 1 << Attr->getArgumentIdx().getSourceIndex();
13423 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
13424 if (IsPointerAttr) {
13425 // Skip implicit cast of pointer to `void *' (as a function argument).
13426 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
13427 if (ICE->getType()->isVoidPointerType() &&
13428 ICE->getCastKind() == CK_BitCast)
13429 ArgumentExpr = ICE->getSubExpr();
13431 QualType ArgumentType = ArgumentExpr->getType();
13433 // Passing a `void*' pointer shouldn't trigger a warning.
13434 if (IsPointerAttr && ArgumentType->isVoidPointerType())
13437 if (TypeInfo.MustBeNull) {
13438 // Type tag with matching void type requires a null pointer.
13439 if (!ArgumentExpr->isNullPointerConstant(Context,
13440 Expr::NPC_ValueDependentIsNotNull)) {
13441 Diag(ArgumentExpr->getExprLoc(),
13442 diag::warn_type_safety_null_pointer_required)
13443 << ArgumentKind->getName()
13444 << ArgumentExpr->getSourceRange()
13445 << TypeTagExpr->getSourceRange();
13450 QualType RequiredType = TypeInfo.Type;
13452 RequiredType = Context.getPointerType(RequiredType);
13454 bool mismatch = false;
13455 if (!TypeInfo.LayoutCompatible) {
13456 mismatch = !Context.hasSameType(ArgumentType, RequiredType);
13458 // C++11 [basic.fundamental] p1:
13459 // Plain char, signed char, and unsigned char are three distinct types.
13461 // But we treat plain `char' as equivalent to `signed char' or `unsigned
13462 // char' depending on the current char signedness mode.
13464 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
13465 RequiredType->getPointeeType())) ||
13466 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
13470 mismatch = !isLayoutCompatible(Context,
13471 ArgumentType->getPointeeType(),
13472 RequiredType->getPointeeType());
13474 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);
13477 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
13478 << ArgumentType << ArgumentKind
13479 << TypeInfo.LayoutCompatible << RequiredType
13480 << ArgumentExpr->getSourceRange()
13481 << TypeTagExpr->getSourceRange();
13484 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
13485 CharUnits Alignment) {
13486 MisalignedMembers.emplace_back(E, RD, MD, Alignment);
13489 void Sema::DiagnoseMisalignedMembers() {
13490 for (MisalignedMember &m : MisalignedMembers) {
13491 const NamedDecl *ND = m.RD;
13492 if (ND->getName().empty()) {
13493 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
13496 Diag(m.E->getLocStart(), diag::warn_taking_address_of_packed_member)
13497 << m.MD << ND << m.E->getSourceRange();
13499 MisalignedMembers.clear();
13502 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
13503 E = E->IgnoreParens();
13504 if (!T->isPointerType() && !T->isIntegerType())
13506 if (isa<UnaryOperator>(E) &&
13507 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
13508 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
13509 if (isa<MemberExpr>(Op)) {
13510 auto MA = std::find(MisalignedMembers.begin(), MisalignedMembers.end(),
13511 MisalignedMember(Op));
13512 if (MA != MisalignedMembers.end() &&
13513 (T->isIntegerType() ||
13514 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
13515 Context.getTypeAlignInChars(
13516 T->getPointeeType()) <= MA->Alignment))))
13517 MisalignedMembers.erase(MA);
13522 void Sema::RefersToMemberWithReducedAlignment(
13524 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
13526 const auto *ME = dyn_cast<MemberExpr>(E);
13530 // No need to check expressions with an __unaligned-qualified type.
13531 if (E->getType().getQualifiers().hasUnaligned())
13534 // For a chain of MemberExpr like "a.b.c.d" this list
13535 // will keep FieldDecl's like [d, c, b].
13536 SmallVector<FieldDecl *, 4> ReverseMemberChain;
13537 const MemberExpr *TopME = nullptr;
13538 bool AnyIsPacked = false;
13540 QualType BaseType = ME->getBase()->getType();
13542 BaseType = BaseType->getPointeeType();
13543 RecordDecl *RD = BaseType->getAs<RecordType>()->getDecl();
13544 if (RD->isInvalidDecl())
13547 ValueDecl *MD = ME->getMemberDecl();
13548 auto *FD = dyn_cast<FieldDecl>(MD);
13549 // We do not care about non-data members.
13550 if (!FD || FD->isInvalidDecl())
13554 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
13555 ReverseMemberChain.push_back(FD);
13558 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
13560 assert(TopME && "We did not compute a topmost MemberExpr!");
13562 // Not the scope of this diagnostic.
13566 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
13567 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
13568 // TODO: The innermost base of the member expression may be too complicated.
13569 // For now, just disregard these cases. This is left for future
13571 if (!DRE && !isa<CXXThisExpr>(TopBase))
13574 // Alignment expected by the whole expression.
13575 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());
13577 // No need to do anything else with this case.
13578 if (ExpectedAlignment.isOne())
13581 // Synthesize offset of the whole access.
13583 for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
13585 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
13588 // Compute the CompleteObjectAlignment as the alignment of the whole chain.
13589 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
13590 ReverseMemberChain.back()->getParent()->getTypeForDecl());
13592 // The base expression of the innermost MemberExpr may give
13593 // stronger guarantees than the class containing the member.
13594 if (DRE && !TopME->isArrow()) {
13595 const ValueDecl *VD = DRE->getDecl();
13596 if (!VD->getType()->isReferenceType())
13597 CompleteObjectAlignment =
13598 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
13601 // Check if the synthesized offset fulfills the alignment.
13602 if (Offset % ExpectedAlignment != 0 ||
13603 // It may fulfill the offset it but the effective alignment may still be
13604 // lower than the expected expression alignment.
13605 CompleteObjectAlignment < ExpectedAlignment) {
13606 // If this happens, we want to determine a sensible culprit of this.
13607 // Intuitively, watching the chain of member expressions from right to
13608 // left, we start with the required alignment (as required by the field
13609 // type) but some packed attribute in that chain has reduced the alignment.
13610 // It may happen that another packed structure increases it again. But if
13611 // we are here such increase has not been enough. So pointing the first
13612 // FieldDecl that either is packed or else its RecordDecl is,
13613 // seems reasonable.
13614 FieldDecl *FD = nullptr;
13615 CharUnits Alignment;
13616 for (FieldDecl *FDI : ReverseMemberChain) {
13617 if (FDI->hasAttr<PackedAttr>() ||
13618 FDI->getParent()->hasAttr<PackedAttr>()) {
13620 Alignment = std::min(
13621 Context.getTypeAlignInChars(FD->getType()),
13622 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
13626 assert(FD && "We did not find a packed FieldDecl!");
13627 Action(E, FD->getParent(), FD, Alignment);
13631 void Sema::CheckAddressOfPackedMember(Expr *rhs) {
13632 using namespace std::placeholders;
13634 RefersToMemberWithReducedAlignment(
13635 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,