//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include <cstdarg>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    if (Signed)
      Result = LHSAP.sadd_ov(RHSAP, Overflow);
    else
      Result = LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    if (Signed)
      Result = LHSAP.ssub_ov(RHSAP, Overflow);
    else
      Result = LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    if (Signed)
      Result = LHSAP.smul_ov(RHSAP, Overflow);
    else
      Result = LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}
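// Example (illustrative, not from the upstream sources): given the C code
//
//   int a = 2147483647 + 1;   // constant signed 32-bit addition
//
// both operands are ConstantInts, so sadd_ov() reports Overflow = true and the
// emitted overflow check is kept; for '1 + 2' it reports false, which lets
// CanElideOverflowCheck() below drop the check entirely.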

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;  // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E;      // Entire expr, for error unsupported.  May not be binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                        const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return llvm::None;

  QualType BaseTy = Base->getType();
  if (!BaseTy->isPromotableIntegerType() ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return llvm::None;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).hasValue();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types are less than half the size of the promoted type.
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}
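// Worked example (illustrative only): with -fsanitize=unsigned-integer-overflow
// and 32-bit int, multiplying two promoted 'unsigned char' values cannot
// overflow the promoted type (2 * 8 < 32), so the check is elided; multiplying
// two promoted 'unsigned short' values can (2 * 16 is not < 32), so it is kept.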

/// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
static void updateFastMathFlags(llvm::FastMathFlags &FMF,
                                FPOptions FPFeatures) {
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
}

/// Propagate fast-math flags from \p Op to the instruction in \p V.
static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
  if (auto *I = dyn_cast<llvm::Instruction>(V)) {
    llvm::FastMathFlags FMF = I->getFastMathFlags();
    updateFastMathFlags(FMF, Op.FPFeatures);
    I->setFastMathFlags(FMF);
  }
  return V;
}

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
            dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy =
          dyn_cast<TypedefType>(E->getType()))
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.EmitAlignmentAssumption(V, E, AVAttr->getLocation(),
                                AlignmentCI->getZExtValue());
  }

  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion to or from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again.  Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }
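  // Illustrative note (not in the upstream file): when an i1 comparison has
  // just been zero-extended to i32 and is immediately needed as a truth value
  // again, the ZExtInst peephole above reuses the original i1 instead of
  // emitting 'icmp ne i32 %zext, 0', and deletes the zext if it has no other
  // uses.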

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(CGF.getContext().getSourceManager());
    llvm_unreachable("Stmt can't have complex result type!");
  }

  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
    llvm::Value *Args[] = {
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, Version.getMajor()),
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, Min ? *Min : 0),
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, SMin ? *SMin : 0),
    };

    return CGF.EmitBuiltinAvailable(Args);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);

  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer();
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, though, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
      return propagateFMFlags(V, Ops);
    }
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
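  // Behavior sketch (illustrative, not from the upstream sources): for
  // 'int a, b; a * b' the signed path above picks the IR based on the
  // signed-overflow mode (-fwrapv / default / -ftrapv):
  //   SOB_Defined   -> plain 'mul i32' (wrapping is defined)
  //   SOB_Undefined -> 'mul nsw i32', or a checked multiply under
  //                    -fsanitize=signed-integer-overflow
  //   SOB_Trapping  -> checked multiply unless CanElideOverflowCheck() proves
  //                    overflow impossible.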

  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP) \
  Value *VisitBin ## OP(const BinaryOperator *E) {                        \
    return Emit ## OP(EmitBinOps(E));                                     \
  }                                                                       \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {      \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);         \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP
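  // For reference (illustrative): HANDLEBINOP(Add) expands to the two members
  //   Value *VisitBinAdd(const BinaryOperator *E) {
  //     return EmitAdd(EmitBinOps(E));
  //   }
  //   Value *VisitBinAddAssign(const CompoundAssignOperator *E) {
  //     return EmitCompoundAssign(E, &ScalarExprEmitter::EmitAdd);
  //   }
  // so each operator and its compound-assignment form is routed through the
  // matching Emit* helper above.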

  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc);
#define VISITCOMP(CODE, UI, SI, FP) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value.  This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Type *SrcTy = Src->getType();

  llvm::Value *Check = nullptr;
  if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) {
    // Integer to floating-point. This can fail for unsigned short -> __half
    // or unsigned __int128 -> float.
    assert(DstType->isFloatingType());
    bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType();

    APFloat LargestFloat =
      APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType));
    APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned);

    bool IsExact;
    if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero,
                                      &IsExact) != APFloat::opOK)
      // The range of representable values of this floating point type includes
      // all values of this integer type. Don't need an overflow check.
      return;

    llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt);
    if (SrcIsUnsigned)
      Check = Builder.CreateICmpULE(Src, Max);
    else {
      llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt);
      llvm::Value *GE = Builder.CreateICmpSGE(Src, Min);
      llvm::Value *LE = Builder.CreateICmpSLE(Src, Max);
      Check = Builder.CreateAnd(GE, LE);
    }
  } else {
    const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);
    if (isa<llvm::IntegerType>(DstTy)) {
      // Floating-point to integer. This has undefined behavior if the source is
      // +-Inf, NaN, or doesn't fit into the destination type (after truncation
      // towards zero).
      unsigned Width = CGF.getContext().getIntWidth(DstType);
      bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

      APSInt Min = APSInt::getMinValue(Width, Unsigned);
      APFloat MinSrc(SrcSema, APFloat::uninitialized);
      if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
          APFloat::opOverflow)
        // Don't need an overflow check for lower bound. Just check for
        // -Inf/NaN.
        MinSrc = APFloat::getInf(SrcSema, true);
      else
        // Find the largest value which is too small to represent (before
        // truncation toward zero).
        MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

      APSInt Max = APSInt::getMaxValue(Width, Unsigned);
      APFloat MaxSrc(SrcSema, APFloat::uninitialized);
      if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
          APFloat::opOverflow)
        // Don't need an overflow check for upper bound. Just check for
        // +Inf/NaN.
        MaxSrc = APFloat::getInf(SrcSema, false);
      else
        // Find the smallest value which is too large to represent (before
        // truncation toward zero).
        MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

      // If we're converting from __half, convert the range to float to match
      // the type of src.
      if (OrigSrcType->isHalfType()) {
        const llvm::fltSemantics &Sema =
          CGF.getContext().getFloatTypeSemantics(SrcType);
        bool IsInexact;
        MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
        MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
      }

      llvm::Value *GE =
        Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
      llvm::Value *LE =
        Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
      Check = Builder.CreateAnd(GE, LE);
    } else {
      // FIXME: Maybe split this sanitizer out from float-cast-overflow.
      //
      // Floating-point to floating-point. This has undefined behavior if the
      // source is not in the range of representable values of the destination
      // type. The C and C++ standards are spectacularly unclear here. We
      // diagnose finite out-of-range conversions, but allow infinities and NaNs
      // to convert to the corresponding value in the smaller type.
      //
      // C11 Annex F gives all such conversions defined behavior for IEC 60559
      // conforming implementations. Unfortunately, LLVM's fptrunc instruction
      // does not.

      // Converting from a lower rank to a higher rank can never have
      // undefined behavior, since higher-rank types must have a superset
      // of values of lower-rank types.
      if (CGF.getContext().getFloatingTypeOrder(OrigSrcType, DstType) != 1)
        return;

      assert(!OrigSrcType->isHalfType() &&
             "should not check conversion from __half, it has the lowest rank");

      const llvm::fltSemantics &DstSema =
        CGF.getContext().getFloatTypeSemantics(DstType);
      APFloat MinBad = APFloat::getLargest(DstSema, false);
      APFloat MaxBad = APFloat::getInf(DstSema, false);

      bool IsInexact;
      MinBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
      MaxBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);

      Value *AbsSrc = CGF.EmitNounwindRuntimeCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Src->getType()), Src);
      llvm::Value *GE =
        Builder.CreateFCmpOGT(AbsSrc, llvm::ConstantFP::get(VMContext, MinBad));
      llvm::Value *LE =
        Builder.CreateFCmpOLT(AbsSrc, llvm::ConstantFP::get(VMContext, MaxBad));
      Check = Builder.CreateNot(Builder.CreateAnd(GE, LE));
    }
  }

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}
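// Example of what the check catches (illustrative, not from the upstream
// sources): with -fsanitize=float-cast-overflow,
//   double d = 3e9; int i = (int)d;
// fails the range test above for a 32-bit int, since 3e9 is not below the
// smallest too-large value (2147483648.0), so the FloatCastOverflow handler
// runs instead of executing the undefined fptosi.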

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}
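// Worked example (illustrative only): truncating the 32-bit value 384 (0x180)
// to 'unsigned char' yields 128 (0x80); zero-extending 128 back to 32 bits
// gives 128 != 384, so 'truncheck' is i1 false and the implicit-conversion
// handler reports a lossy truncation. Truncating 42 round-trips unchanged, so
// the check passes.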

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // NOTE: zero value is considered to be non-negative.
  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
                                       const char *Name) -> Value * {
    // Is this value a signed type?
    bool VSigned = VType->isSignedIntegerOrEnumerationType();
    llvm::Type *VTy = V->getType();
    if (!VSigned) {
      // If the value is unsigned, then it is never negative.
      // FIXME: can we encounter non-scalar VTy here?
      return llvm::ConstantInt::getFalse(VTy->getContext());
    }
    // Get the zero of the same type with which we will be comparing.
    llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
    // %V.isnegative = icmp slt %V, 0
    // I.e is %V *strictly* less than zero, does it have negative value?
    return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                              llvm::Twine(Name) + "." + V->getName() +
                                  ".negativitycheck");
  };

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}
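// Worked example (illustrative only): with -fsanitize=implicit-integer-sign-change,
// for 'unsigned u = -1;' the source int is negative while the unsigned
// destination can never be negative, so the two negativity tests disagree,
// 'signchangecheck' is i1 false, and the ImplicitConversion handler reports a
// sign change; 'unsigned u = 1;' passes the check.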

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) always anyways.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }

  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up this
  // function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isFixedPointType()) {
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
    } else if (DstType->isBooleanType()) {
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Src, "tobool");
    }

    llvm_unreachable(
        "Unhandled scalar conversion involving a fixed point type.");
  }

  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or FPExt,
      // depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    if (Opts.EmitImplicitIntegerSignChangeChecks)
      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
                                 NoncanonicalDstType, Loc);

    return Src;
  }

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be an ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type is
    // the same as the vector's element type (sans qualifiers)
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements
    unsigned NumElements = DstTy->getVectorNumElements();
    return Builder.CreateVectorSplat(NumElements, Src, "splat");
  }

  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
    unsigned DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(Src, DstTy, "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = SrcTy->getVectorElementType();
    llvm::Type *DstElementTy = DstTy->getVectorElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type.
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
                             Loc);

  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Use the intrinsic if the half type itself isn't supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
      // If the half type is supported, just use an fptrunc.
      return Builder.CreateFPTrunc(Src, DstTy);
    }
    DstTy = CGF.FloatTy;
  }

  if (isa<llvm::IntegerType>(SrcTy)) {
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    if (SrcType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }
    if (isa<llvm::IntegerType>(DstTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstTy)) {
    assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
           "Unknown real conversion");
    if (DstTy->getTypeID() < SrcTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  if (DstTy != ResTy) {
    if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
      assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
      Res = Builder.CreateCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
        Res);
    } else {
      Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
    }
  }

  if (Opts.EmitImplicitIntegerTruncationChecks)
    EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  if (Opts.EmitImplicitIntegerSignChangeChecks)
    EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  return Res;
}
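// Quick reference (illustrative, not from the upstream sources) for the
// arithmetic leg of EmitScalarConversion on a typical 64-bit target:
//   int  -> long   : sext i32 to i64       float -> int    : fptosi
//   uint -> long   : zext i32 to i64       int   -> float  : sitofp
//   long -> int    : trunc i64 to i32      float -> double : fpext
// with the implicit-truncation / sign-change sanitizer checks appended when
// the corresponding Opts flags are set.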

Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
                                                   QualType DstTy,
                                                   SourceLocation Loc) {
  using llvm::APInt;
  using llvm::ConstantInt;
  using llvm::Value;

  assert(SrcTy->isFixedPointType());
  assert(DstTy->isFixedPointType());

  FixedPointSemantics SrcFPSema =
      CGF.getContext().getFixedPointSemantics(SrcTy);
  FixedPointSemantics DstFPSema =
      CGF.getContext().getFixedPointSemantics(DstTy);
  unsigned SrcWidth = SrcFPSema.getWidth();
  unsigned DstWidth = DstFPSema.getWidth();
  unsigned SrcScale = SrcFPSema.getScale();
  unsigned DstScale = DstFPSema.getScale();
  bool SrcIsSigned = SrcFPSema.isSigned();
  bool DstIsSigned = DstFPSema.isSigned();

  llvm::Type *DstIntTy = Builder.getIntNTy(DstWidth);

  Value *Result = Src;
  unsigned ResultWidth = SrcWidth;

  if (!DstFPSema.isSaturated()) {
    // Downscale.
    if (DstScale < SrcScale)
      Result = SrcIsSigned ?
          Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") :
          Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");

    // Resize.
    Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");

    // Upscale.
    if (DstScale > SrcScale)
      Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
  } else {
    // Adjust the number of fractional bits.
    if (DstScale > SrcScale) {
      ResultWidth = SrcWidth + DstScale - SrcScale;
      llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth);
      Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
      Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
    } else if (DstScale < SrcScale) {
      Result = SrcIsSigned ?
          Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") :
          Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
    }

    // Handle saturation.
    bool LessIntBits = DstFPSema.getIntegralBits() < SrcFPSema.getIntegralBits();
    if (LessIntBits) {
      Value *Max = ConstantInt::get(
          CGF.getLLVMContext(),
          APFixedPoint::getMax(DstFPSema).getValue().extOrTrunc(ResultWidth));
      Value *TooHigh = SrcIsSigned ? Builder.CreateICmpSGT(Result, Max)
                                   : Builder.CreateICmpUGT(Result, Max);
      Result = Builder.CreateSelect(TooHigh, Max, Result, "satmax");
    }
    // Cannot overflow min to dest type if src is unsigned since all fixed
    // point types can cover the unsigned min of 0.
    if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
      Value *Min = ConstantInt::get(
          CGF.getLLVMContext(),
          APFixedPoint::getMin(DstFPSema).getValue().extOrTrunc(ResultWidth));
      Value *TooLow = Builder.CreateICmpSLT(Result, Min);
      Result = Builder.CreateSelect(TooLow, Min, Result, "satmin");
    }

    // Resize the integer part to get the final destination size.
    Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
  }
  return Result;
}
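// Worked example (illustrative, assuming the typical Embedded-C scales of 15
// for '_Accum' and 7 for 'short _Accum'): a non-saturating conversion from
// _Accum to short _Accum emits 'ashr i32 %x, 8' followed by a trunc to i16;
// the reverse direction sign-extends to i32 and then emits 'shl i32 %x, 8'.
// The saturated path additionally clamps to the destination's representable
// maximum/minimum via the "satmax"/"satmin" selects above.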

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
    CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
    SourceLocation Loc) {
  // Get the source element type.
  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    //  Complex != 0  -> (Real != 0) | (Imag != 0)
    Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type.
  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
}

Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
}
1528 /// Emit a sanitization check for the given "binary" operation (which
1529 /// might actually be a unary increment which has been lowered to a binary
1530 /// operation). The check passes if all values in \p Checks (which are \c i1),
1532 void ScalarExprEmitter::EmitBinOpCheck(
1533 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1534 assert(CGF.IsSanitizerScope);
1535 SanitizerHandler Check;
1536 SmallVector<llvm::Constant *, 4> StaticData;
1537 SmallVector<llvm::Value *, 2> DynamicData;
1539 BinaryOperatorKind Opcode = Info.Opcode;
1540 if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1541 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1543 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1544 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1545 if (UO && UO->getOpcode() == UO_Minus) {
1546 Check = SanitizerHandler::NegateOverflow;
1547 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1548 DynamicData.push_back(Info.RHS);
1549 } else {
1550 if (BinaryOperator::isShiftOp(Opcode)) {
1551 // Shift LHS negative or too large, or RHS out of bounds.
1552 Check = SanitizerHandler::ShiftOutOfBounds;
1553 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1554 StaticData.push_back(
1555 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1556 StaticData.push_back(
1557 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1558 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1559 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1560 Check = SanitizerHandler::DivremOverflow;
1561 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1562 } else {
1563 // Arithmetic overflow (+, -, *).
1564 switch (Opcode) {
1565 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1566 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1567 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1568 default: llvm_unreachable("unexpected opcode for bin op check");
1569 }
1570 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1572 DynamicData.push_back(Info.LHS);
1573 DynamicData.push_back(Info.RHS);
1574 }
1576 CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
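// Added illustrative note (not part of the original source): for a signed
// addition compiled with -fsanitize=signed-integer-overflow, the check routed
// through this helper is conceptually
//   %pair = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %ok   = not (extractvalue %pair, 1)
//   br i1 %ok, label %cont, label %handler   ; __ubsan_handle_add_overflow
// with the static data (source location, type descriptor) and the dynamic
// operands passed along to the runtime handler.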
1579 //===----------------------------------------------------------------------===//
1580 //                            Visitor Methods
1581 //===----------------------------------------------------------------------===//
1583 Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1584 CGF.ErrorUnsupported(E, "scalar expression");
1585 if (E->getType()->isVoidType())
1586 return nullptr;
1587 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1590 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1592 if (E->getNumSubExprs() == 2) {
1593 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1594 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1595 Value *Mask;
1597 llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
1598 unsigned LHSElts = LTy->getNumElements();
1600 Mask = RHS;
1602 llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
1604 // Mask off the high bits of each shuffle index.
1605 Value *MaskBits =
1606 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1607 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1610 // mask = mask & maskbits
1612 // n = extract mask i
1613 // x = extract val n
1614 // newv = insert newv, x, i
1615 llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
1616 MTy->getNumElements());
1617 Value* NewV = llvm::UndefValue::get(RTy);
1618 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1619 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1620 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1622 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1623 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1624 }
1625 return NewV;
1626 }
1628 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1629 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1631 SmallVector<llvm::Constant*, 32> indices;
1632 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1633 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1634 // Check for -1 and output it as undef in the IR.
1635 if (Idx.isSigned() && Idx.isAllOnesValue())
1636 indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
1637 else
1638 indices.push_back(Builder.getInt32(Idx.getZExtValue()));
1639 }
1641 Value *SV = llvm::ConstantVector::get(indices);
1642 return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
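// Added illustrative note (not part of the original source): with a constant
// mask, e.g. a hypothetical
//   __builtin_shufflevector(a, b, 0, 4, 1, 5)
// each mask operand becomes an i32 constant lane of the shufflevector
// instruction, and a -1 index is emitted as an undef lane as handled above.
// The two-operand form (value plus variable mask vector) instead expands to
// the extractelement/insertelement loop in the first branch of this function.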
1645 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1646 QualType SrcType = E->getSrcExpr()->getType(),
1647 DstType = E->getType();
1649 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1651 SrcType = CGF.getContext().getCanonicalType(SrcType);
1652 DstType = CGF.getContext().getCanonicalType(DstType);
1653 if (SrcType == DstType) return Src;
1655 assert(SrcType->isVectorType() &&
1656 "ConvertVector source type must be a vector");
1657 assert(DstType->isVectorType() &&
1658 "ConvertVector destination type must be a vector");
1660 llvm::Type *SrcTy = Src->getType();
1661 llvm::Type *DstTy = ConvertType(DstType);
1663 // Ignore conversions like int -> uint.
1664 if (SrcTy == DstTy)
1665 return Src;
1667 QualType SrcEltType = SrcType->getAs<VectorType>()->getElementType(),
1668 DstEltType = DstType->getAs<VectorType>()->getElementType();
1670 assert(SrcTy->isVectorTy() &&
1671 "ConvertVector source IR type must be a vector");
1672 assert(DstTy->isVectorTy() &&
1673 "ConvertVector destination IR type must be a vector");
1675 llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
1676 *DstEltTy = DstTy->getVectorElementType();
1678 if (DstEltType->isBooleanType()) {
1679 assert((SrcEltTy->isFloatingPointTy() ||
1680 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1682 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1683 if (SrcEltTy->isFloatingPointTy()) {
1684 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1685 } else {
1686 return Builder.CreateICmpNE(Src, Zero, "tobool");
1687 }
1688 }
1690 // We have the arithmetic types: real int/float.
1691 Value *Res = nullptr;
1693 if (isa<llvm::IntegerType>(SrcEltTy)) {
1694 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1695 if (isa<llvm::IntegerType>(DstEltTy))
1696 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1697 else if (InputSigned)
1698 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1699 else
1700 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1701 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1702 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1703 if (DstEltType->isSignedIntegerOrEnumerationType())
1704 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1705 else
1706 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1707 } else {
1708 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1709 "Unknown real conversion");
1710 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1711 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1712 else
1713 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1714 }
1716 return Res;
1717 }
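// Added illustrative note (not part of the original source): assuming a
// hypothetical use such as
//   typedef int   v4i __attribute__((ext_vector_type(4)));
//   typedef float v4f __attribute__((ext_vector_type(4)));
//   v4f r = __builtin_convertvector(iv, v4f);
// the ladder above would take the signed-integer path and emit a single
// "sitofp <4 x i32> ... to <4 x float>" instruction; boolean destinations are
// instead emitted as element-wise comparisons against zero.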
1719 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1720 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1721 CGF.EmitIgnoredExpr(E->getBase());
1722 return CGF.emitScalarConstant(Constant, E);
1723 }
1724 Expr::EvalResult Result;
1725 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1726 llvm::APSInt Value = Result.Val.getInt();
1727 CGF.EmitIgnoredExpr(E->getBase());
1728 return Builder.getInt(Value);
1729 }
1732 return EmitLoadOfLValue(E);
1735 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1736 TestAndClearIgnoreResultAssign();
1738 // Emit subscript expressions in rvalue context's. For most cases, this just
1739 // loads the lvalue formed by the subscript expr. However, we have to be
1740 // careful, because the base of a vector subscript is occasionally an rvalue,
1741 // so we can't get it as an lvalue.
1742 if (!E->getBase()->getType()->isVectorType())
1743 return EmitLoadOfLValue(E);
1745 // Handle the vector case. The base must be a vector, the index must be an
1746 // integer value.
1747 Value *Base = Visit(E->getBase());
1748 Value *Idx = Visit(E->getIdx());
1749 QualType IdxTy = E->getIdx()->getType();
1751 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1752 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1754 return Builder.CreateExtractElement(Base, Idx, "vecext");
1757 static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1758 unsigned Off, llvm::Type *I32Ty) {
1759 int MV = SVI->getMaskValue(Idx);
1760 if (MV == -1)
1761 return llvm::UndefValue::get(I32Ty);
1762 return llvm::ConstantInt::get(I32Ty, Off+MV);
1765 static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1766 if (C->getBitWidth() != 32) {
1767 assert(llvm::ConstantInt::isValueValidForType(I32Ty,
1768 C->getZExtValue()) &&
1769 "Index operand too large for shufflevector mask!");
1770 return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
1771 }
1772 return C;
1773 }
1775 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1776 bool Ignore = TestAndClearIgnoreResultAssign();
1778 assert (Ignore == false && "init list ignored");
1779 unsigned NumInitElements = E->getNumInits();
1781 if (E->hadArrayRangeDesignator())
1782 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1784 llvm::VectorType *VType =
1785 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1787 if (!VType) {
1788 if (NumInitElements == 0) {
1789 // C++11 value-initialization for the scalar.
1790 return EmitNullValue(E->getType());
1791 }
1792 // We have a scalar in braces. Just use the first element.
1793 return Visit(E->getInit(0));
1794 }
1796 unsigned ResElts = VType->getNumElements();
1798 // Loop over initializers collecting the Value for each, and remembering
1799 // whether the source was swizzle (ExtVectorElementExpr). This will allow
1800 // us to fold the shuffle for the swizzle into the shuffle for the vector
1801 // initializer, since LLVM optimizers generally do not want to touch
1802 // shuffles.
1803 unsigned CurIdx = 0;
1804 bool VIsUndefShuffle = false;
1805 llvm::Value *V = llvm::UndefValue::get(VType);
1806 for (unsigned i = 0; i != NumInitElements; ++i) {
1807 Expr *IE = E->getInit(i);
1808 Value *Init = Visit(IE);
1809 SmallVector<llvm::Constant*, 16> Args;
1811 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1813 // Handle scalar elements. If the scalar initializer is actually one
1814 // element of a different vector of the same width, use shuffle instead of
1815 // extract+insert.
1816 if (!VVT) {
1817 if (isa<ExtVectorElementExpr>(IE)) {
1818 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1820 if (EI->getVectorOperandType()->getNumElements() == ResElts) {
1821 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
1822 Value *LHS = nullptr, *RHS = nullptr;
1824 // insert into undef -> shuffle (src, undef)
1825 // shufflemask must use an i32
1826 Args.push_back(getAsInt32(C, CGF.Int32Ty));
1827 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1829 LHS = EI->getVectorOperand();
1831 VIsUndefShuffle = true;
1832 } else if (VIsUndefShuffle) {
1833 // insert into undefshuffle && size match -> shuffle (v, src)
1834 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1835 for (unsigned j = 0; j != CurIdx; ++j)
1836 Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
1837 Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
1838 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1840 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1841 RHS = EI->getVectorOperand();
1842 VIsUndefShuffle = false;
1844 if (!Args.empty()) {
1845 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1846 V = Builder.CreateShuffleVector(LHS, RHS, Mask);
1847 ++CurIdx;
1848 continue;
1849 }
1850 }
1851 }
1852 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1853 "vecinit");
1854 VIsUndefShuffle = false;
1855 ++CurIdx;
1856 continue;
1857 }
1859 unsigned InitElts = VVT->getNumElements();
1861 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1862 // input is the same width as the vector being constructed, generate an
1863 // optimized shuffle of the swizzle input into the result.
1864 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1865 if (isa<ExtVectorElementExpr>(IE)) {
1866 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1867 Value *SVOp = SVI->getOperand(0);
1868 llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
1870 if (OpTy->getNumElements() == ResElts) {
1871 for (unsigned j = 0; j != CurIdx; ++j) {
1872 // If the current vector initializer is a shuffle with undef, merge
1873 // this shuffle directly into it.
1874 if (VIsUndefShuffle) {
1875 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
1876 CGF.Int32Ty));
1877 } else {
1878 Args.push_back(Builder.getInt32(j));
1879 }
1880 }
1881 for (unsigned j = 0, je = InitElts; j != je; ++j)
1882 Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
1883 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1885 if (VIsUndefShuffle)
1886 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1892 // Extend init to result vector length, and then shuffle its contribution
1893 // to the vector initializer into V.
1895 for (unsigned j = 0; j != InitElts; ++j)
1896 Args.push_back(Builder.getInt32(j));
1897 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1898 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1899 Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
1900 Mask, "vext");
1902 Args.clear();
1903 for (unsigned j = 0; j != CurIdx; ++j)
1904 Args.push_back(Builder.getInt32(j));
1905 for (unsigned j = 0; j != InitElts; ++j)
1906 Args.push_back(Builder.getInt32(j+Offset));
1907 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1910 // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1911 // merging subsequent shuffles into this one.
1912 if (CurIdx == 0)
1913 std::swap(V, Init);
1914 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1915 V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
1916 VIsUndefShuffle = isa<llvm::UndefValue>(Init);
1920 // FIXME: evaluate codegen vs. shuffling against constant null vector.
1921 // Emit remaining default initializers.
1922 llvm::Type *EltTy = VType->getElementType();
1924 // Emit remaining default initializers
1925 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1926 Value *Idx = Builder.getInt32(CurIdx);
1927 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1928 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1929 }
1930 return V;
1931 }
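// Added illustrative note (not part of the original source): for a vector
// initializer such as a hypothetical
//   float4 v = { a, b, 0.0f, 0.0f };   // ext_vector_type(4)
// scalar elements are emitted as insertelement instructions at increasing
// indices, swizzle sources of matching width are folded into shufflevector
// masks as above, and any trailing elements left uninitialized are filled in
// with null constants by the loop that follows the main initializer loop.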
1933 bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1934 const Expr *E = CE->getSubExpr();
1936 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1937 return false;
1939 if (isa<CXXThisExpr>(E->IgnoreParens())) {
1940 // We always assume that 'this' is never null.
1941 return false;
1942 }
1944 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1945 // And that glvalue casts are never null.
1946 if (ICE->getValueKind() != VK_RValue)
1947 return false;
1948 }
1950 return true;
1951 }
1953 // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1954 // have to handle a broader range of conversions than explicit casts, as they
1955 // handle things like function to ptr-to-function decay etc.
1956 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1957 Expr *E = CE->getSubExpr();
1958 QualType DestTy = CE->getType();
1959 CastKind Kind = CE->getCastKind();
1961 // These cases are generally not written to ignore the result of
1962 // evaluating their sub-expressions, so we clear this now.
1963 bool Ignored = TestAndClearIgnoreResultAssign();
1965 // Since almost all cast kinds apply to scalars, this switch doesn't have
1966 // a default case, so the compiler will warn on a missing case. The cases
1967 // are in the same order as in the CastKind enum.
1968 switch (Kind) {
1969 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
1970 case CK_BuiltinFnToFnPtr:
1971 llvm_unreachable("builtin functions are handled elsewhere");
1973 case CK_LValueBitCast:
1974 case CK_ObjCObjectLValueCast: {
1975 Address Addr = EmitLValue(E).getAddress();
1976 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1977 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
1978 return EmitLoadOfLValue(LV, CE->getExprLoc());
1981 case CK_CPointerToObjCPointerCast:
1982 case CK_BlockPointerToObjCPointerCast:
1983 case CK_AnyPointerToBlockPointerCast:
1984 case CK_BitCast: {
1985 Value *Src = Visit(const_cast<Expr*>(E));
1986 llvm::Type *SrcTy = Src->getType();
1987 llvm::Type *DstTy = ConvertType(DestTy);
1988 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
1989 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
1990 llvm_unreachable("wrong cast for pointers in different address spaces"
1991 "(must be an address space cast)!");
1994 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
1995 if (auto PT = DestTy->getAs<PointerType>())
1996 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
1998 CodeGenFunction::CFITCK_UnrelatedCast,
2002 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2003 const QualType SrcType = E->getType();
2005 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2006 // Casting to pointer that could carry dynamic information (provided by
2007 // invariant.group) requires launder.
2008 Src = Builder.CreateLaunderInvariantGroup(Src);
2009 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2010 // Casting to pointer that does not carry dynamic information (provided
2011 // by invariant.group) requires stripping it. Note that we don't do it
2012 // if the source could not be dynamic type and destination could be
2013 // dynamic because dynamic information is already laundered. It is
2014 // because launder(strip(src)) == launder(src), so there is no need to
2015 // add extra strip before launder.
2016 Src = Builder.CreateStripInvariantGroup(Src);
2017 }
2018 }
2020 return Builder.CreateBitCast(Src, DstTy);
2021 }
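// Added illustrative note (not part of the original source): under
// -fstrict-vtable-pointers, a bitcast that may change whether the pointee can
// carry dynamic type information is wrapped in the invariant.group intrinsics,
// roughly
//   %p1 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
// when casting from a non-dynamic to a possibly-dynamic class pointer, and
// @llvm.strip.invariant.group for the opposite direction, so that vtable load
// optimizations remain correct across the cast.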
2022 case CK_AddressSpaceConversion: {
2023 Expr::EvalResult Result;
2024 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2025 Result.Val.isNullPointer()) {
2026 // If E has side effect, it is emitted even if its final result is a
2027 // null pointer. In that case, a DCE pass should be able to
2028 // eliminate the useless instructions emitted during translating E.
2029 if (Result.HasSideEffects)
2031 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2032 ConvertType(DestTy)), DestTy);
2034 // Since target may map different address spaces in AST to the same address
2035 // space, an address space conversion may end up as a bitcast.
2036 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2037 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2038 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2040 case CK_AtomicToNonAtomic:
2041 case CK_NonAtomicToAtomic:
2042 case CK_NoOp:
2043 case CK_UserDefinedConversion:
2044 return Visit(const_cast<Expr*>(E));
2046 case CK_BaseToDerived: {
2047 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2048 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2050 Address Base = CGF.EmitPointerWithAlignment(E);
2051 Address Derived =
2052 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2053 CE->path_begin(), CE->path_end(),
2054 CGF.ShouldNullCheckClassCastValue(CE));
2056 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2057 // performed and the object is not of the derived type.
2058 if (CGF.sanitizePerformTypeCheck())
2059 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2060 Derived.getPointer(), DestTy->getPointeeType());
2062 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2063 CGF.EmitVTablePtrCheckForCast(
2064 DestTy->getPointeeType(), Derived.getPointer(),
2065 /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
2068 return Derived.getPointer();
2070 case CK_UncheckedDerivedToBase:
2071 case CK_DerivedToBase: {
2072 // The EmitPointerWithAlignment path does this fine; just discard
2073 // the alignment.
2074 return CGF.EmitPointerWithAlignment(CE).getPointer();
2075 }
2077 case CK_Dynamic: {
2078 Address V = CGF.EmitPointerWithAlignment(E);
2079 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2080 return CGF.EmitDynamicCast(V, DCE);
2083 case CK_ArrayToPointerDecay:
2084 return CGF.EmitArrayToPointerDecay(E).getPointer();
2085 case CK_FunctionToPointerDecay:
2086 return EmitLValue(E).getPointer();
2088 case CK_NullToPointer:
2089 if (MustVisitNullValue(E))
2092 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2093 DestTy);
2095 case CK_NullToMemberPointer: {
2096 if (MustVisitNullValue(E))
2099 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2100 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2103 case CK_ReinterpretMemberPointer:
2104 case CK_BaseToDerivedMemberPointer:
2105 case CK_DerivedToBaseMemberPointer: {
2106 Value *Src = Visit(E);
2108 // Note that the AST doesn't distinguish between checked and
2109 // unchecked member pointer conversions, so we always have to
2110 // implement checked conversions here. This is inefficient when
2111 // actual control flow may be required in order to perform the
2112 // check, which it is for data member pointers (but not member
2113 // function pointers on Itanium and ARM).
2114 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2117 case CK_ARCProduceObject:
2118 return CGF.EmitARCRetainScalarExpr(E);
2119 case CK_ARCConsumeObject:
2120 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2121 case CK_ARCReclaimReturnedObject:
2122 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2123 case CK_ARCExtendBlockObject:
2124 return CGF.EmitARCExtendBlockObject(E);
2126 case CK_CopyAndAutoreleaseBlockObject:
2127 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2129 case CK_FloatingRealToComplex:
2130 case CK_FloatingComplexCast:
2131 case CK_IntegralRealToComplex:
2132 case CK_IntegralComplexCast:
2133 case CK_IntegralComplexToFloatingComplex:
2134 case CK_FloatingComplexToIntegralComplex:
2135 case CK_ConstructorConversion:
2136 case CK_ToUnion:
2137 llvm_unreachable("scalar cast to non-scalar value");
2139 case CK_LValueToRValue:
2140 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2141 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2142 return Visit(const_cast<Expr*>(E));
2144 case CK_IntegralToPointer: {
2145 Value *Src = Visit(const_cast<Expr*>(E));
2147 // First, convert to the correct width so that we control the kind of
2148 // extension.
2149 auto DestLLVMTy = ConvertType(DestTy);
2150 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2151 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2152 llvm::Value* IntResult =
2153 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2155 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2157 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2158 // Going from integer to pointer that could be dynamic requires reloading
2159 // dynamic information from invariant.group.
2160 if (DestTy.mayBeDynamicClass())
2161 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2162 }
2163 return IntToPtr;
2164 }
2165 case CK_PointerToIntegral: {
2166 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2167 auto *PtrExpr = Visit(E);
2169 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2170 const QualType SrcType = E->getType();
2172 // Casting to integer requires stripping dynamic information as it does
2173 // not carry it.
2174 if (SrcType.mayBeDynamicClass())
2175 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2176 }
2178 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2179 }
2180 case CK_ToVoid: {
2181 CGF.EmitIgnoredExpr(E);
2182 return nullptr;
2183 }
2184 case CK_VectorSplat: {
2185 llvm::Type *DstTy = ConvertType(DestTy);
2186 Value *Elt = Visit(const_cast<Expr*>(E));
2187 // Splat the element across to all elements
2188 unsigned NumElements = DstTy->getVectorNumElements();
2189 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2192 case CK_FixedPointCast:
2193 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2194 CE->getExprLoc());
2196 case CK_FixedPointToBoolean:
2197 assert(E->getType()->isFixedPointType() &&
2198 "Expected src type to be fixed point type");
2199 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2200 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2201 CE->getExprLoc());
2203 case CK_IntegralCast: {
2204 ScalarConversionOpts Opts;
2205 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2206 if (!ICE->isPartOfExplicitCast())
2207 Opts = ScalarConversionOpts(CGF.SanOpts);
2209 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2210 CE->getExprLoc(), Opts);
2212 case CK_IntegralToFloating:
2213 case CK_FloatingToIntegral:
2214 case CK_FloatingCast:
2215 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2216 CE->getExprLoc());
2217 case CK_BooleanToSignedIntegral: {
2218 ScalarConversionOpts Opts;
2219 Opts.TreatBooleanAsSigned = true;
2220 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2221 CE->getExprLoc(), Opts);
2223 case CK_IntegralToBoolean:
2224 return EmitIntToBoolConversion(Visit(E));
2225 case CK_PointerToBoolean:
2226 return EmitPointerToBoolConversion(Visit(E), E->getType());
2227 case CK_FloatingToBoolean:
2228 return EmitFloatToBoolConversion(Visit(E));
2229 case CK_MemberPointerToBoolean: {
2230 llvm::Value *MemPtr = Visit(E);
2231 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2232 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2235 case CK_FloatingComplexToReal:
2236 case CK_IntegralComplexToReal:
2237 return CGF.EmitComplexExpr(E, false, true).first;
2239 case CK_FloatingComplexToBoolean:
2240 case CK_IntegralComplexToBoolean: {
2241 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2243 // TODO: kill this function off, inline appropriate case here
2244 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2245 CE->getExprLoc());
2246 }
2248 case CK_ZeroToOCLOpaqueType: {
2249 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2250 DestTy->isOCLIntelSubgroupAVCType()) &&
2251 "CK_ZeroToOCLEvent cast on non-event type");
2252 return llvm::Constant::getNullValue(ConvertType(DestTy));
2255 case CK_IntToOCLSampler:
2256 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2258 } // end of switch
2260 llvm_unreachable("unknown scalar cast");
2261 }
2263 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2264 CodeGenFunction::StmtExprEvaluation eval(CGF);
2265 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2266 !E->getType()->isVoidType());
2267 if (!RetAlloca.isValid())
2268 return nullptr;
2269 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2270 E->getExprLoc());
2271 }
2273 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2274 CGF.enterFullExpression(E);
2275 CodeGenFunction::RunCleanupsScope Scope(CGF);
2276 Value *V = Visit(E->getSubExpr());
2277 // Defend against dominance problems caused by jumps out of expression
2278 // evaluation through the shared cleanup block.
2279 Scope.ForceCleanup({&V});
2280 return V;
2281 }
2283 //===----------------------------------------------------------------------===//
2284 //                                Unary Operators
2285 //===----------------------------------------------------------------------===//
2287 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2288 llvm::Value *InVal, bool IsInc) {
2289 BinOpInfo BinOp;
2290 BinOp.LHS = InVal;
2291 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2292 BinOp.Ty = E->getType();
2293 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2294 // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2295 BinOp.E = E;
2296 return BinOp;
2297 }
2299 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2300 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2301 llvm::Value *Amount =
2302 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2303 StringRef Name = IsInc ? "inc" : "dec";
2304 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2305 case LangOptions::SOB_Defined:
2306 return Builder.CreateAdd(InVal, Amount, Name);
2307 case LangOptions::SOB_Undefined:
2308 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2309 return Builder.CreateNSWAdd(InVal, Amount, Name);
2311 case LangOptions::SOB_Trapping:
2312 if (!E->canOverflow())
2313 return Builder.CreateNSWAdd(InVal, Amount, Name);
2314 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
2315 }
2316 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2317 }
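// Added illustrative note (not part of the original source): the three
// SignedOverflowBehavior cases correspond roughly to the usual driver modes:
//   -fwrapv   -> plain "add" (wrapping is defined behavior)
//   default   -> "add nsw", or a checked add when the signed-integer-overflow
//                sanitizer is enabled
//   -ftrapv   -> EmitOverflowCheckedBinOp, which traps (or calls the overflow
//                handler) when the increment overflows
// so ++i on a plain int is lowered to one of these three forms.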
2320 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2321 bool isInc, bool isPre) {
2323 QualType type = E->getSubExpr()->getType();
2324 llvm::PHINode *atomicPHI = nullptr;
2325 llvm::Value *value;
2326 llvm::Value *input;
2328 int amount = (isInc ? 1 : -1);
2329 bool isSubtraction = !isInc;
2331 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2332 type = atomicTy->getValueType();
2333 if (isInc && type->isBooleanType()) {
2334 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2335 if (isPre) {
2336 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
2337 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2338 return Builder.getTrue();
2339 }
2340 // For atomic bool increment, we just store true and return it for
2341 // preincrement, do an atomic swap with true for postincrement
2342 return Builder.CreateAtomicRMW(
2343 llvm::AtomicRMWInst::Xchg, LV.getPointer(), True,
2344 llvm::AtomicOrdering::SequentiallyConsistent);
2346 // Special case for atomic increment / decrement on integers, emit
2347 // atomicrmw instructions. We skip this if we want to be doing overflow
2348 // checking, and fall into the slow path with the atomic cmpxchg loop.
2349 if (!type->isBooleanType() && type->isIntegerType() &&
2350 !(type->isUnsignedIntegerType() &&
2351 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2352 CGF.getLangOpts().getSignedOverflowBehavior() !=
2353 LangOptions::SOB_Trapping) {
2354 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2355 llvm::AtomicRMWInst::Sub;
2356 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2357 llvm::Instruction::Sub;
2358 llvm::Value *amt = CGF.EmitToMemory(
2359 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2360 llvm::Value *old = Builder.CreateAtomicRMW(aop,
2361 LV.getPointer(), amt, llvm::AtomicOrdering::SequentiallyConsistent);
2362 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2364 value = EmitLoadOfLValue(LV, E->getExprLoc());
2366 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2367 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2368 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2369 value = CGF.EmitToMemory(value, type);
2370 Builder.CreateBr(opBB);
2371 Builder.SetInsertPoint(opBB);
2372 atomicPHI = Builder.CreatePHI(value->getType(), 2);
2373 atomicPHI->addIncoming(value, startBB);
2374 value = atomicPHI;
2375 } else {
2376 value = EmitLoadOfLValue(LV, E->getExprLoc());
2377 input = value;
2378 }
2380 // Special case of integer increment that we have to check first: bool++.
2381 // Due to promotion rules, we get:
2382 // bool++ -> bool = bool + 1
2383 // -> bool = (int)bool + 1
2384 // -> bool = ((int)bool + 1 != 0)
2385 // An interesting aspect of this is that increment is always true.
2386 // Decrement does not have this property.
2387 if (isInc && type->isBooleanType()) {
2388 value = Builder.getTrue();
2390 // Most common case by far: integer increment.
2391 } else if (type->isIntegerType()) {
2392 // Note that signed integer inc/dec with width less than int can't
2393 // overflow because of promotion rules; we're just eliding a few steps here.
2394 if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2395 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2396 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2397 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2398 value =
2399 EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
2400 } else {
2401 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2402 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2405 // Next most common: pointer increment.
2406 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2407 QualType type = ptr->getPointeeType();
2409 // VLA types don't have constant size.
2410 if (const VariableArrayType *vla
2411 = CGF.getContext().getAsVariableArrayType(type)) {
2412 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2413 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2414 if (CGF.getLangOpts().isSignedOverflowDefined())
2415 value = Builder.CreateGEP(value, numElts, "vla.inc");
2417 value = CGF.EmitCheckedInBoundsGEP(
2418 value, numElts, /*SignedIndices=*/false, isSubtraction,
2419 E->getExprLoc(), "vla.inc");
2421 // Arithmetic on function pointers (!) is just +-1.
2422 } else if (type->isFunctionType()) {
2423 llvm::Value *amt = Builder.getInt32(amount);
2425 value = CGF.EmitCastToVoidPtr(value);
2426 if (CGF.getLangOpts().isSignedOverflowDefined())
2427 value = Builder.CreateGEP(value, amt, "incdec.funcptr");
2429 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2430 isSubtraction, E->getExprLoc(),
2432 value = Builder.CreateBitCast(value, input->getType());
2434 // For everything else, we can just do a simple increment.
2435 } else {
2436 llvm::Value *amt = Builder.getInt32(amount);
2437 if (CGF.getLangOpts().isSignedOverflowDefined())
2438 value = Builder.CreateGEP(value, amt, "incdec.ptr");
2439 else
2440 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2441 isSubtraction, E->getExprLoc(),
2442 "incdec.ptr");
2443 }
2445 // Vector increment/decrement.
2446 } else if (type->isVectorType()) {
2447 if (type->hasIntegerRepresentation()) {
2448 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2450 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2451 } else {
2452 value = Builder.CreateFAdd(
2453 value,
2454 llvm::ConstantFP::get(value->getType(), amount),
2455 isInc ? "inc" : "dec");
2459 } else if (type->isRealFloatingType()) {
2460 // Add the inc/dec to the real part.
2461 llvm::Value *amt;
2463 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2464 // Another special case: half FP increment should be done via float
2465 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2466 value = Builder.CreateCall(
2467 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2468 CGF.CGM.FloatTy),
2469 input, "incdec.conv");
2470 } else {
2471 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2472 }
2473 }
2475 if (value->getType()->isFloatTy())
2476 amt = llvm::ConstantFP::get(VMContext,
2477 llvm::APFloat(static_cast<float>(amount)));
2478 else if (value->getType()->isDoubleTy())
2479 amt = llvm::ConstantFP::get(VMContext,
2480 llvm::APFloat(static_cast<double>(amount)));
2482 // Remaining types are Half, LongDouble or __float128. Convert from float.
2483 llvm::APFloat F(static_cast<float>(amount));
2484 bool ignored;
2485 const llvm::fltSemantics *FS;
2486 // Don't use getFloatTypeSemantics because Half isn't
2487 // necessarily represented using the "half" LLVM type.
2488 if (value->getType()->isFP128Ty())
2489 FS = &CGF.getTarget().getFloat128Format();
2490 else if (value->getType()->isHalfTy())
2491 FS = &CGF.getTarget().getHalfFormat();
2493 FS = &CGF.getTarget().getLongDoubleFormat();
2494 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2495 amt = llvm::ConstantFP::get(VMContext, F);
2497 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2499 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2500 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2501 value = Builder.CreateCall(
2502 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2503 CGF.CGM.FloatTy),
2504 value, "incdec.conv");
2505 } else {
2506 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2507 }
2508 }
2510 // Objective-C pointer types.
2511 } else {
2512 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2513 value = CGF.EmitCastToVoidPtr(value);
2515 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2516 if (!isInc) size = -size;
2517 llvm::Value *sizeValue =
2518 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2520 if (CGF.getLangOpts().isSignedOverflowDefined())
2521 value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
2523 value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2524 /*SignedIndices=*/false, isSubtraction,
2525 E->getExprLoc(), "incdec.objptr");
2526 value = Builder.CreateBitCast(value, input->getType());
2527 }
2529 if (atomicPHI) {
2530 llvm::BasicBlock *opBB = Builder.GetInsertBlock();
2531 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2532 auto Pair = CGF.EmitAtomicCompareExchange(
2533 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2534 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2535 llvm::Value *success = Pair.second;
2536 atomicPHI->addIncoming(old, opBB);
2537 Builder.CreateCondBr(success, contBB, opBB);
2538 Builder.SetInsertPoint(contBB);
2539 return isPre ? value : input;
2542 // Store the updated result through the lvalue.
2543 if (LV.isBitField())
2544 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2546 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2548 // If this is a postinc, return the value read from memory, otherwise use the
2549 // updated value.
2550 return isPre ? value : input;
2551 }
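// Added illustrative note (not part of the original source): a few sample
// lowerings that fall out of the function above, stated approximately:
//   bool b; b++;          -> stores true (increment of bool is always true)
//   int *p; p++;          -> getelementptr (inbounds unless -fwrapv) by 1
//   T (*q)[n]; q++;       -> GEP scaled by the runtime VLA element count
//   _Atomic int a; a++;   -> a single "atomicrmw add ... seq_cst" when no
//                            overflow checking is requested
// with the pre/post distinction only affecting which value is returned.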
2555 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2556 TestAndClearIgnoreResultAssign();
2557 // Emit unary minus with EmitSub so we handle overflow cases etc.
2558 BinOpInfo BinOp;
2559 BinOp.RHS = Visit(E->getSubExpr());
2561 if (BinOp.RHS->getType()->isFPOrFPVectorTy())
2562 BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
2563 else
2564 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2565 BinOp.Ty = E->getType();
2566 BinOp.Opcode = BO_Sub;
2567 // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2568 BinOp.E = E;
2569 return EmitSub(BinOp);
2570 }
2572 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2573 TestAndClearIgnoreResultAssign();
2574 Value *Op = Visit(E->getSubExpr());
2575 return Builder.CreateNot(Op, "neg");
2578 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2579 // Perform vector logical not on comparison with zero vector.
2580 if (E->getType()->isExtVectorType()) {
2581 Value *Oper = Visit(E->getSubExpr());
2582 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2583 Value *Result;
2584 if (Oper->getType()->isFPOrFPVectorTy())
2585 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2586 else
2587 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2588 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2591 // Compare operand to zero.
2592 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2595 // TODO: Could dynamically modify easy computations here. For example, if
2596 // the operand is an icmp ne, turn into icmp eq.
2597 BoolVal = Builder.CreateNot(BoolVal, "lnot");
2599 // ZExt result to the expr type.
2600 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2603 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2604 // Try folding the offsetof to a constant.
2605 Expr::EvalResult EVResult;
2606 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
2607 llvm::APSInt Value = EVResult.Val.getInt();
2608 return Builder.getInt(Value);
2611 // Loop over the components of the offsetof to compute the value.
2612 unsigned n = E->getNumComponents();
2613 llvm::Type* ResultType = ConvertType(E->getType());
2614 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2615 QualType CurrentType = E->getTypeSourceInfo()->getType();
2616 for (unsigned i = 0; i != n; ++i) {
2617 OffsetOfNode ON = E->getComponent(i);
2618 llvm::Value *Offset = nullptr;
2619 switch (ON.getKind()) {
2620 case OffsetOfNode::Array: {
2621 // Compute the index
2622 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2623 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2624 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2625 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2627 // Save the element type
2628 CurrentType =
2629 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2631 // Compute the element size
2632 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2633 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2635 // Multiply out to compute the result
2636 Offset = Builder.CreateMul(Idx, ElemSize);
2640 case OffsetOfNode::Field: {
2641 FieldDecl *MemberDecl = ON.getField();
2642 RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
2643 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2645 // Compute the index of the field in its parent.
2646 unsigned i = 0;
2647 // FIXME: It would be nice if we didn't have to loop here!
2648 for (RecordDecl::field_iterator Field = RD->field_begin(),
2649 FieldEnd = RD->field_end();
2650 Field != FieldEnd; ++Field, ++i) {
2651 if (*Field == MemberDecl)
2652 break;
2653 }
2654 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
2656 // Compute the offset to the field
2657 int64_t OffsetInt = RL.getFieldOffset(i) /
2658 CGF.getContext().getCharWidth();
2659 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2661 // Save the element type.
2662 CurrentType = MemberDecl->getType();
2666 case OffsetOfNode::Identifier:
2667 llvm_unreachable("dependent __builtin_offsetof");
2669 case OffsetOfNode::Base: {
2670 if (ON.getBase()->isVirtual()) {
2671 CGF.ErrorUnsupported(E, "virtual base in offsetof");
2675 RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
2676 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2678 // Save the element type.
2679 CurrentType = ON.getBase()->getType();
2681 // Compute the offset to the base.
2682 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2683 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2684 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2685 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2689 Result = Builder.CreateAdd(Result, Offset);
2690 }
2691 return Result;
2692 }
2694 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2695 /// argument of the sizeof expression as an integer.
2696 Value *
2697 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2698 const UnaryExprOrTypeTraitExpr *E) {
2699 QualType TypeToSize = E->getTypeOfArgument();
2700 if (E->getKind() == UETT_SizeOf) {
2701 if (const VariableArrayType *VAT =
2702 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2703 if (E->isArgumentType()) {
2704 // sizeof(type) - make sure to emit the VLA size.
2705 CGF.EmitVariablyModifiedType(TypeToSize);
2707 // C99 6.5.3.4p2: If the argument is an expression of type
2708 // VLA, it is evaluated.
2709 CGF.EmitIgnoredExpr(E->getArgumentExpr());
2712 auto VlaSize = CGF.getVLASize(VAT);
2713 llvm::Value *size = VlaSize.NumElts;
2715 // Scale the number of non-VLA elements by the non-VLA element size.
2716 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2717 if (!eltSize.isOne())
2718 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2719 return size;
2720 }
2722 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2723 auto Alignment =
2724 CGF.getContext()
2725 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2726 E->getTypeOfArgument()->getPointeeType()))
2727 .getQuantity();
2728 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2729 }
2731 // If this isn't sizeof(vla), the result must be constant; use the constant
2732 // folding logic so we don't have to duplicate it here.
2733 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2734 }
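// Added illustrative note (not part of the original source): only sizeof
// applied to a variable-length array needs runtime code; for example, in a
// hypothetical
//   void f(int n) { int a[n]; size_t s = sizeof(a); }
// the VLA element count is multiplied by the constant element size with a
// "nuw" multiply, whereas sizeof/alignof of ordinary types is constant-folded
// through EvaluateKnownConstInt above.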
2736 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2737 Expr *Op = E->getSubExpr();
2738 if (Op->getType()->isAnyComplexType()) {
2739 // If it's an l-value, load through the appropriate subobject l-value.
2740 // Note that we have to ask E because Op might be an l-value that
2741 // this won't work for, e.g. an Obj-C property.
2742 if (Op->isGLValue())
2743 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2744 E->getExprLoc()).getScalarVal();
2746 // Otherwise, calculate and project.
2747 return CGF.EmitComplexExpr(Op, false, true).first;
2748 }
2750 return Visit(Op);
2751 }
2753 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2754 Expr *Op = E->getSubExpr();
2755 if (Op->getType()->isAnyComplexType()) {
2756 // If it's an l-value, load through the appropriate subobject l-value.
2757 // Note that we have to ask E because Op might be an l-value that
2758 // this won't work for, e.g. an Obj-C property.
2759 if (Op->isGLValue())
2760 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2761 E->getExprLoc()).getScalarVal();
2763 // Otherwise, calculate and project.
2764 return CGF.EmitComplexExpr(Op, true, false).second;
2767 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2768 // effects are evaluated, but not the actual value.
2769 if (Op->isGLValue())
2770 CGF.EmitLValue(Op);
2771 else
2772 CGF.EmitScalarExpr(Op, true);
2773 return llvm::Constant::getNullValue(ConvertType(E->getType()));
2776 //===----------------------------------------------------------------------===//
2777 //                           Binary Operators
2778 //===----------------------------------------------------------------------===//
2780 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
2781 TestAndClearIgnoreResultAssign();
2782 BinOpInfo Result;
2783 Result.LHS = Visit(E->getLHS());
2784 Result.RHS = Visit(E->getRHS());
2785 Result.Ty = E->getType();
2786 Result.Opcode = E->getOpcode();
2787 Result.FPFeatures = E->getFPFeatures();
2788 Result.E = E;
2789 return Result;
2790 }
2792 LValue ScalarExprEmitter::EmitCompoundAssignLValue(
2793 const CompoundAssignOperator *E,
2794 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
2795 Value *&Result) {
2796 QualType LHSTy = E->getLHS()->getType();
2797 BinOpInfo OpInfo;
2799 if (E->getComputationResultType()->isAnyComplexType())
2800 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
2802 // Emit the RHS first. __block variables need to have the rhs evaluated
2803 // first, plus this should improve codegen a little.
2804 OpInfo.RHS = Visit(E->getRHS());
2805 OpInfo.Ty = E->getComputationResultType();
2806 OpInfo.Opcode = E->getOpcode();
2807 OpInfo.FPFeatures = E->getFPFeatures();
2808 OpInfo.E = E;
2809 // Load/convert the LHS.
2810 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
2812 llvm::PHINode *atomicPHI = nullptr;
2813 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
2814 QualType type = atomicTy->getValueType();
2815 if (!type->isBooleanType() && type->isIntegerType() &&
2816 !(type->isUnsignedIntegerType() &&
2817 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2818 CGF.getLangOpts().getSignedOverflowBehavior() !=
2819 LangOptions::SOB_Trapping) {
2820 llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
2821 switch (OpInfo.Opcode) {
2822 // We don't have atomicrmw operands for *, %, /, <<, >>
2823 case BO_MulAssign: case BO_DivAssign:
2824 case BO_RemAssign:
2825 case BO_ShlAssign:
2826 case BO_ShrAssign:
2827 break;
2828 case BO_AddAssign:
2829 aop = llvm::AtomicRMWInst::Add;
2830 break;
2831 case BO_SubAssign:
2832 aop = llvm::AtomicRMWInst::Sub;
2833 break;
2834 case BO_AndAssign:
2835 aop = llvm::AtomicRMWInst::And;
2836 break;
2837 case BO_XorAssign:
2838 aop = llvm::AtomicRMWInst::Xor;
2839 break;
2840 case BO_OrAssign:
2841 aop = llvm::AtomicRMWInst::Or;
2842 break;
2843 default:
2844 llvm_unreachable("Invalid compound assignment type");
2845 }
2846 if (aop != llvm::AtomicRMWInst::BAD_BINOP) {
2847 llvm::Value *amt = CGF.EmitToMemory(
2848 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
2849 E->getExprLoc()),
2850 LHSTy);
2851 Builder.CreateAtomicRMW(aop, LHSLV.getPointer(), amt,
2852 llvm::AtomicOrdering::SequentiallyConsistent);
2853 return LHSLV;
2854 }
2856 // FIXME: For floating point types, we should be saving and restoring the
2857 // floating point environment in the loop.
2858 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2859 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2860 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2861 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
2862 Builder.CreateBr(opBB);
2863 Builder.SetInsertPoint(opBB);
2864 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
2865 atomicPHI->addIncoming(OpInfo.LHS, startBB);
2866 OpInfo.LHS = atomicPHI;
2867 }
2868 else
2869 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2871 SourceLocation Loc = E->getExprLoc();
2872 OpInfo.LHS =
2873 EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
2875 // Expand the binary operator.
2876 Result = (this->*Func)(OpInfo);
2878 // Convert the result back to the LHS type,
2879 // potentially with Implicit Conversion sanitizer check.
2880 Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
2881 Loc, ScalarConversionOpts(CGF.SanOpts));
2883 if (atomicPHI) {
2884 llvm::BasicBlock *opBB = Builder.GetInsertBlock();
2885 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2886 auto Pair = CGF.EmitAtomicCompareExchange(
2887 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
2888 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
2889 llvm::Value *success = Pair.second;
2890 atomicPHI->addIncoming(old, opBB);
2891 Builder.CreateCondBr(success, contBB, opBB);
2892 Builder.SetInsertPoint(contBB);
2896 // Store the result value into the LHS lvalue. Bit-fields are handled
2897 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
2898 // 'An assignment expression has the value of the left operand after the
2900 if (LHSLV.isBitField())
2901 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
2903 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
2905 return LHSLV;
2906 }
2908 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
2909 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
2910 bool Ignore = TestAndClearIgnoreResultAssign();
2911 Value *RHS = nullptr;
2912 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
2914 // If the result is clearly ignored, return now.
2915 if (Ignore)
2916 return nullptr;
2918 // The result of an assignment in C is the assigned r-value.
2919 if (!CGF.getLangOpts().CPlusPlus)
2920 return RHS;
2922 // If the lvalue is non-volatile, return the computed value of the assignment.
2923 if (!LHS.isVolatileQualified())
2924 return RHS;
2926 // Otherwise, reload the value.
2927 return EmitLoadOfLValue(LHS, E->getExprLoc());
2928 }
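// Added illustrative note (not part of the original source): for a compound
// assignment such as "x += y" the sequence above is, roughly: emit the RHS,
// load and convert the LHS to the computation type, apply the binop (Func),
// convert back to the LHS type, and store through the lvalue. For suitable
// _Atomic integer LHSs the whole operation collapses into a single atomicrmw;
// otherwise an atomic compare-exchange loop is produced.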
2930 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
2931 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
2932 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
2934 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
2935 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
2936 SanitizerKind::IntegerDivideByZero));
2939 const auto *BO = cast<BinaryOperator>(Ops.E);
2940 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
2941 Ops.Ty->hasSignedIntegerRepresentation() &&
2942 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
2943 Ops.mayHaveIntegerOverflow()) {
2944 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
2946 llvm::Value *IntMin =
2947 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
2948 llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
2950 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
2951 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
2952 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
2953 Checks.push_back(
2954 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
2957 if (Checks.size() > 0)
2958 EmitBinOpCheck(Checks, Ops);
2961 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
2963 CodeGenFunction::SanitizerScope SanScope(&CGF);
2964 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
2965 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
2966 Ops.Ty->isIntegerType() &&
2967 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
2968 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2969 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
2970 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
2971 Ops.Ty->isRealFloatingType() &&
2972 Ops.mayHaveFloatDivisionByZero()) {
2973 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2974 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
2975 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
2976 Ops);
2977 }
2978 }
2980 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
2981 llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
2982 if (CGF.getLangOpts().OpenCL &&
2983 !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
2984 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
2985 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
2986 // build option allows an application to specify that single precision
2987 // floating-point divide (x/y and 1/x) and sqrt used in the program
2988 // source are correctly rounded.
2989 llvm::Type *ValTy = Val->getType();
2990 if (ValTy->isFloatTy() ||
2991 (isa<llvm::VectorType>(ValTy) &&
2992 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
2993 CGF.SetFPAccuracy(Val, 2.5);
2994 }
2995 return Val;
2996 }
2997 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
2998 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
2999 else
3000 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3001 }
3003 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3004 // Rem in C can't be a floating point type: C99 6.5.5p2.
3005 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3006 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3007 Ops.Ty->isIntegerType() &&
3008 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3009 CodeGenFunction::SanitizerScope SanScope(&CGF);
3010 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3011 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3014 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3015 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3017 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3020 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3021 unsigned IID;
3022 unsigned OpID = 0;
3024 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3025 switch (Ops.Opcode) {
3026 case BO_Add:
3027 case BO_AddAssign:
3028 OpID = 1;
3029 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3030 llvm::Intrinsic::uadd_with_overflow;
3031 break;
3032 case BO_Sub:
3033 case BO_SubAssign:
3034 OpID = 2;
3035 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3036 llvm::Intrinsic::usub_with_overflow;
3037 break;
3038 case BO_Mul:
3039 case BO_MulAssign:
3040 OpID = 3;
3041 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3042 llvm::Intrinsic::umul_with_overflow;
3043 break;
3044 default:
3045 llvm_unreachable("Unsupported operation for overflow detection");
3046 }
3047 OpID <<= 1;
3048 if (isSigned)
3049 OpID |= 1;
3051 CodeGenFunction::SanitizerScope SanScope(&CGF);
3052 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
3054 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
3056 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
3057 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
3058 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
3060 // Handle overflow with llvm.trap if no custom handler has been specified.
3061 const std::string *handlerName =
3062 &CGF.getLangOpts().OverflowHandler;
3063 if (handlerName->empty()) {
3064 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3065 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3066 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
3067 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
3068 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3069 : SanitizerKind::UnsignedIntegerOverflow;
3070 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
3071 } else
3072 CGF.EmitTrapCheck(Builder.CreateNot(overflow));
3073 return result;
3074 }
3076 // Branch in case of overflow.
3077 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3078 llvm::BasicBlock *continueBB =
3079 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
3080 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
3082 Builder.CreateCondBr(overflow, overflowBB, continueBB);
3084 // If an overflow handler is set, then we want to call it and then use its
3085 // result, if it returns.
3086 Builder.SetInsertPoint(overflowBB);
3088 // Get the overflow handler.
3089 llvm::Type *Int8Ty = CGF.Int8Ty;
3090 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3091 llvm::FunctionType *handlerTy =
3092 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
3093 llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
3095 // Sign extend the args to 64-bit, so that we can use the same handler for
3096 // all types of overflow.
3097 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
3098 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
3100 // Call the handler with the two arguments, the operation, and the size of
3102 llvm::Value *handlerArgs[] = {
3103 lhs,
3104 rhs,
3105 Builder.getInt8(OpID),
3106 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
3107 };
3108 llvm::Value *handlerResult =
3109 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
3111 // Truncate the result back to the desired size.
3112 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
3113 Builder.CreateBr(continueBB);
3115 Builder.SetInsertPoint(continueBB);
3116 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
3117 phi->addIncoming(result, initialBB);
3118 phi->addIncoming(handlerResult, overflowBB);
3120 return phi;
3121 }
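// Added illustrative note (not part of the original source): when an overflow
// handler name is set, the overflow branch above calls a user-supplied
// function assumed to have (roughly) the form
//   long long handler(long long lhs, long long rhs, char opcode, char width);
// the operands are sign-extended to 64 bits, the returned value is truncated
// back to the operation width, and execution resumes at the "nooverflow"
// continuation block through the phi.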
3123 /// Emit pointer + index arithmetic.
3124 static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3125 const BinOpInfo &op,
3126 bool isSubtraction) {
3127 // Must have binary (not unary) expr here. Unary pointer
3128 // increment/decrement doesn't use this path.
3129 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3131 Value *pointer = op.LHS;
3132 Expr *pointerOperand = expr->getLHS();
3133 Value *index = op.RHS;
3134 Expr *indexOperand = expr->getRHS();
3136 // In a subtraction, the LHS is always the pointer.
3137 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
3138 std::swap(pointer, index);
3139 std::swap(pointerOperand, indexOperand);
3142 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3144 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
3145 auto &DL = CGF.CGM.getDataLayout();
3146 auto PtrTy = cast<llvm::PointerType>(pointer->getType());
3148 // Some versions of glibc and gcc use idioms (particularly in their malloc
3149 // routines) that add a pointer-sized integer (known to be a pointer value)
3150 // to a null pointer in order to cast the value back to an integer or as
3151 // part of a pointer alignment algorithm. This is undefined behavior, but
3152 // we'd like to be able to compile programs that use it.
3154 // Normally, we'd generate a GEP with a null-pointer base here in response
3155 // to that code, but it's also UB to dereference a pointer created that
3156 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3157 // generate a direct cast of the integer value to a pointer.
3159 // The idiom (p = nullptr + N) is not met if any of the following are true:
3161 // The operation is subtraction.
3162 // The index is not pointer-sized.
3163 // The pointer type is not byte-sized.
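  //
  // A minimal illustration of the tolerated idiom (an assumed example, not
  // lifted from glibc itself):
  //   char *p = (char *)0 + (uintptr_t)q;  // integer round-tripped as pointer math
  // Under this extension the IR is a plain inttoptr of the index instead of a
  // GEP with a null base.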
3165 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
                                                          op.Opcode,
                                                          expr->getLHS(),
                                                          expr->getRHS()))
3169 return CGF.Builder.CreateIntToPtr(index, pointer->getType());
3171 if (width != DL.getTypeSizeInBits(PtrTy)) {
3172 // Zero-extend or sign-extend the pointer value according to
3173 // whether the index is signed or not.
3174 index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned,
                                      "idx.ext");
  }

3178 // If this is subtraction, negate the index.
  if (isSubtraction)
3180 index = CGF.Builder.CreateNeg(index, "idx.neg");
3182 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
3183 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
3184 /*Accessed*/ false);
3186 const PointerType *pointerType
3187 = pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
3189 QualType objectType = pointerOperand->getType()
3190 ->castAs<ObjCObjectPointerType>()
                          ->getPointeeType();
3192 llvm::Value *objectSize
3193 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
3195 index = CGF.Builder.CreateMul(index, objectSize);
3197 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3198 result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3199 return CGF.Builder.CreateBitCast(result, pointer->getType());
  }
3202 QualType elementType = pointerType->getPointeeType();
3203 if (const VariableArrayType *vla
3204 = CGF.getContext().getAsVariableArrayType(elementType)) {
3205 // The element count here is the total number of non-VLA elements.
3206 llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3208 // Effectively, the multiply by the VLA size is part of the GEP.
3209 // GEP indexes are signed, and scaling an index isn't permitted to
3210 // signed-overflow, so we use the same semantics for our explicit
3211 // multiply. We suppress this if overflow is not undefined behavior.
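    // For example (illustrative only): given 'void f(int n, int (*p)[n])',
    // the statement 'p += 2;' reaches this path; the index 2 is scaled by the
    // runtime element count n, and that multiply is emitted as 'mul nsw'
    // unless -fwrapv makes signed overflow defined.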
3212 if (CGF.getLangOpts().isSignedOverflowDefined()) {
3213 index = CGF.Builder.CreateMul(index, numElements, "vla.index");
3214 pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
    } else {
3216 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer =
3218 CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3219 op.E->getExprLoc(), "add.ptr");
    }
    return pointer;
  }
3224 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
3225 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future proof.
3227 if (elementType->isVoidType() || elementType->isFunctionType()) {
3228 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3229 result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3230 return CGF.Builder.CreateBitCast(result, pointer->getType());
  }
3233 if (CGF.getLangOpts().isSignedOverflowDefined())
3234 return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3236 return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3237 op.E->getExprLoc(), "add.ptr");
}
3240 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3241 // Addend. Use negMul and negAdd to negate the first operand of the Mul or
3242 // the add operand respectively. This allows fmuladd to represent a*b-c, or
3243 // c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3244 // efficient operations.
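// Illustrative source-level example (a sketch; it assumes contraction is
// enabled, e.g. -ffp-contract=on, whose default varies by compiler version):
//   float mad(float a, float b, float c) { return a * b + c; }
// The separate fmul/fadd pair is replaced by a single call to
// @llvm.fmuladd.f32(float %a, float %b, float %c), and the dead fmul is erased.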
3245 static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
3246 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3247 bool negMul, bool negAdd) {
3248 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
3250 Value *MulOp0 = MulOp->getOperand(0);
3251 Value *MulOp1 = MulOp->getOperand(1);
  if (negMul) {
    MulOp0 = Builder.CreateFSub(
3255 llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
        "neg");
3257 } else if (negAdd) {
    Addend = Builder.CreateFSub(
3260 llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
        "neg");
  }
3264 Value *FMulAdd = Builder.CreateCall(
3265 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3266 {MulOp0, MulOp1, Addend});
3267 MulOp->eraseFromParent();

  return FMulAdd;
}
3272 // Check whether it would be legal to emit an fmuladd intrinsic call to
3273 // represent op and if so, build the fmuladd.
3275 // Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
3276 // Does NOT check the type of the operation - it's assumed that this function
3277 // will be called from contexts where it's known that the type is contractable.
3278 static Value* tryEmitFMulAdd(const BinOpInfo &op,
3279 const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub = false) {
3282 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
3283 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
3284 "Only fadd/fsub can be the root of an fmuladd.");
3286 // Check whether this op is marked as fusable.
3287 if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;
3290 // We have a potentially fusable op. Look for a mul on one of the operands.
3291 // Also, make sure that the mul result isn't used directly. In that case,
3292 // there's no point creating a muladd operation.
3293 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
3294 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3295 LHSBinOp->use_empty())
3296 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
  }
3298 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
3299 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3300 RHSBinOp->use_empty())
3301 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
  }

  return nullptr;
}
3307 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
3308 if (op.LHS->getType()->isPointerTy() ||
3309 op.RHS->getType()->isPointerTy())
3310 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
3312 if (op.Ty->isSignedIntegerOrEnumerationType()) {
3313 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3314 case LangOptions::SOB_Defined:
3315 return Builder.CreateAdd(op.LHS, op.RHS, "add");
3316 case LangOptions::SOB_Undefined:
3317 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3318 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3320 case LangOptions::SOB_Trapping:
3321 if (CanElideOverflowCheck(CGF.getContext(), op))
3322 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3323 return EmitOverflowCheckedBinOp(op);
    }
  }
3327 if (op.Ty->isUnsignedIntegerType() &&
3328 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3329 !CanElideOverflowCheck(CGF.getContext(), op))
3330 return EmitOverflowCheckedBinOp(op);
3332 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3333 // Try to form an fmuladd.
3334 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;
3337 Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
3338 return propagateFMFlags(V, op);
  }

3341 return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
3344 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
3345 // The LHS is always a pointer if either side is.
3346 if (!op.LHS->getType()->isPointerTy()) {
3347 if (op.Ty->isSignedIntegerOrEnumerationType()) {
3348 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3349 case LangOptions::SOB_Defined:
3350 return Builder.CreateSub(op.LHS, op.RHS, "sub");
3351 case LangOptions::SOB_Undefined:
3352 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3353 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3355 case LangOptions::SOB_Trapping:
3356 if (CanElideOverflowCheck(CGF.getContext(), op))
3357 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3358 return EmitOverflowCheckedBinOp(op);
      }
    }
3362 if (op.Ty->isUnsignedIntegerType() &&
3363 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3364 !CanElideOverflowCheck(CGF.getContext(), op))
3365 return EmitOverflowCheckedBinOp(op);
3367 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3368 // Try to form an fmuladd.
3369 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
3371 Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
3372 return propagateFMFlags(V, op);
    }

3375 return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }
3378 // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
3380 if (!op.RHS->getType()->isPointerTy())
3381 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
3383 // Otherwise, this is a pointer subtraction.
3385 // Do the raw subtraction part.
  llvm::Value *LHS
3387 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
3389 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
3390 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
3392 // Okay, figure out the element size.
3393 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3394 QualType elementType = expr->getLHS()->getType()->getPointeeType();
3396 llvm::Value *divisor = nullptr;
3398 // For a variable-length array, this is going to be non-constant.
3399 if (const VariableArrayType *vla
3400 = CGF.getContext().getAsVariableArrayType(elementType)) {
3401 auto VlaSize = CGF.getVLASize(vla);
3402 elementType = VlaSize.Type;
3403 divisor = VlaSize.NumElts;
3405 // Scale the number of non-VLA elements by the non-VLA element size.
3406 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
3407 if (!eltSize.isOne())
3408 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
3410 // For everything else, we can just compute it, safe in the
3411 // assumption that Sema won't let anything through that we can't
3412 // safely compute the size of.
  } else {
3414 CharUnits elementSize;
3415 // Handle GCC extension for pointer arithmetic on void* and
3416 // function pointer types.
3417 if (elementType->isVoidType() || elementType->isFunctionType())
3418 elementSize = CharUnits::One();
3420 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
3422 // Don't even emit the divide for element size of 1.
3423 if (elementSize.isOne())
      return diffInChars;

3426 divisor = CGF.CGM.getSize(elementSize);
  }
3429 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
3430 // pointer difference in C is only defined in the case where both operands
3431 // are pointing to elements of an array.
3432 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
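// Worked example (a sketch for a typical 64-bit target, assuming 4-byte int):
// for 'int *p, *q; ptrdiff_t d = p - q;' this path emits roughly
//   %lhs = ptrtoint i32* %p to i64
//   %rhs = ptrtoint i32* %q to i64
//   %sub = sub i64 %lhs, %rhs
//   %div = sdiv exact i64 %sub, 4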
3435 Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
3436 llvm::IntegerType *Ty;
3437 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3438 Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
3440 Ty = cast<llvm::IntegerType>(LHS->getType());
3441 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
}
3444 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
3445 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3446 // RHS to the same size as the LHS.
3447 Value *RHS = Ops.RHS;
3448 if (Ops.LHS->getType() != RHS->getType())
3449 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3451 bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
3452 Ops.Ty->hasSignedIntegerRepresentation() &&
3453 !CGF.getLangOpts().isSignedOverflowDefined();
3454 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
3455 // OpenCL 6.3j: shift values are effectively % word size of LHS.
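  // e.g. (illustrative) with a 32-bit LHS, 'x << 35' behaves like 'x << 3':
  // the exponent is masked with (bit width - 1) rather than being diagnosed.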
3456 if (CGF.getLangOpts().OpenCL)
    RHS =
3458 Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");
3459 else if ((SanitizeBase || SanitizeExponent) &&
3460 isa<llvm::IntegerType>(Ops.LHS->getType())) {
3461 CodeGenFunction::SanitizerScope SanScope(&CGF);
3462 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
3463 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
3464 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
3466 if (SanitizeExponent) {
      Checks.push_back(
3468 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
    }

    if (SanitizeBase) {
3472 // Check whether we are shifting any non-zero bits off the top of the
3473 // integer. We only emit this check if exponent is valid - otherwise
3474 // instructions below will have undefined behavior themselves.
3475 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
3476 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3477 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
3478 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
3479 llvm::Value *PromotedWidthMinusOne =
3480 (RHS == Ops.RHS) ? WidthMinusOne
3481 : GetWidthMinusOneValue(Ops.LHS, RHS);
3482 CGF.EmitBlock(CheckShiftBase);
3483 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
3484 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
3485 /*NUW*/ true, /*NSW*/ true),
        "shl.check");
3487 if (CGF.getLangOpts().CPlusPlus) {
3488 // In C99, we are not permitted to shift a 1 bit into the sign bit.
3489 // Under C++11's rules, shifting a 1 bit into the sign bit is
3490 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
3491 // define signed left shifts, so we use the C99 and C++11 rules there).
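      // Example of the difference (illustrative, 32-bit int): '1 << 31' is
      // undefined in C99 but allowed under C++11, while '2 << 31' shifts a 1
      // bit past the sign bit and is flagged in both modes; the extra LShr
      // below discards the single bit that may legally land in the sign
      // position in C++.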
3492 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
3493 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
    }
3495 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
3496 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
3497 CGF.EmitBlock(Cont);
3498 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
3499 BaseCheck->addIncoming(Builder.getTrue(), Orig);
3500 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
3501 Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase));
    }

3504 assert(!Checks.empty());
3505 EmitBinOpCheck(Checks, Ops);
  }

3508 return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
3511 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
3512 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3513 // RHS to the same size as the LHS.
3514 Value *RHS = Ops.RHS;
3515 if (Ops.LHS->getType() != RHS->getType())
3516 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3518 // OpenCL 6.3j: shift values are effectively % word size of LHS.
3519 if (CGF.getLangOpts().OpenCL)
    RHS =
3521 Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");
3522 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
3523 isa<llvm::IntegerType>(Ops.LHS->getType())) {
3524 CodeGenFunction::SanitizerScope SanScope(&CGF);
3525 llvm::Value *Valid =
3526 Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
3527 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
  }
3530 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3531 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
3532 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
3535 enum IntrinsicType { VCMPEQ, VCMPGT };
3536 // return corresponding comparison intrinsic for given vector type
3537 static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
3538 BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
3540 default: llvm_unreachable("unexpected element type");
3541 case BuiltinType::Char_U:
3542 case BuiltinType::UChar:
3543 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3544 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
3545 case BuiltinType::Char_S:
3546 case BuiltinType::SChar:
3547 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3548 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
3549 case BuiltinType::UShort:
3550 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3551 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
3552 case BuiltinType::Short:
3553 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3554 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
3555 case BuiltinType::UInt:
3556 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3557 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
3558 case BuiltinType::Int:
3559 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3560 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
3561 case BuiltinType::ULong:
3562 case BuiltinType::ULongLong:
3563 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3564 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
3565 case BuiltinType::Long:
3566 case BuiltinType::LongLong:
3567 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3568 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
3569 case BuiltinType::Float:
3570 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
3571 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
3572 case BuiltinType::Double:
3573 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
3574 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
  }
}
3578 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
3579 llvm::CmpInst::Predicate UICmpOpc,
3580 llvm::CmpInst::Predicate SICmpOpc,
3581 llvm::CmpInst::Predicate FCmpOpc) {
3582 TestAndClearIgnoreResultAssign();
  Value *Result;
3584 QualType LHSTy = E->getLHS()->getType();
3585 QualType RHSTy = E->getRHS()->getType();
3586 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
3587 assert(E->getOpcode() == BO_EQ ||
3588 E->getOpcode() == BO_NE);
3589 Value *LHS = CGF.EmitScalarExpr(E->getLHS());
3590 Value *RHS = CGF.EmitScalarExpr(E->getRHS());
3591 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
3592 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
3593 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
3594 Value *LHS = Visit(E->getLHS());
3595 Value *RHS = Visit(E->getRHS());
3597 // If AltiVec, the comparison results in a numeric type, so we use
3598 // intrinsics comparing vectors and giving 0 or 1 as a result
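      // For example (a sketch): comparing two 'vector int' values with '=='
      // here lowers to the predicate form of the AltiVec compare, roughly
      //   call i32 @llvm.ppc.altivec.vcmpequw.p(i32 2, <4 x i32> %a, <4 x i32> %b)
      // where the leading constant selects the CR6 bit to test (CR6_LT below).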
3599 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
3600 // constants for mapping CR6 register bits to predicate result
3601 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
3603 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
3605 // in several cases vector arguments order will be reversed
3606 Value *FirstVecArg = LHS,
3607 *SecondVecArg = RHS;
3609 QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
3610 const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
3611 BuiltinType::Kind ElementKind = BTy->getKind();
3613 switch(E->getOpcode()) {
3614 default: llvm_unreachable("is not a comparison operation");
3617 ID = GetIntrinsic(VCMPEQ, ElementKind);
3621 ID = GetIntrinsic(VCMPEQ, ElementKind);
3625 ID = GetIntrinsic(VCMPGT, ElementKind);
3626 std::swap(FirstVecArg, SecondVecArg);
3630 ID = GetIntrinsic(VCMPGT, ElementKind);
3633 if (ElementKind == BuiltinType::Float) {
3635 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
3636 std::swap(FirstVecArg, SecondVecArg);
3640 ID = GetIntrinsic(VCMPGT, ElementKind);
3644 if (ElementKind == BuiltinType::Float) {
3646 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
3650 ID = GetIntrinsic(VCMPGT, ElementKind);
3651 std::swap(FirstVecArg, SecondVecArg);
3656 Value *CR6Param = Builder.getInt32(CR6);
3657 llvm::Function *F = CGF.CGM.getIntrinsic(ID);
3658 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
3660 // The result type of intrinsic may not be same as E->getType().
3661 // If E->getType() is not BoolTy, EmitScalarConversion will do the
3662 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
3663 // do nothing; but if ResultTy is not i1 at the same time, it will cause a
  // crash later.
3665 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
3666 if (ResultTy->getBitWidth() > 1 &&
3667 E->getType() == CGF.getContext().BoolTy)
3668 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
3669 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }
3673 if (LHS->getType()->isFPOrFPVectorTy()) {
3674 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
3675 } else if (LHSTy->hasSignedIntegerRepresentation()) {
3676 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
3678 // Unsigned integers and pointers.
3680 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
3681 !isa<llvm::ConstantPointerNull>(LHS) &&
3682 !isa<llvm::ConstantPointerNull>(RHS)) {
3684 // Dynamic information is required to be stripped for comparisons,
3685 // because it could leak the dynamic information. Based on comparisons
3686 // of pointers to dynamic objects, the optimizer can replace one pointer
3687 // with another, which might be incorrect in presence of invariant
3688 // groups. Comparison with null is safe because null does not carry any
3689 // dynamic information.
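      // Illustrative effect (assumed flag spelling -fstrict-vtable-pointers):
      // when two 'Base *' values that may point to dynamic objects are
      // compared, each side is first run through @llvm.strip.invariant.group
      // so the optimizer cannot propagate invariant.group information across
      // the comparison result.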
3690 if (LHSTy.mayBeDynamicClass())
3691 LHS = Builder.CreateStripInvariantGroup(LHS);
3692 if (RHSTy.mayBeDynamicClass())
3693 RHS = Builder.CreateStripInvariantGroup(RHS);
      }

3696 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }
3699 // If this is a vector comparison, sign extend the result to the appropriate
3700 // vector integer type and return it (don't convert to bool).
3701 if (LHSTy->isVectorType())
3702 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
  } else {
3705 // Complex Comparison: can only be an equality comparison.
3706 CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
3708 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
3709 LHS = CGF.EmitComplexExpr(E->getLHS());
3710 CETy = CTy->getElementType();
    } else {
3712 LHS.first = Visit(E->getLHS());
3713 LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
3716 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
3717 RHS = CGF.EmitComplexExpr(E->getRHS());
3718 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
3719 CTy->getElementType()) &&
3720 "The element types must always match.");
3723 RHS.first = Visit(E->getRHS());
3724 RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
3725 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
3726 "The element types must always match.");
3729 Value *ResultR, *ResultI;
3730 if (CETy->isRealFloatingType()) {
3731 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
3732 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
3734 // Complex comparisons can only be equality comparisons. As such, signed
3735 // and unsigned opcodes are the same.
3736 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
3737 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
3740 if (E->getOpcode() == BO_EQ) {
3741 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
3743 assert(E->getOpcode() == BO_NE &&
3744 "Complex comparison other than == or != ?");
3745 Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }
3749 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                E->getExprLoc());
}
3753 Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
3754 bool Ignore = TestAndClearIgnoreResultAssign();
  Value *RHS;
  LValue LHS;
3759 switch (E->getLHS()->getType().getObjCLifetime()) {
3760 case Qualifiers::OCL_Strong:
3761 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;
3764 case Qualifiers::OCL_Autoreleasing:
3765 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;
3768 case Qualifiers::OCL_ExplicitNone:
3769 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;
3772 case Qualifiers::OCL_Weak:
3773 RHS = Visit(E->getRHS());
3774 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3775 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;
3778 case Qualifiers::OCL_None:
3779 // __block variables need to have the rhs evaluated first, plus
3780 // this should improve codegen just a little.
3781 RHS = Visit(E->getRHS());
3782 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3784 // Store the value into the LHS. Bit-fields are handled specially
3785 // because the result is altered by the store, i.e., [C99 6.5.16p1]
3786 // 'An assignment expression has the value of the left operand after
3787 // the assignment...'.
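    // Bit-field example (illustrative): given 'struct { unsigned b : 3; } s;',
    // the expression '(s.b = 12)' stores the truncated value 4 and itself
    // evaluates to 4, which is why the bit-field store below feeds the
    // adjusted value back out through RHS.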
3788 if (LHS.isBitField()) {
3789 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
    } else {
3791 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
3792 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
    break;
  }
3796 // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

3800 // The result of an assignment in C is the assigned r-value.
3801 if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

3804 // If the lvalue is non-volatile, return the computed value of the assignment.
3805 if (!LHS.isVolatileQualified())
    return RHS;

3808 // Otherwise, reload the value.
3809 return EmitLoadOfLValue(LHS, E->getExprLoc());
}
3812 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
3813 // Perform vector logical and on comparisons with zero vectors.
3814 if (E->getType()->isVectorType()) {
3815 CGF.incrementProfileCounter(E);
3817 Value *LHS = Visit(E->getLHS());
3818 Value *RHS = Visit(E->getRHS());
3819 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
3820 if (LHS->getType()->isFPOrFPVectorTy()) {
3821 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
3822 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
3824 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
3825 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
3827 Value *And = Builder.CreateAnd(LHS, RHS);
3828 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
3831 llvm::Type *ResTy = ConvertType(E->getType());
3833 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
3834 // If we have 1 && X, just emit X without inserting the control flow.
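  // e.g. (illustrative) 'if (1 && f())' emits just the call to f() coerced to
  // bool, and 'if (0 && f())' folds to false without emitting f() at all,
  // provided the RHS contains no labels that could be jumped to.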
  bool LHSCondVal;
3836 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
3837 if (LHSCondVal) { // If we have 1 && X, just emit X.
3838 CGF.incrementProfileCounter(E);
3840 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
3841 // ZExt result to int or bool.
3842 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
3845 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
3846 if (!CGF.ContainsLabel(E->getRHS()))
3847 return llvm::Constant::getNullValue(ResTy);
3850 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
3851 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
3853 CodeGenFunction::ConditionalEvaluation eval(CGF);
3855 // Branch on the LHS first. If it is false, go to the failure (cont) block.
3856 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
3857 CGF.getProfileCount(E->getRHS()));
3859 // Any edges into the ContBlock are now from an (indeterminate number of)
3860 // edges from this first condition. All of these values will be false. Start
3861 // setting up the PHI node in the Cont Block for this.
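  // The resulting IR is roughly (a sketch):
  //   land.end:
  //     %res = phi i1 [ false, %entry ], [ %rhscond, %land.rhs ]
  // i.e. every pre-existing edge into the continuation block contributes
  // 'false', and the RHS edge added below contributes the RHS condition.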
3862 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
3864 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
3866 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
  eval.begin(CGF);
3869 CGF.EmitBlock(RHSBlock);
3870 CGF.incrementProfileCounter(E);
3871 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

3874 // Reacquire the RHS block, as there may be subblocks inserted.
3875 RHSBlock = Builder.GetInsertBlock();
3877 // Emit an unconditional branch from this block to ContBlock.
  {
3879 // There is no need to emit line number for unconditional branch.
3880 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
3881 CGF.EmitBlock(ContBlock);
  }
3883 // Insert an entry into the phi node for the edge with the value of RHSCond.
3884 PN->addIncoming(RHSCond, RHSBlock);
3886 // Artificial location to preserve the scope information
  {
3888 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
3889 PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }
3892 // ZExt result to int.
3893 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
3896 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
3897 // Perform vector logical or on comparisons with zero vectors.
3898 if (E->getType()->isVectorType()) {
3899 CGF.incrementProfileCounter(E);
3901 Value *LHS = Visit(E->getLHS());
3902 Value *RHS = Visit(E->getRHS());
3903 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
3904 if (LHS->getType()->isFPOrFPVectorTy()) {
3905 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
3906 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
3908 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
3909 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
3911 Value *Or = Builder.CreateOr(LHS, RHS);
3912 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
3915 llvm::Type *ResTy = ConvertType(E->getType());
3917 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
3918 // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
3920 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
3921 if (!LHSCondVal) { // If we have 0 || X, just emit X.
3922 CGF.incrementProfileCounter(E);
3924 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
3925 // ZExt result to int or bool.
3926 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
3929 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
3930 if (!CGF.ContainsLabel(E->getRHS()))
3931 return llvm::ConstantInt::get(ResTy, 1);
3934 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
3935 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
3937 CodeGenFunction::ConditionalEvaluation eval(CGF);
3939 // Branch on the LHS first. If it is true, go to the success (cont) block.
3940 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
3941 CGF.getCurrentProfileCount() -
3942 CGF.getProfileCount(E->getRHS()));
3944 // Any edges into the ContBlock are now from an (indeterminate number of)
3945 // edges from this first condition. All of these values will be true. Start
3946 // setting up the PHI node in the Cont Block for this.
3947 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
3949 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
3951 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
  eval.begin(CGF);

3955 // Emit the RHS condition as a bool value.
3956 CGF.EmitBlock(RHSBlock);
3957 CGF.incrementProfileCounter(E);
3958 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

3962 // Reacquire the RHS block, as there may be subblocks inserted.
3963 RHSBlock = Builder.GetInsertBlock();
3965 // Emit an unconditional branch from this block to ContBlock. Insert an entry
3966 // into the phi node for the edge with the value of RHSCond.
3967 CGF.EmitBlock(ContBlock);
3968 PN->addIncoming(RHSCond, RHSBlock);
3970 // ZExt result to int.
3971 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
3974 Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
3975 CGF.EmitIgnoredExpr(E->getLHS());
3976 CGF.EnsureInsertPoint();
3977 return Visit(E->getRHS());
3980 //===----------------------------------------------------------------------===//
3982 //===----------------------------------------------------------------------===//
3984 /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
3985 /// expression is cheap enough and side-effect-free enough to evaluate
3986 /// unconditionally instead of conditionally. This is used to convert control
3987 /// flow into selects in some cases.
3988 static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
3989 CodeGenFunction &CGF) {
3990 // Anything that is an integer or floating point constant is fine.
3991 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
3993 // Even non-volatile automatic variables can't be evaluated unconditionally.
3994 // Referencing a thread_local may cause non-trivial initialization work to
3995 // occur. If we're inside a lambda and one of the variables is from the scope
3996 // outside the lambda, that function may have returned already. Reading its
3997 // locals is a bad idea. Also, these reads may introduce races that didn't
3998 // exist in the source-level program.
}
4002 Value *ScalarExprEmitter::
4003 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4004 TestAndClearIgnoreResultAssign();
4006 // Bind the common expression if necessary.
4007 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4009 Expr *condExpr = E->getCond();
4010 Expr *lhsExpr = E->getTrueExpr();
4011 Expr *rhsExpr = E->getFalseExpr();
4013 // If the condition constant folds and can be elided, try to avoid emitting
4014 // the condition and the dead arm.
4016 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4017 Expr *live = lhsExpr, *dead = rhsExpr;
4018 if (!CondExprBool) std::swap(live, dead);
4020 // If the dead side doesn't have labels we need, just emit the Live part.
4021 if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool)
4023 CGF.incrementProfileCounter(E);
4024 Value *Result = Visit(live);
4026 // If the live part is a throw expression, it acts like it has a void
4027 // type, so evaluating it returns a null Value*. However, a conditional
4028 // with non-void type must return a non-null Value*.
4029 if (!Result && !E->getType()->isVoidType())
4030 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }
4036 // OpenCL: If the condition is a vector, we can treat this condition like
4037 // the select function.
4038 if (CGF.getLangOpts().OpenCL
4039 && condExpr->getType()->isVectorType()) {
4040 CGF.incrementProfileCounter(E);
4042 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4043 llvm::Value *LHS = Visit(lhsExpr);
4044 llvm::Value *RHS = Visit(rhsExpr);
4046 llvm::Type *condType = ConvertType(condExpr->getType());
4047 llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
4049 unsigned numElem = vecTy->getNumElements();
4050 llvm::Type *elemType = vecTy->getElementType();
4052 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4053 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4054 llvm::Value *tmp = Builder.CreateSExt(TestMSB,
4055 llvm::VectorType::get(elemType,
                                              numElem),
                        "sext");
4058 llvm::Value *tmp2 = Builder.CreateNot(tmp);
4060 // Cast float to int to perform ANDs if necessary.
4061 llvm::Value *RHSTmp = RHS;
4062 llvm::Value *LHSTmp = LHS;
4063 bool wasCast = false;
4064 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4065 if (rhsVTy->getElementType()->isFloatingPointTy()) {
4066 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4067 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }
4071 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4072 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4073 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
4075 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }
4080 // If this is a really simple expression (like x ? 4 : 5), emit this as a
4081 // select instead of as control flow. We can only do this if it is cheap and
4082 // safe to evaluate the LHS and RHS unconditionally.
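  // For example (a sketch): 'y = x ? 4 : 5;' becomes
  //   %cond = select i1 %tobool, i32 4, i32 5
  // with no branching, since both arms are trivially evaluatable constants.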
4083 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4084 isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4085 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4086 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4088 CGF.incrementProfileCounter(E, StepV);
4090 llvm::Value *LHS = Visit(lhsExpr);
4091 llvm::Value *RHS = Visit(rhsExpr);
4093 // If the conditional has void type, make sure we return a null Value*.
    if (!LHS) {
4094 assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
4097 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4100 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4101 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4102 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4104 CodeGenFunction::ConditionalEvaluation eval(CGF);
4105 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4106 CGF.getProfileCount(lhsExpr));
4108 CGF.EmitBlock(LHSBlock);
4109 CGF.incrementProfileCounter(E);
4111 Value *LHS = Visit(lhsExpr);
4114 LHSBlock = Builder.GetInsertBlock();
4115 Builder.CreateBr(ContBlock);
4117 CGF.EmitBlock(RHSBlock);
4119 Value *RHS = Visit(rhsExpr);
4122 RHSBlock = Builder.GetInsertBlock();
4123 CGF.EmitBlock(ContBlock);
4125 // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;
4131 // Create a PHI node for the real part.
4132 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
4133 PN->addIncoming(LHS, LHSBlock);
4134 PN->addIncoming(RHS, RHSBlock);
  return PN;
}
4138 Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
4139 return Visit(E->getChosenSubExpr());
4142 Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
4143 QualType Ty = VE->getType();
4145 if (Ty->isVariablyModifiedType())
4146 CGF.EmitVariablyModifiedType(Ty);
4148 Address ArgValue = Address::invalid();
4149 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
4151 llvm::Type *ArgTy = ConvertType(VE->getType());
4153 // If EmitVAArg fails, emit an error.
4154 if (!ArgPtr.isValid()) {
4155 CGF.ErrorUnsupported(VE, "va_arg expression");
4156 return llvm::UndefValue::get(ArgTy);
4159 // FIXME Volatility.
4160 llvm::Value *Val = Builder.CreateLoad(ArgPtr);
4162 // If EmitVAArg promoted the type, we must truncate it.
4163 if (ArgTy != Val->getType()) {
4164 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
4165 Val = Builder.CreateIntToPtr(Val, ArgTy);
    else
4167 Val = Builder.CreateTrunc(Val, ArgTy);
  }

  return Val;
}
4173 Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
4174 return CGF.EmitBlockLiteral(block);
4177 // Convert a vec3 to vec4, or vice versa.
4178 static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
4179 Value *Src, unsigned NumElementsDst) {
4180 llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
4181 SmallVector<llvm::Constant*, 4> Args;
4182 Args.push_back(Builder.getInt32(0));
4183 Args.push_back(Builder.getInt32(1));
4184 Args.push_back(Builder.getInt32(2));
4185 if (NumElementsDst == 4)
4186 Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
4187 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
4188 return Builder.CreateShuffleVector(Src, UnV, Mask);
4191 // Create cast instructions for converting LLVM value \p Src to LLVM type \p
4192 // DstTy. \p Src has the same size as \p DstTy. Both are single value types
4193 // but could be scalar or vectors of different lengths, and either can be
4195 // There are 4 cases:
4196 // 1. non-pointer -> non-pointer : needs 1 bitcast
4197 // 2. pointer -> pointer : needs 1 bitcast or addrspacecast
4198 // 3. pointer -> non-pointer
4199 // a) pointer -> intptr_t : needs 1 ptrtoint
4200 // b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
4201 // 4. non-pointer -> pointer
4202 // a) intptr_t -> pointer : needs 1 inttoptr
4203 // b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
4204 // Note: for cases 3b and 4b two casts are required since LLVM casts do not
4205 // allow casting directly between pointer types and non-integer non-pointer
// types.
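//
// Example on a typical 64-bit target (illustrative): converting an i64 value
// to an i8* is case 4a (a single inttoptr), while converting a double to an
// i8* is case 4b (bitcast the double to i64, then inttoptr).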
4207 static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
4208 const llvm::DataLayout &DL,
4209 Value *Src, llvm::Type *DstTy,
4210 StringRef Name = "") {
4211 auto SrcTy = Src->getType();
4214 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
4215 return Builder.CreateBitCast(Src, DstTy, Name);
4218 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
4219 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
4222 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
4224 if (!DstTy->isIntegerTy())
4225 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
4227 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
4231 if (!SrcTy->isIntegerTy())
4232 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
4234 return Builder.CreateIntToPtr(Src, DstTy, Name);
4237 Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
4238 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
4239 llvm::Type *DstTy = ConvertType(E->getType());
4241 llvm::Type *SrcTy = Src->getType();
4242 unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
4243 cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
4244 unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
4245 cast<llvm::VectorType>(DstTy)->getNumElements() : 0;
4247 // Going from vec3 to non-vec3 is a special case and requires a shuffle
4248 // vector to get a vec4, then a bitcast if the target type is different.
4249 if (NumElementsSrc == 3 && NumElementsDst != 3) {
4250 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
4252 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4253 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                         DstTy);
    }

4257 Src->setName("astype");
    return Src;
  }
4261 // Going from non-vec3 to vec3 is a special case and requires a bitcast
4262 // to vec4 if the original type is not vec4, then a shuffle vector to
4264 if (NumElementsSrc != 3 && NumElementsDst == 3) {
4265 if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4266 auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
4267 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                         Vec4Ty);
    }

4271 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
4272 Src->setName("astype");
    return Src;
  }
4276 return Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
4277 Src, DstTy, "astype");
4280 Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
4281 return CGF.EmitAtomicExpr(E).getScalarVal();
4284 //===----------------------------------------------------------------------===//
4285 // Entry Point into this File
4286 //===----------------------------------------------------------------------===//
4288 /// Emit the computation of the specified expression of scalar type, ignoring
/// the result.
4290 Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
4291 assert(E && hasScalarEvaluationKind(E->getType()) &&
4292 "Invalid scalar expression to emit");
4294 return ScalarExprEmitter(*this, IgnoreResultAssign)
4295 .Visit(const_cast<Expr *>(E));
4298 /// Emit a conversion from the specified type to the specified destination type,
4299 /// both of which are LLVM scalar types.
4300 Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
4302 SourceLocation Loc) {
4303 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
4304 "Invalid scalar expression to emit");
4305 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
4308 /// Emit a conversion from the specified complex type to the specified
4309 /// destination type, where the destination type is an LLVM scalar type.
4310 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                       QualType SrcTy,
                                                       QualType DstTy,
4313 SourceLocation Loc) {
4314 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
4315 "Invalid complex -> scalar conversion");
4316 return ScalarExprEmitter(*this)
4317 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4321 llvm::Value *CodeGenFunction::
4322 EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4323 bool isInc, bool isPre) {
4324 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4327 LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4328 // object->isa or (*object).isa
4329 // Generate code as for: *(Class*)object
4331 Expr *BaseExpr = E->getBase();
4332 Address Addr = Address::invalid();
4333 if (BaseExpr->isRValue()) {
4334 Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
  } else {
4336 Addr = EmitLValue(BaseExpr).getAddress();
  }
4339 // Cast the address to Class*.
4340 Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4341 return MakeAddrLValue(Addr, E->getType());
4345 LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4346 const CompoundAssignOperator *E) {
4347 ScalarExprEmitter Scalar(*this);
4348 Value *Result = nullptr;
4349 switch (E->getOpcode()) {
4350 #define COMPOUND_OP(Op) \
4351 case BO_##Op##Assign: \
4352 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4389 llvm_unreachable("Not valid compound assignment operators");
4392 llvm_unreachable("Unhandled compound assignment operator");
4395 Value *CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr,
4396 ArrayRef<Value *> IdxList,
                                               bool SignedIndices,
                                               bool IsSubtraction,
                                               SourceLocation Loc,
4400 const Twine &Name) {
4401 Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
4403 // If the pointer overflow sanitizer isn't enabled, do nothing.
4404 if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

4407 // If the GEP has already been reduced to a constant, leave it be.
4408 if (isa<llvm::Constant>(GEPVal))
    return GEPVal;

4411 // Only check for overflows in the default address space.
4412 if (GEPVal->getType()->getPointerAddressSpace())
    return GEPVal;
4415 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
4416 assert(GEP->isInBounds() && "Expected inbounds GEP");
4418 SanitizerScope SanScope(this);
4419 auto &VMContext = getLLVMContext();
4420 const auto &DL = CGM.getDataLayout();
4421 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
4423 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
4424 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4425 auto *SAddIntrinsic =
4426 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
4427 auto *SMulIntrinsic =
4428 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
4430 // The total (signed) byte offset for the GEP.
4431 llvm::Value *TotalOffset = nullptr;
4432 // The offset overflow flag - true if the total offset overflows.
4433 llvm::Value *OffsetOverflows = Builder.getFalse();
4435 /// Return the result of the given binary operation.
4436 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
4437 llvm::Value *RHS) -> llvm::Value * {
4438 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
4440 // If the operands are constants, return a constant result.
4441 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
4442 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
4444 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
4445 /*Signed=*/true, N);
        if (HasOverflow)
4447 OffsetOverflows = Builder.getTrue();
4448 return llvm::ConstantInt::get(VMContext, N);
      }
    }
4452 // Otherwise, compute the result with checked arithmetic.
4453 auto *ResultAndOverflow = Builder.CreateCall(
4454 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
4455 OffsetOverflows = Builder.CreateOr(
4456 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
4457 return Builder.CreateExtractValue(ResultAndOverflow, 0);
4460 // Determine the total byte offset by looking at each GEP operand.
4461 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
4462 GTI != GTE; ++GTI) {
4463 llvm::Value *LocalOffset;
4464 auto *Index = GTI.getOperand();
4465 // Compute the local offset contributed by this indexing step:
4466 if (auto *STy = GTI.getStructTypeOrNull()) {
4467 // For struct indexing, the local offset is the byte position of the
4469 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
4470 LocalOffset = llvm::ConstantInt::get(
4471 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
4473 // Otherwise this is array-like indexing. The local offset is the index
4474 // multiplied by the element size.
4475 auto *ElementSize = llvm::ConstantInt::get(
4476 IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
4477 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
4478 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
4481 // If this is the first offset, set it as the total offset. Otherwise, add
4482 // the local offset into the running total.
4483 if (!TotalOffset || TotalOffset == Zero)
4484 TotalOffset = LocalOffset;
    else
4486 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }
4489 // Common case: if the total offset is zero, don't emit a check.
4490 if (TotalOffset == Zero)
    return GEPVal;
4493 // Now that we've computed the total offset, add it to the base pointer (with
4494 // wrapping semantics).
4495 auto *IntPtr = Builder.CreatePtrToInt(GEP->getPointerOperand(), IntPtrTy);
4496 auto *ComputedGEP = Builder.CreateAdd(IntPtr, TotalOffset);
4498 // The GEP is valid if:
4499 // 1) The total offset doesn't overflow, and
4500 // 2) The sign of the difference between the computed address and the base
4501 // pointer matches the sign of the total offset.
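  // Concrete illustration (a sketch): for 'p + i' with a signed index, if the
  // total byte offset is negative the computed address must compare
  // unsigned-below the base pointer (e.g. '(uintptr_t)(p - 1) < (uintptr_t)p'),
  // and if it is non-negative it must compare unsigned-at-or-above it; in
  // every case the intptr-sized offset arithmetic itself must not overflow.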
4502 llvm::Value *ValidGEP;
4503 auto *NoOffsetOverflow = Builder.CreateNot(OffsetOverflows);
4504 if (SignedIndices) {
4505 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
4506 auto *PosOrZeroOffset = Builder.CreateICmpSGE(TotalOffset, Zero);
4507 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
4508 ValidGEP = Builder.CreateAnd(
4509 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid),
        NoOffsetOverflow);
4511 } else if (!SignedIndices && !IsSubtraction) {
4512 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
4513 ValidGEP = Builder.CreateAnd(PosOrZeroValid, NoOffsetOverflow);
  } else {
4515 auto *NegOrZeroValid = Builder.CreateICmpULE(ComputedGEP, IntPtr);
4516 ValidGEP = Builder.CreateAnd(NegOrZeroValid, NoOffsetOverflow);
  }
4519 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
4520 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
4521 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4522 EmitCheck(std::make_pair(ValidGEP, SanitizerKind::PointerOverflow),
4523 SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}