//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
using namespace clang;
using namespace CodeGen;
using llvm::Value;
//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
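/// For example, adding the i8 constants 100 and 100 sets \p Result to the
/// wrapped value -56 and reports overflow, while 100 + 27 = 127 is exact and
/// does not.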
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    if (Signed)
      Result = LHSAP.sadd_ov(RHSAP, Overflow);
    else
      Result = LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    if (Signed)
      Result = LHSAP.ssub_ov(RHSAP, Overflow);
    else
      Result = LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    if (Signed)
      Result = LHSAP.smul_ov(RHSAP, Overflow);
    else
      Result = LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;  // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E;      // Entire expr, for error unsupported. May not be binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }
  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }
  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }
  /// Check if either operand is a fixed point type or integer type, with at
  /// least one being a fixed point type. In such cases, this operation did not
  /// follow the usual arithmetic conversions, so the two operands may not have
  /// the same type.
  bool isFixedPointBinOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}
/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                        const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return llvm::None;

  QualType BaseTy = Base->getType();
  if (!BaseTy->isPromotableIntegerType() ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return llvm::None;

  return BaseTy;
}
/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).hasValue();
}
/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types is less than half the size of the promoted type.
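  // E.g. with 32-bit int, two promoted uint8_t operands multiply to at most
  // 255 * 255 = 65025, which always fits (2 * 8 < 32); two promoted uint16_t
  // operands can reach 65535 * 65535, which does not, so that check stays.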
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}
/// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
static void updateFastMathFlags(llvm::FastMathFlags &FMF,
                                FPOptions FPFeatures) {
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
}

/// Propagate fast-math flags from \p Op to the instruction in \p V.
static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
  if (auto *I = dyn_cast<llvm::Instruction>(V)) {
    llvm::FastMathFlags FMF = I->getFastMathFlags();
    updateFastMathFlags(FMF, Op.FPFeatures);
    I->setFastMathFlags(FMF);
  }
  return V;
}
class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }
  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//
  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }
  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
            dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy =
          dyn_cast<TypedefType>(E->getType()))
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.EmitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }
  /// EmitLoadOfLValue - Given an expression with scalar type that represents
  /// an l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }
  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);
  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);
  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
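
    // A default-constructed ScalarConversionOpts disables all of these checks;
    // constructing it from CGF.SanOpts enables whichever implicit-conversion
    // sanitizers are currently active.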
  };
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());
  /// Convert between either a fixed point and another fixed point, or a fixed
  /// point and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);
  Value *EmitFixedPointConversion(Value *Src, FixedPointSemantics &SrcFixedSema,
                                  FixedPointSemantics &DstFixedSema,
                                  SourceLocation Loc,
                                  bool DstIsInteger = false);
  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);
  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }
  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }
  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//
  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(CGF.getContext().getSourceManager());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
    llvm::Value *Args[] = {
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, Version.getMajor()),
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, Min ? *Min : 0),
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, SMin ? *SMin : 0),
    };

    return CGF.EmitBuiltinAvailable(Args);
  }
  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);
  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, though, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
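      // SOB_Defined corresponds to -fwrapv, SOB_Undefined to the default
      // C/C++ rules, and SOB_Trapping to -ftrapv.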
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
      return propagateFMFlags(V, Ops);
    }
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide the LHS of a shift is.
  static Value *GetWidthMinusOneValue(Value *LHS, Value *RHS);
  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }
  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP) \
  Value *VisitBin ## OP(const BinaryOperator *E) {                         \
    return Emit ## OP(EmitBinOps(E));                                      \
  }                                                                        \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP
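
  // HANDLEBINOP(Mul), for instance, expands to both VisitBinMul and
  // VisitBinMulAssign, so each arithmetic operator gets a plain and a
  // compound-assignment visitor from a single definition.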
  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP
  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
};
} // end anonymous namespace.
//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//
/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}
void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;
  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
    CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // toward zero).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
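
  // E.g. for a conversion to i8 the bounds become (-129.0, 128.0): every
  // source value strictly inside that open range truncates into [-128, 127].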
  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
      CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
    Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
    Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}
// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value.
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
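  // E.g. for an i32 -> i8 truncation this emits:
  //   %anyext = sext/zext i8 %dst to i32
  //   %truncheck = icmp eq i32 %anyext, %src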
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}
void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from a larger unsigned type to a smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}
// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // NOTE: zero value is considered to be non-negative.
  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
                                       const char *Name) -> Value * {
    // Is this value a signed type?
    bool VSigned = VType->isSignedIntegerOrEnumerationType();
    llvm::Type *VTy = V->getType();
    if (!VSigned) {
      // If the value is unsigned, then it is never negative.
      // FIXME: can we encounter non-scalar VTy here?
      return llvm::ConstantInt::getFalse(VTy->getContext());
    }
    // Get the zero of the same type with which we will be comparing.
    llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
    // %V.isnegative = icmp slt %V, 0
    // I.e. is %V *strictly* less than zero, does it have a negative value?
    return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                              llvm::Twine(Name) + "." + V->getName() +
                                  ".negativitycheck");
  };

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
  // 3. Now, was the 'negativity status' preserved during the conversion?
  //    NOTE: conversion from negative to zero is considered to change the sign.
  //    (We want to get 'false' when the conversion changed the sign.)
  //    So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}
void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) always anyways.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to a *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
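  // E.g. i8 -> i32 sign-extends and u8 -> i32 zero-extends, so neither can
  // flip the sign.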
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from a signed type, then no check is needed.
    // Because here the sign change check is interchangeable with the
    // truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from a larger unsigned type to a smaller signed
    // type, let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}
/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up this
  // function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isBooleanType())
      // It is important that we check this before checking if the dest type is
      // an integer because booleans are technically integer types.
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Src, "tobool");
    if (DstType->isFixedPointType() || DstType->isIntegerType())
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion from a fixed point type to another type.");
  } else if (DstType->isFixedPointType()) {
    if (SrcType->isIntegerType())
      // This also includes converting booleans and enums to fixed point types.
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion to a fixed point type from another type.");
  }

  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or FPExt,
      // depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    if (Opts.EmitImplicitIntegerSignChangeChecks)
      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
                                 NoncanonicalDstType, Loc);

    return Src;
  }
  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value *IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be a ptr-to-int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type.
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type is
    // the same as the vector's element type (sans qualifiers).
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements.
    unsigned NumElements = DstTy->getVectorNumElements();
    return Builder.CreateVectorSplat(NumElements, Src, "splat");
  }

  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
    unsigned DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(Src, DstTy, "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = SrcTy->getVectorElementType();
    llvm::Type *DstElementTy = DstTy->getVectorElementType();
    (void)DstElementTy; // Only used in assert()

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }
1346 Value *Res = nullptr;
1347 llvm::Type *ResTy = DstTy;
1349 // An overflowing conversion has undefined behavior if either the source type
1350 // or the destination type is a floating-point type. However, we consider the
1351 // range of representable values for all floating-point types to be
1352 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1353 // floating-point type.
1354 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1355 OrigSrcType->isFloatingType())
1356 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1359 // Cast to half through float if half isn't a native type.
1360 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1361 // Make sure we cast in a single step if from another FP type.
1362 if (SrcTy->isFloatingPointTy()) {
1363 // Use the intrinsic if the half type itself isn't supported
1364 // (as opposed to operations on half, available with NativeHalfType).
1365 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1366 return Builder.CreateCall(
1367 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1368 // If the half type is supported, just use an fptrunc.
1369 return Builder.CreateFPTrunc(Src, DstTy);
1371 DstTy = CGF.FloatTy;
1374 if (isa<llvm::IntegerType>(SrcTy)) {
1375 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1376 if (SrcType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1379 if (isa<llvm::IntegerType>(DstTy))
1380 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1381 else if (InputSigned)
1382 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1384 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1385 } else if (isa<llvm::IntegerType>(DstTy)) {
1386 assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
1387 if (DstType->isSignedIntegerOrEnumerationType())
1388 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1390 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1392 assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
1393 "Unknown real conversion");
1394 if (DstTy->getTypeID() < SrcTy->getTypeID())
1395 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1397 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1400 if (DstTy != ResTy) {
1401 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1402 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1403 Res = Builder.CreateCall(
1404 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1407 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1411 if (Opts.EmitImplicitIntegerTruncationChecks)
1412 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1413 NoncanonicalDstType, Loc);
1415 if (Opts.EmitImplicitIntegerSignChangeChecks)
1416 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1417 NoncanonicalDstType, Loc);
Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
                                                   QualType DstTy,
                                                   SourceLocation Loc) {
  FixedPointSemantics SrcFPSema =
      CGF.getContext().getFixedPointSemantics(SrcTy);
  FixedPointSemantics DstFPSema =
      CGF.getContext().getFixedPointSemantics(DstTy);
  return EmitFixedPointConversion(Src, SrcFPSema, DstFPSema, Loc,
                                  DstTy->isIntegerType());
}
Value *ScalarExprEmitter::EmitFixedPointConversion(
    Value *Src, FixedPointSemantics &SrcFPSema, FixedPointSemantics &DstFPSema,
    SourceLocation Loc, bool DstIsInteger) {
  using llvm::APInt;
  using llvm::ConstantInt;
  using llvm::Value;

  unsigned SrcWidth = SrcFPSema.getWidth();
  unsigned DstWidth = DstFPSema.getWidth();
  unsigned SrcScale = SrcFPSema.getScale();
  unsigned DstScale = DstFPSema.getScale();
  bool SrcIsSigned = SrcFPSema.isSigned();
  bool DstIsSigned = DstFPSema.isSigned();

  llvm::Type *DstIntTy = Builder.getIntNTy(DstWidth);

  Value *Result = Src;
  unsigned ResultWidth = SrcWidth;

  // Downscale.
  if (DstScale < SrcScale) {
    // When converting to integers, we round towards zero. For negative numbers,
    // right shifting rounds towards negative infinity. In this case, we can
    // just round up before shifting.
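    // E.g. with SrcScale = 1, the value -1.5 is stored as -3; a bare shift
    // gives -3 >> 1 == -2, but truncation toward zero wants -1. Adding the
    // low bit first gives (-3 + 1) >> 1 == -1.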
    if (DstIsInteger && SrcIsSigned) {
      Value *Zero = llvm::Constant::getNullValue(Result->getType());
      Value *IsNegative = Builder.CreateICmpSLT(Result, Zero);
      Value *LowBits = ConstantInt::get(
          CGF.getLLVMContext(), APInt::getLowBitsSet(ResultWidth, SrcScale));
      Value *Rounded = Builder.CreateAdd(Result, LowBits);
      Result = Builder.CreateSelect(IsNegative, Rounded, Result);
    }

    Result = SrcIsSigned
                 ? Builder.CreateAShr(Result, SrcScale - DstScale, "downscale")
                 : Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
  }

  if (!DstFPSema.isSaturated()) {
    // Resize.
    Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");

    // Upscale.
    if (DstScale > SrcScale)
      Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
  } else {
    // Adjust the number of fractional bits.
    if (DstScale > SrcScale) {
      // Compare to DstWidth to prevent resizing twice.
      ResultWidth = std::max(SrcWidth + DstScale - SrcScale, DstWidth);
      llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth);
      Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
      Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
    }

    // Handle saturation.
    bool LessIntBits = DstFPSema.getIntegralBits() < SrcFPSema.getIntegralBits();
    if (LessIntBits) {
      Value *Max = ConstantInt::get(
          CGF.getLLVMContext(),
          APFixedPoint::getMax(DstFPSema).getValue().extOrTrunc(ResultWidth));
      Value *TooHigh = SrcIsSigned ? Builder.CreateICmpSGT(Result, Max)
                                   : Builder.CreateICmpUGT(Result, Max);
      Result = Builder.CreateSelect(TooHigh, Max, Result, "satmax");
    }
    // Cannot overflow min to dest type if src is unsigned since all fixed
    // point types can cover the unsigned min of 0.
    if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
      Value *Min = ConstantInt::get(
          CGF.getLLVMContext(),
          APFixedPoint::getMin(DstFPSema).getValue().extOrTrunc(ResultWidth));
      Value *TooLow = Builder.CreateICmpSLT(Result, Min);
      Result = Builder.CreateSelect(TooLow, Min, Result, "satmin");
    }

    // Resize the integer part to get the final destination size.
    if (ResultWidth != DstWidth)
      Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
  }
  return Result;
}
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
    CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
    SourceLocation Loc) {
  // Get the source element type.
  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    //  Complex != 0  -> (Real != 0) | (Imag != 0)
    Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type."
  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
}
1538 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
}
1542 /// Emit a sanitization check for the given "binary" operation (which
1543 /// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1)
/// are \c true.
1546 void ScalarExprEmitter::EmitBinOpCheck(
1547 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1548 assert(CGF.IsSanitizerScope);
1549 SanitizerHandler Check;
1550 SmallVector<llvm::Constant *, 4> StaticData;
1551 SmallVector<llvm::Value *, 2> DynamicData;
1553 BinaryOperatorKind Opcode = Info.Opcode;
1554 if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1555 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1557 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1558 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1559 if (UO && UO->getOpcode() == UO_Minus) {
1560 Check = SanitizerHandler::NegateOverflow;
1561 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
1565 // Shift LHS negative or too large, or RHS out of bounds.
1566 Check = SanitizerHandler::ShiftOutOfBounds;
1567 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1568 StaticData.push_back(
1569 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1570 StaticData.push_back(
1571 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (e.g. INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      switch (Opcode) {
      case BO_Add: Check = SanitizerHandler::AddOverflow; break;
      case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
      case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
      default: llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    }
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
}
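// Illustrative example (not from the original source): for 'int a, b; a + b'
// under -fsanitize=signed-integer-overflow, the check value passed in here is
// the negated overflow flag of @llvm.sadd.with.overflow.i32, and EmitCheck
// emits a conditional branch to a ubsan handler such as
// __ubsan_handle_add_overflow, passing the source location, the type
// descriptor, and both operand values.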
//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//
1597 Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1598 CGF.ErrorUnsupported(E, "scalar expression");
  if (E->getType()->isVoidType())
    return nullptr;
  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
}
1604 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1606 if (E->getNumSubExprs() == 2) {
1607 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
    Value *Mask = RHS;
1611 llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
1612 unsigned LHSElts = LTy->getNumElements();
    llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());

    // Mask off the high bits of each shuffle index.
    Value *MaskBits =
        llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
    Mask = Builder.CreateAnd(Mask, MaskBits, "mask");

    // newv = undef
    // mask = mask & maskbits
    // for each elt
    //   n = extract mask i
    //   x = extract val n
    //   newv = insert newv, x, i
1629 llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
1630 MTy->getNumElements());
1631 Value* NewV = llvm::UndefValue::get(RTy);
1632 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1633 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1634 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1636 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
      NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
    }
    return NewV;
  }
1642 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1643 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1645 SmallVector<llvm::Constant*, 32> indices;
1646 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1647 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1648 // Check for -1 and output it as undef in the IR.
1649 if (Idx.isSigned() && Idx.isAllOnesValue())
1650 indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
1652 indices.push_back(Builder.getInt32(Idx.getZExtValue()));
1655 Value *SV = llvm::ConstantVector::get(indices);
  return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}
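// Illustrative example (assumed typical usage, not from this file):
//   typedef int v4si __attribute__((ext_vector_type(4)));
//   v4si r = __builtin_shufflevector(a, b, 3, 2, 1, 0);   // reverse of 'a'
// lowers to a single constant-mask shuffle:
//   %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b,
//                            <4 x i32> <i32 3, i32 2, i32 1, i32 0>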
1659 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1660 QualType SrcType = E->getSrcExpr()->getType(),
1661 DstType = E->getType();
1663 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1665 SrcType = CGF.getContext().getCanonicalType(SrcType);
1666 DstType = CGF.getContext().getCanonicalType(DstType);
1667 if (SrcType == DstType) return Src;
1669 assert(SrcType->isVectorType() &&
1670 "ConvertVector source type must be a vector");
1671 assert(DstType->isVectorType() &&
1672 "ConvertVector destination type must be a vector");
1674 llvm::Type *SrcTy = Src->getType();
1675 llvm::Type *DstTy = ConvertType(DstType);
  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;
1681 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1682 DstEltType = DstType->castAs<VectorType>()->getElementType();
1684 assert(SrcTy->isVectorTy() &&
1685 "ConvertVector source IR type must be a vector");
1686 assert(DstTy->isVectorTy() &&
1687 "ConvertVector destination IR type must be a vector");
1689 llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
1690 *DstEltTy = DstTy->getVectorElementType();
1692 if (DstEltType->isBooleanType()) {
1693 assert((SrcEltTy->isFloatingPointTy() ||
1694 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1696 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1697 if (SrcEltTy->isFloatingPointTy()) {
      return Builder.CreateFCmpUNE(Src, Zero, "tobool");
    } else {
      return Builder.CreateICmpNE(Src, Zero, "tobool");
    }
  }
1704 // We have the arithmetic types: real int/float.
1705 Value *Res = nullptr;
1707 if (isa<llvm::IntegerType>(SrcEltTy)) {
1708 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1709 if (isa<llvm::IntegerType>(DstEltTy))
1710 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1711 else if (InputSigned)
1712 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1715 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1716 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1717 if (DstEltType->isSignedIntegerOrEnumerationType())
1718 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
1722 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1723 "Unknown real conversion");
1724 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1725 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  return Res;
}
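// Illustrative example (assumed typical usage, not from this file):
//   typedef float v4sf __attribute__((ext_vector_type(4)));
//   typedef int   v4si __attribute__((ext_vector_type(4)));
//   v4si i = __builtin_convertvector(f, v4si);   // f : v4sf
// lowers to one element-wise cast:
//   %conv = fptosi <4 x float> %f to <4 x i32>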
1733 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1734 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1735 CGF.EmitIgnoredExpr(E->getBase());
    return CGF.emitScalarConstant(Constant, E);
  }
1738 Expr::EvalResult Result;
1739 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1740 llvm::APSInt Value = Result.Val.getInt();
1741 CGF.EmitIgnoredExpr(E->getBase());
    return Builder.getInt(Value);
  }

  return EmitLoadOfLValue(E);
}
1749 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1750 TestAndClearIgnoreResultAssign();
  // Emit subscript expressions in rvalue contexts. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
1756 if (!E->getBase()->getType()->isVectorType())
1757 return EmitLoadOfLValue(E);
  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
1761 Value *Base = Visit(E->getBase());
1762 Value *Idx = Visit(E->getIdx());
1763 QualType IdxTy = E->getIdx()->getType();
1765 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1766 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
  return Builder.CreateExtractElement(Base, Idx, "vecext");
}
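// Illustrative example (not from the original source): for an ext_vector
//   float4 v; float x = v[2];
// the vector subscript lowers to a single element extract:
//   %vecext = extractelement <4 x float> %v, i32 2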
1771 static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1772 unsigned Off, llvm::Type *I32Ty) {
  int MV = SVI->getMaskValue(Idx);
  if (MV == -1)
    return llvm::UndefValue::get(I32Ty);
  return llvm::ConstantInt::get(I32Ty, Off + MV);
}
1779 static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1780 if (C->getBitWidth() != 32) {
1781 assert(llvm::ConstantInt::isValueValidForType(I32Ty,
1782 C->getZExtValue()) &&
1783 "Index operand too large for shufflevector mask!");
    return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
  }
  return C;
}
1789 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1790 bool Ignore = TestAndClearIgnoreResultAssign();
1792 assert (Ignore == false && "init list ignored");
1793 unsigned NumInitElements = E->getNumInits();
1795 if (E->hadArrayRangeDesignator())
1796 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1798 llvm::VectorType *VType =
1799 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  }

  unsigned ResElts = VType->getNumElements();
  // Loop over the initializers, collecting the Value for each and remembering
  // whether the source was a swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
1817 unsigned CurIdx = 0;
1818 bool VIsUndefShuffle = false;
1819 llvm::Value *V = llvm::UndefValue::get(VType);
1820 for (unsigned i = 0; i != NumInitElements; ++i) {
1821 Expr *IE = E->getInit(i);
1822 Value *Init = Visit(IE);
1823 SmallVector<llvm::Constant*, 16> Args;
1825 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1827 // Handle scalar elements. If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
1832 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1834 if (EI->getVectorOperandType()->getNumElements() == ResElts) {
1835 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
        Value *LHS = nullptr, *RHS = nullptr;
        if (CurIdx == 0) {
          // insert into undef -> shuffle (src, undef)
          // shufflemask must use an i32
          Args.push_back(getAsInt32(C, CGF.Int32Ty));
          Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

          LHS = EI->getVectorOperand();
          RHS = V;
          VIsUndefShuffle = true;
1846 } else if (VIsUndefShuffle) {
1847 // insert into undefshuffle && size match -> shuffle (v, src)
1848 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1849 for (unsigned j = 0; j != CurIdx; ++j)
1850 Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
1851 Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
1852 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1854 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1855 RHS = EI->getVectorOperand();
1856 VIsUndefShuffle = false;
1858 if (!Args.empty()) {
1859 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
          V = Builder.CreateShuffleVector(LHS, RHS, Mask);
          ++CurIdx;
          continue;
        }
      }

      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsUndefShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = VVT->getNumElements();
1875 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1876 // input is the same width as the vector being constructed, generate an
1877 // optimized shuffle of the swizzle input into the result.
1878 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1879 if (isa<ExtVectorElementExpr>(IE)) {
1880 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1881 Value *SVOp = SVI->getOperand(0);
1882 llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
1884 if (OpTy->getNumElements() == ResElts) {
1885 for (unsigned j = 0; j != CurIdx; ++j) {
1886 // If the current vector initializer is a shuffle with undef, merge
1887 // this shuffle directly into it.
1888 if (VIsUndefShuffle) {
              Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
                                        CGF.Int32Ty));
            } else {
              Args.push_back(Builder.getInt32(j));
            }
          }
          for (unsigned j = 0, je = InitElts; j != je; ++j)
1896 Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
1897 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
          if (VIsUndefShuffle)
            V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

          Init = SVOp;
        }
      }
1906 // Extend init to result vector length, and then shuffle its contribution
1907 // to the vector initializer into V.
      if (Args.empty()) {
        for (unsigned j = 0; j != InitElts; ++j)
1910 Args.push_back(Builder.getInt32(j));
1911 Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1912 llvm::Constant *Mask = llvm::ConstantVector::get(Args);
        Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
                                           Mask, "vext");

        Args.clear();
        for (unsigned j = 0; j != CurIdx; ++j)
1918 Args.push_back(Builder.getInt32(j));
1919 for (unsigned j = 0; j != InitElts; ++j)
1920 Args.push_back(Builder.getInt32(j+Offset));
        Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
      }
1924 // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1925 // merging subsequent shuffles into this one.
      if (CurIdx == 0)
        std::swap(V, Init);
      llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1929 V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
      VIsUndefShuffle = isa<llvm::UndefValue>(Init);
      CurIdx += InitElts;
    }
1934 // FIXME: evaluate codegen vs. shuffling against constant null vector.
1935 // Emit remaining default initializers.
1936 llvm::Type *EltTy = VType->getElementType();
1939 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1940 Value *Idx = Builder.getInt32(CurIdx);
1941 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}
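// Illustrative example (not from the original source): for an OpenCL-style
// vector initializer
//   float4 v = (float4)(a, b, 0.0f, 0.0f);
// the loop above produces a chain of inserts, roughly
//   %vecinit  = insertelement <4 x float> undef, float %a, i32 0
//   %vecinit1 = insertelement <4 x float> %vecinit, float %b, i32 1
// with the remaining lanes filled from the element type's null value.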
1947 bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1948 const Expr *E = CE->getSubExpr();
  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;
1953 if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }
1958 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1959 // And that glvalue casts are never null.
    if (ICE->getValueKind() != VK_RValue)
      return false;
  }

  return true;
}
// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
1970 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1971 Expr *E = CE->getSubExpr();
1972 QualType DestTy = CE->getType();
1973 CastKind Kind = CE->getCastKind();
1975 // These cases are generally not written to ignore the result of
1976 // evaluating their sub-expressions, so we clear this now.
1977 bool Ignored = TestAndClearIgnoreResultAssign();
1979 // Since almost all cast kinds apply to scalars, this switch doesn't have
1980 // a default case, so the compiler will warn on a missing case. The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
1984 case CK_BuiltinFnToFnPtr:
1985 llvm_unreachable("builtin functions are handled elsewhere");
1987 case CK_LValueBitCast:
1988 case CK_ObjCObjectLValueCast: {
1989 Address Addr = EmitLValue(E).getAddress(CGF);
1990 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1991 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
    return EmitLoadOfLValue(LV, CE->getExprLoc());
  }
1995 case CK_LValueToRValueBitCast: {
1996 LValue SourceLVal = CGF.EmitLValue(E);
1997 Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
1998 CGF.ConvertTypeForMem(DestTy));
1999 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2000 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
    return EmitLoadOfLValue(DestLV, CE->getExprLoc());
  }
2004 case CK_CPointerToObjCPointerCast:
2005 case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
2008 Value *Src = Visit(const_cast<Expr*>(E));
2009 llvm::Type *SrcTy = Src->getType();
2010 llvm::Type *DstTy = ConvertType(DestTy);
2011 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
2012 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
      llvm_unreachable("wrong cast for pointers in different address spaces"
                       " (must be an address space cast)!");
    }
2017 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2018 if (auto PT = DestTy->getAs<PointerType>())
        CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
                                      /*MayBeNull=*/true,
                                      CodeGenFunction::CFITCK_UnrelatedCast,
                                      CE->getBeginLoc());
    }
2025 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2026 const QualType SrcType = E->getType();
2028 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2029 // Casting to pointer that could carry dynamic information (provided by
2030 // invariant.group) requires launder.
2031 Src = Builder.CreateLaunderInvariantGroup(Src);
2032 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2033 // Casting to pointer that does not carry dynamic information (provided
2034 // by invariant.group) requires stripping it. Note that we don't do it
2035 // if the source could not be dynamic type and destination could be
2036 // dynamic because dynamic information is already laundered. It is
2037 // because launder(strip(src)) == launder(src), so there is no need to
2038 // add extra strip before launder.
        Src = Builder.CreateStripInvariantGroup(Src);
      }
    }
2043 // Update heapallocsite metadata when there is an explicit cast.
2044 if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(Src))
2045 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE))
2046 CGF.getDebugInfo()->
2047 addHeapAllocSiteMetadata(CI, CE->getType(), CE->getExprLoc());
    return Builder.CreateBitCast(Src, DstTy);
  }
2051 case CK_AddressSpaceConversion: {
2052 Expr::EvalResult Result;
2053 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2054 Result.Val.isNullPointer()) {
2055 // If E has side effect, it is emitted even if its final result is a
2056 // null pointer. In that case, a DCE pass should be able to
2057 // eliminate the useless instructions emitted during translating E.
      if (Result.HasSideEffects)
        CGF.EmitIgnoredExpr(E);
2060 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
          ConvertType(DestTy)), DestTy);
    }
2063 // Since target may map different address spaces in AST to the same address
2064 // space, an address space conversion may end up as a bitcast.
2065 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2066 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
        DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
  }
2069 case CK_AtomicToNonAtomic:
2070 case CK_NonAtomicToAtomic:
  case CK_NoOp:
  case CK_UserDefinedConversion:
2073 return Visit(const_cast<Expr*>(E));
2075 case CK_BaseToDerived: {
2076 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2077 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
    Address Base = CGF.EmitPointerWithAlignment(E);
    Address Derived =
        CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2082 CE->path_begin(), CE->path_end(),
2083 CGF.ShouldNullCheckClassCastValue(CE));
2085 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2086 // performed and the object is not of the derived type.
2087 if (CGF.sanitizePerformTypeCheck())
2088 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2089 Derived.getPointer(), DestTy->getPointeeType());
2091 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2092 CGF.EmitVTablePtrCheckForCast(
2093 DestTy->getPointeeType(), Derived.getPointer(),
          /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
          CE->getBeginLoc());

    return Derived.getPointer();
  }
2099 case CK_UncheckedDerivedToBase:
2100 case CK_DerivedToBase: {
    // The EmitPointerWithAlignment path does this fine; just discard
    // the alignment.
    return CGF.EmitPointerWithAlignment(CE).getPointer();
  }

  case CK_Dynamic: {
2107 Address V = CGF.EmitPointerWithAlignment(E);
2108 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
    return CGF.EmitDynamicCast(V, DCE);
  }
2112 case CK_ArrayToPointerDecay:
2113 return CGF.EmitArrayToPointerDecay(E).getPointer();
2114 case CK_FunctionToPointerDecay:
2115 return EmitLValue(E).getPointer(CGF);
2117 case CK_NullToPointer:
2118 if (MustVisitNullValue(E))
2119 CGF.EmitIgnoredExpr(E);
    return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
                                  DestTy);
2124 case CK_NullToMemberPointer: {
2125 if (MustVisitNullValue(E))
2126 CGF.EmitIgnoredExpr(E);
2128 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
  }
2132 case CK_ReinterpretMemberPointer:
2133 case CK_BaseToDerivedMemberPointer:
2134 case CK_DerivedToBaseMemberPointer: {
2135 Value *Src = Visit(E);
2137 // Note that the AST doesn't distinguish between checked and
2138 // unchecked member pointer conversions, so we always have to
2139 // implement checked conversions here. This is inefficient when
2140 // actual control flow may be required in order to perform the
2141 // check, which it is for data member pointers (but not member
2142 // function pointers on Itanium and ARM).
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }
2146 case CK_ARCProduceObject:
2147 return CGF.EmitARCRetainScalarExpr(E);
2148 case CK_ARCConsumeObject:
2149 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2150 case CK_ARCReclaimReturnedObject:
2151 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2152 case CK_ARCExtendBlockObject:
2153 return CGF.EmitARCExtendBlockObject(E);
2155 case CK_CopyAndAutoreleaseBlockObject:
2156 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2158 case CK_FloatingRealToComplex:
2159 case CK_FloatingComplexCast:
2160 case CK_IntegralRealToComplex:
2161 case CK_IntegralComplexCast:
2162 case CK_IntegralComplexToFloatingComplex:
2163 case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
  case CK_ToUnion:
    llvm_unreachable("scalar cast to non-scalar value");
2168 case CK_LValueToRValue:
2169 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2170 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2171 return Visit(const_cast<Expr*>(E));
2173 case CK_IntegralToPointer: {
2174 Value *Src = Visit(const_cast<Expr*>(E));
    // First, convert to the correct width so that we control the kind of
    // extension.
2178 auto DestLLVMTy = ConvertType(DestTy);
2179 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2180 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2181 llvm::Value* IntResult =
2182 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2184 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2186 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2187 // Going from integer to pointer that could be dynamic requires reloading
2188 // dynamic information from invariant.group.
2189 if (DestTy.mayBeDynamicClass())
        IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
    }
    return IntToPtr;
  }
2194 case CK_PointerToIntegral: {
2195 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2196 auto *PtrExpr = Visit(E);
2198 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2199 const QualType SrcType = E->getType();
      // Casting to integer requires stripping dynamic information as it does
      // not carry it.
2203 if (SrcType.mayBeDynamicClass())
        PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
    }

    return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
  }
  case CK_ToVoid: {
    CGF.EmitIgnoredExpr(E);
    return nullptr;
  }
2213 case CK_VectorSplat: {
2214 llvm::Type *DstTy = ConvertType(DestTy);
2215 Value *Elt = Visit(const_cast<Expr*>(E));
2216 // Splat the element across to all elements
2217 unsigned NumElements = DstTy->getVectorNumElements();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }
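  // Illustrative example (not from the original source): a CK_VectorSplat
  // such as 'float4 v = x;' (OpenCL) is broadcast by CreateVectorSplat as an
  // insert into lane 0 followed by a zero-mask shuffle, roughly:
  //   %ins   = insertelement <4 x float> undef, float %x, i32 0
  //   %splat = shufflevector <4 x float> %ins, <4 x float> undef,
  //                          <4 x i32> zeroinitializer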
2221 case CK_FixedPointCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
2225 case CK_FixedPointToBoolean:
2226 assert(E->getType()->isFixedPointType() &&
2227 "Expected src type to be fixed point type");
2228 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
2232 case CK_FixedPointToIntegral:
2233 assert(E->getType()->isFixedPointType() &&
2234 "Expected src type to be fixed point type");
2235 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
2239 case CK_IntegralToFixedPoint:
2240 assert(E->getType()->isIntegerType() &&
2241 "Expected src type to be an integer");
2242 assert(DestTy->isFixedPointType() &&
2243 "Expected dest type to be fixed point type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
2247 case CK_IntegralCast: {
2248 ScalarConversionOpts Opts;
2249 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2250 if (!ICE->isPartOfExplicitCast())
2251 Opts = ScalarConversionOpts(CGF.SanOpts);
2253 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
2256 case CK_IntegralToFloating:
2257 case CK_FloatingToIntegral:
2258 case CK_FloatingCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
2261 case CK_BooleanToSignedIntegral: {
2262 ScalarConversionOpts Opts;
2263 Opts.TreatBooleanAsSigned = true;
2264 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
2267 case CK_IntegralToBoolean:
2268 return EmitIntToBoolConversion(Visit(E));
2269 case CK_PointerToBoolean:
2270 return EmitPointerToBoolConversion(Visit(E), E->getType());
2271 case CK_FloatingToBoolean:
2272 return EmitFloatToBoolConversion(Visit(E));
2273 case CK_MemberPointerToBoolean: {
2274 llvm::Value *MemPtr = Visit(E);
2275 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
  }
2279 case CK_FloatingComplexToReal:
2280 case CK_IntegralComplexToReal:
2281 return CGF.EmitComplexExpr(E, false, true).first;
2283 case CK_FloatingComplexToBoolean:
2284 case CK_IntegralComplexToBoolean: {
2285 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2287 // TODO: kill this function off, inline appropriate case here
    return EmitComplexToScalarConversion(V, E->getType(), DestTy,
                                         CE->getExprLoc());
  }
2292 case CK_ZeroToOCLOpaqueType: {
2293 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2294 DestTy->isOCLIntelSubgroupAVCType()) &&
2295 "CK_ZeroToOCLEvent cast on non-event type");
    return llvm::Constant::getNullValue(ConvertType(DestTy));
  }
2299 case CK_IntToOCLSampler:
    return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
  } // end of switch

  llvm_unreachable("unknown scalar cast");
}
2307 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2308 CodeGenFunction::StmtExprEvaluation eval(CGF);
2309 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2310 !E->getType()->isVoidType());
  if (!RetAlloca.isValid())
    return nullptr;
  return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
                              E->getExprLoc());
}
2317 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2318 CGF.enterFullExpression(E);
2319 CodeGenFunction::RunCleanupsScope Scope(CGF);
2320 Value *V = Visit(E->getSubExpr());
2321 // Defend against dominance problems caused by jumps out of expression
2322 // evaluation through the shared cleanup block.
  Scope.ForceCleanup({&V});
  return V;
}
//===----------------------------------------------------------------------===//
//                                Unary Operators
//===----------------------------------------------------------------------===//
2331 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
                                           llvm::Value *InVal, bool IsInc) {
  BinOpInfo BinOp;
  BinOp.LHS = InVal;
  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2336 BinOp.Ty = E->getType();
2337 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
  // FIXME: once UnaryOperator carries FPFeatures, copy it here.
  BinOp.E = E;
  return BinOp;
}
2343 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2344 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2345 llvm::Value *Amount =
2346 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2347 StringRef Name = IsInc ? "inc" : "dec";
2348 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2349 case LangOptions::SOB_Defined:
2350 return Builder.CreateAdd(InVal, Amount, Name);
2351 case LangOptions::SOB_Undefined:
2352 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    LLVM_FALLTHROUGH;
2355 case LangOptions::SOB_Trapping:
2356 if (!E->canOverflow())
2357 return Builder.CreateNSWAdd(InVal, Amount, Name);
    return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
  }
  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}

namespace {
2364 /// Handles check and update for lastprivate conditional variables.
class OMPLastprivateConditionalUpdateRAII {
private:
  CodeGenFunction &CGF;
2368 const UnaryOperator *E;
public:
  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
                                      const UnaryOperator *E)
      : CGF(CGF), E(E) {}
  ~OMPLastprivateConditionalUpdateRAII() {
2375 if (CGF.getLangOpts().OpenMP)
2376 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
          CGF, E->getSubExpr());
  }
};
} // namespace

llvm::Value *
ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2384 bool isInc, bool isPre) {
2385 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2386 QualType type = E->getSubExpr()->getType();
  llvm::PHINode *atomicPHI = nullptr;
  llvm::Value *value;
  llvm::Value *input;
2391 int amount = (isInc ? 1 : -1);
2392 bool isSubtraction = !isInc;
2394 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2395 type = atomicTy->getValueType();
2396 if (isInc && type->isBooleanType()) {
      llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
      if (isPre) {
        Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
            ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
        return Builder.getTrue();
      }
      // For atomic bool increment, we just store true for preincrement; for
      // postincrement we do an atomic swap with true and return the old value.
2405 return Builder.CreateAtomicRMW(
2406 llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
          llvm::AtomicOrdering::SequentiallyConsistent);
    }
2409 // Special case for atomic increment / decrement on integers, emit
2410 // atomicrmw instructions. We skip this if we want to be doing overflow
2411 // checking, and fall into the slow path with the atomic cmpxchg loop.
2412 if (!type->isBooleanType() && type->isIntegerType() &&
2413 !(type->isUnsignedIntegerType() &&
2414 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2415 CGF.getLangOpts().getSignedOverflowBehavior() !=
2416 LangOptions::SOB_Trapping) {
2417 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2418 llvm::AtomicRMWInst::Sub;
2419 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2420 llvm::Instruction::Sub;
2421 llvm::Value *amt = CGF.EmitToMemory(
2422 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
      llvm::Value *old =
          Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
2425 llvm::AtomicOrdering::SequentiallyConsistent);
      return isPre ? Builder.CreateBinOp(op, old, amt) : old;
    }
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
2430 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2431 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2432 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2433 value = CGF.EmitToMemory(value, type);
2434 Builder.CreateBr(opBB);
2435 Builder.SetInsertPoint(opBB);
2436 atomicPHI = Builder.CreatePHI(value->getType(), 2);
    atomicPHI->addIncoming(value, startBB);
    value = atomicPHI;
  } else {
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
  }
2444 // Special case of integer increment that we have to check first: bool++.
2445 // Due to promotion rules, we get:
2446 // bool++ -> bool = bool + 1
2447 // -> bool = (int)bool + 1
2448 // -> bool = ((int)bool + 1 != 0)
2449 // An interesting aspect of this is that increment is always true.
2450 // Decrement does not have this property.
2451 if (isInc && type->isBooleanType()) {
2452 value = Builder.getTrue();
2454 // Most common case by far: integer increment.
2455 } else if (type->isIntegerType()) {
2456 QualType promotedType;
2457 bool canPerformLossyDemotionCheck = false;
2458 if (type->isPromotableIntegerType()) {
2459 promotedType = CGF.getContext().getPromotedIntegerType(type);
2460 assert(promotedType != type && "Shouldn't promote to the same type.");
2461 canPerformLossyDemotionCheck = true;
2462 canPerformLossyDemotionCheck &=
2463 CGF.getContext().getCanonicalType(type) !=
2464 CGF.getContext().getCanonicalType(promotedType);
2465 canPerformLossyDemotionCheck &=
2466 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2467 type, promotedType);
2468 assert((!canPerformLossyDemotionCheck ||
2469 type->isSignedIntegerOrEnumerationType() ||
2470 promotedType->isSignedIntegerOrEnumerationType() ||
2471 ConvertType(type)->getScalarSizeInBits() ==
2472 ConvertType(promotedType)->getScalarSizeInBits()) &&
2473 "The following check expects that if we do promotion to different "
2474 "underlying canonical type, at least one of the types (either "
2475 "base or promoted) will be signed, or the bitwidths will match.");
2477 if (CGF.SanOpts.hasOneOf(
2478 SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2479 canPerformLossyDemotionCheck) {
2480 // While `x += 1` (for `x` with width less than int) is modeled as
2481 // promotion+arithmetics+demotion, and we can catch lossy demotion with
2482 // ease; inc/dec with width less than int can't overflow because of
2483 // promotion rules, so we omit promotion+demotion, which means that we can
2484 // not catch lossy "demotion". Because we still want to catch these cases
2485 // when the sanitizer is enabled, we perform the promotion, then perform
2486 // the increment/decrement in the wider type, and finally
2487 // perform the demotion. This will catch lossy demotions.
2489 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2490 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2491 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2492 // Do pass non-default ScalarConversionOpts so that sanitizer check is
2494 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2495 ScalarConversionOpts(CGF.SanOpts));
2497 // Note that signed integer inc/dec with width less than int can't
      // overflow because of promotion rules; we're just eliding a few steps
      // here.
2500 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2501 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2502 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2503 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
      value =
          EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
    } else {
2507 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    }
2511 // Next most common: pointer increment.
2512 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2513 QualType type = ptr->getPointeeType();
2515 // VLA types don't have constant size.
2516 if (const VariableArrayType *vla
2517 = CGF.getContext().getAsVariableArrayType(type)) {
2518 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2519 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2520 if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, numElts, "vla.inc");
      else
2523 value = CGF.EmitCheckedInBoundsGEP(
2524 value, numElts, /*SignedIndices=*/false, isSubtraction,
2525 E->getExprLoc(), "vla.inc");
2527 // Arithmetic on function pointers (!) is just +-1.
2528 } else if (type->isFunctionType()) {
2529 llvm::Value *amt = Builder.getInt32(amount);
2531 value = CGF.EmitCastToVoidPtr(value);
2532 if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, amt, "incdec.funcptr");
      else
2535 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
                                           isSubtraction, E->getExprLoc(),
                                           "incdec.funcptr");
      value = Builder.CreateBitCast(value, input->getType());
    // For everything else, we can just do a simple increment.
    } else {
      llvm::Value *amt = Builder.getInt32(amount);
2543 if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, amt, "incdec.ptr");
      else
2546 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
                                           isSubtraction, E->getExprLoc(),
                                           "incdec.ptr");
    }
2551 // Vector increment/decrement.
2552 } else if (type->isVectorType()) {
2553 if (type->hasIntegerRepresentation()) {
2554 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    } else {
      value = Builder.CreateFAdd(
          value,
          llvm::ConstantFP::get(value->getType(), amount),
          isInc ? "inc" : "dec");
    }
2565 } else if (type->isRealFloatingType()) {
    // Add the inc/dec to the real part.
    llvm::Value *amt;
2569 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2570 // Another special case: half FP increment should be done via float
2571 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2572 value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            input, "incdec.conv");
      } else {
        value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
      }
    }
2581 if (value->getType()->isFloatTy())
2582 amt = llvm::ConstantFP::get(VMContext,
2583 llvm::APFloat(static_cast<float>(amount)));
2584 else if (value->getType()->isDoubleTy())
2585 amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<double>(amount)));
    else {
2588 // Remaining types are Half, LongDouble or __float128. Convert from float.
      llvm::APFloat F(static_cast<float>(amount));
      bool ignored;
      const llvm::fltSemantics *FS;
2592 // Don't use getFloatTypeSemantics because Half isn't
2593 // necessarily represented using the "half" LLVM type.
2594 if (value->getType()->isFP128Ty())
2595 FS = &CGF.getTarget().getFloat128Format();
2596 else if (value->getType()->isHalfTy())
2597 FS = &CGF.getTarget().getHalfFormat();
      else
        FS = &CGF.getTarget().getLongDoubleFormat();
2600 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
      amt = llvm::ConstantFP::get(VMContext, F);
    }
2603 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2605 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2606 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2607 value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
                                 CGF.CGM.FloatTy),
            value, "incdec.conv");
      } else {
        value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
      }
    }
  // Objective-C pointer types.
  } else {
2618 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2619 value = CGF.EmitCastToVoidPtr(value);
2621 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2622 if (!isInc) size = -size;
2623 llvm::Value *sizeValue =
2624 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2626 if (CGF.getLangOpts().isSignedOverflowDefined())
2627 value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
2629 value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2630 /*SignedIndices=*/false, isSubtraction,
2631 E->getExprLoc(), "incdec.objptr");
    value = Builder.CreateBitCast(value, input->getType());
  }

  if (atomicPHI) {
2636 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2637 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2638 auto Pair = CGF.EmitAtomicCompareExchange(
2639 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2640 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2641 llvm::Value *success = Pair.second;
2642 atomicPHI->addIncoming(old, curBlock);
2643 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2644 Builder.SetInsertPoint(contBB);
    return isPre ? value : input;
  }
2648 // Store the updated result through the lvalue.
2649 if (LV.isBitField())
2650 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
  else
    CGF.EmitStoreThroughLValue(RValue::get(value), LV);
  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? value : input;
}
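// Illustrative example (not from the original source): for 'int i; ... i++;'
// with the default (undefined) signed-overflow behaviour, the emitted IR is
//   %0   = load i32, i32* %i
//   %inc = add nsw i32 %0, 1
//   store i32 %inc, i32* %i
// and the expression's value is %0 for postincrement, %inc for preincrement.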
2661 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2662 TestAndClearIgnoreResultAssign();
2663 Value *Op = Visit(E->getSubExpr());
2665 // Generate a unary FNeg for FP ops.
2666 if (Op->getType()->isFPOrFPVectorTy())
2667 return Builder.CreateFNeg(Op, "fneg");
  // Emit unary minus with EmitSub so we handle overflow cases etc.
  BinOpInfo BinOp;
  BinOp.RHS = Op;
  BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
  BinOp.Ty = E->getType();
  BinOp.Opcode = BO_Sub;
  // FIXME: once UnaryOperator carries FPFeatures, copy it here.
  BinOp.E = E;
  return EmitSub(BinOp);
}
2680 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2681 TestAndClearIgnoreResultAssign();
2682 Value *Op = Visit(E->getSubExpr());
  return Builder.CreateNot(Op, "neg");
}
2686 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2687 // Perform vector logical not on comparison with zero vector.
2688 if (E->getType()->isExtVectorType()) {
2689 Value *Oper = Visit(E->getSubExpr());
    Value *Zero = llvm::Constant::getNullValue(Oper->getType());
    Value *Result;
    if (Oper->getType()->isFPOrFPVectorTy())
      Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
    else
      Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
    return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
  }
2699 // Compare operand to zero.
2700 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2703 // TODO: Could dynamically modify easy computations here. For example, if
2704 // the operand is an icmp ne, turn into icmp eq.
2705 BoolVal = Builder.CreateNot(BoolVal, "lnot");
2707 // ZExt result to the expr type.
  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}
2711 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2712 // Try folding the offsetof to a constant.
2713 Expr::EvalResult EVResult;
2714 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
2715 llvm::APSInt Value = EVResult.Val.getInt();
    return Builder.getInt(Value);
  }
2719 // Loop over the components of the offsetof to compute the value.
2720 unsigned n = E->getNumComponents();
2721 llvm::Type* ResultType = ConvertType(E->getType());
2722 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2723 QualType CurrentType = E->getTypeSourceInfo()->getType();
2724 for (unsigned i = 0; i != n; ++i) {
2725 OffsetOfNode ON = E->getComponent(i);
2726 llvm::Value *Offset = nullptr;
2727 switch (ON.getKind()) {
2728 case OffsetOfNode::Array: {
2729 // Compute the index
2730 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2731 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2732 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2733 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
      // Save the element type.
      CurrentType =
          CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2739 // Compute the element size
2740 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2741 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2743 // Multiply out to compute the result
      Offset = Builder.CreateMul(Idx, ElemSize);
      break;
    }
2748 case OffsetOfNode::Field: {
2749 FieldDecl *MemberDecl = ON.getField();
2750 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2751 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
      // Compute the index of the field in its parent.
      unsigned i = 0;
2755 // FIXME: It would be nice if we didn't have to loop here!
2756 for (RecordDecl::field_iterator Field = RD->field_begin(),
2757 FieldEnd = RD->field_end();
2758 Field != FieldEnd; ++Field, ++i) {
        if (*Field == MemberDecl)
          break;
      }
2762 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
2764 // Compute the offset to the field
2765 int64_t OffsetInt = RL.getFieldOffset(i) /
2766 CGF.getContext().getCharWidth();
2767 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2769 // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }
2774 case OffsetOfNode::Identifier:
2775 llvm_unreachable("dependent __builtin_offsetof");
2777 case OffsetOfNode::Base: {
2778 if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(E, "virtual base in offsetof");
        continue;
      }
2783 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2784 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2786 // Save the element type.
2787 CurrentType = ON.getBase()->getType();
2789 // Compute the offset to the base.
2790 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2791 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2792 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
      break;
    }
    }
    Result = Builder.CreateAdd(Result, Offset);
  }
  return Result;
}
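// Illustrative example (not from the original source): with
//   struct S { int a; int b[4]; };
//   size_t n = __builtin_offsetof(struct S, b[i]);   // 'i' not a constant
// the loop above emits a multiply of 'i' by the element size (4) for the
// array node and adds the constant field offset of 'b'. Fully constant
// operands never reach this loop; EvaluateAsInt folds them first.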
2802 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2803 /// argument of the sizeof expression as an integer.
Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2806 const UnaryExprOrTypeTraitExpr *E) {
2807 QualType TypeToSize = E->getTypeOfArgument();
2808 if (E->getKind() == UETT_SizeOf) {
2809 if (const VariableArrayType *VAT =
2810 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2811 if (E->isArgumentType()) {
2812 // sizeof(type) - make sure to emit the VLA size.
        CGF.EmitVariablyModifiedType(TypeToSize);
      } else {
2815 // C99 6.5.3.4p2: If the argument is an expression of type
2816 // VLA, it is evaluated.
        CGF.EmitIgnoredExpr(E->getArgumentExpr());
      }
2820 auto VlaSize = CGF.getVLASize(VAT);
2821 llvm::Value *size = VlaSize.NumElts;
2823 // Scale the number of non-VLA elements by the non-VLA element size.
2824 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2825 if (!eltSize.isOne())
        size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);

      return size;
    }
  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
    auto Alignment =
        CGF.getContext()
            .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                E->getTypeOfArgument()->getPointeeType()))
            .getQuantity();
    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
  }
2839 // If this isn't sizeof(vla), the result must be constant; use the constant
2840 // folding logic so we don't have to duplicate it here.
  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}
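// Illustrative example (not from the original source): for
//   void f(int n) { int a[n]; size_t s = sizeof(a); }
// the size is not a compile-time constant, so the VLA path above multiplies
// the runtime element count by the element size, emitting roughly
//   %vla.size = mul nuw i64 4, %n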
2844 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2845 Expr *Op = E->getSubExpr();
2846 if (Op->getType()->isAnyComplexType()) {
2847 // If it's an l-value, load through the appropriate subobject l-value.
2848 // Note that we have to ask E because Op might be an l-value that
2849 // this won't work for, e.g. an Obj-C property.
    if (Op->isGLValue())
      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
                                  E->getExprLoc()).getScalarVal();

    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, false, true).first;
  }

  return Visit(Op);
}
2861 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2862 Expr *Op = E->getSubExpr();
2863 if (Op->getType()->isAnyComplexType()) {
2864 // If it's an l-value, load through the appropriate subobject l-value.
2865 // Note that we have to ask E because Op might be an l-value that
2866 // this won't work for, e.g. an Obj-C property.
2867 if (Op->isGLValue())
2868 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2869 E->getExprLoc()).getScalarVal();
2871 // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, true, false).second;
  }
2875 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2876 // effects are evaluated, but not the actual value.
  if (Op->isGLValue())
    CGF.EmitLValue(Op);
  else
    CGF.EmitScalarExpr(Op, true);
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
//===----------------------------------------------------------------------===//
//                           Binary Operators
//===----------------------------------------------------------------------===//
2888 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  BinOpInfo Result;
  Result.LHS = Visit(E->getLHS());
2892 Result.RHS = Visit(E->getRHS());
2893 Result.Ty = E->getType();
2894 Result.Opcode = E->getOpcode();
  Result.FPFeatures = E->getFPFeatures();
  Result.E = E;
  return Result;
}
2900 LValue ScalarExprEmitter::EmitCompoundAssignLValue(
2901 const CompoundAssignOperator *E,
    Value *(ScalarExprEmitter::*Func)(const BinOpInfo &), Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  if (E->getComputationResultType()->isAnyComplexType())
2908 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
2910 // Emit the RHS first. __block variables need to have the rhs evaluated
2911 // first, plus this should improve codegen a little.
2912 OpInfo.RHS = Visit(E->getRHS());
2913 OpInfo.Ty = E->getComputationResultType();
2914 OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeatures();
  OpInfo.E = E;
2917 // Load/convert the LHS.
2918 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
2920 llvm::PHINode *atomicPHI = nullptr;
2921 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
2922 QualType type = atomicTy->getValueType();
2923 if (!type->isBooleanType() && type->isIntegerType() &&
2924 !(type->isUnsignedIntegerType() &&
2925 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2926 CGF.getLangOpts().getSignedOverflowBehavior() !=
2927 LangOptions::SOB_Trapping) {
2928 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
2929 llvm::Instruction::BinaryOps Op;
2930 switch (OpInfo.Opcode) {
2931 // We don't have atomicrmw operands for *, %, /, <<, >>
      case BO_MulAssign: case BO_DivAssign:
      case BO_RemAssign:
      case BO_ShlAssign:
      case BO_ShrAssign:
        break;
      case BO_AddAssign:
        AtomicOp = llvm::AtomicRMWInst::Add;
        Op = llvm::Instruction::Add;
        break;
      case BO_SubAssign:
2942 AtomicOp = llvm::AtomicRMWInst::Sub;
        Op = llvm::Instruction::Sub;
        break;
      case BO_AndAssign:
2946 AtomicOp = llvm::AtomicRMWInst::And;
        Op = llvm::Instruction::And;
        break;
      case BO_XorAssign:
2950 AtomicOp = llvm::AtomicRMWInst::Xor;
        Op = llvm::Instruction::Xor;
        break;
      case BO_OrAssign:
2954 AtomicOp = llvm::AtomicRMWInst::Or;
        Op = llvm::Instruction::Or;
        break;
      default:
        llvm_unreachable("Invalid compound assignment type");
      }
2960 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
2961 llvm::Value *Amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                 E->getExprLoc()),
            LHSTy);
2965 Value *OldVal = Builder.CreateAtomicRMW(
2966 AtomicOp, LHSLV.getPointer(CGF), Amt,
2967 llvm::AtomicOrdering::SequentiallyConsistent);
2969 // Since operation is atomic, the result type is guaranteed to be the
2970 // same as the input in LLVM terms.
        Result = Builder.CreateBinOp(Op, OldVal, Amt);
        return LHSLV;
      }
    }
2975 // FIXME: For floating point types, we should be saving and restoring the
2976 // floating point environment in the loop.
2977 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2978 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2979 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2980 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
2981 Builder.CreateBr(opBB);
2982 Builder.SetInsertPoint(opBB);
2983 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
2984 atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  } else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2990 SourceLocation Loc = E->getExprLoc();
  OpInfo.LHS =
      EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
2994 // Expand the binary operator.
2995 Result = (this->*Func)(OpInfo);
2997 // Convert the result back to the LHS type,
2998 // potentially with Implicit Conversion sanitizer check.
2999 Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
                               Loc, ScalarConversionOpts(CGF.SanOpts));

  if (atomicPHI) {
3003 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3004 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3005 auto Pair = CGF.EmitAtomicCompareExchange(
3006 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
3007 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
3008 llvm::Value *success = Pair.second;
3009 atomicPHI->addIncoming(old, curBlock);
3010 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
  }
3015 // Store the result value into the LHS lvalue. Bit-fields are handled
3016 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField())
3020 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
  else
    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
3024 if (CGF.getLangOpts().OpenMP)
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
                                                                  E->getLHS());
  return LHSLV;
}
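// Illustrative example (not from the original source): for
//   _Atomic int a; ... a += 2;
// the fast path above emits a single read-modify-write, roughly
//   %old = atomicrmw add i32* %a, i32 2 seq_cst
// while operators with no atomicrmw equivalent (<<=, /=, %=, ...) take the
// cmpxchg loop built around 'atomicPHI'.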
3030 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3031 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3032 bool Ignore = TestAndClearIgnoreResultAssign();
3033 Value *RHS = nullptr;
3034 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;
3040 // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;
3044 // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;
3048 // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
3052 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3053 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3054 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
3056 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
3057 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
                                    SanitizerKind::IntegerDivideByZero));
  }
3061 const auto *BO = cast<BinaryOperator>(Ops.E);
3062 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
3063 Ops.Ty->hasSignedIntegerRepresentation() &&
3064 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
3065 Ops.mayHaveIntegerOverflow()) {
3066 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
3068 llvm::Value *IntMin =
3069 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
3070 llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
3072 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
3073 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
3074 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
    Checks.push_back(
        std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
  }

  if (!Checks.empty())
    EmitBinOpCheck(Checks, Ops);
}
3083 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
3086 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3087 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3088 Ops.Ty->isIntegerType() &&
3089 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3090 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3091 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3092 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3093 Ops.Ty->isRealFloatingType() &&
3094 Ops.mayHaveFloatDivisionByZero()) {
3095 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3096 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
      EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
                     Ops);
    }
  }
3102 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3103 llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3104 if (CGF.getLangOpts().OpenCL &&
3105 !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
3106 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
3107 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
3108 // build option allows an application to specify that single precision
3109 // floating-point divide (x/y and 1/x) and sqrt used in the program
3110 // source are correctly rounded.
      llvm::Type *ValTy = Val->getType();
      if (ValTy->isFloatTy() ||
          (isa<llvm::VectorType>(ValTy) &&
           cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
        CGF.SetFPAccuracy(Val, 2.5);
    }
    return Val;
  }
  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}

Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
       CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
      Ops.Ty->isIntegerType() &&
      (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
    EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
  else
    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}

Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
    &CGF.getLangOpts().OverflowHandler;
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
                                    : SanitizerKind::UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow));
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
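
  // A matching handler (a hypothetical user-provided function, named via
  // -ftrapv-handler=NAME) would have this C prototype; it receives the
  // widened operands plus the encoded opcode and bit width, and its return
  // value is truncated back to the operand width and used as the result:
  //   long long NAME(long long lhs, long long rhs,
  //                  char opcode, char bitwidth);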

  // Call the handler with the two arguments, the operation, and the size of
  // the value.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
    CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);
  return phi;
}

/// Emit pointer + index arithmetic.
static Value *emitPointerArithmetic(CodeGenFunction &CGF,
                                    const BinOpInfo &op,
                                    bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGF.CGM.getDataLayout();
  auto PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm. This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
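  // A sketch of the idiom in (hypothetical) source form, and the lowering we
  // choose for it:
  //
  //   char *p = (char *)0 + (uintptr_t)q;   // "nullptr + N"
  //
  // becomes a plain cast rather than a GEP off a null base:
  //
  //   %p = inttoptr i64 %N to i8*
  //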
  if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
                                                       op.Opcode,
                                                       expr->getLHS(),
                                                       expr->getRHS()))
    return CGF.Builder.CreateIntToPtr(index, pointer->getType());

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the index according to
    // whether it is signed or not.
    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                      "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = CGF.Builder.CreateNeg(index, "idx.neg");

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
                        /*Accessed*/ false);

  const PointerType *pointerType
    = pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    QualType objectType = pointerOperand->getType()
                                        ->castAs<ObjCObjectPointerType>()
                                        ->getPointeeType();
    llvm::Value *objectSize
      = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));

    index = CGF.Builder.CreateMul(index, objectSize);

    Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
    result = CGF.Builder.CreateGEP(result, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
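    //
    // For example (hypothetical source): given 'int (*p)[n]', 'p + i' must
    // advance by i * n * sizeof(int) bytes, so the explicit multiply below
    // computes i * n (with NSW when signed overflow is undefined) and the
    // remaining sizeof(int) scaling comes from the GEP's element type.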
    if (CGF.getLangOpts().isSignedOverflowDefined()) {
      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
    } else {
      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer =
          CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
                                     op.E->getExprLoc(), "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future-proof.
  if (elementType->isVoidType() || elementType->isFunctionType()) {
    Value *result = CGF.EmitCastToVoidPtr(pointer);
    result = CGF.Builder.CreateGEP(result, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  if (CGF.getLangOpts().isSignedOverflowDefined())
    return CGF.Builder.CreateGEP(pointer, index, "add.ptr");

  return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
                                    op.E->getExprLoc(), "add.ptr");
}

// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
// Addend. Use negMul and negAdd to negate the first operand of the Mul or
// the add operand respectively. This allows fmuladd to represent a*b-c, or
// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
// efficient operations.
static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
                           bool negMul, bool negAdd) {
  assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");

  Value *MulOp0 = MulOp->getOperand(0);
  Value *MulOp1 = MulOp->getOperand(1);
  if (negMul)
    MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
  if (negAdd)
    Addend = Builder.CreateFNeg(Addend, "neg");

  Value *FMulAdd = Builder.CreateCall(
      CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
      {MulOp0, MulOp1, Addend});
  MulOp->eraseFromParent();

  return FMulAdd;
}

// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
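//
// For example (a hypothetical source-level sketch, compiled with
// -ffp-contract=on):
//   float f(float a, float b, float c) { return a * b + c; }
// emits a single fused call instead of separate fmul/fadd instructions:
//   %0 = call float @llvm.fmuladd.f32(float %a, float %b, float %c)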
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub = false) {
  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        LHSBinOp->use_empty())
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        RHSBinOp->use_empty())
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
  }

  return nullptr;
}

Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      return Builder.CreateAdd(op.LHS, op.RHS, "add");
    case LangOptions::SOB_Undefined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      LLVM_FALLTHROUGH;
    case LangOptions::SOB_Trapping:
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;

    Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
    return propagateFMFlags(V, op);
  }

  if (op.isFixedPointBinOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}

/// The resulting value must be calculated with exact precision, so the operands
/// may not be the same type.
Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
  using llvm::APSInt;
  using llvm::ConstantInt;

  const auto *BinOp = cast<BinaryOperator>(op.E);

  // The result is a fixed point type and at least one of the operands is fixed
  // point while the other is either fixed point or an int. This resulting type
  // should be determined by Sema::handleFixedPointConversions().
  QualType ResultTy = op.Ty;
  QualType LHSTy = BinOp->getLHS()->getType();
  QualType RHSTy = BinOp->getRHS()->getType();
  ASTContext &Ctx = CGF.getContext();
  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
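
  // Worked example (assuming typical semantics where 'short _Accum' is s8.7
  // and '_Accum' is s16.15): the common semantics keep the larger integral
  // and fractional widths, so the s8.7 operand is sign-extended and shifted
  // left by 15 - 7 = 8 fractional bits before the operation, and the sum
  // lands directly in the result's scale.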

  // Convert the operands to the full precision type.
  Value *FullLHS = EmitFixedPointConversion(LHS, LHSFixedSema, CommonFixedSema,
                                            BinOp->getExprLoc());
  Value *FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema,
                                            BinOp->getExprLoc());

  // Perform the actual operation.
  Value *Result;
  switch (BinOp->getOpcode()) {
  case BO_Add: {
    if (ResultFixedSema.isSaturated()) {
      llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
                                    ? llvm::Intrinsic::sadd_sat
                                    : llvm::Intrinsic::uadd_sat;
      Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
    } else {
      Result = Builder.CreateAdd(FullLHS, FullRHS);
    }
    break;
  }
  case BO_Sub: {
    if (ResultFixedSema.isSaturated()) {
      llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
                                    ? llvm::Intrinsic::ssub_sat
                                    : llvm::Intrinsic::usub_sat;
      Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
    } else {
      Result = Builder.CreateSub(FullLHS, FullRHS);
    }
    break;
  }
  case BO_LT:
    return CommonFixedSema.isSigned() ? Builder.CreateICmpSLT(FullLHS, FullRHS)
                                      : Builder.CreateICmpULT(FullLHS, FullRHS);
  case BO_GT:
    return CommonFixedSema.isSigned() ? Builder.CreateICmpSGT(FullLHS, FullRHS)
                                      : Builder.CreateICmpUGT(FullLHS, FullRHS);
  case BO_LE:
    return CommonFixedSema.isSigned() ? Builder.CreateICmpSLE(FullLHS, FullRHS)
                                      : Builder.CreateICmpULE(FullLHS, FullRHS);
  case BO_GE:
    return CommonFixedSema.isSigned() ? Builder.CreateICmpSGE(FullLHS, FullRHS)
                                      : Builder.CreateICmpUGE(FullLHS, FullRHS);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types are
    // zero'd out. They could be overwritten through non-saturating operations
    // that cause overflow, but this leads to undefined behavior.
    return Builder.CreateICmpEQ(FullLHS, FullRHS);
  case BO_NE:
    return Builder.CreateICmpNE(FullLHS, FullRHS);
  case BO_Mul:
  case BO_Div:
  case BO_Shl:
  case BO_Shr:
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
  case BO_MulAssign:
  case BO_DivAssign:
  case BO_AddAssign:
  case BO_SubAssign:
  case BO_ShlAssign:
  case BO_ShrAssign:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_Comma:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_RemAssign:
  case BO_Assign:
    llvm_unreachable("Found unsupported binary operation for fixed point types.");
  }

  // Convert to the result type.
  return EmitFixedPointConversion(Result, CommonFixedSema, ResultFixedSema,
                                  BinOp->getExprLoc());
}

Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateSub(op.LHS, op.RHS, "sub");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), op))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        return EmitOverflowCheckedBinOp(op);
      }
    }

    if (op.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), op))
      return EmitOverflowCheckedBinOp(op);

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
      Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
      return propagateFMFlags(V, op);
    }

    if (op.isFixedPointBinOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  llvm::Value *LHS
    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

  // For everything else, we can just compute it, safe in the
  // assumption that Sema won't let anything through that we can't
  // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
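  //
  // For example (hypothetical source): with 'double *p, *q', 'p - q' emits
  //   %diff = sub i64 %p.int, %q.int
  //   %res  = sdiv exact i64 %diff, 8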
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}

Value *ScalarExprEmitter::GetWidthMinusOneValue(Value *LHS, Value *RHS) {
  llvm::IntegerType *Ty;
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());
  return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
}

Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                      Ops.Ty->hasSignedIntegerRepresentation() &&
                      !CGF.getLangOpts().isSignedOverflowDefined() &&
                      !CGF.getLangOpts().CPlusPlus2a;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
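  //
  // For example (hypothetical OpenCL source): with a 32-bit int x, 'x << 35'
  // is emitted as 'shl i32 %x, (35 & 31)', i.e. it behaves as 'x << 3'.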
  if (CGF.getLangOpts().OpenCL)
    RHS =
        Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
    llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetWidthMinusOneValue(Ops.LHS, RHS);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
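        //
        // Worked example for 32-bit int: '1 << 31' moves a 1 into the sign
        // bit, which C99 flags but C++11 permits, so the extra shift below
        // drops the sign bit from the check; '3 << 31', which loses a bit
        // entirely, is still flagged in both modes.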
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}

Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL)
    RHS =
        Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");
  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Valid =
        Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}

enum IntrinsicType { VCMPEQ, VCMPGT };
// Return the corresponding comparison intrinsic for the given vector element
// type.
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
                                        BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
  default: llvm_unreachable("unexpected element type");
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
  case BuiltinType::UShort:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
  case BuiltinType::Short:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
  case BuiltinType::UInt:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
  case BuiltinType::Int:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
  case BuiltinType::ULong:
  case BuiltinType::ULongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
  case BuiltinType::Long:
  case BuiltinType::LongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
  case BuiltinType::Float:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
  case BuiltinType::Double:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
                            llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
  }
}

Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
                   CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result.
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // Constants for mapping CR6 register bits to the predicate result.
      enum { CR6_EQ = 0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // In several cases the vector arguments' order will be reversed.
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of the intrinsic may not be the same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing; in that case, a wider-than-i1 result would crash later,
      // so truncate it here.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    if (BOInfo.isFixedPointBinOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
          !isa<llvm::ConstantPointerNull>(LHS) &&
          !isa<llvm::ConstantPointerNull>(RHS)) {

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}

Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

  case Qualifiers::OCL_ExplicitNone:
    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

  case Qualifiers::OCL_Weak:
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
    break;

  case Qualifiers::OCL_None:
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit a line number for an unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  // Artificial location to preserve the scope information.
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}

Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}

Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
//                             Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
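///
/// For example (hypothetical source): 'x ? 4 : 5' qualifies, so instead of a
/// branch diamond we can emit
///   %cond = select i1 %x.bool, i32 4, i32 5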
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races that didn't
  // exist in the source-level program.
}

Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      Value *Result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
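  //
  // For example (hypothetical OpenCL source): with 'int4 c' and 'float4 a, b',
  // '(c ? a : b)' picks the lanes of a wherever the sign bit (MSB) of the
  // corresponding element of c is set, matching select(b, a, c).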
  if (CGF.getLangOpts().OpenCL
      && condExpr->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(TestMSB,
                                          llvm::VectorType::get(elemType,
                                                                numElem),
                                          "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);
  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the real part.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);
  return PN;
}

Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}

Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  QualType Ty = VE->getType();

  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty);

  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  llvm::Type *ArgTy = ConvertType(VE->getType());

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "va_arg expression");
    return llvm::UndefValue::get(ArgTy);
  }

  // FIXME: Volatility.
  llvm::Value *Val = Builder.CreateLoad(ArgPtr);

  // If EmitVAArg promoted the type, we must truncate it.
  if (ArgTy != Val->getType()) {
    if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
      Val = Builder.CreateIntToPtr(Val, ArgTy);
    else
      Val = Builder.CreateTrunc(Val, ArgTy);
  }

  return Val;
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}

// Convert a vec3 to vec4, or vice versa.
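//
// For example (a sketch): widening <3 x float> to <4 x float> emits
//   shufflevector <3 x float> %src, <3 x float> undef,
//                 <4 x i32> <i32 0, i32 1, i32 2, i32 undef>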
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
  SmallVector<llvm::Constant*, 4> Args;
  Args.push_back(Builder.getInt32(0));
  Args.push_back(Builder.getInt32(1));
  Args.push_back(Builder.getInt32(2));
  if (NumElementsDst == 4)
    Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
  return Builder.CreateShuffleVector(Src, UnV, Mask);
}

// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalar or vectors of different lengths, and either can be
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
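//
// For example (a sketch of case 3b, assuming 32-bit pointers): converting
// i8* to <2 x i16> takes 'ptrtoint i8* %p to i32' followed by
// 'bitcast i32 %v to <2 x i16>'.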
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}

Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
    cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
  unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
    cast<llvm::VectorType>(DstTy)->getNumElements() : 0;

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);

    if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
      Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                         DstTy);
    }

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
      auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
      Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                         Vec4Ty);
    }

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}

Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
//                         Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type, ignoring
/// the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}

/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}

llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isRValue()) {
    Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress(*this);
  }

  // Cast the address to Class*.
  Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}

LValue CodeGenFunction::EmitCompoundAssignmentLValue(
                                            const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,    \
                                           Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}

struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};

/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns the offset in bytes and a boolean flag indicating whether an
/// overflow happened during evaluation.
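///
/// For example (a sketch, assuming 64-bit pointers): for
/// 'getelementptr inbounds i32, i32* %p, i64 %i' the total offset is 4 * %i,
/// computed via @llvm.smul.with.overflow.i64, and the intrinsic's overflow
/// bit is OR'd into OffsetOverflows.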
4659 static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
4660 llvm::LLVMContext &VMContext,
4662 CGBuilderTy Builder) {
4663 const auto &DL = CGM.getDataLayout();
4665 // The total (signed) byte offset for the GEP.
4666 llvm::Value *TotalOffset = nullptr;
4668 // Was the GEP already reduced to a constant?
4669 if (isa<llvm::Constant>(GEPVal)) {
4670 // Compute the offset by casting both pointers to integers and subtracting:
4671 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
4672 Value *BasePtr_int =
4673 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
4675 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
4676 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
4677 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
4680 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
4681 assert(GEP->getPointerOperand() == BasePtr &&
4682 "BasePtr must be the the base of the GEP.");
4683 assert(GEP->isInBounds() && "Expected inbounds GEP");
4685 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
4687 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
4688 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4689 auto *SAddIntrinsic =
4690 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
4691 auto *SMulIntrinsic =
4692 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
4694 // The offset overflow flag - true if the total offset overflows.
4695 llvm::Value *OffsetOverflows = Builder.getFalse();
4697 /// Return the result of the given binary operation.
4698 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
4699 llvm::Value *RHS) -> llvm::Value * {
4700 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
4702 // If the operands are constants, return a constant result.
4703 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
4704 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
4706 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
4707 /*Signed=*/true, N);
4709 OffsetOverflows = Builder.getTrue();
4710 return llvm::ConstantInt::get(VMContext, N);
4714 // Otherwise, compute the result with checked arithmetic.
4715 auto *ResultAndOverflow = Builder.CreateCall(
4716 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
4717 OffsetOverflows = Builder.CreateOr(
4718 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
4719 return Builder.CreateExtractValue(ResultAndOverflow, 0);
4722 // Determine the total byte offset by looking at each GEP operand.
4723 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
4724 GTI != GTE; ++GTI) {
4725 llvm::Value *LocalOffset;
4726 auto *Index = GTI.getOperand();
4727 // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize = llvm::ConstantInt::get(
          IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }
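    // (E.g., illustrative only, assuming 4-byte int and a typical 64-bit
    // layout: for `int a[10][20]` and `&a[i][j]`, the two array steps
    // contribute i * 80 and j * 4 bytes, while a struct step contributes a
    // constant field offset taken from the struct layout.)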
    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }
  return {TotalOffset, OffsetOverflows};
}
Value *
CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;
  llvm::Type *PtrTy = Ptr->getType();

  // Perform the nullptr-and-offset check unless the null pointer is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
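  // (NullPointerIsDefined is expected to hold, e.g., when the function has
  // the null-pointer-is-valid attribute, as with
  // -fno-delete-null-pointer-checks, or for pointers in non-zero address
  // spaces; in those cases the null check is skipped.)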
  // Check for overflows unless the GEP got constant-folded, and only in the
  // default address space.
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;
  const auto &DL = CGM.getDataLayout();

  SanitizerScope SanScope(this);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  // Common case: if the total offset is zero, and we are using C++ semantics,
  // where nullptr+0 is defined, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
    return GEPVal;
  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
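  // (Note that CreateAdd deliberately emits a plain `add` with no nsw/nuw
  // flags, so the recomputed address wraps instead of producing poison:
  //   %computed = add i64 %base.int, %offset   ; names illustrative only
  // which keeps the value safe to inspect and report.)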
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  if (PerformNullCheck) {
    // In C++, if the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if the base pointer is non-null, the result cannot be a null
    // pointer, so the offset cannot be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    //
    // C, however, is stricter in this regard, and gives more
    // optimization opportunities: in C, additionally, nullptr+0 is undefined.
    // So both the input to the 'gep inbounds' AND the output must not be null.
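    // Schematically, the two predicates built below differ as follows
    // (value names illustrative only):
    //   C++:  %valid = icmp eq i1 %base.nonnull, %result.nonnull
    //   C:    %valid = and i1 %base.nonnull, %result.nonnull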
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid =
        CGM.getLangOpts().CPlusPlus
            ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
            : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
  }
  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    //    pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // GEP is computed as `unsigned base + signed offset`, therefore:
      // * If the offset was positive, then the computed pointer cannot be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If the offset was negative, then the computed pointer cannot be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // GEP is computed as `unsigned base + unsigned offset`, therefore the
      // computed pointer cannot be [unsigned] less than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // GEP is computed as `unsigned base - unsigned offset`, therefore the
      // computed pointer cannot be [unsigned] greater than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
  }
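  // (For instance, illustrative only: with -fsanitize=pointer-overflow, an
  // expression like `p + n` whose byte offset wraps the address space
  // computes an address [unsigned] below `p` despite a non-negative offset,
  // so ValidGEP is false and the check below fires at runtime.)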
  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
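  // (An out-of-bounds `gep inbounds` result is poison, so the raw GEPVal is
  // not safe to hand to the diagnostic handler; the integer recomputation
  // above is always well-defined.)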
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}