//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;
using llvm::Value;
//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    if (Signed)
      Result = LHSAP.sadd_ov(RHSAP, Overflow);
    else
      Result = LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    if (Signed)
      Result = LHSAP.ssub_ov(RHSAP, Overflow);
    else
      Result = LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    if (Signed)
      Result = LHSAP.smul_ov(RHSAP, Overflow);
    else
      Result = LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}
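
// Illustrative example (an editorial sketch, not part of the upstream
// source): for an 8-bit signed type, constant-folding 100 + 100 sets
// Overflow via sadd_ov because 200 > INT8_MAX, while 100 + 100 on an 8-bit
// unsigned type fits in 255 and does not. Signed division only overflows for
// INT_MIN / -1, which is why BO_Div/BO_Rem use sdiv_ov above and the
// unsigned divrem case can simply return false.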
struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;  // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E;      // Entire expr, for error reporting. May not be a binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }
};
static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++11 nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}
/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                        const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return llvm::None;

  QualType BaseTy = Base->getType();
  if (!BaseTy->isPromotableIntegerType() ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return llvm::None;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).hasValue();
}
/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types are less than half the size of the promoted type.
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}
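
// Illustrative example (an editorial sketch, not part of the upstream
// source): with a 32-bit int, 'unsigned char a, b; ... a * b' promotes both
// operands to int; 2 * 8 < 32, so the product always fits and the overflow
// check is elided. For 'unsigned short' operands 2 * 16 == 32 (and
// 65535 * 65535 > INT_MAX), so the multiplication can overflow the promoted
// signed type and the check is kept.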
/// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
static void updateFastMathFlags(llvm::FastMathFlags &FMF,
                                FPOptions FPFeatures) {
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
}

/// Propagate fast-math flags from \p Op to the instruction in \p V.
static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
  if (auto *I = dyn_cast<llvm::Instruction>(V)) {
    llvm::FastMathFlags FMF = I->getFastMathFlags();
    updateFastMathFlags(FMF, Op.FPFeatures);
    I->setFastMathFlags(FMF);
  }
  return V;
}
class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }
  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }
  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
            dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here.
        if (isa<ParmVarDecl>(VD))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy =
          dyn_cast<TypedefType>(E->getType()))
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.EmitAlignmentAssumption(V, AlignmentCI->getZExtValue());
  }
  /// EmitLoadOfLValue - Given an expression with scalar type that represents
  /// an l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }
  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion to or from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false) {}
  };
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());
  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);
  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }
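
  // Illustrative example (an editorial sketch, not part of the upstream
  // source): for 'int b = (x < y); if (b) ...', the i1 compare result is
  // zero-extended to i32 for 'b' and then tested again; the fold above reuses
  // the original i1 instead of emitting 'icmp ne i32 %zext, 0'.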
  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }
  Value *VisitStmt(Stmt *S) {
    S->dump(CGF.getContext().getSourceManager());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }
  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  Value *emitConstant(const CodeGenFunction::ConstantEmission &Constant,
                      Expr *E) {
    assert(Constant && "not a constant");
    if (Constant.isReference())
      return EmitLoadOfLValue(Constant.getReferenceLValue(CGF, E),
                              E->getExprLoc());
    return Constant.getValue();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return emitConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
    llvm::Value *Args[] = {
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, Version.getMajor()),
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, Min ? *Min : 0),
        llvm::ConstantInt::get(CGF.CGM.Int32Ty, SMin ? *SMin : 0),
    };

    return CGF.EmitBuiltinAvailable(Args);
  }
  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);

  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer();
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, though, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }
  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
      return propagateFMFlags(V, Ops);
    }
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
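
  // Illustrative mapping (an editorial sketch, not part of the upstream
  // source): for 'int a, b; ... a * b', -fwrapv (SOB_Defined) emits a plain
  // 'mul', the default (SOB_Undefined) emits 'mul nsw', and -ftrapv
  // (SOB_Trapping) or -fsanitize=signed-integer-overflow routes through
  // EmitOverflowCheckedBinOp unless CanElideOverflowCheck proves the check
  // redundant.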
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetWidthMinusOneValue(Value *LHS, Value *RHS);
  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                    \
  Value *VisitBin ## OP(const BinaryOperator *E) {                         \
    return Emit ## OP(EmitBinOps(E));                                      \
  }                                                                        \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP
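
  // Expansion sketch (editorial, not part of the upstream source):
  // HANDLEBINOP(Mul) declares VisitBinMul, which forwards 'a * b' to
  // EmitMul(EmitBinOps(E)), and VisitBinMulAssign, which forwards 'a *= b'
  // to EmitCompoundAssign(E, &ScalarExprEmitter::EmitMul).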
  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc);
#define VISITCOMP(CODE, UI, SI, FP) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
#undef VISITCOMP
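
  // Note (editorial, not part of the upstream source): equality uses the
  // ordered predicate FCMP_OEQ while inequality uses the unordered FCMP_UNE,
  // so under IEEE semantics 'x == x' is false and 'x != x' is true when x is
  // NaN.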
  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
};
} // end anonymous namespace.
//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}
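
// Illustrative IR (an editorial sketch, not part of the upstream source):
// 'if (p)' for 'int *p' lowers to 'icmp ne i32* %p, null', and 'if (f)' for
// 'float f' lowers to 'fcmp une float %f, 0.000000e+00' via
// EmitFloatToBoolConversion above.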
void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Type *SrcTy = Src->getType();

  llvm::Value *Check = nullptr;
  if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) {
    // Integer to floating-point. This can fail for unsigned short -> __half
    // or unsigned __int128 -> float.
    assert(DstType->isFloatingType());
    bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType();

    APFloat LargestFloat =
      APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType));
    APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned);

    bool IsExact;
    if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero,
                                      &IsExact) != APFloat::opOK)
      // The range of representable values of this floating point type includes
      // all values of this integer type. Don't need an overflow check.
      return;

    llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt);
    if (SrcIsUnsigned)
      Check = Builder.CreateICmpULE(Src, Max);
    else {
      llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt);
      llvm::Value *GE = Builder.CreateICmpSGE(Src, Min);
      llvm::Value *LE = Builder.CreateICmpSLE(Src, Max);
      Check = Builder.CreateAnd(GE, LE);
    }
  } else {
    const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);
    if (isa<llvm::IntegerType>(DstTy)) {
      // Floating-point to integer. This has undefined behavior if the source is
      // +-Inf, NaN, or doesn't fit into the destination type (after truncation
      // towards zero).
      unsigned Width = CGF.getContext().getIntWidth(DstType);
      bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

      APSInt Min = APSInt::getMinValue(Width, Unsigned);
      APFloat MinSrc(SrcSema, APFloat::uninitialized);
      if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
          APFloat::opOverflow)
        // Don't need an overflow check for lower bound. Just check for
        // -Inf/NaN.
        MinSrc = APFloat::getInf(SrcSema, true);
      else
        // Find the largest value which is too small to represent (before
        // truncation toward zero).
        MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

      APSInt Max = APSInt::getMaxValue(Width, Unsigned);
      APFloat MaxSrc(SrcSema, APFloat::uninitialized);
      if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
          APFloat::opOverflow)
        // Don't need an overflow check for upper bound. Just check for
        // +Inf/NaN.
        MaxSrc = APFloat::getInf(SrcSema, false);
      else
        // Find the smallest value which is too large to represent (before
        // truncation toward zero).
        MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

      // If we're converting from __half, convert the range to float to match
      // the type of src.
      if (OrigSrcType->isHalfType()) {
        const llvm::fltSemantics &Sema =
          CGF.getContext().getFloatTypeSemantics(SrcType);
        bool IsInexact;
        MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
        MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
      }

      llvm::Value *GE =
        Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
      llvm::Value *LE =
        Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
      Check = Builder.CreateAnd(GE, LE);
    } else {
      // FIXME: Maybe split this sanitizer out from float-cast-overflow.
      //
      // Floating-point to floating-point. This has undefined behavior if the
      // source is not in the range of representable values of the destination
      // type. The C and C++ standards are spectacularly unclear here. We
      // diagnose finite out-of-range conversions, but allow infinities and NaNs
      // to convert to the corresponding value in the smaller type.
      //
      // C11 Annex F gives all such conversions defined behavior for IEC 60559
      // conforming implementations. Unfortunately, LLVM's fptrunc instruction
      // does not.

      // Converting from a lower rank to a higher rank can never have
      // undefined behavior, since higher-rank types must have a superset
      // of values of lower-rank types.
      if (CGF.getContext().getFloatingTypeOrder(OrigSrcType, DstType) != 1)
        return;

      assert(!OrigSrcType->isHalfType() &&
             "should not check conversion from __half, it has the lowest rank");

      const llvm::fltSemantics &DstSema =
        CGF.getContext().getFloatTypeSemantics(DstType);
      APFloat MinBad = APFloat::getLargest(DstSema, false);
      APFloat MaxBad = APFloat::getInf(DstSema, false);

      bool IsInexact;
      MinBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
      MaxBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);

      Value *AbsSrc = CGF.EmitNounwindRuntimeCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Src->getType()), Src);
      llvm::Value *GE =
        Builder.CreateFCmpOGT(AbsSrc, llvm::ConstantFP::get(VMContext, MinBad));
      llvm::Value *LE =
        Builder.CreateFCmpOLT(AbsSrc, llvm::ConstantFP::get(VMContext, MaxBad));
      Check = Builder.CreateNot(Builder.CreateAnd(GE, LE));
    }
  }

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}
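
// Illustrative (an editorial sketch, not part of the upstream source): for a
// float -> i32 conversion the emitted guard is roughly
// 'MinSrc < %src && %src < MaxSrc' using ordered compares, so +/-Inf, NaN,
// and out-of-range finite values all fail the check and branch to the
// float-cast-overflow sanitizer handler.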
void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerTruncation))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
    return;

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "clang integer type lowered to non-integer llvm type");

  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  llvm::Value *Check = nullptr;

  // 1. Extend the truncated value back to the same width as the Src.
  bool InputSigned = DstType->isSignedIntegerOrEnumerationType();
  Check = Builder.CreateIntCast(Dst, SrcTy, InputSigned, "anyext");
  // 2. Equality-compare with the original source value.
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), ICCK_IntegerTruncation)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::ImplicitIntegerTruncation),
                SanitizerHandler::ImplicitConversion, StaticArgs, {Src, Dst});
}
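
// Worked example (editorial, not part of the upstream source): truncating the
// i32 value 0x1FF to an unsigned i8 yields 0xFF; extending 0xFF back to i32
// gives 0xFF != 0x1FF, so 'truncheck' is false and the implicit-conversion
// handler fires. A value like 0x7F round-trips unchanged and passes.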
/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or FPExt,
      // depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }
  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value *IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be a ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }
  // A scalar can be splatted to an extended vector of the same element type.
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type is
    // the same as the vector's element type (sans qualifiers).
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements.
    unsigned NumElements = DstTy->getVectorNumElements();
    return Builder.CreateVectorSplat(NumElements, Src, "splat");
  }

  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
    unsigned DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(Src, DstTy, "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = SrcTy->getVectorElementType();
    llvm::Type *DstElementTy = DstTy->getVectorElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }
  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type.
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
                             Loc);

  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Use the intrinsic if the half type itself isn't supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
      // If the half type is supported, just use an fptrunc.
      return Builder.CreateFPTrunc(Src, DstTy);
    }
    DstTy = CGF.FloatTy;
  }

  if (isa<llvm::IntegerType>(SrcTy)) {
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    if (SrcType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }
    if (isa<llvm::IntegerType>(DstTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstTy)) {
    assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
           "Unknown real conversion");
    if (DstTy->getTypeID() < SrcTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  if (DstTy != ResTy) {
    if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
      assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
      Res = Builder.CreateCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
          Res);
    } else {
      Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
    }
  }

  if (Opts.EmitImplicitIntegerTruncationChecks)
    EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  return Res;
}
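
// Selection summary (editorial, not part of the upstream source):
//   int   -> float : sitofp or uitofp, chosen by the *source* signedness
//   float -> int   : fptosi or fptoui, chosen by the *destination* signedness
//   int   -> int   : CreateIntCast, extending or truncating per source type
//   fp    -> fp    : fptrunc or fpext, chosen by comparing the type IDs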
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
    CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
    SourceLocation Loc) {
  // Get the source element type.
  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    // Complex != 0 -> (Real != 0) | (Imag != 0)
    Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type."
  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
}
Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
}
/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1),
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  SanitizerHandler Check;
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;

  BinaryOperatorKind Opcode = Info.Opcode;
  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
    Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);

  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      switch (Opcode) {
      case BO_Add: Check = SanitizerHandler::AddOverflow; break;
      case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
      case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
      default: llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    }
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
}
//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

Value *ScalarExprEmitter::VisitExpr(Expr *E) {
  CGF.ErrorUnsupported(E, "scalar expression");
  if (E->getType()->isVoidType())
    return nullptr;
  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
}
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  // Vector Mask Case
  if (E->getNumSubExprs() == 2) {
    Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
    Value *Mask;

    llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
    unsigned LHSElts = LTy->getNumElements();

    Mask = RHS;

    llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());

    // Mask off the high bits of each shuffle index.
    Value *MaskBits =
        llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
    Mask = Builder.CreateAnd(Mask, MaskBits, "mask");

    // newv = undef
    // mask = mask & maskbits
    // for each elt
    //   n = extract mask i
    //   x = extract val n
    //   newv = insert newv, x, i
    llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
                                                  MTy->getNumElements());
    Value* NewV = llvm::UndefValue::get(RTy);
    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
      Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
      Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");

      Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
      NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
    }
    return NewV;
  }

  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));

  SmallVector<llvm::Constant*, 32> indices;
  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
    llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
    // Check for -1 and output it as undef in the IR.
    if (Idx.isSigned() && Idx.isAllOnesValue())
      indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
    else
      indices.push_back(Builder.getInt32(Idx.getZExtValue()));
  }

  Value *SV = llvm::ConstantVector::get(indices);
  return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}
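
// Note (editorial, not part of the upstream source): the two-argument form of
// __builtin_shufflevector takes a runtime mask vector and lowers to the
// extract/insert loop above, while the N-argument form has constant indices
// and lowers to a single 'shufflevector', with -1 indices becoming undef.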
Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
  QualType SrcType = E->getSrcExpr()->getType(),
           DstType = E->getType();

  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  assert(SrcType->isVectorType() &&
         "ConvertVector source type must be a vector");
  assert(DstType->isVectorType() &&
         "ConvertVector destination type must be a vector");

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = ConvertType(DstType);

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;

  QualType SrcEltType = SrcType->getAs<VectorType>()->getElementType(),
           DstEltType = DstType->getAs<VectorType>()->getElementType();

  assert(SrcTy->isVectorTy() &&
         "ConvertVector source IR type must be a vector");
  assert(DstTy->isVectorTy() &&
         "ConvertVector destination IR type must be a vector");

  llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
             *DstEltTy = DstTy->getVectorElementType();

  if (DstEltType->isBooleanType()) {
    assert((SrcEltTy->isFloatingPointTy() ||
            isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");

    llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
    if (SrcEltTy->isFloatingPointTy()) {
      return Builder.CreateFCmpUNE(Src, Zero, "tobool");
    } else {
      return Builder.CreateICmpNE(Src, Zero, "tobool");
    }
  }

  // We have the arithmetic types: real int/float.
  Value *Res = nullptr;

  if (isa<llvm::IntegerType>(SrcEltTy)) {
    bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(DstEltTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstEltTy)) {
    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstEltType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
           "Unknown real conversion");
    if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  return Res;
}
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
    CGF.EmitIgnoredExpr(E->getBase());
    return emitConstant(Constant, E);
  } else {
    llvm::APSInt Value;
    if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
      CGF.EmitIgnoredExpr(E->getBase());
      return Builder.getInt(Value);
    }
  }

  return EmitLoadOfLValue(E);
}
Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue contexts. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType())
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}
static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
                                  unsigned Off, llvm::Type *I32Ty) {
  int MV = SVI->getMaskValue(Idx);
  if (MV == -1)
    return llvm::UndefValue::get(I32Ty);
  return llvm::ConstantInt::get(I32Ty, Off+MV);
}

static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
  if (C->getBitWidth() != 32) {
    assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
           "Index operand too large for shufflevector mask!");
    return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
  }
  return C;
}
Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  assert(Ignore == false && "init list ignored");
  unsigned NumInitElements = E->getNumInits();

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::VectorType *VType =
    dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  }

  unsigned ResElts = VType->getNumElements();

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;
  bool VIsUndefShuffle = false;
  llvm::Value *V = llvm::UndefValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    SmallVector<llvm::Constant*, 16> Args;

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements. If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (EI->getVectorOperandType()->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into undef -> shuffle (src, undef)
            // shufflemask must use an i32
            Args.push_back(getAsInt32(C, CGF.Int32Ty));
            Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsUndefShuffle = true;
          } else if (VIsUndefShuffle) {
            // insert into undefshuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
            Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
            Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsUndefShuffle = false;
          }
          if (!Args.empty()) {
            llvm::Constant *Mask = llvm::ConstantVector::get(Args);
            V = Builder.CreateShuffleVector(LHS, RHS, Mask);
            ++CurIdx;
            continue;
          }
        }
      }
      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsUndefShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = VVT->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with undef, merge
          // this shuffle directly into it.
          if (VIsUndefShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
                                      CGF.Int32Ty));
          } else {
            Args.push_back(Builder.getInt32(j));
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
        Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

        if (VIsUndefShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(Builder.getInt32(j));
      Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
      llvm::Constant *Mask = llvm::ConstantVector::get(Args);
      Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
                                         Mask, "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(Builder.getInt32(j));
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(Builder.getInt32(j+Offset));
      Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
    }

    // If V is undef, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(V, Init);
    llvm::Constant *Mask = llvm::ConstantVector::get(Args);
    V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
    VIsUndefShuffle = isa<llvm::UndefValue>(Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers.
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}
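
// Illustrative (an editorial sketch, not part of the upstream source): with
// 'typedef float float4 __attribute__((ext_vector_type(4)));', an initializer
// such as 'float4 v = {a.xy, b.zw};' folds the swizzle shuffles into the
// vector-build shuffles, yielding a couple of 'shufflevector' instructions
// rather than a chain of extract/insert pairs.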
bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
  const Expr *E = CE->getSubExpr();

  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->getValueKind() != VK_RValue)
      return false;
  }

  return true;
}
1638 // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1639 // have to handle a broader range of conversions than explicit casts, as they
1640 // handle things like function to pointer-to-function decay etc.
1641 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1642 Expr *E = CE->getSubExpr();
1643 QualType DestTy = CE->getType();
1644 CastKind Kind = CE->getCastKind();
1646 // These cases are generally not written to ignore the result of
1647 // evaluating their sub-expressions, so we clear this now.
1648 bool Ignored = TestAndClearIgnoreResultAssign();
1650 // Since almost all cast kinds apply to scalars, this switch doesn't have
1651 // a default case, so the compiler will warn on a missing case. The cases
1652 // are in the same order as in the CastKind enum.
1653 switch (Kind) {
1654 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
1655 case CK_BuiltinFnToFnPtr:
1656 llvm_unreachable("builtin functions are handled elsewhere");
1658 case CK_LValueBitCast:
1659 case CK_ObjCObjectLValueCast: {
1660 Address Addr = EmitLValue(E).getAddress();
1661 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1662 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
1663 return EmitLoadOfLValue(LV, CE->getExprLoc());
1666 case CK_CPointerToObjCPointerCast:
1667 case CK_BlockPointerToObjCPointerCast:
1668 case CK_AnyPointerToBlockPointerCast:
1669 case CK_BitCast: {
1670 Value *Src = Visit(const_cast<Expr*>(E));
1671 llvm::Type *SrcTy = Src->getType();
1672 llvm::Type *DstTy = ConvertType(DestTy);
1673 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
1674 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
1675 llvm_unreachable("wrong cast for pointers in different address spaces"
1676 " (must be an address space cast)!");
1679 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
1680 if (auto PT = DestTy->getAs<PointerType>())
1681 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
1683 CodeGenFunction::CFITCK_UnrelatedCast,
1687 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
1688 const QualType SrcType = E->getType();
1690 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
1691 // Casting to pointer that could carry dynamic information (provided by
1692 // invariant.group) requires launder.
1693 Src = Builder.CreateLaunderInvariantGroup(Src);
1694 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
1695 // Casting to pointer that does not carry dynamic information (provided
1696 // by invariant.group) requires stripping it. Note that we don't do it
1697 // if the source could not be dynamic type and destination could be
1698 // dynamic because dynamic information is already laundered. It is
1699 // because launder(strip(src)) == launder(src), so there is no need to
1700 // add extra strip before launder.
1701 Src = Builder.CreateStripInvariantGroup(Src);
1705 return Builder.CreateBitCast(Src, DstTy);
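// Illustration (a sketch, assuming -fstrict-vtable-pointers; not part of
// the original file):
//   struct Dyn { virtual void f(); };
//   void *p = ...;
//   Dyn  *d = (Dyn *)p;   // may gain dynamic info: launder.invariant.group
//   void *q = (void *)d;  // loses dynamic info:    strip.invariant.group
// Both intrinsics are no-ops on the pointer value; they only fence
// invariant.group metadata so vtable-load optimizations stay sound.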
1707 case CK_AddressSpaceConversion: {
1708 Expr::EvalResult Result;
1709 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
1710 Result.Val.isNullPointer()) {
1711 // If E has side effects, it is emitted even if its final result is a
1712 // null pointer. In that case, a DCE pass should be able to
1713 // eliminate the useless instructions emitted while translating E.
1714 if (Result.HasSideEffects)
1715 CGF.EmitIgnoredExpr(E);
1716 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
1717 ConvertType(DestTy)), DestTy);
1719 // Since the target may map different address spaces in the AST to the same
1720 // address space, an address space conversion may end up as a bitcast.
1721 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
1722 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
1723 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
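// Illustration (an OpenCL-flavoured sketch; not part of the original
// file): for
//   __global int *g = 0;
// the constant-null source folds to that address space's null value,
// while a non-constant conversion such as
//   __generic int *p = g;
// goes through the target hook and typically lowers to an addrspacecast
// (or a plain bitcast when both map to the same target address space).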
1725 case CK_AtomicToNonAtomic:
1726 case CK_NonAtomicToAtomic:
1728 case CK_UserDefinedConversion:
1729 return Visit(const_cast<Expr*>(E));
1731 case CK_BaseToDerived: {
1732 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
1733 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
1735 Address Base = CGF.EmitPointerWithAlignment(E);
1736 Address Derived =
1737 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
1738 CE->path_begin(), CE->path_end(),
1739 CGF.ShouldNullCheckClassCastValue(CE));
1741 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
1742 // performed and the object is not of the derived type.
1743 if (CGF.sanitizePerformTypeCheck())
1744 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
1745 Derived.getPointer(), DestTy->getPointeeType());
1747 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
1748 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(),
1749 Derived.getPointer(),
1751 CodeGenFunction::CFITCK_DerivedCast,
1754 return Derived.getPointer();
1756 case CK_UncheckedDerivedToBase:
1757 case CK_DerivedToBase: {
1758 // The EmitPointerWithAlignment path does this fine; just discard
1759 // the alignment.
1760 return CGF.EmitPointerWithAlignment(CE).getPointer();
1761 }
1763 case CK_Dynamic: {
1764 Address V = CGF.EmitPointerWithAlignment(E);
1765 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
1766 return CGF.EmitDynamicCast(V, DCE);
1769 case CK_ArrayToPointerDecay:
1770 return CGF.EmitArrayToPointerDecay(E).getPointer();
1771 case CK_FunctionToPointerDecay:
1772 return EmitLValue(E).getPointer();
1774 case CK_NullToPointer:
1775 if (MustVisitNullValue(E))
1776 CGF.EmitIgnoredExpr(E);
1778 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
1781 case CK_NullToMemberPointer: {
1782 if (MustVisitNullValue(E))
1783 CGF.EmitIgnoredExpr(E);
1785 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
1786 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
1789 case CK_ReinterpretMemberPointer:
1790 case CK_BaseToDerivedMemberPointer:
1791 case CK_DerivedToBaseMemberPointer: {
1792 Value *Src = Visit(E);
1794 // Note that the AST doesn't distinguish between checked and
1795 // unchecked member pointer conversions, so we always have to
1796 // implement checked conversions here. This is inefficient when
1797 // actual control flow may be required in order to perform the
1798 // check, which it is for data member pointers (but not member
1799 // function pointers on Itanium and ARM).
1800 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
1803 case CK_ARCProduceObject:
1804 return CGF.EmitARCRetainScalarExpr(E);
1805 case CK_ARCConsumeObject:
1806 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
1807 case CK_ARCReclaimReturnedObject:
1808 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
1809 case CK_ARCExtendBlockObject:
1810 return CGF.EmitARCExtendBlockObject(E);
1812 case CK_CopyAndAutoreleaseBlockObject:
1813 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
1815 case CK_FloatingRealToComplex:
1816 case CK_FloatingComplexCast:
1817 case CK_IntegralRealToComplex:
1818 case CK_IntegralComplexCast:
1819 case CK_IntegralComplexToFloatingComplex:
1820 case CK_FloatingComplexToIntegralComplex:
1821 case CK_ConstructorConversion:
1823 llvm_unreachable("scalar cast to non-scalar value");
1825 case CK_LValueToRValue:
1826 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
1827 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
1828 return Visit(const_cast<Expr*>(E));
1830 case CK_IntegralToPointer: {
1831 Value *Src = Visit(const_cast<Expr*>(E));
1833 // First, convert to the correct width so that we control the kind of
1834 // extension.
1835 auto DestLLVMTy = ConvertType(DestTy);
1836 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
1837 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
1838 llvm::Value* IntResult =
1839 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1841 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
1843 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
1844 // Going from integer to pointer that could be dynamic requires reloading
1845 // dynamic information from invariant.group.
1846 if (DestTy.mayBeDynamicClass())
1847 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
1848 }
1849 return IntToPtr;
1850 }
1851 case CK_PointerToIntegral: {
1852 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
1853 auto *PtrExpr = Visit(E);
1855 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
1856 const QualType SrcType = E->getType();
1858 // Casting to integer requires stripping dynamic information as it does
1859 // not carry it.
1860 if (SrcType.mayBeDynamicClass())
1861 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
1864 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
1865 }
1866 case CK_ToVoid: {
1867 CGF.EmitIgnoredExpr(E);
1868 return nullptr;
1869 }
1870 case CK_VectorSplat: {
1871 llvm::Type *DstTy = ConvertType(DestTy);
1872 Value *Elt = Visit(const_cast<Expr*>(E));
1873 // Splat the element across to all elements
1874 unsigned NumElements = DstTy->getVectorNumElements();
1875 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
1878 case CK_IntegralCast: {
1879 ScalarConversionOpts Opts;
1880 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerTruncation)) {
1881 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE))
1882 Opts.EmitImplicitIntegerTruncationChecks = !ICE->isPartOfExplicitCast();
1884 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
1885 CE->getExprLoc(), Opts);
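// Illustration (a sketch; the sanitizer group is spelled
// -fsanitize=implicit-integer-truncation here):
//   int   wide = ...;
//   short a = wide;          // implicit cast: truncation check emitted
//   short b = (short)wide;   // explicit cast: no check (isPartOfExplicitCast)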
1887 case CK_IntegralToFloating:
1888 case CK_FloatingToIntegral:
1889 case CK_FloatingCast:
1890 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
1891 CE->getExprLoc());
1892 case CK_BooleanToSignedIntegral: {
1893 ScalarConversionOpts Opts;
1894 Opts.TreatBooleanAsSigned = true;
1895 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
1896 CE->getExprLoc(), Opts);
1898 case CK_IntegralToBoolean:
1899 return EmitIntToBoolConversion(Visit(E));
1900 case CK_PointerToBoolean:
1901 return EmitPointerToBoolConversion(Visit(E), E->getType());
1902 case CK_FloatingToBoolean:
1903 return EmitFloatToBoolConversion(Visit(E));
1904 case CK_MemberPointerToBoolean: {
1905 llvm::Value *MemPtr = Visit(E);
1906 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
1907 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
1910 case CK_FloatingComplexToReal:
1911 case CK_IntegralComplexToReal:
1912 return CGF.EmitComplexExpr(E, false, true).first;
1914 case CK_FloatingComplexToBoolean:
1915 case CK_IntegralComplexToBoolean: {
1916 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
1918 // TODO: kill this function off, inline appropriate case here
1919 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
1920 CE->getExprLoc());
1923 case CK_ZeroToOCLEvent: {
1924 assert(DestTy->isEventT() && "CK_ZeroToOCLEvent cast on non-event type");
1925 return llvm::Constant::getNullValue(ConvertType(DestTy));
1928 case CK_ZeroToOCLQueue: {
1929 assert(DestTy->isQueueT() && "CK_ZeroToOCLQueue cast on non queue_t type");
1930 return llvm::Constant::getNullValue(ConvertType(DestTy));
1933 case CK_IntToOCLSampler:
1934 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
1938 llvm_unreachable("unknown scalar cast");
1941 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
1942 CodeGenFunction::StmtExprEvaluation eval(CGF);
1943 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
1944 !E->getType()->isVoidType());
1945 if (!RetAlloca.isValid())
1946 return nullptr;
1947 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
1948 E->getExprLoc());
1949 }
1951 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1952 CGF.enterFullExpression(E);
1953 CodeGenFunction::RunCleanupsScope Scope(CGF);
1954 Value *V = Visit(E->getSubExpr());
1955 // Defend against dominance problems caused by jumps out of expression
1956 // evaluation through the shared cleanup block.
1957 Scope.ForceCleanup({&V});
1958 return V;
1959 }
1961 //===----------------------------------------------------------------------===//
1962 // Unary Operators
1963 //===----------------------------------------------------------------------===//
1965 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
1966 llvm::Value *InVal, bool IsInc) {
1967 BinOpInfo BinOp;
1968 BinOp.LHS = InVal;
1969 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
1970 BinOp.Ty = E->getType();
1971 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
1972 // FIXME: once UnaryOperator carries FPFeatures, copy it here.
1973 BinOp.E = E;
1974 return BinOp;
1975 }
1977 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
1978 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
1979 llvm::Value *Amount =
1980 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
1981 StringRef Name = IsInc ? "inc" : "dec";
1982 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
1983 case LangOptions::SOB_Defined:
1984 return Builder.CreateAdd(InVal, Amount, Name);
1985 case LangOptions::SOB_Undefined:
1986 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
1987 return Builder.CreateNSWAdd(InVal, Amount, Name);
1988 LLVM_FALLTHROUGH;
1989 case LangOptions::SOB_Trapping:
1990 if (!E->canOverflow())
1991 return Builder.CreateNSWAdd(InVal, Amount, Name);
1992 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
1994 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
1997 llvm::Value *
1998 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
1999 bool isInc, bool isPre) {
2001 QualType type = E->getSubExpr()->getType();
2002 llvm::PHINode *atomicPHI = nullptr;
2003 llvm::Value *value;
2004 llvm::Value *input;
2006 int amount = (isInc ? 1 : -1);
2007 bool isSubtraction = !isInc;
2009 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2010 type = atomicTy->getValueType();
2011 if (isInc && type->isBooleanType()) {
2012 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2013 if (isPre) {
2014 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
2015 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2016 return Builder.getTrue();
2017 }
2018 // For atomic bool increment, we just store true and return it for
2019 // preincrement, and do an atomic swap with true for postincrement.
2020 return Builder.CreateAtomicRMW(
2021 llvm::AtomicRMWInst::Xchg, LV.getPointer(), True,
2022 llvm::AtomicOrdering::SequentiallyConsistent);
2023 }
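// Illustration (a C11 sketch; not part of the original file):
//   _Atomic _Bool flag;
//   flag++;   // the result is always true
// Preincrement just stores 'true' atomically and yields true; the
// postincrement form above uses 'atomicrmw xchg' so the old value can
// be returned.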
2024 // Special case for atomic increment / decrement on integers, emit
2025 // atomicrmw instructions. We skip this if we want to be doing overflow
2026 // checking, and fall into the slow path with the atomic cmpxchg loop.
2027 if (!type->isBooleanType() && type->isIntegerType() &&
2028 !(type->isUnsignedIntegerType() &&
2029 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2030 CGF.getLangOpts().getSignedOverflowBehavior() !=
2031 LangOptions::SOB_Trapping) {
2032 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2033 llvm::AtomicRMWInst::Sub;
2034 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2035 llvm::Instruction::Sub;
2036 llvm::Value *amt = CGF.EmitToMemory(
2037 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2038 llvm::Value *old = Builder.CreateAtomicRMW(aop,
2039 LV.getPointer(), amt, llvm::AtomicOrdering::SequentiallyConsistent);
2040 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2041 }
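// Illustration (a C11 sketch; not part of the original file):
//   _Atomic int counter;
//   counter++;   // lowers to: atomicrmw add i32* ..., i32 1 seq_cst
// The instruction returns the old value, which is exactly the
// postincrement result; preincrement re-derives old+1 from it.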
2042 value = EmitLoadOfLValue(LV, E->getExprLoc());
2043 input = value;
2044 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2045 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2046 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2047 value = CGF.EmitToMemory(value, type);
2048 Builder.CreateBr(opBB);
2049 Builder.SetInsertPoint(opBB);
2050 atomicPHI = Builder.CreatePHI(value->getType(), 2);
2051 atomicPHI->addIncoming(value, startBB);
2052 value = atomicPHI;
2053 } else {
2054 value = EmitLoadOfLValue(LV, E->getExprLoc());
2055 input = value;
2056 }
2058 // Special case of integer increment that we have to check first: bool++.
2059 // Due to promotion rules, we get:
2060 // bool++ -> bool = bool + 1
2061 // -> bool = (int)bool + 1
2062 // -> bool = ((int)bool + 1 != 0)
2063 // An interesting aspect of this is that increment is always true.
2064 // Decrement does not have this property.
2065 if (isInc && type->isBooleanType()) {
2066 value = Builder.getTrue();
2068 // Most common case by far: integer increment.
2069 } else if (type->isIntegerType()) {
2070 // Note that signed integer inc/dec with width less than int can't
2071 // overflow because of promotion rules; we're just eliding a few steps here.
2072 if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2073 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2074 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2075 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2076 value =
2077 EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
2078 } else {
2079 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2080 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2083 // Next most common: pointer increment.
2084 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2085 QualType type = ptr->getPointeeType();
2087 // VLA types don't have constant size.
2088 if (const VariableArrayType *vla
2089 = CGF.getContext().getAsVariableArrayType(type)) {
2090 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2091 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2092 if (CGF.getLangOpts().isSignedOverflowDefined())
2093 value = Builder.CreateGEP(value, numElts, "vla.inc");
2094 else
2095 value = CGF.EmitCheckedInBoundsGEP(
2096 value, numElts, /*SignedIndices=*/false, isSubtraction,
2097 E->getExprLoc(), "vla.inc");
2099 // Arithmetic on function pointers (!) is just +-1.
2100 } else if (type->isFunctionType()) {
2101 llvm::Value *amt = Builder.getInt32(amount);
2103 value = CGF.EmitCastToVoidPtr(value);
2104 if (CGF.getLangOpts().isSignedOverflowDefined())
2105 value = Builder.CreateGEP(value, amt, "incdec.funcptr");
2106 else
2107 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2108 isSubtraction, E->getExprLoc(),
2109 "incdec.funcptr");
2110 value = Builder.CreateBitCast(value, input->getType());
2112 // For everything else, we can just do a simple increment.
2114 llvm::Value *amt = Builder.getInt32(amount);
2115 if (CGF.getLangOpts().isSignedOverflowDefined())
2116 value = Builder.CreateGEP(value, amt, "incdec.ptr");
2117 else
2118 value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2119 isSubtraction, E->getExprLoc(),
2120 "incdec.ptr");
2123 // Vector increment/decrement.
2124 } else if (type->isVectorType()) {
2125 if (type->hasIntegerRepresentation()) {
2126 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2128 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2129 } else {
2130 value = Builder.CreateFAdd(
2131 value,
2132 llvm::ConstantFP::get(value->getType(), amount),
2133 isInc ? "inc" : "dec");
2134 }
2137 } else if (type->isRealFloatingType()) {
2138 // Add the inc/dec to the real part.
2139 llvm::Value *amt;
2141 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2142 // Another special case: half FP increment should be done via float
2143 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2144 value = Builder.CreateCall(
2145 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2146 CGF.CGM.FloatTy),
2147 input, "incdec.conv");
2148 } else {
2149 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2150 }
2151 }
2153 if (value->getType()->isFloatTy())
2154 amt = llvm::ConstantFP::get(VMContext,
2155 llvm::APFloat(static_cast<float>(amount)));
2156 else if (value->getType()->isDoubleTy())
2157 amt = llvm::ConstantFP::get(VMContext,
2158 llvm::APFloat(static_cast<double>(amount)));
2160 // Remaining types are Half, LongDouble or __float128. Convert from float.
2161 llvm::APFloat F(static_cast<float>(amount));
2162 bool ignored;
2163 const llvm::fltSemantics *FS;
2164 // Don't use getFloatTypeSemantics because Half isn't
2165 // necessarily represented using the "half" LLVM type.
2166 if (value->getType()->isFP128Ty())
2167 FS = &CGF.getTarget().getFloat128Format();
2168 else if (value->getType()->isHalfTy())
2169 FS = &CGF.getTarget().getHalfFormat();
2170 else
2171 FS = &CGF.getTarget().getLongDoubleFormat();
2172 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2173 amt = llvm::ConstantFP::get(VMContext, F);
2175 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2177 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2178 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2179 value = Builder.CreateCall(
2180 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2181 CGF.CGM.FloatTy),
2182 value, "incdec.conv");
2183 } else {
2184 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2185 }
2186 }
2188 // Objective-C pointer types.
2189 } else {
2190 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2191 value = CGF.EmitCastToVoidPtr(value);
2193 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2194 if (!isInc) size = -size;
2195 llvm::Value *sizeValue =
2196 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2198 if (CGF.getLangOpts().isSignedOverflowDefined())
2199 value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
2200 else
2201 value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2202 /*SignedIndices=*/false, isSubtraction,
2203 E->getExprLoc(), "incdec.objptr");
2204 value = Builder.CreateBitCast(value, input->getType());
2205 }
2207 if (atomicPHI) {
2208 llvm::BasicBlock *opBB = Builder.GetInsertBlock();
2209 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2210 auto Pair = CGF.EmitAtomicCompareExchange(
2211 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2212 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2213 llvm::Value *success = Pair.second;
2214 atomicPHI->addIncoming(old, opBB);
2215 Builder.CreateCondBr(success, contBB, opBB);
2216 Builder.SetInsertPoint(contBB);
2217 return isPre ? value : input;
2218 }
2220 // Store the updated result through the lvalue.
2221 if (LV.isBitField())
2222 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2224 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2226 // If this is a postinc, return the value read from memory, otherwise use the
2227 // updated value.
2228 return isPre ? value : input;
2229 }
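// Illustration (a sketch of the pre/post distinction handled above):
//   int i = 1;
//   int a = i++;  // a == 1: yields the value originally loaded ('input')
//   int b = ++i;  // b == 3: yields the updated value ('value')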
2233 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2234 TestAndClearIgnoreResultAssign();
2235 // Emit unary minus with EmitSub so we handle overflow cases etc.
2236 BinOpInfo BinOp;
2237 BinOp.RHS = Visit(E->getSubExpr());
2239 if (BinOp.RHS->getType()->isFPOrFPVectorTy())
2240 BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
2241 else
2242 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2243 BinOp.Ty = E->getType();
2244 BinOp.Opcode = BO_Sub;
2245 // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2246 BinOp.E = E;
2247 return EmitSub(BinOp);
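// Illustration (a sketch): '-x' is emitted as '0 - x', so
//   int y = -x;
// inherits the binary-subtraction overflow handling; e.g. with
// -fsanitize=signed-integer-overflow, negating INT_MIN is diagnosed by
// the same checked-sub path as '0 - INT_MIN'.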
2250 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2251 TestAndClearIgnoreResultAssign();
2252 Value *Op = Visit(E->getSubExpr());
2253 return Builder.CreateNot(Op, "neg");
2256 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2257 // Perform vector logical not on comparison with zero vector.
2258 if (E->getType()->isExtVectorType()) {
2259 Value *Oper = Visit(E->getSubExpr());
2260 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2261 Value *Result;
2262 if (Oper->getType()->isFPOrFPVectorTy())
2263 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2264 else
2265 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2266 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2269 // Compare operand to zero.
2270 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2273 // TODO: Could dynamically modify easy computations here. For example, if
2274 // the operand is an icmp ne, turn into icmp eq.
2275 BoolVal = Builder.CreateNot(BoolVal, "lnot");
2277 // ZExt result to the expr type.
2278 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2281 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2282 // Try folding the offsetof to a constant.
2283 llvm::APSInt Value;
2284 if (E->EvaluateAsInt(Value, CGF.getContext()))
2285 return Builder.getInt(Value);
2287 // Loop over the components of the offsetof to compute the value.
2288 unsigned n = E->getNumComponents();
2289 llvm::Type* ResultType = ConvertType(E->getType());
2290 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2291 QualType CurrentType = E->getTypeSourceInfo()->getType();
2292 for (unsigned i = 0; i != n; ++i) {
2293 OffsetOfNode ON = E->getComponent(i);
2294 llvm::Value *Offset = nullptr;
2295 switch (ON.getKind()) {
2296 case OffsetOfNode::Array: {
2297 // Compute the index
2298 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2299 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2300 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2301 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2303 // Save the element type
2304 CurrentType =
2305 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2307 // Compute the element size
2308 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2309 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2311 // Multiply out to compute the result
2312 Offset = Builder.CreateMul(Idx, ElemSize);
2316 case OffsetOfNode::Field: {
2317 FieldDecl *MemberDecl = ON.getField();
2318 RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
2319 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2321 // Compute the index of the field in its parent.
2322 unsigned i = 0;
2323 // FIXME: It would be nice if we didn't have to loop here!
2324 for (RecordDecl::field_iterator Field = RD->field_begin(),
2325 FieldEnd = RD->field_end();
2326 Field != FieldEnd; ++Field, ++i) {
2327 if (*Field == MemberDecl)
2328 break;
2329 }
2330 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
2332 // Compute the offset to the field
2333 int64_t OffsetInt = RL.getFieldOffset(i) /
2334 CGF.getContext().getCharWidth();
2335 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2337 // Save the element type.
2338 CurrentType = MemberDecl->getType();
2342 case OffsetOfNode::Identifier:
2343 llvm_unreachable("dependent __builtin_offsetof");
2345 case OffsetOfNode::Base: {
2346 if (ON.getBase()->isVirtual()) {
2347 CGF.ErrorUnsupported(E, "virtual base in offsetof");
2348 continue;
2349 }
2351 RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
2352 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2354 // Save the element type.
2355 CurrentType = ON.getBase()->getType();
2357 // Compute the offset to the base.
2358 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2359 CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2360 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2361 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2365 Result = Builder.CreateAdd(Result, Offset);
2366 }
2367 return Result;
2368 }
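// Illustration (a sketch, using the GNU extension of non-constant
// array indices in __builtin_offsetof):
//   size_t off = __builtin_offsetof(struct S, elems[n].field);
// With a runtime 'n' the constant fold above fails, so the loop emits
// 'mul n, sizeof(elems[0])' plus constant field offsets instead.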
2370 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2371 /// argument of the sizeof expression as an integer.
2373 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2374 const UnaryExprOrTypeTraitExpr *E) {
2375 QualType TypeToSize = E->getTypeOfArgument();
2376 if (E->getKind() == UETT_SizeOf) {
2377 if (const VariableArrayType *VAT =
2378 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2379 if (E->isArgumentType()) {
2380 // sizeof(type) - make sure to emit the VLA size.
2381 CGF.EmitVariablyModifiedType(TypeToSize);
2383 // C99 6.5.3.4p2: If the argument is an expression of type
2384 // VLA, it is evaluated.
2385 CGF.EmitIgnoredExpr(E->getArgumentExpr());
2388 auto VlaSize = CGF.getVLASize(VAT);
2389 llvm::Value *size = VlaSize.NumElts;
2391 // Scale the number of non-VLA elements by the non-VLA element size.
2392 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2393 if (!eltSize.isOne())
2394 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2395 return size;
2396 }
2398 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2399 auto Alignment =
2400 CGF.getContext()
2401 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2402 E->getTypeOfArgument()->getPointeeType()))
2403 .getQuantity();
2404 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2405 }
2407 // If this isn't sizeof(vla), the result must be constant; use the constant
2408 // folding logic so we don't have to duplicate it here.
2409 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2410 }
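// Illustration (a C99 sketch):
//   void f(int n) {
//     int a[n][4];
//     size_t s = sizeof(a);  // emitted as (n * 4) * sizeof(int) at runtime
//   }
// Only the VLA case reaches the code above; every other sizeof/alignof
// folds to a constant.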
2412 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2413 Expr *Op = E->getSubExpr();
2414 if (Op->getType()->isAnyComplexType()) {
2415 // If it's an l-value, load through the appropriate subobject l-value.
2416 // Note that we have to ask E because Op might be an l-value for
2417 // which this won't work, e.g. an Obj-C property.
2418 if (Op->isGLValue())
2419 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2420 E->getExprLoc()).getScalarVal();
2422 // Otherwise, calculate and project.
2423 return CGF.EmitComplexExpr(Op, false, true).first;
2424 }
2426 return Visit(Op);
2427 }
2429 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2430 Expr *Op = E->getSubExpr();
2431 if (Op->getType()->isAnyComplexType()) {
2432 // If it's an l-value, load through the appropriate subobject l-value.
2433 // Note that we have to ask E because Op might be an l-value for
2434 // which this won't work, e.g. an Obj-C property.
2435 if (Op->isGLValue())
2436 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2437 E->getExprLoc()).getScalarVal();
2439 // Otherwise, calculate and project.
2440 return CGF.EmitComplexExpr(Op, true, false).second;
2443 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2444 // effects are evaluated, but not the actual value.
2445 if (Op->isGLValue())
2446 CGF.EmitLValue(Op);
2447 else
2448 CGF.EmitScalarExpr(Op, true);
2449 return llvm::Constant::getNullValue(ConvertType(E->getType()));
2452 //===----------------------------------------------------------------------===//
2453 // Binary Operators
2454 //===----------------------------------------------------------------------===//
2456 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
2457 TestAndClearIgnoreResultAssign();
2458 BinOpInfo Result;
2459 Result.LHS = Visit(E->getLHS());
2460 Result.RHS = Visit(E->getRHS());
2461 Result.Ty = E->getType();
2462 Result.Opcode = E->getOpcode();
2463 Result.FPFeatures = E->getFPFeatures();
2464 Result.E = E;
2465 return Result;
2466 }
2468 LValue ScalarExprEmitter::EmitCompoundAssignLValue(
2469 const CompoundAssignOperator *E,
2470 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
2471 Value *&Result) {
2472 QualType LHSTy = E->getLHS()->getType();
2473 BinOpInfo OpInfo;
2475 if (E->getComputationResultType()->isAnyComplexType())
2476 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
2478 // Emit the RHS first. __block variables need to have the RHS evaluated
2479 // first; in addition, this should improve codegen a little.
2480 OpInfo.RHS = Visit(E->getRHS());
2481 OpInfo.Ty = E->getComputationResultType();
2482 OpInfo.Opcode = E->getOpcode();
2483 OpInfo.FPFeatures = E->getFPFeatures();
2485 // Load/convert the LHS.
2486 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
2488 llvm::PHINode *atomicPHI = nullptr;
2489 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
2490 QualType type = atomicTy->getValueType();
2491 if (!type->isBooleanType() && type->isIntegerType() &&
2492 !(type->isUnsignedIntegerType() &&
2493 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2494 CGF.getLangOpts().getSignedOverflowBehavior() !=
2495 LangOptions::SOB_Trapping) {
2496 llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
2497 switch (OpInfo.Opcode) {
2498 // We don't have atomicrmw operations for *, %, /, <<, >>
2499 case BO_MulAssign: case BO_DivAssign:
2500 case BO_RemAssign:
2501 case BO_ShlAssign:
2502 case BO_ShrAssign:
2503 break;
2504 case BO_AddAssign:
2505 aop = llvm::AtomicRMWInst::Add;
2506 break;
2507 case BO_SubAssign:
2508 aop = llvm::AtomicRMWInst::Sub;
2509 break;
2510 case BO_AndAssign:
2511 aop = llvm::AtomicRMWInst::And;
2512 break;
2513 case BO_XorAssign:
2514 aop = llvm::AtomicRMWInst::Xor;
2515 break;
2516 case BO_OrAssign:
2517 aop = llvm::AtomicRMWInst::Or;
2518 break;
2519 default:
2520 llvm_unreachable("Invalid compound assignment type");
2521 }
2522 if (aop != llvm::AtomicRMWInst::BAD_BINOP) {
2523 llvm::Value *amt = CGF.EmitToMemory(
2524 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
2525 E->getExprLoc()),
2526 LHSTy);
2527 Builder.CreateAtomicRMW(aop, LHSLV.getPointer(), amt,
2528 llvm::AtomicOrdering::SequentiallyConsistent);
2529 return LHSLV;
2530 }
2531 }
2532 // FIXME: For floating point types, we should be saving and restoring the
2533 // floating point environment in the loop.
2534 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2535 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2536 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2537 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
2538 Builder.CreateBr(opBB);
2539 Builder.SetInsertPoint(opBB);
2540 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
2541 atomicPHI->addIncoming(OpInfo.LHS, startBB);
2542 OpInfo.LHS = atomicPHI;
2545 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2547 SourceLocation Loc = E->getExprLoc();
2549 EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
2551 // Expand the binary operator.
2552 Result = (this->*Func)(OpInfo);
2554 // Convert the result back to the LHS type.
2555 Result =
2556 EmitScalarConversion(Result, E->getComputationResultType(), LHSTy, Loc);
2558 if (atomicPHI) {
2559 llvm::BasicBlock *opBB = Builder.GetInsertBlock();
2560 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2561 auto Pair = CGF.EmitAtomicCompareExchange(
2562 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
2563 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
2564 llvm::Value *success = Pair.second;
2565 atomicPHI->addIncoming(old, opBB);
2566 Builder.CreateCondBr(success, contBB, opBB);
2567 Builder.SetInsertPoint(contBB);
2568 return LHSLV;
2569 }
2571 // Store the result value into the LHS lvalue. Bit-fields are handled
2572 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
2573 // 'An assignment expression has the value of the left operand after the
2574 // assignment...'.
2575 if (LHSLV.isBitField())
2576 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
2577 else
2578 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
2579 return LHSLV;
2580 }
2583 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
2584 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
2585 bool Ignore = TestAndClearIgnoreResultAssign();
2586 Value *RHS;
2587 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
2589 // If the result is clearly ignored, return now.
2590 if (Ignore)
2591 return nullptr;
2593 // The result of an assignment in C is the assigned r-value.
2594 if (!CGF.getLangOpts().CPlusPlus)
2595 return RHS;
2597 // If the lvalue is non-volatile, return the computed value of the assignment.
2598 if (!LHS.isVolatileQualified())
2599 return RHS;
2601 // Otherwise, reload the value.
2602 return EmitLoadOfLValue(LHS, E->getExprLoc());
2603 }
2605 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
2606 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
2607 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
2609 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
2610 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
2611 SanitizerKind::IntegerDivideByZero));
2614 const auto *BO = cast<BinaryOperator>(Ops.E);
2615 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
2616 Ops.Ty->hasSignedIntegerRepresentation() &&
2617 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
2618 Ops.mayHaveIntegerOverflow()) {
2619 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
2621 llvm::Value *IntMin =
2622 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
2623 llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
2625 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
2626 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
2627 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
2628 Checks.push_back(
2629 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
2630 }
2632 if (Checks.size() > 0)
2633 EmitBinOpCheck(Checks, Ops);
2634 }
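// Illustration (a sketch): for 'a / b' with 32-bit ints, the checks
// built above guard the two undefined cases
//   b == 0                    (-fsanitize=integer-divide-by-zero)
//   a == INT_MIN && b == -1   (-fsanitize=signed-integer-overflow)
// before the actual sdiv/srem is emitted.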
2636 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
2638 CodeGenFunction::SanitizerScope SanScope(&CGF);
2639 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
2640 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
2641 Ops.Ty->isIntegerType() &&
2642 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
2643 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2644 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
2645 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
2646 Ops.Ty->isRealFloatingType() &&
2647 Ops.mayHaveFloatDivisionByZero()) {
2648 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2649 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
2650 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
2651 Ops);
2652 }
2653 }
2655 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
2656 llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
2657 if (CGF.getLangOpts().OpenCL &&
2658 !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
2659 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
2660 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
2661 // build option allows an application to specify that single precision
2662 // floating-point divide (x/y and 1/x) and sqrt used in the program
2663 // source are correctly rounded.
2664 llvm::Type *ValTy = Val->getType();
2665 if (ValTy->isFloatTy() ||
2666 (isa<llvm::VectorType>(ValTy) &&
2667 cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
2668 CGF.SetFPAccuracy(Val, 2.5);
2669 }
2670 return Val;
2671 }
2672 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
2673 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
2675 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
2678 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
2679 // Rem in C can't be a floating point type: C99 6.5.5p2.
2680 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
2681 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
2682 Ops.Ty->isIntegerType() &&
2683 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
2684 CodeGenFunction::SanitizerScope SanScope(&CGF);
2685 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2686 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
2689 if (Ops.Ty->hasUnsignedIntegerRepresentation())
2690 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
2692 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
2695 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
2696 unsigned IID;
2697 unsigned OpID = 0;
2699 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
2700 switch (Ops.Opcode) {
2701 case BO_Add:
2702 case BO_AddAssign:
2703 OpID = 1;
2704 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
2705 llvm::Intrinsic::uadd_with_overflow;
2706 break;
2707 case BO_Sub:
2708 case BO_SubAssign:
2709 OpID = 2;
2710 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
2711 llvm::Intrinsic::usub_with_overflow;
2712 break;
2713 case BO_Mul:
2714 case BO_MulAssign:
2715 OpID = 3;
2716 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
2717 llvm::Intrinsic::umul_with_overflow;
2718 break;
2719 default:
2720 llvm_unreachable("Unsupported operation for overflow detection");
2721 }
2722 OpID <<= 1;
2723 if (isSigned)
2724 OpID |= 1;
2726 CodeGenFunction::SanitizerScope SanScope(&CGF);
2727 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
2729 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
2731 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
2732 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
2733 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
2735 // Handle overflow with llvm.trap if no custom handler has been specified.
2736 const std::string *handlerName =
2737 &CGF.getLangOpts().OverflowHandler;
2738 if (handlerName->empty()) {
2739 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
2740 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
2741 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
2742 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
2743 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
2744 : SanitizerKind::UnsignedIntegerOverflow;
2745 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
2746 } else
2747 CGF.EmitTrapCheck(Builder.CreateNot(overflow));
2748 return result;
2749 }
2751 // Branch in case of overflow.
2752 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
2753 llvm::BasicBlock *continueBB =
2754 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
2755 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
2757 Builder.CreateCondBr(overflow, overflowBB, continueBB);
2759 // If an overflow handler is set, then we want to call it and then use its
2760 // result, if it returns.
2761 Builder.SetInsertPoint(overflowBB);
2763 // Get the overflow handler.
2764 llvm::Type *Int8Ty = CGF.Int8Ty;
2765 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
2766 llvm::FunctionType *handlerTy =
2767 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
2768 llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
2770 // Sign extend the args to 64-bit, so that we can use the same handler for
2771 // all types of overflow.
2772 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
2773 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
2775 // Call the handler with the two arguments, the operation, and the size of
2777 llvm::Value *handlerArgs[] = {
2778 lhs,
2779 rhs,
2780 Builder.getInt8(OpID),
2781 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
2782 };
2783 llvm::Value *handlerResult =
2784 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
2786 // Truncate the result back to the desired size.
2787 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
2788 Builder.CreateBr(continueBB);
2790 Builder.SetInsertPoint(continueBB);
2791 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
2792 phi->addIncoming(result, initialBB);
2793 phi->addIncoming(handlerResult, overflowBB);
2794 return phi;
2795 }
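// Illustration (a sketch): with -ftrapv or
// -fsanitize=signed-integer-overflow, 'a + b' on i32 lowers roughly to
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %v = extractvalue { i32, i1 } %s, 0
//   %o = extractvalue { i32, i1 } %s, 1
//   br i1 %o, label %overflow, label %cont
// where %overflow traps, calls the sanitizer runtime, or calls the
// user-supplied overflow handler shown above.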
2798 /// Emit pointer + index arithmetic.
2799 static Value *emitPointerArithmetic(CodeGenFunction &CGF,
2800 const BinOpInfo &op,
2801 bool isSubtraction) {
2802 // Must have binary (not unary) expr here. Unary pointer
2803 // increment/decrement doesn't use this path.
2804 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
2806 Value *pointer = op.LHS;
2807 Expr *pointerOperand = expr->getLHS();
2808 Value *index = op.RHS;
2809 Expr *indexOperand = expr->getRHS();
2811 // In a subtraction, the LHS is always the pointer.
2812 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
2813 std::swap(pointer, index);
2814 std::swap(pointerOperand, indexOperand);
2817 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
2819 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
2820 auto &DL = CGF.CGM.getDataLayout();
2821 auto PtrTy = cast<llvm::PointerType>(pointer->getType());
2823 // Some versions of glibc and gcc use idioms (particularly in their malloc
2824 // routines) that add a pointer-sized integer (known to be a pointer value)
2825 // to a null pointer in order to cast the value back to an integer or as
2826 // part of a pointer alignment algorithm. This is undefined behavior, but
2827 // we'd like to be able to compile programs that use it.
2829 // Normally, we'd generate a GEP with a null-pointer base here in response
2830 // to that code, but it's also UB to dereference a pointer created that
2831 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
2832 // generate a direct cast of the integer value to a pointer.
2834 // The idiom (p = nullptr + N) is not met if any of the following are true:
2836 // The operation is subtraction.
2837 // The index is not pointer-sized.
2838 // The pointer type is not byte-sized.
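// Illustration (a sketch of the tolerated idiom):
//   uintptr_t bits = (uintptr_t)((char *)0 + addr);
// Here 'nullptr + N' is emitted as a direct inttoptr of 'addr' rather
// than a GEP off a null base.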
2840 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
2841 op.Opcode,
2842 expr->getLHS(),
2843 expr->getRHS()))
2844 return CGF.Builder.CreateIntToPtr(index, pointer->getType());
2846 if (width != DL.getTypeSizeInBits(PtrTy)) {
2847 // Zero-extend or sign-extend the pointer value according to
2848 // whether the index is signed or not.
2849 index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned,
2850 "idx.ext");
2851 }
2853 // If this is subtraction, negate the index.
2854 if (isSubtraction)
2855 index = CGF.Builder.CreateNeg(index, "idx.neg");
2857 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
2858 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
2859 /*Accessed*/ false);
2861 const PointerType *pointerType
2862 = pointerOperand->getType()->getAs<PointerType>();
2863 if (!pointerType) {
2864 QualType objectType = pointerOperand->getType()
2865 ->castAs<ObjCObjectPointerType>()
2866 ->getPointeeType();
2867 llvm::Value *objectSize
2868 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
2870 index = CGF.Builder.CreateMul(index, objectSize);
2872 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
2873 result = CGF.Builder.CreateGEP(result, index, "add.ptr");
2874 return CGF.Builder.CreateBitCast(result, pointer->getType());
2877 QualType elementType = pointerType->getPointeeType();
2878 if (const VariableArrayType *vla
2879 = CGF.getContext().getAsVariableArrayType(elementType)) {
2880 // The element count here is the total number of non-VLA elements.
2881 llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
2883 // Effectively, the multiply by the VLA size is part of the GEP.
2884 // GEP indexes are signed, and scaling an index isn't permitted to
2885 // signed-overflow, so we use the same semantics for our explicit
2886 // multiply. We suppress this if overflow is not undefined behavior.
2887 if (CGF.getLangOpts().isSignedOverflowDefined()) {
2888 index = CGF.Builder.CreateMul(index, numElements, "vla.index");
2889 pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
2890 } else {
2891 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
2892 pointer =
2893 CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
2894 op.E->getExprLoc(), "add.ptr");
2895 }
2896 return pointer;
2897 }
2899 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
2900 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
2901 // future proof.
2902 if (elementType->isVoidType() || elementType->isFunctionType()) {
2903 Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
2904 result = CGF.Builder.CreateGEP(result, index, "add.ptr");
2905 return CGF.Builder.CreateBitCast(result, pointer->getType());
2908 if (CGF.getLangOpts().isSignedOverflowDefined())
2909 return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
2911 return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
2912 op.E->getExprLoc(), "add.ptr");
2915 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
2916 // Addend. Use negMul and negAdd to negate the first operand of the Mul or
2917 // the add operand respectively. This allows fmuladd to represent a*b-c, or
2918 // c-a*b. Patterns in LLVM should catch the negated forms and translate them to
2919 // efficient operations.
2920 static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
2921 const CodeGenFunction &CGF, CGBuilderTy &Builder,
2922 bool negMul, bool negAdd) {
2923 assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
2925 Value *MulOp0 = MulOp->getOperand(0);
2926 Value *MulOp1 = MulOp->getOperand(1);
2927 if (negMul) {
2928 MulOp0 =
2929 Builder.CreateFSub(
2930 llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
2931 "neg");
2932 } else if (negAdd) {
2933 Addend =
2934 Builder.CreateFSub(
2935 llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
2936 "neg");
2937 }
2939 Value *FMulAdd = Builder.CreateCall(
2940 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
2941 {MulOp0, MulOp1, Addend});
2942 MulOp->eraseFromParent();
2944 return FMulAdd;
2945 }
2947 // Check whether it would be legal to emit an fmuladd intrinsic call to
2948 // represent op and if so, build the fmuladd.
2950 // Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
2951 // Does NOT check the type of the operation - it's assumed that this function
2952 // will be called from contexts where it's known that the type is contractable.
2953 static Value* tryEmitFMulAdd(const BinOpInfo &op,
2954 const CodeGenFunction &CGF, CGBuilderTy &Builder,
2955 bool isSub=false) {
2957 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
2958 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
2959 "Only fadd/fsub can be the root of an fmuladd.");
2961 // Check whether this op is marked as fusable.
2962 if (!op.FPFeatures.allowFPContractWithinStatement())
2963 return nullptr;
2965 // We have a potentially fusable op. Look for a mul on one of the operands.
2966 // Also, make sure that the mul result isn't used directly. In that case,
2967 // there's no point creating a muladd operation.
2968 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
2969 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
2970 LHSBinOp->use_empty())
2971 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
2973 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
2974 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
2975 RHSBinOp->use_empty())
2976 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
2977 }
2979 return nullptr;
2980 }
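// Illustration (a sketch): with -ffp-contract=on (fusing within a
// statement), and assuming the multiply has no other users,
//   float r = a * b + c;
// is emitted as a single
//   call float @llvm.fmuladd.f32(float %a, float %b, float %c)
// which the backend may lower to a fused multiply-add.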
2982 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
2983 if (op.LHS->getType()->isPointerTy() ||
2984 op.RHS->getType()->isPointerTy())
2985 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
2987 if (op.Ty->isSignedIntegerOrEnumerationType()) {
2988 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2989 case LangOptions::SOB_Defined:
2990 return Builder.CreateAdd(op.LHS, op.RHS, "add");
2991 case LangOptions::SOB_Undefined:
2992 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2993 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
2994 LLVM_FALLTHROUGH;
2995 case LangOptions::SOB_Trapping:
2996 if (CanElideOverflowCheck(CGF.getContext(), op))
2997 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
2998 return EmitOverflowCheckedBinOp(op);
3002 if (op.Ty->isUnsignedIntegerType() &&
3003 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3004 !CanElideOverflowCheck(CGF.getContext(), op))
3005 return EmitOverflowCheckedBinOp(op);
3007 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3008 // Try to form an fmuladd.
3009 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
3010 return FMulAdd;
3012 Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
3013 return propagateFMFlags(V, op);
3016 return Builder.CreateAdd(op.LHS, op.RHS, "add");
3019 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
3020 // The LHS is always a pointer if either side is.
3021 if (!op.LHS->getType()->isPointerTy()) {
3022 if (op.Ty->isSignedIntegerOrEnumerationType()) {
3023 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3024 case LangOptions::SOB_Defined:
3025 return Builder.CreateSub(op.LHS, op.RHS, "sub");
3026 case LangOptions::SOB_Undefined:
3027 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3028 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3029 LLVM_FALLTHROUGH;
3030 case LangOptions::SOB_Trapping:
3031 if (CanElideOverflowCheck(CGF.getContext(), op))
3032 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3033 return EmitOverflowCheckedBinOp(op);
3037 if (op.Ty->isUnsignedIntegerType() &&
3038 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3039 !CanElideOverflowCheck(CGF.getContext(), op))
3040 return EmitOverflowCheckedBinOp(op);
3042 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3043 // Try to form an fmuladd.
3044 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
3045 return FMulAdd;
3046 Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
3047 return propagateFMFlags(V, op);
3050 return Builder.CreateSub(op.LHS, op.RHS, "sub");
3053 // If the RHS is not a pointer, then we have normal pointer
3055 if (!op.RHS->getType()->isPointerTy())
3056 return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
3058 // Otherwise, this is a pointer subtraction.
3060 // Do the raw subtraction part.
3061 llvm::Value *LHS
3062 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
3063 llvm::Value *RHS
3064 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
3065 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
3067 // Okay, figure out the element size.
3068 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3069 QualType elementType = expr->getLHS()->getType()->getPointeeType();
3071 llvm::Value *divisor = nullptr;
3073 // For a variable-length array, this is going to be non-constant.
3074 if (const VariableArrayType *vla
3075 = CGF.getContext().getAsVariableArrayType(elementType)) {
3076 auto VlaSize = CGF.getVLASize(vla);
3077 elementType = VlaSize.Type;
3078 divisor = VlaSize.NumElts;
3080 // Scale the number of non-VLA elements by the non-VLA element size.
3081 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
3082 if (!eltSize.isOne())
3083 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
3085 // For everything else, we can just compute it, safe in the
3086 // assumption that Sema won't let anything through that we can't
3087 // safely compute the size of.
3089 CharUnits elementSize;
3090 // Handle GCC extension for pointer arithmetic on void* and
3091 // function pointer types.
3092 if (elementType->isVoidType() || elementType->isFunctionType())
3093 elementSize = CharUnits::One();
3095 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
3097 // Don't even emit the divide for element size of 1.
3098 if (elementSize.isOne())
3099 return diffInChars;
3101 divisor = CGF.CGM.getSize(elementSize);
3104 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
3105 // pointer difference in C is only defined in the case where both operands
3106 // are pointing to elements of an array.
3107 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
3108 }
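// Illustration (a sketch): for 'T *p, *q; p - q' this path emits
//   %d = sub i64 (ptrtoint p), (ptrtoint q)
//   %r = sdiv exact i64 %d, sizeof(T)
// with the divide omitted when sizeof(T) == 1; 'exact' is legal because
// C only defines the result for pointers into the same array object.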
3110 Value *ScalarExprEmitter::GetWidthMinusOneValue(Value *LHS, Value *RHS) {
3111 llvm::IntegerType *Ty;
3112 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3113 Ty = cast<llvm::IntegerType>(VT->getElementType());
3115 Ty = cast<llvm::IntegerType>(LHS->getType());
3116 return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
3119 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
3120 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3121 // RHS to the same size as the LHS.
3122 Value *RHS = Ops.RHS;
3123 if (Ops.LHS->getType() != RHS->getType())
3124 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3126 bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
3127 Ops.Ty->hasSignedIntegerRepresentation() &&
3128 !CGF.getLangOpts().isSignedOverflowDefined();
3129 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
3130 // OpenCL 6.3j: shift values are effectively % word size of LHS.
3131 if (CGF.getLangOpts().OpenCL)
3132 RHS =
3133 Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");
3134 else if ((SanitizeBase || SanitizeExponent) &&
3135 isa<llvm::IntegerType>(Ops.LHS->getType())) {
3136 CodeGenFunction::SanitizerScope SanScope(&CGF);
3137 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
3138 llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
3139 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
3141 if (SanitizeExponent) {
3142 Checks.push_back(
3143 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
3144 }
3146 if (SanitizeBase) {
3147 // Check whether we are shifting any non-zero bits off the top of the
3148 // integer. We only emit this check if the exponent is valid - otherwise
3149 // instructions below will have undefined behavior themselves.
3150 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
3151 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3152 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
3153 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
3154 llvm::Value *PromotedWidthMinusOne =
3155 (RHS == Ops.RHS) ? WidthMinusOne
3156 : GetWidthMinusOneValue(Ops.LHS, RHS);
3157 CGF.EmitBlock(CheckShiftBase);
3158 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
3159 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
3160 /*NUW*/ true, /*NSW*/ true),
3161 "shl.check");
3162 if (CGF.getLangOpts().CPlusPlus) {
3163 // In C99, we are not permitted to shift a 1 bit into the sign bit.
3164 // Under C++11's rules, shifting a 1 bit into the sign bit is
3165 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
3166 // define signed left shifts, so we use the C99 and C++11 rules there).
3167 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
3168 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
3170 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
3171 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
3172 CGF.EmitBlock(Cont);
3173 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
3174 BaseCheck->addIncoming(Builder.getTrue(), Orig);
3175 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
3176 Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase));
3179 assert(!Checks.empty());
3180 EmitBinOpCheck(Checks, Ops);
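  // For example (assuming 32-bit int): under the C99 rules '1 << 31' shifts a
  // 1 bit into the sign bit and is flagged by the ShiftBase check, while the
  // C++11 handling above accepts the same shift.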
  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}

Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL)
    RHS =
        Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");
  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Valid =
        Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
  }
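  // For example (a sketch, assuming 32-bit int): '-8 >> 1' is emitted as an
  // 'ashr' and yields -4, while '8u >> 1' is emitted as an 'lshr' and
  // yields 4.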
  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}

enum IntrinsicType { VCMPEQ, VCMPGT };
// Return the corresponding comparison intrinsic for the given vector type.
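// For example, GetIntrinsic(VCMPGT, BuiltinType::Int) yields
// llvm::Intrinsic::ppc_altivec_vcmpgtsw_p, the signed-word predicate form of
// the AltiVec compare.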
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
                                        BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
  default: llvm_unreachable("unexpected element type");
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
  case BuiltinType::UShort:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
  case BuiltinType::Short:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
  case BuiltinType::UInt:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
  case BuiltinType::Int:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
  case BuiltinType::ULong:
  case BuiltinType::ULongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
  case BuiltinType::Long:
  case BuiltinType::LongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
  case BuiltinType::Float:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
  case BuiltinType::Double:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
                            llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
  }
}
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());

    // With AltiVec, a comparison of two vectors yields a numeric (scalar)
    // type, so we use the predicate intrinsics, which compare the vectors
    // and give 0 or 1 as a result.
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // Constants for mapping CR6 register bits to the predicate result.
      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // In several cases the vector argument order will be reversed.
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
      const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
      BuiltinType::Kind ElementKind = BTy->getKind();
      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        } else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        } else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of the intrinsic may not be the same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing; but if ResultTy is not i1 at that point, it would crash
      // later, so truncate to i1 here.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }
    if (LHS->getType()->isFPOrFPVectorTy()) {
      Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
          !isa<llvm::ConstantPointerNull>(LHS) &&
          !isa<llvm::ConstantPointerNull>(RHS)) {

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
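    // For example (a sketch): comparing two '<4 x i32>' values produces a
    // '<4 x i1>' mask, which is sign extended to a '<4 x i32>' of 0 or -1
    // per lane.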
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
  } else {
    // Complex Comparison: can only be an equality comparison.
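    // For example, '(a+bi) == (c+di)' holds iff 'a == c && b == d', and
    // '(a+bi) != (c+di)' holds iff 'a != c || b != d'; the real and imaginary
    // parts are compared separately and combined below.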
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }

    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

  case Qualifiers::OCL_ExplicitNone:
    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

  case Qualifiers::OCL_Weak:
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

  case Qualifiers::OCL_None:
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
    break;
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
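  // For example, in C 'char c; int i = (c = 500);' yields the value of 'c'
  // after the store, i.e. 500 converted to char, not 500 itself.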
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock now come from the (indeterminate number of)
  // branches out of this first condition. All of these values will be false.
  // Start setting up the PHI node in the Cont Block for this.
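  // A sketch of the resulting IR for 'a && b' (block and value names
  // illustrative):
  //   land.end:
  //     %res = phi i1 [ false, %entry ], [ %rhs.tobool, %land.rhs ]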
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit a line number for an unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  // Artificial location to preserve the scope information.
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock now come from the (indeterminate number of)
  // branches out of this first condition. All of these values will be true.
  // Start setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}

Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  CGF.EnsureInsertPoint();
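  // For example, '(f(), g())' evaluates f() purely for its side effects and
  // yields the value of g().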
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
//                             Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races that didn't
  // exist in the source-level program.
}
Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the Live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      Value *Result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if (CGF.getLangOpts().OpenCL
      && condExpr->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(TestMSB,
                                          llvm::VectorType::get(elemType,
                                                                numElem),
                                          "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
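  // For example, 'x ? 4 : 5' can be emitted as (a sketch):
  //   %tobool = icmp ne i32 %x, 0
  //   %cond   = select i1 %tobool, i32 4, i32 5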
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    // If the conditional has void type, make sure we return a null Value*.
    if (!LHS) {
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }

    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);
  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node to select the result.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);
  return PN;
}

Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}
Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  QualType Ty = VE->getType();

  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty);

  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  llvm::Type *ArgTy = ConvertType(VE->getType());

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "va_arg expression");
    return llvm::UndefValue::get(ArgTy);
  }

  // FIXME Volatility.
  llvm::Value *Val = Builder.CreateLoad(ArgPtr);

  // If EmitVAArg promoted the type, we must truncate it.
  if (ArgTy != Val->getType()) {
    if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
      Val = Builder.CreateIntToPtr(Val, ArgTy);
    else
      Val = Builder.CreateTrunc(Val, ArgTy);
  }

  return Val;
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}

// Convert a vec3 to vec4, or vice versa.
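// For example, a vec3 -> vec4 conversion emits a shufflevector with mask
// <0, 1, 2, undef>, while vec4 -> vec3 uses mask <0, 1, 2>.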
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
  SmallVector<llvm::Constant*, 4> Args;
  Args.push_back(Builder.getInt32(0));
  Args.push_back(Builder.getInt32(1));
  Args.push_back(Builder.getInt32(2));
  if (NumElementsDst == 4)
    Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
  return Builder.CreateShuffleVector(Src, UnV, Mask);
}

// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalar or vectors of different lengths, and either can be
// a pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
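// For example (case 3b, assuming 64-bit pointers): casting an 'i8*' to
// 'double' emits a ptrtoint to i64 followed by a bitcast to double.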
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
    cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
  unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
    cast<llvm::VectorType>(DstTy)->getNumElements() : 0;

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);

    if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
      Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                         DstTy);
    }

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
      auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
      Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                         Vec4Ty);
    }

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}

Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}
//===----------------------------------------------------------------------===//
//                         Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type, ignoring
/// the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}
llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isRValue()) {
    Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress();
  }

  // Cast the address to Class*.
  Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}
LValue CodeGenFunction::EmitCompoundAssignmentLValue(
                                            const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,    \
                                           Result)
  COMPOUND_OP(Mul); COMPOUND_OP(Div); COMPOUND_OP(Rem); COMPOUND_OP(Add);
  COMPOUND_OP(Sub); COMPOUND_OP(Shl); COMPOUND_OP(Shr); COMPOUND_OP(And);
  COMPOUND_OP(Xor); COMPOUND_OP(Or);
#undef COMPOUND_OP

  default:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}
Value *CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr,
                                               ArrayRef<Value *> IdxList,
                                               bool SignedIndices,
                                               bool IsSubtraction,
                                               SourceLocation Loc,
                                               const Twine &Name) {
  Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // If the GEP has already been reduced to a constant, leave it be.
  if (isa<llvm::Constant>(GEPVal))
    return GEPVal;

  // Only check for overflows in the default address space.
  if (GEPVal->getType()->getPointerAddressSpace())
    return GEPVal;

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  SanitizerScope SanScope(this);
  auto &VMContext = getLLVMContext();
  const auto &DL = CGM.getDataLayout();
  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();
  /// Return the result of the given binary operation.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
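    // A sketch of the pattern emitted below for BO_Mul (value names
    // illustrative, assuming 64-bit intptr_t):
    //   %pair = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %l, i64 %r)
    //   %val  = extractvalue { i64, i1 } %pair, 0
    //   %ovf  = extractvalue { i64, i1 } %pair, 1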
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
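      // For example, indexing an array of 'i32' by %i contributes
      // '4 * sext(%i)' bytes to the total offset.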
      auto *ElementSize = llvm::ConstantInt::get(
          IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  // Common case: if the total offset is zero, don't emit a check.
  if (TotalOffset == Zero)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(GEP->getPointerOperand(), IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, TotalOffset);

  // The GEP is valid if:
  // 1) The total offset doesn't overflow, and
  // 2) The sign of the difference between the computed address and the base
  // pointer matches the sign of the total offset.
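  // For example, for 'p + i' with an unsigned index and no subtraction, it
  // suffices that the computed address does not wrap below the base pointer
  // and that the offset arithmetic did not overflow.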
  llvm::Value *ValidGEP;
  auto *NoOffsetOverflow = Builder.CreateNot(OffsetOverflows);
  if (SignedIndices) {
    auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    auto *PosOrZeroOffset = Builder.CreateICmpSGE(TotalOffset, Zero);
    llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
    ValidGEP = Builder.CreateAnd(
        Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid),
        NoOffsetOverflow);
  } else if (!SignedIndices && !IsSubtraction) {
    auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    ValidGEP = Builder.CreateAnd(PosOrZeroValid, NoOffsetOverflow);
  } else {
    auto *NegOrZeroValid = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    ValidGEP = Builder.CreateAnd(NegOrZeroValid, NoOffsetOverflow);
  }

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(std::make_pair(ValidGEP, SanitizerKind::PointerOverflow),
            SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}