//===-- ConstantFolding.cpp - Fold instructions into constants -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }
  return nullptr;
}
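
// Worked example (illustrative, not from the original source): on a
// little-endian target, folding <2 x i16> <i16 1, i16 2> to i32 reads element
// 1 first, so the loop computes (2 << 16) | 1 == 0x00020001, matching the
// vector's in-memory layout.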

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = VTy->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to a vector
      // of int to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        Type *SrcIVTy = VectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if
  // we have something in FP form, recast it as an integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy = VectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer. If the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy = VectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(C->getType()->getVectorElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src) // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}
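
// Worked example (illustrative, not from the original source): on a
// little-endian target, bitcast <4 x i8> <i8 1, i8 2, i8 3, i8 4> to
// <1 x i32> takes the NumDstElt < NumSrcElt path above and produces
// <1 x i32> <i32 0x04030201>.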

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getPointerTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = std::move(TmpOffset);
  return true;
}
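
// Worked example (illustrative, not from the original source): for
//   getelementptr ([5 x i32], [5 x i32]* @a, i32 0, i32 4)
// this returns GV == @a with Offset == 16 (four i32 elements past the start).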

namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset++;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
    }
    return true;
  }
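
  // Illustrative note (not from the original source): for an i32 holding
  // 0x01020304 at ByteOffset 0 on a little-endian target, the loop above
  // stores the bytes 04 03 02 01 into CurPtr in that order.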

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = C->getType()->getSequentialElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (auto *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = C->getType()->getVectorNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result. This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(C->getContext(),
                                     DL.getTypeAllocSizeInBits(LoadTy));
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL))
      return FoldBitCast(Res, LoadTy, DL);
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize =
      DL.getTypeAllocSize(GV->getInitializer()->getType());

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset + BytesLoaded <= 0)
    return UndefValue::get(IntType);

  // If we're past the end of the initializer, the result is also undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}
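
// Worked example (illustrative, not from the original source): loading an i16
// from a constant global holding the bytes {0x01, 0x02} yields 0x0201 on a
// little-endian target and 0x0102 on a big-endian one.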

Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE, Type *DestTy,
                                         const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
  if (!SrcPtrTy)
    return nullptr;
  Type *SrcTy = SrcPtrTy->getPointerElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
  if (!C)
    return nullptr;

  do {
    Type *SrcTy = C->getType();

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill
    // down and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    C = C->getAggregateElement(0u);
  } while (C);

  return nullptr;
}

} // end anonymous namespace

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
                ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(),
                                                       CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcast(CE, Ty, DL))
      return LoadedC;

  // Instead of loading a constant c string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append NULL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}
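
// Worked example (illustrative, not from the original source): loading an i32
// through a pointer to the constant string "abc\0" folds to 0x00636261 on a
// little-endian target ('a' in the low byte) and to 0x61626300 on big-endian.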

namespace {

Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);

  return nullptr;
}

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    unsigned BitWidth = DL.getTypeSizeInBits(Op0->getType()->getScalarType());
    KnownBits Known0(BitWidth);
    KnownBits Known1(BitWidth);
    computeKnownBits(Op0, Known0, DL);
    computeKnownBits(Op1, Known1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0.Zero |= Known1.Zero;
    Known0.One &= Known1.One;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}
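
// Worked example (illustrative, not from the original source): with i32
// elements, (ptrtoint &A[8]) - (ptrtoint &A[2]) has both operands as constant
// offsets from the same global, so it folds to 32 - 8 == 24.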

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
  Type *IntPtrScalarTy = IntPtrTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntPtrTy
                          : IntPtrTy->getScalarType();
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
    C = Folded;

  return C;
}

/// Strip the pointer casts, but preserve the address space information.
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = Ptr->stripPointerCasts();
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized())
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
            Res = FoldedRes;
          return Res;
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
        Ty = ATy->getElementType();
      } else {
        // We've reached some non-indexable type.
        break;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      } else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down) to compute the index at this level.
        bool Overflow = false;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1)) {
          InRangeIndex = None;
          break;
        }
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}
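
// Worked example (illustrative, not from the original source): for a global
// @g = [4 x [5 x i32]] and a raw byte offset of 44, the loop above re-derives
// the indices 0, 2, 1 (0*80 + 2*20 + 1*4 == 44), re-forming the GEP as
// gep [4 x [5 x i32]], [4 x [5 x i32]]* @g, i64 0, i64 2, i64 1.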

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned, if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
///
/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/inrange
/// etc information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  // Handle easy binops first.
  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back()))
      if (canConstantFoldCallTo(F))
        return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return nullptr;

  SmallVector<Constant *, 8> Ops;
  for (const Use &NewU : C->operands()) {
    auto *NewC = cast<Constant>(&NewU);
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(NewC) || isa<ConstantExpr>(NewC)) {
      auto It = FoldedOps.find(NewC);
      if (It == FoldedOps.end()) {
        if (auto *FoldedC =
                ConstantFoldConstantImpl(NewC, DL, TLI, FoldedOps)) {
          FoldedOps.insert({NewC, FoldedC});
          NewC = FoldedC;
        } else {
          FoldedOps.insert({NewC, NewC});
        }
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      if (auto *FoldedC = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps))
        C = FoldedC;
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    if (auto *FoldedOp = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps))
      Op = FoldedOp;
    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}
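
// Minimal usage sketch (illustrative, not part of the original source):
//   const DataLayout &DL = I->getModule()->getDataLayout();
//   if (Constant *C = ConstantFoldInstruction(I, DL, &TLI))
//     I->replaceAllUsesWith(C); // the caller erases I afterwards if desired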

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here
  // now. ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}

Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr; // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C) return nullptr;
  }
  return C;
}

Constant *
llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                        ArrayRef<Constant *> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (Constant *Index : Indices) {
    C = C->getAggregateElement(Index);
    if (!C) return nullptr;
  }
  return C;
}

//===----------------------------------------------------------------------===//
// Constant Folding for Calls
//===----------------------------------------------------------------------===//

bool llvm::canConstantFoldCallTo(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::round:
  case Intrinsic::masked_load:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::bitreverse:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName())
    return false;
  StringRef Name = F->getName();

  // In these cases, the check of the length is required. We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal
  // to "cos", but has length 8.
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "asin" || Name == "atan" ||
           Name == "atan2" || Name == "acosf" || Name == "asinf" ||
           Name == "atanf" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "cos" || Name == "cosh" ||
           Name == "ceilf" || Name == "cosf" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "exp2" || Name == "expf" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "floor" || Name == "fmod" ||
           Name == "fabsf" || Name == "floorf" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "log10" || Name == "logf" ||
           Name == "log10f";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sinhf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh" || Name == "tanf" || Name == "tanhf";
  }
}

namespace {

Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
                               double W, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}
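
// Illustrative note (not from the original source): the fenv guards above
// reject folds whose host evaluation raises an FP exception or sets errno;
// e.g. ConstantFoldFP(log, -1.0, Ty) returns nullptr (EDOM) rather than
// folding to NaN.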

/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
                                              : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
                           /*isSigned=*/true, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr; // Can't constant fold.
  return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
}
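
// Illustrative note (not from the original source): the truncating form
// (roundTowardZero == true, cvttss2si) folds 1.9f to 1, while the default
// ties-to-even form (cvtss2si) folds 1.5f to 2 and 2.5f to 2.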

double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isFloatTy())
    return Op->getValueAPF().convertToFloat();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI) {
  if (Operands.size() == 1) {
    if (isa<UndefValue>(Operands[0])) {
      // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
      if (IntrinsicID == Intrinsic::cos)
        return Constant::getNullValue(Ty);
    }
    if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
      if (IntrinsicID == Intrinsic::convert_to_fp16) {
        APFloat Val(Op->getValueAPF());

        bool lost = false;
        Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

        return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
      }

      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;

      if (IntrinsicID == Intrinsic::round) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::floor) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::ceil) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::trunc) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::rint) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), V);
      }

      if (IntrinsicID == Intrinsic::nearbyint) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), V);
      }

      /// We only fold functions with finite arguments. Folding NaN and inf is
      /// likely to be aborted with an exception anyway, and some host libms
      /// have known errors raising exceptions.
      if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
        return nullptr;

      /// Currently APFloat versions of these functions do not exist, so we
      /// use the host native double versions. Float versions are not called
      /// directly but for all these it is true (float)(f((double)arg)) ==
      /// f(arg). Long double is not supported yet.
      double V = getValueAsDouble(Op);

      switch (IntrinsicID) {
      default: break;
      case Intrinsic::fabs:
        return ConstantFoldFP(fabs, V, Ty);
      case Intrinsic::log2:
        return ConstantFoldFP(Log2, V, Ty);
      case Intrinsic::log:
        return ConstantFoldFP(log, V, Ty);
      case Intrinsic::log10:
        return ConstantFoldFP(log10, V, Ty);
      case Intrinsic::exp:
        return ConstantFoldFP(exp, V, Ty);
      case Intrinsic::exp2:
        return ConstantFoldFP(exp2, V, Ty);
      case Intrinsic::sin:
        return ConstantFoldFP(sin, V, Ty);
      case Intrinsic::cos:
        return ConstantFoldFP(cos, V, Ty);
      case Intrinsic::sqrt:
        return ConstantFoldFP(sqrt, V, Ty);
      }

      if (!TLI)
        return nullptr;

      switch (Name[0]) {
      case 'a':
        if ((Name == "acos" && TLI->has(LibFunc_acos)) ||
            (Name == "acosf" && TLI->has(LibFunc_acosf)))
          return ConstantFoldFP(acos, V, Ty);
        else if ((Name == "asin" && TLI->has(LibFunc_asin)) ||
                 (Name == "asinf" && TLI->has(LibFunc_asinf)))
          return ConstantFoldFP(asin, V, Ty);
        else if ((Name == "atan" && TLI->has(LibFunc_atan)) ||
                 (Name == "atanf" && TLI->has(LibFunc_atanf)))
          return ConstantFoldFP(atan, V, Ty);
        break;
      case 'c':
        if ((Name == "ceil" && TLI->has(LibFunc_ceil)) ||
            (Name == "ceilf" && TLI->has(LibFunc_ceilf)))
          return ConstantFoldFP(ceil, V, Ty);
        else if ((Name == "cos" && TLI->has(LibFunc_cos)) ||
                 (Name == "cosf" && TLI->has(LibFunc_cosf)))
          return ConstantFoldFP(cos, V, Ty);
        else if ((Name == "cosh" && TLI->has(LibFunc_cosh)) ||
                 (Name == "coshf" && TLI->has(LibFunc_coshf)))
          return ConstantFoldFP(cosh, V, Ty);
        break;
      case 'e':
        if ((Name == "exp" && TLI->has(LibFunc_exp)) ||
            (Name == "expf" && TLI->has(LibFunc_expf)))
          return ConstantFoldFP(exp, V, Ty);
        if ((Name == "exp2" && TLI->has(LibFunc_exp2)) ||
            (Name == "exp2f" && TLI->has(LibFunc_exp2f)))
          // Constant fold exp2(x) as pow(2, x) in case the host doesn't have
          // a C99 library.
          return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
        break;
      case 'f':
        if ((Name == "fabs" && TLI->has(LibFunc_fabs)) ||
            (Name == "fabsf" && TLI->has(LibFunc_fabsf)))
          return ConstantFoldFP(fabs, V, Ty);
        else if ((Name == "floor" && TLI->has(LibFunc_floor)) ||
                 (Name == "floorf" && TLI->has(LibFunc_floorf)))
          return ConstantFoldFP(floor, V, Ty);
        break;
      case 'l':
        if ((Name == "log" && V > 0 && TLI->has(LibFunc_log)) ||
            (Name == "logf" && V > 0 && TLI->has(LibFunc_logf)))
          return ConstantFoldFP(log, V, Ty);
        else if ((Name == "log10" && V > 0 && TLI->has(LibFunc_log10)) ||
                 (Name == "log10f" && V > 0 && TLI->has(LibFunc_log10f)))
          return ConstantFoldFP(log10, V, Ty);
        break;
      case 'r':
        if ((Name == "round" && TLI->has(LibFunc_round)) ||
            (Name == "roundf" && TLI->has(LibFunc_roundf)))
          return ConstantFoldFP(round, V, Ty);
        break;
      case 's':
        if ((Name == "sin" && TLI->has(LibFunc_sin)) ||
            (Name == "sinf" && TLI->has(LibFunc_sinf)))
          return ConstantFoldFP(sin, V, Ty);
        else if ((Name == "sinh" && TLI->has(LibFunc_sinh)) ||
                 (Name == "sinhf" && TLI->has(LibFunc_sinhf)))
          return ConstantFoldFP(sinh, V, Ty);
        else if ((Name == "sqrt" && V >= 0 && TLI->has(LibFunc_sqrt)) ||
                 (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc_sqrtf)))
          return ConstantFoldFP(sqrt, V, Ty);
        break;
      case 't':
        if ((Name == "tan" && TLI->has(LibFunc_tan)) ||
            (Name == "tanf" && TLI->has(LibFunc_tanf)))
          return ConstantFoldFP(tan, V, Ty);
        else if ((Name == "tanh" && TLI->has(LibFunc_tanh)) ||
                 (Name == "tanhf" && TLI->has(LibFunc_tanhf)))
          return ConstantFoldFP(tanh, V, Ty);
        break;
      default:
        break;
      }
      return nullptr;
    }

    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      switch (IntrinsicID) {
      case Intrinsic::bswap:
        return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
      case Intrinsic::ctpop:
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      case Intrinsic::bitreverse:
        return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
      case Intrinsic::convert_from_fp16: {
        APFloat Val(APFloat::IEEEhalf(), Op->getValue());

        bool lost = false;
        APFloat::opStatus status = Val.convert(
            Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

        // Conversion is always precise.
        (void)status;
        assert(status == APFloat::opOK && !lost &&
               "Precision lost during fp16 constfolding");

        return ConstantFP::get(Ty->getContext(), Val);
      }
      default:
        return nullptr;
      }
    }

1745 // Support ConstantVector in case we have an Undef in the top.
1746 if (isa<ConstantVector>(Operands[0]) ||
1747 isa<ConstantDataVector>(Operands[0])) {
      auto *Op = cast<Constant>(Operands[0]);
      switch (IntrinsicID) {
      default: break;
      case Intrinsic::x86_sse_cvtss2si:
      case Intrinsic::x86_sse_cvtss2si64:
      case Intrinsic::x86_sse2_cvtsd2si:
      case Intrinsic::x86_sse2_cvtsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/false, Ty);
        break;
      case Intrinsic::x86_sse_cvttss2si:
      case Intrinsic::x86_sse_cvttss2si64:
      case Intrinsic::x86_sse2_cvttsd2si:
      case Intrinsic::x86_sse2_cvttsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                             /*roundTowardZero=*/true, Ty);
        break;
      }
    }
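
    // (The cvt forms fold with rounding to nearest, the cvtt forms truncate
    // toward zero: cvtss2si on 2.7 yields 3, cvttss2si on 2.7 yields 2.)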
    if (isa<UndefValue>(Operands[0])) {
      // bswap and bitreverse of undef are simply undef.
      if (IntrinsicID == Intrinsic::bswap ||
          IntrinsicID == Intrinsic::bitreverse)
        return Operands[0];
      return nullptr;
    }

    return nullptr;
  }

  if (Operands.size() == 2) {
    if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;
      double Op1V = getValueAsDouble(Op1);

      if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        if (Op2->getType() != Op1->getType())
          return nullptr;

        double Op2V = getValueAsDouble(Op2);
        if (IntrinsicID == Intrinsic::pow) {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        }
        if (IntrinsicID == Intrinsic::copysign) {
          APFloat V1 = Op1->getValueAPF();
          const APFloat &V2 = Op2->getValueAPF();
          V1.copySign(V2);
          return ConstantFP::get(Ty->getContext(), V1);
        }

        if (IntrinsicID == Intrinsic::minnum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
        }

        if (IntrinsicID == Intrinsic::maxnum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
        }
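        // (minnum/maxnum follow IEEE-754 minNum/maxNum semantics: when
        // exactly one operand is NaN, the other operand is returned, so
        // minnum(NaN, 1.0) folds to 1.0.)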

        if (!TLI)
          return nullptr;
        if ((Name == "pow" && TLI->has(LibFunc_pow)) ||
            (Name == "powf" && TLI->has(LibFunc_powf)))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        if ((Name == "fmod" && TLI->has(LibFunc_fmod)) ||
            (Name == "fmodf" && TLI->has(LibFunc_fmodf)))
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
        if ((Name == "atan2" && TLI->has(LibFunc_atan2)) ||
            (Name == "atan2f" && TLI->has(LibFunc_atan2f)))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
      } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((double)std::pow((double)Op1V,
                                                 (int)Op2C->getZExtValue())));
      }
      return nullptr;
    }

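    // (powi takes a fixed i32 exponent, e.g. powi(2.0, 10) folds to 1024.0;
    // the half and float variants deliberately evaluate in float precision.)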
    if (auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
      if (auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::sadd_with_overflow:
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::ssub_with_overflow:
        case Intrinsic::usub_with_overflow:
        case Intrinsic::smul_with_overflow:
        case Intrinsic::umul_with_overflow: {
          APInt Res;
          bool Overflow;
          switch (IntrinsicID) {
          default: llvm_unreachable("Invalid case");
          case Intrinsic::sadd_with_overflow:
            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::uadd_with_overflow:
            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::ssub_with_overflow:
            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::usub_with_overflow:
            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::smul_with_overflow:
            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::umul_with_overflow:
            Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
            break;
          }
          Constant *Ops[] = {
            ConstantInt::get(Ty->getContext(), Res),
            ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
          };
          return ConstantStruct::get(cast<StructType>(Ty), Ops);
        }
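        // For example, the IR call
        //   %r = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 2147483647,
        //                                                      i32 1)
        // folds to the constant struct { i32 -2147483648, i1 true }.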
        case Intrinsic::cttz:
          if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
        case Intrinsic::ctlz:
          if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
        }
      }

      return nullptr;
    }
    return nullptr;
  }

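  // (The second operand of cttz/ctlz is the is_zero_undef flag. For nonzero
  // inputs the count is exact: cttz of i32 8 folds to 3 and ctlz of i32 8
  // folds to 28.)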
  if (Operands.size() != 3)
    return nullptr;

  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = Op1->getValueAPF();
          APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(),
                                                   Op3->getValueAPF(),
                                                   APFloat::rmNearestTiesToEven);
          if (s != APFloat::opInvalidOp)
            return ConstantFP::get(Ty->getContext(), V);

          return nullptr;
        }
        }
      }
    }
  }

  return nullptr;
}
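
// (fusedMultiplyAdd computes a * b + c with a single rounding, so e.g.
// fma(2.0, 3.0, 1.0) folds to exactly 7.0; only an invalid-operation result,
// such as fma(inf, 0.0, 1.0), blocks the fold.)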

Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
                                 VectorType *VTy, ArrayRef<Constant *> Operands,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI) {
  SmallVector<Constant *, 4> Result(VTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = VTy->getElementType();

  if (IntrinsicID == Intrinsic::masked_load) {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, VTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        // An undef mask lane may take either value; use whichever constant
        // is available and move on to the next lane.
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
        continue;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != VTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }

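  // (Sketch: a masked.load of a constant global with mask <i1 1, i1 0> and
  // passthru <i32 undef, i32 7> takes lane 0 from the loaded data and folds
  // lane 1 to i32 7.)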
  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}
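
// (Lane-wise example: ctpop on <2 x i32> <i32 3, i32 255> folds to
// <2 x i32> <i32 2, i32 8>.)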

} // end anonymous namespace

Constant *
llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
                       const TargetLibraryInfo *TLI) {
  if (!F->hasName())
    return nullptr;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
                                  F->getParent()->getDataLayout(), TLI);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI);
}
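
// A minimal caller-side sketch (hypothetical pass code, not part of this
// file): given a CallInst CI whose arguments are all Constants, collected
// in a SmallVector ArgOps:
//   if (Function *Callee = CI->getCalledFunction())
//     if (canConstantFoldCallTo(Callee))
//       if (Constant *C = ConstantFoldCall(Callee, ArgOps, TLI))
//         CI->replaceAllUsesWith(C);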

bool llvm::isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI) {
  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
  // (and to some extent ConstantFoldScalarCall).
  Function *F = CS.getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;

  if (CS.getNumArgOperands() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(CS.getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();

      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-745.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(709.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-103.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(88.0f)) != APFloat::cmpGreaterThan;
        break;
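      // (exp overflows double a little above 709.78 = ln(DBL_MAX) and
      // underflows to zero below roughly -745, so arguments within
      // [-745, 709] cannot set errno; the float bounds follow the same
      // reasoning for FLT_MAX.)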

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-1074.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(1023.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-149.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(127.0f)) != APFloat::cmpGreaterThan;
        break;
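      // (These bounds track the binary exponent range exactly: 2^-1074 is
      // the smallest double subnormal and 2^1023 is finite, and likewise
      // 2^-149 and 2^127 for float.)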

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          double OpV = getValueAsDouble(OpC);
          return ConstantFoldFP(tan, OpV, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        return Op.compare(APFloat(Op.getSemantics(), "-1")) !=
                   APFloat::cmpLessThan &&
               Op.compare(APFloat(Op.getSemantics(), "1")) !=
                   APFloat::cmpGreaterThan;
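      // (asin and acos have real results only on the closed interval
      // [-1, 1], so any argument inside that range cannot raise a domain
      // error.)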

      case LibFunc_sinhl:
      case LibFunc_sinh:
      case LibFunc_sinhf:
      case LibFunc_coshl:
      case LibFunc_cosh:
      case LibFunc_coshf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-710.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(710.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-89.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(89.0f)) != APFloat::cmpGreaterThan;
        break;
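      // (For large |x|, sinh(x) and cosh(x) grow like e^|x| / 2, which stays
      // finite in double up to ln(2 * DBL_MAX), roughly 710.5, and in float
      // up to roughly 89.4, hence the bounds above.)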

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (CS.getNumArgOperands() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(CS.getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(CS.getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType()) {
            double Op0V = getValueAsDouble(Op0C);
            double Op1V = getValueAsDouble(Op1C);
            return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
          }
        }
        break;
      }

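      // (fmod can raise a domain error only when x is infinite or y is zero;
      // NaN operands simply propagate, so those calls are also noops.)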
      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      default:
        break;
      }
    }
  }

  return false;
}