//===-- ConstantFolding.cpp - Fold instructions into constants -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}

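// For example, on a little-endian target, folding
//   bitcast (<2 x i16> <i16 1, i16 2> to i32)
// visits element 1 first, building Result as (2 << 16) | 1 == 0x00020001,
// which matches the in-memory byte order of the source vector.
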
/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = VTy->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to vector of int
      // to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = VectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first. We only want to think about integer here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = VectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer, if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = VectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(C->getType()->getVectorElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

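// For example, 'getelementptr ([5 x i32]* @a, i32 0, i32 3)' is reported as
// the pair (@a, 12): three i32 elements at 4 bytes each.
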
Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill down
    // and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()) == 0);
      C = ElemC;
    } else {
      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
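/// For example, copying an i32 with value 0x01020304 at ByteOffset 0 on a
/// little-endian target stores the bytes 04 03 02 01 into CurPtr.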
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail padding,
      // read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element, if so we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = C->getType()->getSequentialElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (auto *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = C->getType()->getVectorNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

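// The helper below implements a union-style reinterpretation: for example, a
// float load from an i32 global can be folded by reading the initializer's
// bytes as an i32 and bitcasting the result, without materializing any load.
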
Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64 load
    // and then bitcast the result. This can be useful for union cases. Note
    // that address spaces don't matter here since we're not going to result in
    // an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(C->getContext(),
                                     DL.getTypeSizeInBits(LoadTy));
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy())
        // Materializing a zero can be done trivially without a bitcast
        return Constant::getNullValue(LoadTy);
      Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy)
                                                  : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For vector of pointer, we needed to first convert to a vector of
        // integer, then do vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize =
      DL.getTypeAllocSize(GV->getInitializer()->getType());

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return UndefValue::get(IntType);

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                             const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
  if (!SrcPtrTy)
    return nullptr;
  Type *SrcTy = SrcPtrTy->getPointerElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
  if (!C)
    return nullptr;

  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
                ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
      return LoadedC;

  // Instead of loading constant c string, use corresponding integer value
  // directly if string length is small enough.
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append NULL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}

namespace {

Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);

  return nullptr;
}

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0.Zero |= Known1.Zero;
    Known0.One &= Known1.One;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

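// Worked example for the Sub case above: with i32 elements, &A[123] - &A[4]
// gives Offs1 == 492 and Offs2 == 16, so the whole expression folds to the
// integer constant 476.
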
/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntIdxTy
                          : IntIdxScalarTy;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
    C = Folded;

  return C;
}

/// Strip the pointer casts, but preserve the address space information.
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized())
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntIdxTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
            Res = FoldedRes;
          return Res;
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
        Ty = ATy->getElementType();
      } else {
        // We've reached some non-indexable type.
        break;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0));
      } else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down), to compute the index at this level.
        bool Overflow = false;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}

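// For example, the over-indexed expression
//   getelementptr ([10 x [5 x i32]], [10 x [5 x i32]]* @g, i64 0, i64 0, i64 12)
// computes byte offset 48, which this routine re-forms with in-range indices
// as 'getelementptr ([10 x [5 x i32]], [10 x [5 x i32]]* @g, i64 0, i64 2, i64 2)'.
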
/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned, if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantExpr::getExtractValue(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return nullptr;

  SmallVector<Constant *, 8> Ops;
  for (const Use &NewU : C->operands()) {
    auto *NewC = cast<Constant>(&NewU);
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(NewC) || isa<ConstantExpr>(NewC)) {
      auto It = FoldedOps.find(NewC);
      if (It == FoldedOps.end()) {
        if (auto *FoldedC =
                ConstantFoldConstantImpl(NewC, DL, TLI, FoldedOps)) {
          FoldedOps.insert({NewC, FoldedC});
          NewC = FoldedC;
        } else {
          FoldedOps.insert({NewC, NewC});
        }
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies if
      // all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      if (auto *FoldedC = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps))
        C = FoldedC;
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants, if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    if (auto *FoldedOp = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps))
      Op = FoldedOp;
    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size, otherwise
        // there is a truncation or extension that we aren't modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantExpr::get(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}

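// Worked example for the PtrToInt case above: with 32-bit pointers, the pair
//   ptrtoint (inttoptr (i64 4294967297 to i32*) to i64)
// keeps only the low 32 bits of the input and folds to 'i64 1'.
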
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return C;
}

Constant *
llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                        ArrayRef<Constant *> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (Constant *Index : Indices) {
    C = C->getAggregateElement(Index);
    if (!C)
      return nullptr;
  }
  return C;
}

//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//===----------------------------------------------------------------------===//

bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::round:
  case Intrinsic::masked_load:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::bitreverse:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
  case Intrinsic::is_constant:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName())
    return false;

  // In these cases, the check of the length is required. We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal to
  // "cos", but has length 8.
  StringRef Name = F->getName();

  switch (Name[0]) {
  default: return false;
  case 'a':
1496 return Name == "acos" || Name == "acosf" ||
1497 Name == "asin" || Name == "asinf" ||
1498 Name == "atan" || Name == "atanf" ||
1499 Name == "atan2" || Name == "atan2f";
1501 return Name == "ceil" || Name == "ceilf" ||
1502 Name == "cos" || Name == "cosf" ||
1503 Name == "cosh" || Name == "coshf";
1505 return Name == "exp" || Name == "expf" ||
1506 Name == "exp2" || Name == "exp2f";
1508 return Name == "fabs" || Name == "fabsf" ||
1509 Name == "floor" || Name == "floorf" ||
1510 Name == "fmod" || Name == "fmodf";
1512 return Name == "log" || Name == "logf" ||
1513 Name == "log2" || Name == "log2f" ||
1514 Name == "log10" || Name == "log10f";
1516 return Name == "nearbyint" || Name == "nearbyintf";
1518 return Name == "pow" || Name == "powf";
1520 return Name == "rint" || Name == "rintf" ||
1521 Name == "round" || Name == "roundf";
1523 return Name == "sin" || Name == "sinf" ||
1524 Name == "sinh" || Name == "sinhf" ||
1525 Name == "sqrt" || Name == "sqrtf";
1527 return Name == "tan" || Name == "tanf" ||
1528 Name == "tanh" || Name == "tanhf" ||
1529 Name == "trunc" || Name == "truncf";
    // Check for various function names that get used for the math functions
    // when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled.
    // The '12' here is the length of the shortest name that can match.
    // We need to check the size before looking at Name[1] and Name[2]
    // so we may as well check a limit that will eliminate mismatches.
    if (Name.size() < 12 || Name[1] != '_')
      return false;

    switch (Name[2]) {
    default: return false;
    case 'a':
1543 return Name == "__acos_finite" || Name == "__acosf_finite" ||
1544 Name == "__asin_finite" || Name == "__asinf_finite" ||
1545 Name == "__atan2_finite" || Name == "__atan2f_finite";
1547 return Name == "__cosh_finite" || Name == "__coshf_finite";
1549 return Name == "__exp_finite" || Name == "__expf_finite" ||
1550 Name == "__exp2_finite" || Name == "__exp2f_finite";
1552 return Name == "__log_finite" || Name == "__logf_finite" ||
1553 Name == "__log10_finite" || Name == "__log10f_finite";
1555 return Name == "__pow_finite" || Name == "__powf_finite";
1557 return Name == "__sinh_finite" || Name == "__sinhf_finite";
Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy() || Ty->isFloatTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

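// For example, ConstantFoldFP(log, -1.0, Ty) is expected to return nullptr
// on hosts whose log() reports the domain error via errno (EDOM) or an FP
// exception: the bogus host result is discarded rather than folded.
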
Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
                               double W, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
                                              : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr; // Can't constant fold this.
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}

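// For example, converting 2.5 with roundTowardZero == true (the cvttss2si
// family) yields 2; with the default rounding it also yields 2 because ties
// go to even, whereas 3.5 yields 3 when truncating and 4 otherwise.
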
double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isFloatTy())
    return Op->getValueAPF().convertToFloat();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static bool isManifestConstant(const Constant *c) {
  if (isa<ConstantData>(c)) {
    return true;
  } else if (isa<ConstantAggregate>(c) || isa<ConstantExpr>(c)) {
    for (const Value *subc : c->operand_values()) {
      if (!isManifestConstant(cast<Constant>(subc)))
        return false;
    }
    return true;
  }
  return false;
}

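// Note that the address of a global is not a manifest constant: e.g.
// 'ptrtoint (i8* @g to i64)' depends on where @g is placed in memory, so
// llvm.is.constant is folded to true only for values built entirely from
// ConstantData leaves.
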
static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    C = nullptr;
    return true;
  }
  return false;
}

static Constant *ConstantFoldScalarCall1(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 1 && "Wrong number of operands.");

  if (IntrinsicID == Intrinsic::is_constant) {
    // We know we have a "Constant" argument. But we want to only
    // return true for manifest constants, not those that depend on
    // constants with unknowable values, e.g. GlobalValue or BlockAddress.
    if (isManifestConstant(Operands[0]))
      return ConstantInt::getTrue(Ty->getContext());
    return nullptr;
  }
  if (isa<UndefValue>(Operands[0])) {
    // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
    // ctpop() is between 0 and bitwidth, pick 0 for undef.
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)
      return Operands[0];
  }

  if (isa<ConstantPointerNull>(Operands[0])) {
    // launder(null) == null == strip(null) iff in addrspace 0
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
      // If instruction is not yet put in a basic block (e.g. when cloning
      // a function during inlining), Call's caller may not be available.
      // So check Call's BB first before querying Call->getCaller.
      const Function *Caller =
          Call->getParent() ? Call->getCaller() : nullptr;
      if (Caller &&
          !NullPointerIsDefined(
              Caller, Operands[0]->getType()->getPointerAddressSpace())) {
        return Operands[0];
      }
      return nullptr;
    }
  }


  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      APFloat Val(Op->getValueAPF());

      bool lost = false;
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    }
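    // E.g. llvm.convert.to.fp16.f32(float 1.0) folds to i16 0x3C00, the
    // IEEE half encoding of 1.0 (sign 0, biased exponent 15, mantissa 0).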

    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;

    // Use internal versions of these intrinsics.
    APFloat U = Op->getValueAPF();
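    // Worked example for the rounding folds below, with U == 2.5:
    // nearbyint/rint give 2.0 (ties to even), round gives 3.0 (ties away
    // from zero), ceil gives 3.0, floor and trunc give 2.0.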

    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::fabs) {
      U.clearSign();
      return ConstantFP::get(Ty->getContext(), U);
    }

    /// We only fold functions with finite arguments. Folding NaN and inf is
    /// likely to be aborted with an exception anyway, and some host libms
    /// have known errors raising exceptions.
    if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
      return nullptr;

    /// Currently APFloat versions of these functions do not exist, so we use
    /// the host native double versions. Float versions are not called
    /// directly but for all these it is true (float)(f((double)arg)) ==
    /// f(arg). Long double not supported yet.
    double V = getValueAsDouble(Op);

    switch (IntrinsicID) {
      default: break;
      case Intrinsic::log:
        return ConstantFoldFP(log, V, Ty);
      case Intrinsic::log2:
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(Log2, V, Ty);
      case Intrinsic::log10:
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log10, V, Ty);
      case Intrinsic::exp:
        return ConstantFoldFP(exp, V, Ty);
      case Intrinsic::exp2:
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
      case Intrinsic::sin:
        return ConstantFoldFP(sin, V, Ty);
      case Intrinsic::cos:
        return ConstantFoldFP(cos, V, Ty);
      case Intrinsic::sqrt:
        return ConstantFoldFP(sqrt, V, Ty);
    }

    if (!TLI)
      return nullptr;

    LibFunc Func = NotLibFunc;
    TLI->getLibFunc(Name, Func);
    switch (Func) {
    default:
      break;
    case LibFunc_acos:
    case LibFunc_acosf:
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      if (V >= -1.0 && V <= 1.0 && TLI->has(Func))
        return ConstantFoldFP(acos, V, Ty);
      break;
    case LibFunc_asin:
    case LibFunc_asinf:
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      if (V >= -1.0 && V <= 1.0 && TLI->has(Func))
        return ConstantFoldFP(asin, V, Ty);
      break;
    case LibFunc_atan:
    case LibFunc_atanf:
      if (TLI->has(Func))
        return ConstantFoldFP(atan, V, Ty);
      break;
    case LibFunc_ceil:
    case LibFunc_ceilf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_cos:
    case LibFunc_cosf:
      if (TLI->has(Func))
        return ConstantFoldFP(cos, V, Ty);
      break;
    case LibFunc_cosh:
    case LibFunc_coshf:
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(cosh, V, Ty);
      break;
    case LibFunc_exp:
    case LibFunc_expf:
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(exp, V, Ty);
      break;
    case LibFunc_exp2:
    case LibFunc_exp2f:
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      if (TLI->has(Func))
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
      break;
    case LibFunc_fabs:
    case LibFunc_fabsf:
      if (TLI->has(Func)) {
        U.clearSign();
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_floor:
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_log:
    case LibFunc_logf:
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      if (V > 0.0 && TLI->has(Func))
        return ConstantFoldFP(log, V, Ty);
      break;
    case LibFunc_log2:
    case LibFunc_log2f:
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      if (V > 0.0 && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(Log2, V, Ty);
      break;
    case LibFunc_log10:
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      if (V > 0.0 && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log10, V, Ty);
      break;
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
    case LibFunc_rint:
    case LibFunc_rintf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_round:
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_sin:
    case LibFunc_sinf:
      if (TLI->has(Func))
        return ConstantFoldFP(sin, V, Ty);
      break;
    case LibFunc_sinh:
    case LibFunc_sinhf:
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(sinh, V, Ty);
      break;
    case LibFunc_sqrt:
    case LibFunc_sqrtf:
      if (V >= 0.0 && TLI->has(Func))
        return ConstantFoldFP(sqrt, V, Ty);
      break;
    case LibFunc_tan:
    case LibFunc_tanf:
      if (TLI->has(Func))
        return ConstantFoldFP(tan, V, Ty);
      break;
    case LibFunc_tanh:
    case LibFunc_tanhf:
      if (TLI->has(Func))
        return ConstantFoldFP(tanh, V, Ty);
      break;
    case LibFunc_trunc:
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    }
    return nullptr;
  }

  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::bswap:
      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    case Intrinsic::ctpop:
      return ConstantInt::get(Ty, Op->getValue().countPopulation());
    case Intrinsic::bitreverse:
      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
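    // The folds above are pure bit permutations or counts: e.g.
    // bswap(i32 0x12345678) gives 0x78563412 (byte order reversed), and
    // bitreverse(i32 0x12345678) reverses individual bits, giving 0x1E6A2C48.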
    case Intrinsic::convert_from_fp16: {
      APFloat Val(APFloat::IEEEhalf(), Op->getValue());

      bool lost = false;
      APFloat::opStatus status = Val.convert(
          Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

      // Conversion is always precise.
      (void)status;
      assert(status == APFloat::opOK && !lost &&
             "Precision lost during fp16 constfolding");

      return ConstantFP::get(Ty->getContext(), Val);
    }
    default:
      return nullptr;
    }
  }

  // Support ConstantVector in case we have an Undef in the top.
  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/true);
      break;
    }
  }

  return nullptr;
}

static Constant *ConstantFoldScalarCall2(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 2 && "Wrong number of operands.");

  if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;
    double Op1V = getValueAsDouble(Op1);

    if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
        return nullptr;

      double Op2V = getValueAsDouble(Op2);
      if (IntrinsicID == Intrinsic::pow) {
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      }
      if (IntrinsicID == Intrinsic::copysign) {
        APFloat V1 = Op1->getValueAPF();
        const APFloat &V2 = Op2->getValueAPF();
        V1.copySign(V2);
        return ConstantFP::get(Ty->getContext(), V1);
      }
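      // E.g. copysign(-3.0, 1.0) folds to 3.0: the magnitude of the first
      // operand combined with the sign of the second.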

      if (IntrinsicID == Intrinsic::minnum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::maxnum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::minimum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), minimum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::maximum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), maximum(C1, C2));
      }
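      // Note the NaN handling difference between the two families above:
      // minnum/maxnum return the non-NaN operand (minnum(NaN, 1.0) == 1.0),
      // while minimum/maximum propagate NaN and order -0.0 before +0.0.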

      if (!TLI)
        return nullptr;

      LibFunc Func = NotLibFunc;
      TLI->getLibFunc(Name, Func);
      switch (Func) {
      default:
        break;
      case LibFunc_pow:
      case LibFunc_powf:
      case LibFunc_pow_finite:
      case LibFunc_powf_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        break;
      case LibFunc_fmod:
      case LibFunc_fmodf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2_finite:
      case LibFunc_atan2f_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
        break;
      }
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
        return ConstantFP::get(Ty->getContext(),
                               APFloat((float)std::pow((float)Op1V,
                                                       (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
        return ConstantFP::get(Ty->getContext(),
                               APFloat((float)std::pow((float)Op1V,
                                                       (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
        return ConstantFP::get(Ty->getContext(),
                               APFloat((double)std::pow((double)Op1V,
                                                        (int)Op2C->getZExtValue())));
    }
    return nullptr;
  }

  if (Operands[0]->getType()->isIntegerTy() &&
      Operands[1]->getType()->isIntegerTy()) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    switch (IntrinsicID) {
    default: break;
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
      // X - undef -> { undef, false }
      // undef - X -> { undef, false }
      // X + undef -> { undef, false }
      // undef + x -> { undef, false }
      if (!C0 || !C1)
        return ConstantStruct::get(
            cast<StructType>(Ty),
            {UndefValue::get(Ty->getStructElementType(0)),
             Constant::getNullValue(Ty->getStructElementType(1))});
      LLVM_FALLTHROUGH;
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      // undef * X -> { 0, false }
      // X * undef -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);

      APInt Res;
      bool Overflow;
      switch (IntrinsicID) {
      default: llvm_unreachable("Invalid case");
      case Intrinsic::sadd_with_overflow:
        Res = C0->sadd_ov(*C1, Overflow);
        break;
      case Intrinsic::uadd_with_overflow:
        Res = C0->uadd_ov(*C1, Overflow);
        break;
      case Intrinsic::ssub_with_overflow:
        Res = C0->ssub_ov(*C1, Overflow);
        break;
      case Intrinsic::usub_with_overflow:
        Res = C0->usub_ov(*C1, Overflow);
        break;
      case Intrinsic::smul_with_overflow:
        Res = C0->smul_ov(*C1, Overflow);
        break;
      case Intrinsic::umul_with_overflow:
        Res = C0->umul_ov(*C1, Overflow);
        break;
      }
      Constant *Ops[] = {
        ConstantInt::get(Ty->getContext(), Res),
        ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
      };
      return ConstantStruct::get(cast<StructType>(Ty), Ops);
    }
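    // The cases above fold e.g. @llvm.sadd.with.overflow.i8(i8 100, i8 100)
    // to { i8 -56, i1 true }: the sum 200 wraps to -56 and the overflow bit
    // is set.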
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getAllOnesValue(Ty);
      if (IntrinsicID == Intrinsic::uadd_sat)
        return ConstantInt::get(Ty, C0->uadd_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->sadd_sat(*C1));
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::usub_sat)
        return ConstantInt::get(Ty, C0->usub_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->ssub_sat(*C1));
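    // The saturating cases above clamp instead of wrapping: e.g.
    // @llvm.uadd.sat.i8(i8 250, i8 10) folds to i8 255 and
    // @llvm.usub.sat.i8(i8 5, i8 10) folds to i8 0.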
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      assert(C1 && "Must be constant int");

      // cttz(0, 1) and ctlz(0, 1) are undef.
      if (C1->isOneValue() && (!C0 || C0->isNullValue()))
        return UndefValue::get(Ty);
      if (!C0)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::cttz)
        return ConstantInt::get(Ty, C0->countTrailingZeros());
      else
        return ConstantInt::get(Ty, C0->countLeadingZeros());
    }

    return nullptr;
  }

  // Support ConstantVector in case we have an Undef in the top.
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      // Check for default rounding mode.
      // FIXME: Support other rounding modes?
      isa<ConstantInt>(Operands[1]) &&
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
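    // (An immediate of 4 is _MM_FROUND_CUR_DIRECTION, i.e. use the current
    // MXCSR rounding mode, which is round-to-nearest-even in the default
    // environment.)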
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/false);
      break;
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/false);
      break;
    }
  }

  return nullptr;
}

static Constant *ConstantFoldScalarCall3(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 3 && "Wrong number of operands.");

  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = Op1->getValueAPF();
          V.fusedMultiplyAdd(Op2->getValueAPF(), Op3->getValueAPF(),
                             APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
      }
    }
  }
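  // Note: the fold above rounds only once, matching llvm.fma's semantics
  // (and one of the lowerings permitted for llvm.fmuladd); folding as a
  // separate fmul + fadd could differ in the last ulp.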

  if (const auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantInt>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::smul_fix:
        case Intrinsic::smul_fix_sat: {
          // This code performs rounding towards negative infinity in case the
          // result cannot be represented exactly for the given scale. Targets
          // that do care about rounding should use a target hook for specifying
          // how rounding should be done, and provide their own folding to be
          // consistent with rounding. This is the same approach as used by
          // DAGTypeLegalizer::ExpandIntRes_MULFIX.
          APInt Lhs = Op1->getValue();
          APInt Rhs = Op2->getValue();
          unsigned Scale = Op3->getValue().getZExtValue();
          unsigned Width = Lhs.getBitWidth();
          assert(Scale < Width && "Illegal scale.");
          unsigned ExtendedWidth = Width * 2;
          APInt Product = (Lhs.sextOrSelf(ExtendedWidth) *
                           Rhs.sextOrSelf(ExtendedWidth)).ashr(Scale);
          if (IntrinsicID == Intrinsic::smul_fix_sat) {
            APInt MaxValue =
                APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
            APInt MinValue =
                APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
            Product = APIntOps::smin(Product, MaxValue);
            Product = APIntOps::smax(Product, MinValue);
          }
          return ConstantInt::get(Ty->getContext(),
                                  Product.sextOrTrunc(Width));
        }
        }
      }
    }
  }
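  // Worked example for the smul_fix fold above:
  // @llvm.smul.fix.i8(i8 24, i8 24, i32 4) treats both operands as Q4.4
  // values (24 == 1.5 * 16); the doubled-width product 576 shifted right by
  // the scale gives 36, i.e. 2.25 in Q4.4, exactly 1.5 * 1.5.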

  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted as modulo the bitwidth. If the shift
    // amount is effectively 0, avoid UB due to oversized inverse shift below.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
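    // E.g. fshl(i8 0xAB, i8 0xCD, i8 1) shifts the 16-bit concatenation
    // left by one and keeps the high byte:
    // (0xAB << 1) | (0xCD >> 7) == 0x56 | 0x01 == 0x57.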
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }

  return nullptr;
}

static Constant *ConstantFoldScalarCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        Type *Ty,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 2)
    return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);

  return nullptr;
}

static Constant *ConstantFoldVectorCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        VectorType *VTy,
                                        ArrayRef<Constant *> Operands,
                                        const DataLayout &DL,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  SmallVector<Constant *, 4> Result(VTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = VTy->getElementType();

  if (IntrinsicID == Intrinsic::masked_load) {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];
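    // Fold lane by lane: a true mask lane selects the loaded element, a
    // false lane selects the passthru element. E.g. with mask <i1 1, i1 0>
    // and passthru <i32 7, i32 7>, loading <i32 1, i32 2> folds to
    // <i32 1, i32 7>.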

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, VTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != VTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }

  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // Some intrinsics use a scalar type for certain arguments.
      if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
        Lane[J] = Operands[J];
        continue;
      }

      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI) {
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return nullptr;
  if (!F->hasName())
    return nullptr;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
                                  F->getParent()->getDataLayout(), TLI, Call);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
                                Call);
}

bool llvm::isMathLibCallNoop(const CallBase *Call,
                             const TargetLibraryInfo *TLI) {
  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
  // (and to some extent ConstantFoldScalarCall).
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;

  if (Call->getNumArgOperands() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
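        // (Safe inner bounds: exp overflows IEEE double just above 709.78
        // and float just above 88.72; results stay nonzero above roughly
        // -745 for double and -103 for float.)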
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-745.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(709.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-103.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(88.0f)) != APFloat::cmpGreaterThan;
        break;

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
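        // (2^x is representable up to x == 1023 for double and x == 127 for
        // float; the smallest positive denormals are 2^-1074 and 2^-149.)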
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-1074.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(1023.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-149.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(127.0f)) != APFloat::cmpGreaterThan;
        break;

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          double OpV = getValueAsDouble(OpC);
          return ConstantFoldFP(tan, OpV, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        return Op.compare(APFloat(Op.getSemantics(), "-1")) !=
                   APFloat::cmpLessThan &&
               Op.compare(APFloat(Op.getSemantics(), "1")) !=
                   APFloat::cmpGreaterThan;

      case LibFunc_sinhl:
      case LibFunc_sinh:
      case LibFunc_sinhf:
      case LibFunc_coshl:
      case LibFunc_cosh:
      case LibFunc_coshf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-710.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(710.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-89.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(89.0f)) != APFloat::cmpGreaterThan;
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (Call->getNumArgOperands() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType()) {
            double Op0V = getValueAsDouble(Op0C);
            double Op1V = getValueAsDouble(Op1C);
            return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
          }
        }
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());