1 //===- InstCombineCalls.cpp -----------------------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements the visitCall, visitInvoke, and visitCallBr functions.
11 //===----------------------------------------------------------------------===//
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APFloat.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/ADT/Twine.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/MemoryBuiltins.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/Analysis/VectorUtils.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/Constant.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/DerivedTypes.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/GlobalVariable.h"
38 #include "llvm/IR/InstrTypes.h"
39 #include "llvm/IR/Instruction.h"
40 #include "llvm/IR/Instructions.h"
41 #include "llvm/IR/IntrinsicInst.h"
42 #include "llvm/IR/Intrinsics.h"
43 #include "llvm/IR/IntrinsicsX86.h"
44 #include "llvm/IR/IntrinsicsARM.h"
45 #include "llvm/IR/IntrinsicsAArch64.h"
46 #include "llvm/IR/IntrinsicsNVPTX.h"
47 #include "llvm/IR/IntrinsicsAMDGPU.h"
48 #include "llvm/IR/IntrinsicsPowerPC.h"
49 #include "llvm/IR/LLVMContext.h"
50 #include "llvm/IR/Metadata.h"
51 #include "llvm/IR/PatternMatch.h"
52 #include "llvm/IR/Statepoint.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/User.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/IR/ValueHandle.h"
57 #include "llvm/Support/AtomicOrdering.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Compiler.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/KnownBits.h"
64 #include "llvm/Support/MathExtras.h"
65 #include "llvm/Support/raw_ostream.h"
66 #include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
67 #include "llvm/Transforms/Utils/Local.h"
68 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
77 using namespace PatternMatch;
79 #define DEBUG_TYPE "instcombine"
81 STATISTIC(NumSimplified, "Number of library calls simplified");
83 static cl::opt<unsigned> GuardWideningWindow(
84 "instcombine-guard-widening-window",
86 cl::desc("How wide an instruction window to bypass looking for "
89 /// Return the specified type promoted as it would be to pass though a va_arg
91 static Type *getPromotedType(Type *Ty) {
92 if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
93 if (ITy->getBitWidth() < 32)
94 return Type::getInt32Ty(Ty->getContext());
99 /// Return a constant boolean vector that has true elements in all positions
100 /// where the input constant data vector has an element with the sign bit set.
101 static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
102 SmallVector<Constant *, 32> BoolVec;
103 IntegerType *BoolTy = Type::getInt1Ty(V->getContext());
104 for (unsigned I = 0, E = V->getNumElements(); I != E; ++I) {
105 Constant *Elt = V->getElementAsConstant(I);
106 assert((isa<ConstantInt>(Elt) || isa<ConstantFP>(Elt)) &&
107 "Unexpected constant data vector element type");
108 bool Sign = V->getElementType()->isIntegerTy()
109 ? cast<ConstantInt>(Elt)->isNegative()
110 : cast<ConstantFP>(Elt)->isNegative();
111 BoolVec.push_back(ConstantInt::get(BoolTy, Sign));
113 return ConstantVector::get(BoolVec);
116 Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
117 unsigned DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
118 unsigned CopyDstAlign = MI->getDestAlignment();
119 if (CopyDstAlign < DstAlign){
120 MI->setDestAlignment(DstAlign);
124 unsigned SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
125 unsigned CopySrcAlign = MI->getSourceAlignment();
126 if (CopySrcAlign < SrcAlign) {
127 MI->setSourceAlignment(SrcAlign);
131 // If we have a store to a location which is known constant, we can conclude
132 // that the store must be storing the constant value (else the memory
133 // wouldn't be constant), and this must be a noop.
134 if (AA->pointsToConstantMemory(MI->getDest())) {
135 // Set the size of the copy to 0, it will be deleted on the next iteration.
136 MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
140 // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
142 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
143 if (!MemOpLength) return nullptr;
145 // Source and destination pointer types are always "i8*" for intrinsic. See
146 // if the size is something we can handle with a single primitive load/store.
147 // A single load+store correctly handles overlapping memory in the memmove
149 uint64_t Size = MemOpLength->getLimitedValue();
150 assert(Size && "0-sized memory transferring should be removed already.");
152 if (Size > 8 || (Size&(Size-1)))
153 return nullptr; // If not 1/2/4/8 bytes, exit.
155 // If it is an atomic and alignment is less than the size then we will
156 // introduce the unaligned memory access which will be later transformed
157 // into libcall in CodeGen. This is not evident performance gain so disable
159 if (isa<AtomicMemTransferInst>(MI))
160 if (CopyDstAlign < Size || CopySrcAlign < Size)
163 // Use an integer load+store unless we can find something better.
165 cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
167 cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
169 IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
170 Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
171 Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
173 // If the memcpy has metadata describing the members, see if we can get the
174 // TBAA tag describing our copy.
175 MDNode *CopyMD = nullptr;
176 if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
178 } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
179 if (M->getNumOperands() == 3 && M->getOperand(0) &&
180 mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
181 mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
183 mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
184 mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
186 M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
187 CopyMD = cast<MDNode>(M->getOperand(2));
190 Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
191 Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
192 LoadInst *L = Builder.CreateLoad(IntType, Src);
193 // Alignment from the mem intrinsic will be better, so use it.
195 MaybeAlign(CopySrcAlign)); // FIXME: Check if we can use Align instead.
197 L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
198 MDNode *LoopMemParallelMD =
199 MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
200 if (LoopMemParallelMD)
201 L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
202 MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
204 L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
206 StoreInst *S = Builder.CreateStore(L, Dest);
207 // Alignment from the mem intrinsic will be better, so use it.
209 MaybeAlign(CopyDstAlign)); // FIXME: Check if we can use Align instead.
211 S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
212 if (LoopMemParallelMD)
213 S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
215 S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
217 if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
218 // non-atomics can be volatile
219 L->setVolatile(MT->isVolatile());
220 S->setVolatile(MT->isVolatile());
222 if (isa<AtomicMemTransferInst>(MI)) {
223 // atomics have to be unordered
224 L->setOrdering(AtomicOrdering::Unordered);
225 S->setOrdering(AtomicOrdering::Unordered);
228 // Set the size of the copy to 0, it will be deleted on the next iteration.
229 MI->setLength(Constant::getNullValue(MemOpLength->getType()));
233 Instruction *InstCombiner::SimplifyAnyMemSet(AnyMemSetInst *MI) {
234 const unsigned KnownAlignment =
235 getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
236 if (MI->getDestAlignment() < KnownAlignment) {
237 MI->setDestAlignment(KnownAlignment);
241 // If we have a store to a location which is known constant, we can conclude
242 // that the store must be storing the constant value (else the memory
243 // wouldn't be constant), and this must be a noop.
244 if (AA->pointsToConstantMemory(MI->getDest())) {
245 // Set the size of the copy to 0, it will be deleted on the next iteration.
246 MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
250 // Extract the length and alignment and fill if they are constant.
251 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
252 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
253 if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
255 const uint64_t Len = LenC->getLimitedValue();
256 assert(Len && "0-sized memory setting should be removed already.");
257 const Align Alignment = assumeAligned(MI->getDestAlignment());
259 // If it is an atomic and alignment is less than the size then we will
260 // introduce the unaligned memory access which will be later transformed
261 // into libcall in CodeGen. This is not evident performance gain so disable
263 if (isa<AtomicMemSetInst>(MI))
267 // memset(s,c,n) -> store s, c (for n=1,2,4,8)
268 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
269 Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
271 Value *Dest = MI->getDest();
272 unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
273 Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
274 Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);
276 // Extract the fill value and store.
277 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
278 StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
280 S->setAlignment(Alignment);
281 if (isa<AtomicMemSetInst>(MI))
282 S->setOrdering(AtomicOrdering::Unordered);
284 // Set the size of the copy to 0, it will be deleted on the next iteration.
285 MI->setLength(Constant::getNullValue(LenC->getType()));
292 static Value *simplifyX86immShift(const IntrinsicInst &II,
293 InstCombiner::BuilderTy &Builder) {
294 bool LogicalShift = false;
295 bool ShiftLeft = false;
297 switch (II.getIntrinsicID()) {
298 default: llvm_unreachable("Unexpected intrinsic!");
299 case Intrinsic::x86_sse2_psra_d:
300 case Intrinsic::x86_sse2_psra_w:
301 case Intrinsic::x86_sse2_psrai_d:
302 case Intrinsic::x86_sse2_psrai_w:
303 case Intrinsic::x86_avx2_psra_d:
304 case Intrinsic::x86_avx2_psra_w:
305 case Intrinsic::x86_avx2_psrai_d:
306 case Intrinsic::x86_avx2_psrai_w:
307 case Intrinsic::x86_avx512_psra_q_128:
308 case Intrinsic::x86_avx512_psrai_q_128:
309 case Intrinsic::x86_avx512_psra_q_256:
310 case Intrinsic::x86_avx512_psrai_q_256:
311 case Intrinsic::x86_avx512_psra_d_512:
312 case Intrinsic::x86_avx512_psra_q_512:
313 case Intrinsic::x86_avx512_psra_w_512:
314 case Intrinsic::x86_avx512_psrai_d_512:
315 case Intrinsic::x86_avx512_psrai_q_512:
316 case Intrinsic::x86_avx512_psrai_w_512:
317 LogicalShift = false; ShiftLeft = false;
319 case Intrinsic::x86_sse2_psrl_d:
320 case Intrinsic::x86_sse2_psrl_q:
321 case Intrinsic::x86_sse2_psrl_w:
322 case Intrinsic::x86_sse2_psrli_d:
323 case Intrinsic::x86_sse2_psrli_q:
324 case Intrinsic::x86_sse2_psrli_w:
325 case Intrinsic::x86_avx2_psrl_d:
326 case Intrinsic::x86_avx2_psrl_q:
327 case Intrinsic::x86_avx2_psrl_w:
328 case Intrinsic::x86_avx2_psrli_d:
329 case Intrinsic::x86_avx2_psrli_q:
330 case Intrinsic::x86_avx2_psrli_w:
331 case Intrinsic::x86_avx512_psrl_d_512:
332 case Intrinsic::x86_avx512_psrl_q_512:
333 case Intrinsic::x86_avx512_psrl_w_512:
334 case Intrinsic::x86_avx512_psrli_d_512:
335 case Intrinsic::x86_avx512_psrli_q_512:
336 case Intrinsic::x86_avx512_psrli_w_512:
337 LogicalShift = true; ShiftLeft = false;
339 case Intrinsic::x86_sse2_psll_d:
340 case Intrinsic::x86_sse2_psll_q:
341 case Intrinsic::x86_sse2_psll_w:
342 case Intrinsic::x86_sse2_pslli_d:
343 case Intrinsic::x86_sse2_pslli_q:
344 case Intrinsic::x86_sse2_pslli_w:
345 case Intrinsic::x86_avx2_psll_d:
346 case Intrinsic::x86_avx2_psll_q:
347 case Intrinsic::x86_avx2_psll_w:
348 case Intrinsic::x86_avx2_pslli_d:
349 case Intrinsic::x86_avx2_pslli_q:
350 case Intrinsic::x86_avx2_pslli_w:
351 case Intrinsic::x86_avx512_psll_d_512:
352 case Intrinsic::x86_avx512_psll_q_512:
353 case Intrinsic::x86_avx512_psll_w_512:
354 case Intrinsic::x86_avx512_pslli_d_512:
355 case Intrinsic::x86_avx512_pslli_q_512:
356 case Intrinsic::x86_avx512_pslli_w_512:
357 LogicalShift = true; ShiftLeft = true;
360 assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");
362 // Simplify if count is constant.
363 auto Arg1 = II.getArgOperand(1);
364 auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
365 auto CDV = dyn_cast<ConstantDataVector>(Arg1);
366 auto CInt = dyn_cast<ConstantInt>(Arg1);
367 if (!CAZ && !CDV && !CInt)
372 // SSE2/AVX2 uses all the first 64-bits of the 128-bit vector
373 // operand to compute the shift amount.
374 auto VT = cast<VectorType>(CDV->getType());
375 unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
376 assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
377 unsigned NumSubElts = 64 / BitWidth;
379 // Concatenate the sub-elements to create the 64-bit value.
380 for (unsigned i = 0; i != NumSubElts; ++i) {
381 unsigned SubEltIdx = (NumSubElts - 1) - i;
382 auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
384 Count |= SubElt->getValue().zextOrTrunc(64);
388 Count = CInt->getValue();
390 auto Vec = II.getArgOperand(0);
391 auto VT = cast<VectorType>(Vec->getType());
392 auto SVT = VT->getElementType();
393 unsigned VWidth = VT->getNumElements();
394 unsigned BitWidth = SVT->getPrimitiveSizeInBits();
396 // If shift-by-zero then just return the original value.
397 if (Count.isNullValue())
400 // Handle cases when Shift >= BitWidth.
401 if (Count.uge(BitWidth)) {
402 // If LogicalShift - just return zero.
404 return ConstantAggregateZero::get(VT);
406 // If ArithmeticShift - clamp Shift to (BitWidth - 1).
407 Count = APInt(64, BitWidth - 1);
410 // Get a constant vector of the same type as the first operand.
411 auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
412 auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);
415 return Builder.CreateShl(Vec, ShiftVec);
418 return Builder.CreateLShr(Vec, ShiftVec);
420 return Builder.CreateAShr(Vec, ShiftVec);
423 // Attempt to simplify AVX2 per-element shift intrinsics to a generic IR shift.
424 // Unlike the generic IR shifts, the intrinsics have defined behaviour for out
425 // of range shift amounts (logical - set to zero, arithmetic - splat sign bit).
426 static Value *simplifyX86varShift(const IntrinsicInst &II,
427 InstCombiner::BuilderTy &Builder) {
428 bool LogicalShift = false;
429 bool ShiftLeft = false;
431 switch (II.getIntrinsicID()) {
432 default: llvm_unreachable("Unexpected intrinsic!");
433 case Intrinsic::x86_avx2_psrav_d:
434 case Intrinsic::x86_avx2_psrav_d_256:
435 case Intrinsic::x86_avx512_psrav_q_128:
436 case Intrinsic::x86_avx512_psrav_q_256:
437 case Intrinsic::x86_avx512_psrav_d_512:
438 case Intrinsic::x86_avx512_psrav_q_512:
439 case Intrinsic::x86_avx512_psrav_w_128:
440 case Intrinsic::x86_avx512_psrav_w_256:
441 case Intrinsic::x86_avx512_psrav_w_512:
442 LogicalShift = false;
445 case Intrinsic::x86_avx2_psrlv_d:
446 case Intrinsic::x86_avx2_psrlv_d_256:
447 case Intrinsic::x86_avx2_psrlv_q:
448 case Intrinsic::x86_avx2_psrlv_q_256:
449 case Intrinsic::x86_avx512_psrlv_d_512:
450 case Intrinsic::x86_avx512_psrlv_q_512:
451 case Intrinsic::x86_avx512_psrlv_w_128:
452 case Intrinsic::x86_avx512_psrlv_w_256:
453 case Intrinsic::x86_avx512_psrlv_w_512:
457 case Intrinsic::x86_avx2_psllv_d:
458 case Intrinsic::x86_avx2_psllv_d_256:
459 case Intrinsic::x86_avx2_psllv_q:
460 case Intrinsic::x86_avx2_psllv_q_256:
461 case Intrinsic::x86_avx512_psllv_d_512:
462 case Intrinsic::x86_avx512_psllv_q_512:
463 case Intrinsic::x86_avx512_psllv_w_128:
464 case Intrinsic::x86_avx512_psllv_w_256:
465 case Intrinsic::x86_avx512_psllv_w_512:
470 assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");
472 // Simplify if all shift amounts are constant/undef.
473 auto *CShift = dyn_cast<Constant>(II.getArgOperand(1));
477 auto Vec = II.getArgOperand(0);
478 auto VT = cast<VectorType>(II.getType());
479 auto SVT = VT->getVectorElementType();
480 int NumElts = VT->getNumElements();
481 int BitWidth = SVT->getIntegerBitWidth();
483 // Collect each element's shift amount.
484 // We also collect special cases: UNDEF = -1, OUT-OF-RANGE = BitWidth.
485 bool AnyOutOfRange = false;
486 SmallVector<int, 8> ShiftAmts;
487 for (int I = 0; I < NumElts; ++I) {
488 auto *CElt = CShift->getAggregateElement(I);
489 if (CElt && isa<UndefValue>(CElt)) {
490 ShiftAmts.push_back(-1);
494 auto *COp = dyn_cast_or_null<ConstantInt>(CElt);
498 // Handle out of range shifts.
499 // If LogicalShift - set to BitWidth (special case).
500 // If ArithmeticShift - set to (BitWidth - 1) (sign splat).
501 APInt ShiftVal = COp->getValue();
502 if (ShiftVal.uge(BitWidth)) {
503 AnyOutOfRange = LogicalShift;
504 ShiftAmts.push_back(LogicalShift ? BitWidth : BitWidth - 1);
508 ShiftAmts.push_back((int)ShiftVal.getZExtValue());
511 // If all elements out of range or UNDEF, return vector of zeros/undefs.
512 // ArithmeticShift should only hit this if they are all UNDEF.
513 auto OutOfRange = [&](int Idx) { return (Idx < 0) || (BitWidth <= Idx); };
514 if (llvm::all_of(ShiftAmts, OutOfRange)) {
515 SmallVector<Constant *, 8> ConstantVec;
516 for (int Idx : ShiftAmts) {
518 ConstantVec.push_back(UndefValue::get(SVT));
520 assert(LogicalShift && "Logical shift expected");
521 ConstantVec.push_back(ConstantInt::getNullValue(SVT));
524 return ConstantVector::get(ConstantVec);
527 // We can't handle only some out of range values with generic logical shifts.
531 // Build the shift amount constant vector.
532 SmallVector<Constant *, 8> ShiftVecAmts;
533 for (int Idx : ShiftAmts) {
535 ShiftVecAmts.push_back(UndefValue::get(SVT));
537 ShiftVecAmts.push_back(ConstantInt::get(SVT, Idx));
539 auto ShiftVec = ConstantVector::get(ShiftVecAmts);
542 return Builder.CreateShl(Vec, ShiftVec);
545 return Builder.CreateLShr(Vec, ShiftVec);
547 return Builder.CreateAShr(Vec, ShiftVec);
550 static Value *simplifyX86pack(IntrinsicInst &II,
551 InstCombiner::BuilderTy &Builder, bool IsSigned) {
552 Value *Arg0 = II.getArgOperand(0);
553 Value *Arg1 = II.getArgOperand(1);
554 Type *ResTy = II.getType();
556 // Fast all undef handling.
557 if (isa<UndefValue>(Arg0) && isa<UndefValue>(Arg1))
558 return UndefValue::get(ResTy);
560 Type *ArgTy = Arg0->getType();
561 unsigned NumLanes = ResTy->getPrimitiveSizeInBits() / 128;
562 unsigned NumSrcElts = ArgTy->getVectorNumElements();
563 assert(ResTy->getVectorNumElements() == (2 * NumSrcElts) &&
564 "Unexpected packing types");
566 unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
567 unsigned DstScalarSizeInBits = ResTy->getScalarSizeInBits();
568 unsigned SrcScalarSizeInBits = ArgTy->getScalarSizeInBits();
569 assert(SrcScalarSizeInBits == (2 * DstScalarSizeInBits) &&
570 "Unexpected packing types");
573 if (!isa<Constant>(Arg0) || !isa<Constant>(Arg1))
576 // Clamp Values - signed/unsigned both use signed clamp values, but they
577 // differ on the min/max values.
578 APInt MinValue, MaxValue;
580 // PACKSS: Truncate signed value with signed saturation.
581 // Source values less than dst minint are saturated to minint.
582 // Source values greater than dst maxint are saturated to maxint.
584 APInt::getSignedMinValue(DstScalarSizeInBits).sext(SrcScalarSizeInBits);
586 APInt::getSignedMaxValue(DstScalarSizeInBits).sext(SrcScalarSizeInBits);
588 // PACKUS: Truncate signed value with unsigned saturation.
589 // Source values less than zero are saturated to zero.
590 // Source values greater than dst maxuint are saturated to maxuint.
591 MinValue = APInt::getNullValue(SrcScalarSizeInBits);
592 MaxValue = APInt::getLowBitsSet(SrcScalarSizeInBits, DstScalarSizeInBits);
595 auto *MinC = Constant::getIntegerValue(ArgTy, MinValue);
596 auto *MaxC = Constant::getIntegerValue(ArgTy, MaxValue);
597 Arg0 = Builder.CreateSelect(Builder.CreateICmpSLT(Arg0, MinC), MinC, Arg0);
598 Arg1 = Builder.CreateSelect(Builder.CreateICmpSLT(Arg1, MinC), MinC, Arg1);
599 Arg0 = Builder.CreateSelect(Builder.CreateICmpSGT(Arg0, MaxC), MaxC, Arg0);
600 Arg1 = Builder.CreateSelect(Builder.CreateICmpSGT(Arg1, MaxC), MaxC, Arg1);
602 // Shuffle clamped args together at the lane level.
603 SmallVector<unsigned, 32> PackMask;
604 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
605 for (unsigned Elt = 0; Elt != NumSrcEltsPerLane; ++Elt)
606 PackMask.push_back(Elt + (Lane * NumSrcEltsPerLane));
607 for (unsigned Elt = 0; Elt != NumSrcEltsPerLane; ++Elt)
608 PackMask.push_back(Elt + (Lane * NumSrcEltsPerLane) + NumSrcElts);
610 auto *Shuffle = Builder.CreateShuffleVector(Arg0, Arg1, PackMask);
612 // Truncate to dst size.
613 return Builder.CreateTrunc(Shuffle, ResTy);
616 static Value *simplifyX86movmsk(const IntrinsicInst &II,
617 InstCombiner::BuilderTy &Builder) {
618 Value *Arg = II.getArgOperand(0);
619 Type *ResTy = II.getType();
620 Type *ArgTy = Arg->getType();
622 // movmsk(undef) -> zero as we must ensure the upper bits are zero.
623 if (isa<UndefValue>(Arg))
624 return Constant::getNullValue(ResTy);
626 // We can't easily peek through x86_mmx types.
627 if (!ArgTy->isVectorTy())
630 // Expand MOVMSK to compare/bitcast/zext:
631 // e.g. PMOVMSKB(v16i8 x):
632 // %cmp = icmp slt <16 x i8> %x, zeroinitializer
633 // %int = bitcast <16 x i1> %cmp to i16
634 // %res = zext i16 %int to i32
635 unsigned NumElts = ArgTy->getVectorNumElements();
636 Type *IntegerVecTy = VectorType::getInteger(cast<VectorType>(ArgTy));
637 Type *IntegerTy = Builder.getIntNTy(NumElts);
639 Value *Res = Builder.CreateBitCast(Arg, IntegerVecTy);
640 Res = Builder.CreateICmpSLT(Res, Constant::getNullValue(IntegerVecTy));
641 Res = Builder.CreateBitCast(Res, IntegerTy);
642 Res = Builder.CreateZExtOrTrunc(Res, ResTy);
646 static Value *simplifyX86addcarry(const IntrinsicInst &II,
647 InstCombiner::BuilderTy &Builder) {
648 Value *CarryIn = II.getArgOperand(0);
649 Value *Op1 = II.getArgOperand(1);
650 Value *Op2 = II.getArgOperand(2);
651 Type *RetTy = II.getType();
652 Type *OpTy = Op1->getType();
653 assert(RetTy->getStructElementType(0)->isIntegerTy(8) &&
654 RetTy->getStructElementType(1) == OpTy && OpTy == Op2->getType() &&
655 "Unexpected types for x86 addcarry");
657 // If carry-in is zero, this is just an unsigned add with overflow.
658 if (match(CarryIn, m_ZeroInt())) {
659 Value *UAdd = Builder.CreateIntrinsic(Intrinsic::uadd_with_overflow, OpTy,
661 // The types have to be adjusted to match the x86 call types.
662 Value *UAddResult = Builder.CreateExtractValue(UAdd, 0);
663 Value *UAddOV = Builder.CreateZExt(Builder.CreateExtractValue(UAdd, 1),
664 Builder.getInt8Ty());
665 Value *Res = UndefValue::get(RetTy);
666 Res = Builder.CreateInsertValue(Res, UAddOV, 0);
667 return Builder.CreateInsertValue(Res, UAddResult, 1);
673 static Value *simplifyX86insertps(const IntrinsicInst &II,
674 InstCombiner::BuilderTy &Builder) {
675 auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
679 VectorType *VecTy = cast<VectorType>(II.getType());
680 assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");
682 // The immediate permute control byte looks like this:
683 // [3:0] - zero mask for each 32-bit lane
684 // [5:4] - select one 32-bit destination lane
685 // [7:6] - select one 32-bit source lane
687 uint8_t Imm = CInt->getZExtValue();
688 uint8_t ZMask = Imm & 0xf;
689 uint8_t DestLane = (Imm >> 4) & 0x3;
690 uint8_t SourceLane = (Imm >> 6) & 0x3;
692 ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);
694 // If all zero mask bits are set, this was just a weird way to
695 // generate a zero vector.
699 // Initialize by passing all of the first source bits through.
700 uint32_t ShuffleMask[4] = { 0, 1, 2, 3 };
702 // We may replace the second operand with the zero vector.
703 Value *V1 = II.getArgOperand(1);
706 // If the zero mask is being used with a single input or the zero mask
707 // overrides the destination lane, this is a shuffle with the zero vector.
708 if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
709 (ZMask & (1 << DestLane))) {
711 // We may still move 32-bits of the first source vector from one lane
713 ShuffleMask[DestLane] = SourceLane;
714 // The zero mask may override the previous insert operation.
715 for (unsigned i = 0; i < 4; ++i)
716 if ((ZMask >> i) & 0x1)
717 ShuffleMask[i] = i + 4;
719 // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
723 // Replace the selected destination lane with the selected source lane.
724 ShuffleMask[DestLane] = SourceLane + 4;
727 return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
730 /// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
731 /// or conversion to a shuffle vector.
732 static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
733 ConstantInt *CILength, ConstantInt *CIIndex,
734 InstCombiner::BuilderTy &Builder) {
735 auto LowConstantHighUndef = [&](uint64_t Val) {
736 Type *IntTy64 = Type::getInt64Ty(II.getContext());
737 Constant *Args[] = {ConstantInt::get(IntTy64, Val),
738 UndefValue::get(IntTy64)};
739 return ConstantVector::get(Args);
742 // See if we're dealing with constant values.
743 Constant *C0 = dyn_cast<Constant>(Op0);
745 C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
748 // Attempt to constant fold.
749 if (CILength && CIIndex) {
750 // From AMD documentation: "The bit index and field length are each six
751 // bits in length other bits of the field are ignored."
752 APInt APIndex = CIIndex->getValue().zextOrTrunc(6);
753 APInt APLength = CILength->getValue().zextOrTrunc(6);
755 unsigned Index = APIndex.getZExtValue();
757 // From AMD documentation: "a value of zero in the field length is
758 // defined as length of 64".
759 unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();
761 // From AMD documentation: "If the sum of the bit index + length field
762 // is greater than 64, the results are undefined".
763 unsigned End = Index + Length;
765 // Note that both field index and field length are 8-bit quantities.
766 // Since variables 'Index' and 'Length' are unsigned values
767 // obtained from zero-extending field index and field length
768 // respectively, their sum should never wrap around.
770 return UndefValue::get(II.getType());
772 // If we are inserting whole bytes, we can convert this to a shuffle.
773 // Lowering can recognize EXTRQI shuffle masks.
774 if ((Length % 8) == 0 && (Index % 8) == 0) {
775 // Convert bit indices to byte indices.
779 Type *IntTy8 = Type::getInt8Ty(II.getContext());
780 Type *IntTy32 = Type::getInt32Ty(II.getContext());
781 VectorType *ShufTy = VectorType::get(IntTy8, 16);
783 SmallVector<Constant *, 16> ShuffleMask;
784 for (int i = 0; i != (int)Length; ++i)
785 ShuffleMask.push_back(
786 Constant::getIntegerValue(IntTy32, APInt(32, i + Index)));
787 for (int i = Length; i != 8; ++i)
788 ShuffleMask.push_back(
789 Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
790 for (int i = 8; i != 16; ++i)
791 ShuffleMask.push_back(UndefValue::get(IntTy32));
793 Value *SV = Builder.CreateShuffleVector(
794 Builder.CreateBitCast(Op0, ShufTy),
795 ConstantAggregateZero::get(ShufTy), ConstantVector::get(ShuffleMask));
796 return Builder.CreateBitCast(SV, II.getType());
799 // Constant Fold - shift Index'th bit to lowest position and mask off
802 APInt Elt = CI0->getValue();
803 Elt.lshrInPlace(Index);
804 Elt = Elt.zextOrTrunc(Length);
805 return LowConstantHighUndef(Elt.getZExtValue());
808 // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
809 if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
810 Value *Args[] = {Op0, CILength, CIIndex};
811 Module *M = II.getModule();
812 Function *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
813 return Builder.CreateCall(F, Args);
817 // Constant Fold - extraction from zero is always {zero, undef}.
818 if (CI0 && CI0->isZero())
819 return LowConstantHighUndef(0);
824 /// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
825 /// folding or conversion to a shuffle vector.
826 static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
827 APInt APLength, APInt APIndex,
828 InstCombiner::BuilderTy &Builder) {
829 // From AMD documentation: "The bit index and field length are each six bits
830 // in length other bits of the field are ignored."
831 APIndex = APIndex.zextOrTrunc(6);
832 APLength = APLength.zextOrTrunc(6);
834 // Attempt to constant fold.
835 unsigned Index = APIndex.getZExtValue();
837 // From AMD documentation: "a value of zero in the field length is
838 // defined as length of 64".
839 unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();
841 // From AMD documentation: "If the sum of the bit index + length field
842 // is greater than 64, the results are undefined".
843 unsigned End = Index + Length;
845 // Note that both field index and field length are 8-bit quantities.
846 // Since variables 'Index' and 'Length' are unsigned values
847 // obtained from zero-extending field index and field length
848 // respectively, their sum should never wrap around.
850 return UndefValue::get(II.getType());
852 // If we are inserting whole bytes, we can convert this to a shuffle.
853 // Lowering can recognize INSERTQI shuffle masks.
854 if ((Length % 8) == 0 && (Index % 8) == 0) {
855 // Convert bit indices to byte indices.
859 Type *IntTy8 = Type::getInt8Ty(II.getContext());
860 Type *IntTy32 = Type::getInt32Ty(II.getContext());
861 VectorType *ShufTy = VectorType::get(IntTy8, 16);
863 SmallVector<Constant *, 16> ShuffleMask;
864 for (int i = 0; i != (int)Index; ++i)
865 ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
866 for (int i = 0; i != (int)Length; ++i)
867 ShuffleMask.push_back(
868 Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
869 for (int i = Index + Length; i != 8; ++i)
870 ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
871 for (int i = 8; i != 16; ++i)
872 ShuffleMask.push_back(UndefValue::get(IntTy32));
874 Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy),
875 Builder.CreateBitCast(Op1, ShufTy),
876 ConstantVector::get(ShuffleMask));
877 return Builder.CreateBitCast(SV, II.getType());
880 // See if we're dealing with constant values.
881 Constant *C0 = dyn_cast<Constant>(Op0);
882 Constant *C1 = dyn_cast<Constant>(Op1);
884 C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
887 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
890 // Constant Fold - insert bottom Length bits starting at the Index'th bit.
892 APInt V00 = CI00->getValue();
893 APInt V10 = CI10->getValue();
894 APInt Mask = APInt::getLowBitsSet(64, Length).shl(Index);
896 V10 = V10.zextOrTrunc(Length).zextOrTrunc(64).shl(Index);
897 APInt Val = V00 | V10;
898 Type *IntTy64 = Type::getInt64Ty(II.getContext());
899 Constant *Args[] = {ConstantInt::get(IntTy64, Val.getZExtValue()),
900 UndefValue::get(IntTy64)};
901 return ConstantVector::get(Args);
904 // If we were an INSERTQ call, we'll save demanded elements if we convert to
906 if (II.getIntrinsicID() == Intrinsic::x86_sse4a_insertq) {
907 Type *IntTy8 = Type::getInt8Ty(II.getContext());
908 Constant *CILength = ConstantInt::get(IntTy8, Length, false);
909 Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);
911 Value *Args[] = {Op0, Op1, CILength, CIIndex};
912 Module *M = II.getModule();
913 Function *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
914 return Builder.CreateCall(F, Args);
920 /// Attempt to convert pshufb* to shufflevector if the mask is constant.
921 static Value *simplifyX86pshufb(const IntrinsicInst &II,
922 InstCombiner::BuilderTy &Builder) {
923 Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
927 auto *VecTy = cast<VectorType>(II.getType());
928 auto *MaskEltTy = Type::getInt32Ty(II.getContext());
929 unsigned NumElts = VecTy->getNumElements();
930 assert((NumElts == 16 || NumElts == 32 || NumElts == 64) &&
931 "Unexpected number of elements in shuffle mask!");
933 // Construct a shuffle mask from constant integers or UNDEFs.
934 Constant *Indexes[64] = {nullptr};
936 // Each byte in the shuffle control mask forms an index to permute the
937 // corresponding byte in the destination operand.
938 for (unsigned I = 0; I < NumElts; ++I) {
939 Constant *COp = V->getAggregateElement(I);
940 if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
943 if (isa<UndefValue>(COp)) {
944 Indexes[I] = UndefValue::get(MaskEltTy);
948 int8_t Index = cast<ConstantInt>(COp)->getValue().getZExtValue();
950 // If the most significant bit (bit[7]) of each byte of the shuffle
951 // control mask is set, then zero is written in the result byte.
952 // The zero vector is in the right-hand side of the resulting
955 // The value of each index for the high 128-bit lane is the least
956 // significant 4 bits of the respective shuffle control byte.
957 Index = ((Index < 0) ? NumElts : Index & 0x0F) + (I & 0xF0);
958 Indexes[I] = ConstantInt::get(MaskEltTy, Index);
961 auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
962 auto V1 = II.getArgOperand(0);
963 auto V2 = Constant::getNullValue(VecTy);
964 return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
967 /// Attempt to convert vpermilvar* to shufflevector if the mask is constant.
968 static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
969 InstCombiner::BuilderTy &Builder) {
970 Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
974 auto *VecTy = cast<VectorType>(II.getType());
975 auto *MaskEltTy = Type::getInt32Ty(II.getContext());
976 unsigned NumElts = VecTy->getVectorNumElements();
977 bool IsPD = VecTy->getScalarType()->isDoubleTy();
978 unsigned NumLaneElts = IsPD ? 2 : 4;
979 assert(NumElts == 16 || NumElts == 8 || NumElts == 4 || NumElts == 2);
981 // Construct a shuffle mask from constant integers or UNDEFs.
982 Constant *Indexes[16] = {nullptr};
984 // The intrinsics only read one or two bits, clear the rest.
985 for (unsigned I = 0; I < NumElts; ++I) {
986 Constant *COp = V->getAggregateElement(I);
987 if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
990 if (isa<UndefValue>(COp)) {
991 Indexes[I] = UndefValue::get(MaskEltTy);
995 APInt Index = cast<ConstantInt>(COp)->getValue();
996 Index = Index.zextOrTrunc(32).getLoBits(2);
998 // The PD variants uses bit 1 to select per-lane element index, so
999 // shift down to convert to generic shuffle mask index.
1001 Index.lshrInPlace(1);
1003 // The _256 variants are a bit trickier since the mask bits always index
1004 // into the corresponding 128 half. In order to convert to a generic
1005 // shuffle, we have to make that explicit.
1006 Index += APInt(32, (I / NumLaneElts) * NumLaneElts);
1008 Indexes[I] = ConstantInt::get(MaskEltTy, Index);
1011 auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
1012 auto V1 = II.getArgOperand(0);
1013 auto V2 = UndefValue::get(V1->getType());
1014 return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1017 /// Attempt to convert vpermd/vpermps to shufflevector if the mask is constant.
1018 static Value *simplifyX86vpermv(const IntrinsicInst &II,
1019 InstCombiner::BuilderTy &Builder) {
1020 auto *V = dyn_cast<Constant>(II.getArgOperand(1));
1024 auto *VecTy = cast<VectorType>(II.getType());
1025 auto *MaskEltTy = Type::getInt32Ty(II.getContext());
1026 unsigned Size = VecTy->getNumElements();
1027 assert((Size == 4 || Size == 8 || Size == 16 || Size == 32 || Size == 64) &&
1028 "Unexpected shuffle mask size");
1030 // Construct a shuffle mask from constant integers or UNDEFs.
1031 Constant *Indexes[64] = {nullptr};
1033 for (unsigned I = 0; I < Size; ++I) {
1034 Constant *COp = V->getAggregateElement(I);
1035 if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
1038 if (isa<UndefValue>(COp)) {
1039 Indexes[I] = UndefValue::get(MaskEltTy);
1043 uint32_t Index = cast<ConstantInt>(COp)->getZExtValue();
1045 Indexes[I] = ConstantInt::get(MaskEltTy, Index);
1048 auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, Size));
1049 auto V1 = II.getArgOperand(0);
1050 auto V2 = UndefValue::get(VecTy);
1051 return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1054 // TODO, Obvious Missing Transforms:
1055 // * Narrow width by halfs excluding zero/undef lanes
1056 Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
1057 Value *LoadPtr = II.getArgOperand(0);
1058 unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
1060 // If the mask is all ones or undefs, this is a plain vector load of the 1st
1062 if (maskIsAllOneOrUndef(II.getArgOperand(2)))
1063 return Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
1066 // If we can unconditionally load from this address, replace with a
1067 // load/select idiom. TODO: use DT for context sensitive query
1068 if (isDereferenceableAndAlignedPointer(
1069 LoadPtr, II.getType(), MaybeAlign(Alignment),
1070 II.getModule()->getDataLayout(), &II, nullptr)) {
1071 Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
1073 return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
1079 // TODO, Obvious Missing Transforms:
1080 // * Single constant active lane -> store
1081 // * Narrow width by halfs excluding zero/undef lanes
1082 Instruction *InstCombiner::simplifyMaskedStore(IntrinsicInst &II) {
1083 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
1087 // If the mask is all zeros, this instruction does nothing.
1088 if (ConstMask->isNullValue())
1089 return eraseInstFromFunction(II);
1091 // If the mask is all ones, this is a plain vector store of the 1st argument.
1092 if (ConstMask->isAllOnesValue()) {
1093 Value *StorePtr = II.getArgOperand(1);
1094 MaybeAlign Alignment(
1095 cast<ConstantInt>(II.getArgOperand(2))->getZExtValue());
1096 return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
1099 // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
1100 APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
1101 APInt UndefElts(DemandedElts.getBitWidth(), 0);
1102 if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
1103 DemandedElts, UndefElts)) {
1104 II.setOperand(0, V);
1111 // TODO, Obvious Missing Transforms:
1112 // * Single constant active lane load -> load
1113 // * Dereferenceable address & few lanes -> scalarize speculative load/selects
1114 // * Adjacent vector addresses -> masked.load
1115 // * Narrow width by halfs excluding zero/undef lanes
1116 // * Vector splat address w/known mask -> scalar load
1117 // * Vector incrementing address -> vector masked load
1118 Instruction *InstCombiner::simplifyMaskedGather(IntrinsicInst &II) {
1122 // TODO, Obvious Missing Transforms:
1123 // * Single constant active lane -> store
1124 // * Adjacent vector addresses -> masked.store
1125 // * Narrow store width by halfs excluding zero/undef lanes
1126 // * Vector splat address w/known mask -> scalar store
1127 // * Vector incrementing address -> vector masked store
1128 Instruction *InstCombiner::simplifyMaskedScatter(IntrinsicInst &II) {
1129 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
1133 // If the mask is all zeros, a scatter does nothing.
1134 if (ConstMask->isNullValue())
1135 return eraseInstFromFunction(II);
1137 // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
1138 APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
1139 APInt UndefElts(DemandedElts.getBitWidth(), 0);
1140 if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
1141 DemandedElts, UndefElts)) {
1142 II.setOperand(0, V);
1145 if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1),
1146 DemandedElts, UndefElts)) {
1147 II.setOperand(1, V);
1154 /// This function transforms launder.invariant.group and strip.invariant.group
1156 /// launder(launder(%x)) -> launder(%x) (the result is not the argument)
1157 /// launder(strip(%x)) -> launder(%x)
1158 /// strip(strip(%x)) -> strip(%x) (the result is not the argument)
1159 /// strip(launder(%x)) -> strip(%x)
1160 /// This is legal because it preserves the most recent information about
1161 /// the presence or absence of invariant.group.
1162 static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
1164 auto *Arg = II.getArgOperand(0);
1165 auto *StrippedArg = Arg->stripPointerCasts();
1166 auto *StrippedInvariantGroupsArg = Arg->stripPointerCastsAndInvariantGroups();
1167 if (StrippedArg == StrippedInvariantGroupsArg)
1168 return nullptr; // No launders/strips to remove.
1170 Value *Result = nullptr;
1172 if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
1173 Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
1174 else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
1175 Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
1178 "simplifyInvariantGroupIntrinsic only handles launder and strip");
1179 if (Result->getType()->getPointerAddressSpace() !=
1180 II.getType()->getPointerAddressSpace())
1181 Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
1182 if (Result->getType() != II.getType())
1183 Result = IC.Builder.CreateBitCast(Result, II.getType());
1185 return cast<Instruction>(Result);
1188 static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
1189 assert((II.getIntrinsicID() == Intrinsic::cttz ||
1190 II.getIntrinsicID() == Intrinsic::ctlz) &&
1191 "Expected cttz or ctlz intrinsic");
1192 bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
1193 Value *Op0 = II.getArgOperand(0);
1195 // ctlz(bitreverse(x)) -> cttz(x)
1196 // cttz(bitreverse(x)) -> ctlz(x)
1197 if (match(Op0, m_BitReverse(m_Value(X)))) {
1198 Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
1199 Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
1200 return CallInst::Create(F, {X, II.getArgOperand(1)});
1204 // cttz(-x) -> cttz(x)
1205 if (match(Op0, m_Neg(m_Value(X)))) {
1206 II.setOperand(0, X);
1210 // cttz(abs(x)) -> cttz(x)
1211 // cttz(nabs(x)) -> cttz(x)
1213 SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
1214 if (SPF == SPF_ABS || SPF == SPF_NABS) {
1215 II.setOperand(0, X);
1220 KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
1222 // Create a mask for bits above (ctlz) or below (cttz) the first known one.
1223 unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
1224 : Known.countMaxLeadingZeros();
1225 unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
1226 : Known.countMinLeadingZeros();
1228 // If all bits above (ctlz) or below (cttz) the first known one are known
1229 // zero, this value is constant.
1230 // FIXME: This should be in InstSimplify because we're replacing an
1231 // instruction with a constant.
1232 if (PossibleZeros == DefiniteZeros) {
1233 auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
1234 return IC.replaceInstUsesWith(II, C);
1237 // If the input to cttz/ctlz is known to be non-zero,
1238 // then change the 'ZeroIsUndef' parameter to 'true'
1239 // because we know the zero behavior can't affect the result.
1240 if (!Known.One.isNullValue() ||
1241 isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
1242 &IC.getDominatorTree())) {
1243 if (!match(II.getArgOperand(1), m_One())) {
1244 II.setOperand(1, IC.Builder.getTrue());
1249 // Add range metadata since known bits can't completely reflect what we know.
1250 // TODO: Handle splat vectors.
1251 auto *IT = dyn_cast<IntegerType>(Op0->getType());
1252 if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
1253 Metadata *LowAndHigh[] = {
1254 ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
1255 ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
1256 II.setMetadata(LLVMContext::MD_range,
1257 MDNode::get(II.getContext(), LowAndHigh));
1264 static Instruction *foldCtpop(IntrinsicInst &II, InstCombiner &IC) {
1265 assert(II.getIntrinsicID() == Intrinsic::ctpop &&
1266 "Expected ctpop intrinsic");
1267 Value *Op0 = II.getArgOperand(0);
1269 // ctpop(bitreverse(x)) -> ctpop(x)
1270 // ctpop(bswap(x)) -> ctpop(x)
1271 if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X)))) {
1272 II.setOperand(0, X);
1276 // FIXME: Try to simplify vectors of integers.
1277 auto *IT = dyn_cast<IntegerType>(Op0->getType());
1281 unsigned BitWidth = IT->getBitWidth();
1282 KnownBits Known(BitWidth);
1283 IC.computeKnownBits(Op0, Known, 0, &II);
1285 unsigned MinCount = Known.countMinPopulation();
1286 unsigned MaxCount = Known.countMaxPopulation();
1288 // Add range metadata since known bits can't completely reflect what we know.
1289 if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
1290 Metadata *LowAndHigh[] = {
1291 ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
1292 ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
1293 II.setMetadata(LLVMContext::MD_range,
1294 MDNode::get(II.getContext(), LowAndHigh));
1301 // TODO: If the x86 backend knew how to convert a bool vector mask back to an
1302 // XMM register mask efficiently, we could transform all x86 masked intrinsics
1303 // to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
1304 static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
1305 Value *Ptr = II.getOperand(0);
1306 Value *Mask = II.getOperand(1);
1307 Constant *ZeroVec = Constant::getNullValue(II.getType());
1309 // Special case a zero mask since that's not a ConstantDataVector.
1310 // This masked load instruction creates a zero vector.
1311 if (isa<ConstantAggregateZero>(Mask))
1312 return IC.replaceInstUsesWith(II, ZeroVec);
1314 auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
1318 // The mask is constant. Convert this x86 intrinsic to the LLVM instrinsic
1319 // to allow target-independent optimizations.
1321 // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
1322 // the LLVM intrinsic definition for the pointer argument.
1323 unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
1324 PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace);
1325 Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
1327 // Second, convert the x86 XMM integer vector mask to a vector of bools based
1328 // on each element's most significant bit (the sign bit).
1329 Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
1331 // The pass-through vector for an x86 masked load is a zero vector.
1332 CallInst *NewMaskedLoad =
1333 IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
1334 return IC.replaceInstUsesWith(II, NewMaskedLoad);
1337 // TODO: If the x86 backend knew how to convert a bool vector mask back to an
1338 // XMM register mask efficiently, we could transform all x86 masked intrinsics
1339 // to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
1340 static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
1341 Value *Ptr = II.getOperand(0);
1342 Value *Mask = II.getOperand(1);
1343 Value *Vec = II.getOperand(2);
1345 // Special case a zero mask since that's not a ConstantDataVector:
1346 // this masked store instruction does nothing.
1347 if (isa<ConstantAggregateZero>(Mask)) {
1348 IC.eraseInstFromFunction(II);
1352 // The SSE2 version is too weird (eg, unaligned but non-temporal) to do
1353 // anything else at this level.
1354 if (II.getIntrinsicID() == Intrinsic::x86_sse2_maskmov_dqu)
1357 auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
1361 // The mask is constant. Convert this x86 intrinsic to the LLVM instrinsic
1362 // to allow target-independent optimizations.
1364 // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
1365 // the LLVM intrinsic definition for the pointer argument.
1366 unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
1367 PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace);
1368 Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
1370 // Second, convert the x86 XMM integer vector mask to a vector of bools based
1371 // on each element's most significant bit (the sign bit).
1372 Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
1374 IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);
1376 // 'Replace uses' doesn't work for stores. Erase the original masked store.
1377 IC.eraseInstFromFunction(II);
1381 // Constant fold llvm.amdgcn.fmed3 intrinsics for standard inputs.
1383 // A single NaN input is folded to minnum, so we rely on that folding for
1385 static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
1386 const APFloat &Src2) {
1387 APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);
1389 APFloat::cmpResult Cmp0 = Max3.compare(Src0);
1390 assert(Cmp0 != APFloat::cmpUnordered && "nans handled separately");
1391 if (Cmp0 == APFloat::cmpEqual)
1392 return maxnum(Src1, Src2);
1394 APFloat::cmpResult Cmp1 = Max3.compare(Src1);
1395 assert(Cmp1 != APFloat::cmpUnordered && "nans handled separately");
1396 if (Cmp1 == APFloat::cmpEqual)
1397 return maxnum(Src0, Src2);
1399 return maxnum(Src0, Src1);
1402 /// Convert a table lookup to shufflevector if the mask is constant.
1403 /// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
1404 /// which case we could lower the shufflevector with rev64 instructions
1405 /// as it's actually a byte reverse.
1406 static Value *simplifyNeonTbl1(const IntrinsicInst &II,
1407 InstCombiner::BuilderTy &Builder) {
1408 // Bail out if the mask is not a constant.
1409 auto *C = dyn_cast<Constant>(II.getArgOperand(1));
1413 auto *VecTy = cast<VectorType>(II.getType());
1414 unsigned NumElts = VecTy->getNumElements();
1416 // Only perform this transformation for <8 x i8> vector types.
1417 if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
1420 uint32_t Indexes[8];
1422 for (unsigned I = 0; I < NumElts; ++I) {
1423 Constant *COp = C->getAggregateElement(I);
1425 if (!COp || !isa<ConstantInt>(COp))
1428 Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();
1430 // Make sure the mask indices are in range.
1431 if (Indexes[I] >= NumElts)
1435 auto *ShuffleMask = ConstantDataVector::get(II.getContext(),
1436 makeArrayRef(Indexes));
1437 auto *V1 = II.getArgOperand(0);
1438 auto *V2 = Constant::getNullValue(V1->getType());
1439 return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1442 /// Convert a vector load intrinsic into a simple llvm load instruction.
1443 /// This is beneficial when the underlying object being addressed comes
1444 /// from a constant, since we get constant-folding for free.
1445 static Value *simplifyNeonVld1(const IntrinsicInst &II,
1447 InstCombiner::BuilderTy &Builder) {
1448 auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
1453 unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign ?
1454 MemAlign : IntrAlign->getLimitedValue();
1456 if (!isPowerOf2_32(Alignment))
1459 auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
1460 PointerType::get(II.getType(), 0));
1461 return Builder.CreateAlignedLoad(II.getType(), BCastInst, Alignment);
1464 // Returns true iff the 2 intrinsics have the same operands, limiting the
1465 // comparison to the first NumOperands.
1466 static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
1467 unsigned NumOperands) {
1468 assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
1469 assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
1470 for (unsigned i = 0; i < NumOperands; i++)
1471 if (I.getArgOperand(i) != E.getArgOperand(i))
1476 // Remove trivially empty start/end intrinsic ranges, i.e. a start
1477 // immediately followed by an end (ignoring debuginfo or other
1478 // start/end intrinsics in between). As this handles only the most trivial
1479 // cases, tracking the nesting level is not needed:
1481 // call @llvm.foo.start(i1 0) ; &I
1482 // call @llvm.foo.start(i1 0)
1483 // call @llvm.foo.end(i1 0) ; This one will not be skipped: it will be removed
1484 // call @llvm.foo.end(i1 0)
1485 static bool removeTriviallyEmptyRange(IntrinsicInst &I, unsigned StartID,
1486 unsigned EndID, InstCombiner &IC) {
1487 assert(I.getIntrinsicID() == StartID &&
1488 "Start intrinsic does not have expected ID");
1489 BasicBlock::iterator BI(I), BE(I.getParent()->end());
1490 for (++BI; BI != BE; ++BI) {
1491 if (auto *E = dyn_cast<IntrinsicInst>(BI)) {
1492 if (isa<DbgInfoIntrinsic>(E) || E->getIntrinsicID() == StartID)
1494 if (E->getIntrinsicID() == EndID &&
1495 haveSameOperands(I, *E, E->getNumArgOperands())) {
1496 IC.eraseInstFromFunction(*E);
1497 IC.eraseInstFromFunction(I);
1507 // Convert NVVM intrinsics to target-generic LLVM code where possible.
1508 static Instruction *SimplifyNVVMIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
1509 // Each NVVM intrinsic we can simplify can be replaced with one of:
1511 // * an LLVM intrinsic,
1512 // * an LLVM cast operation,
1513 // * an LLVM binary operation, or
1514 // * ad-hoc LLVM IR for the particular operation.
1516 // Some transformations are only valid when the module's
1517 // flush-denormals-to-zero (ftz) setting is true/false, whereas other
1518 // transformations are valid regardless of the module's ftz setting.
1519 enum FtzRequirementTy {
1520 FTZ_Any, // Any ftz setting is ok.
1521 FTZ_MustBeOn, // Transformation is valid only if ftz is on.
1522 FTZ_MustBeOff, // Transformation is valid only if ftz is off.
1524 // Classes of NVVM intrinsics that can't be replaced one-to-one with a
1525 // target-generic intrinsic, cast op, or binary op but that we can nonetheless
1531 // SimplifyAction is a poor-man's variant (plus an additional flag) that
1532 // represents how to replace an NVVM intrinsic with target-generic LLVM IR.
1533 struct SimplifyAction {
1534 // Invariant: At most one of these Optionals has a value.
1535 Optional<Intrinsic::ID> IID;
1536 Optional<Instruction::CastOps> CastOp;
1537 Optional<Instruction::BinaryOps> BinaryOp;
1538 Optional<SpecialCase> Special;
1540 FtzRequirementTy FtzRequirement = FTZ_Any;
1542 SimplifyAction() = default;
1544 SimplifyAction(Intrinsic::ID IID, FtzRequirementTy FtzReq)
1545 : IID(IID), FtzRequirement(FtzReq) {}
1547 // Cast operations don't have anything to do with FTZ, so we skip that
1549 SimplifyAction(Instruction::CastOps CastOp) : CastOp(CastOp) {}
1551 SimplifyAction(Instruction::BinaryOps BinaryOp, FtzRequirementTy FtzReq)
1552 : BinaryOp(BinaryOp), FtzRequirement(FtzReq) {}
1554 SimplifyAction(SpecialCase Special, FtzRequirementTy FtzReq)
1555 : Special(Special), FtzRequirement(FtzReq) {}
1558 // Try to generate a SimplifyAction describing how to replace our
1559 // IntrinsicInstr with target-generic LLVM IR.
1560 const SimplifyAction Action = [II]() -> SimplifyAction {
1561 switch (II->getIntrinsicID()) {
1562 // NVVM intrinsics that map directly to LLVM intrinsics.
1563 case Intrinsic::nvvm_ceil_d:
1564 return {Intrinsic::ceil, FTZ_Any};
1565 case Intrinsic::nvvm_ceil_f:
1566 return {Intrinsic::ceil, FTZ_MustBeOff};
1567 case Intrinsic::nvvm_ceil_ftz_f:
1568 return {Intrinsic::ceil, FTZ_MustBeOn};
1569 case Intrinsic::nvvm_fabs_d:
1570 return {Intrinsic::fabs, FTZ_Any};
1571 case Intrinsic::nvvm_fabs_f:
1572 return {Intrinsic::fabs, FTZ_MustBeOff};
1573 case Intrinsic::nvvm_fabs_ftz_f:
1574 return {Intrinsic::fabs, FTZ_MustBeOn};
1575 case Intrinsic::nvvm_floor_d:
1576 return {Intrinsic::floor, FTZ_Any};
1577 case Intrinsic::nvvm_floor_f:
1578 return {Intrinsic::floor, FTZ_MustBeOff};
1579 case Intrinsic::nvvm_floor_ftz_f:
1580 return {Intrinsic::floor, FTZ_MustBeOn};
1581 case Intrinsic::nvvm_fma_rn_d:
1582 return {Intrinsic::fma, FTZ_Any};
1583 case Intrinsic::nvvm_fma_rn_f:
1584 return {Intrinsic::fma, FTZ_MustBeOff};
1585 case Intrinsic::nvvm_fma_rn_ftz_f:
1586 return {Intrinsic::fma, FTZ_MustBeOn};
1587 case Intrinsic::nvvm_fmax_d:
1588 return {Intrinsic::maxnum, FTZ_Any};
1589 case Intrinsic::nvvm_fmax_f:
1590 return {Intrinsic::maxnum, FTZ_MustBeOff};
1591 case Intrinsic::nvvm_fmax_ftz_f:
1592 return {Intrinsic::maxnum, FTZ_MustBeOn};
1593 case Intrinsic::nvvm_fmin_d:
1594 return {Intrinsic::minnum, FTZ_Any};
1595 case Intrinsic::nvvm_fmin_f:
1596 return {Intrinsic::minnum, FTZ_MustBeOff};
1597 case Intrinsic::nvvm_fmin_ftz_f:
1598 return {Intrinsic::minnum, FTZ_MustBeOn};
1599 case Intrinsic::nvvm_round_d:
1600 return {Intrinsic::round, FTZ_Any};
1601 case Intrinsic::nvvm_round_f:
1602 return {Intrinsic::round, FTZ_MustBeOff};
1603 case Intrinsic::nvvm_round_ftz_f:
1604 return {Intrinsic::round, FTZ_MustBeOn};
1605 case Intrinsic::nvvm_sqrt_rn_d:
1606 return {Intrinsic::sqrt, FTZ_Any};
1607 case Intrinsic::nvvm_sqrt_f:
1608 // nvvm_sqrt_f is a special case. For most intrinsics, foo_ftz_f is the
1609 // ftz version, and foo_f is the non-ftz version. But nvvm_sqrt_f adopts
1610 // the ftz-ness of the surrounding code. sqrt_rn_f and sqrt_rn_ftz_f are
1611 // the versions with explicit ftz-ness.
1612 return {Intrinsic::sqrt, FTZ_Any};
1613 case Intrinsic::nvvm_sqrt_rn_f:
1614 return {Intrinsic::sqrt, FTZ_MustBeOff};
1615 case Intrinsic::nvvm_sqrt_rn_ftz_f:
1616 return {Intrinsic::sqrt, FTZ_MustBeOn};
1617 case Intrinsic::nvvm_trunc_d:
1618 return {Intrinsic::trunc, FTZ_Any};
1619 case Intrinsic::nvvm_trunc_f:
1620 return {Intrinsic::trunc, FTZ_MustBeOff};
1621 case Intrinsic::nvvm_trunc_ftz_f:
1622 return {Intrinsic::trunc, FTZ_MustBeOn};
1624 // NVVM intrinsics that map to LLVM cast operations.
1626 // Note that llvm's target-generic conversion operators correspond to the rz
1627 // (round to zero) versions of the nvvm conversion intrinsics, even though
1628 // most everything else here uses the rn (round to nearest even) nvvm ops.
1629 case Intrinsic::nvvm_d2i_rz:
1630 case Intrinsic::nvvm_f2i_rz:
1631 case Intrinsic::nvvm_d2ll_rz:
1632 case Intrinsic::nvvm_f2ll_rz:
1633 return {Instruction::FPToSI};
1634 case Intrinsic::nvvm_d2ui_rz:
1635 case Intrinsic::nvvm_f2ui_rz:
1636 case Intrinsic::nvvm_d2ull_rz:
1637 case Intrinsic::nvvm_f2ull_rz:
1638 return {Instruction::FPToUI};
1639 case Intrinsic::nvvm_i2d_rz:
1640 case Intrinsic::nvvm_i2f_rz:
1641 case Intrinsic::nvvm_ll2d_rz:
1642 case Intrinsic::nvvm_ll2f_rz:
1643 return {Instruction::SIToFP};
1644 case Intrinsic::nvvm_ui2d_rz:
1645 case Intrinsic::nvvm_ui2f_rz:
1646 case Intrinsic::nvvm_ull2d_rz:
1647 case Intrinsic::nvvm_ull2f_rz:
1648 return {Instruction::UIToFP};
1650 // NVVM intrinsics that map to LLVM binary ops.
1651 case Intrinsic::nvvm_add_rn_d:
1652 return {Instruction::FAdd, FTZ_Any};
1653 case Intrinsic::nvvm_add_rn_f:
1654 return {Instruction::FAdd, FTZ_MustBeOff};
1655 case Intrinsic::nvvm_add_rn_ftz_f:
1656 return {Instruction::FAdd, FTZ_MustBeOn};
1657 case Intrinsic::nvvm_mul_rn_d:
1658 return {Instruction::FMul, FTZ_Any};
1659 case Intrinsic::nvvm_mul_rn_f:
1660 return {Instruction::FMul, FTZ_MustBeOff};
1661 case Intrinsic::nvvm_mul_rn_ftz_f:
1662 return {Instruction::FMul, FTZ_MustBeOn};
1663 case Intrinsic::nvvm_div_rn_d:
1664 return {Instruction::FDiv, FTZ_Any};
1665 case Intrinsic::nvvm_div_rn_f:
1666 return {Instruction::FDiv, FTZ_MustBeOff};
1667 case Intrinsic::nvvm_div_rn_ftz_f:
1668 return {Instruction::FDiv, FTZ_MustBeOn};
1670 // The remainder of cases are NVVM intrinsics that map to LLVM idioms, but
1671 // need special handling.
1673 // We seem to be missing intrinsics for rcp.approx.{ftz.}f32, which is just
1675 case Intrinsic::nvvm_rcp_rn_d:
1676 return {SPC_Reciprocal, FTZ_Any};
1677 case Intrinsic::nvvm_rcp_rn_f:
1678 return {SPC_Reciprocal, FTZ_MustBeOff};
1679 case Intrinsic::nvvm_rcp_rn_ftz_f:
1680 return {SPC_Reciprocal, FTZ_MustBeOn};
1682 // We do not currently simplify intrinsics that give an approximate answer.
1685 // - nvvm_cos_approx_{f,ftz_f}
1686 // - nvvm_ex2_approx_{d,f,ftz_f}
1687 // - nvvm_lg2_approx_{d,f,ftz_f}
1688 // - nvvm_sin_approx_{f,ftz_f}
1689 // - nvvm_sqrt_approx_{f,ftz_f}
1690 // - nvvm_rsqrt_approx_{d,f,ftz_f}
1691 // - nvvm_div_approx_{ftz_d,ftz_f,f}
1692 // - nvvm_rcp_approx_ftz_d
1694 // Ideally we'd encode them as e.g. "fast call @llvm.cos", where "fast"
1695 // means that fastmath is enabled in the intrinsic. Unfortunately only
1696 // binary operators (currently) have a fastmath bit in SelectionDAG, so this
1697 // information gets lost and we can't select on it.
1699 // TODO: div and rcp are lowered to a binary op, so these we could in theory
1700 // lower them to "fast fdiv".
1707 // If Action.FtzRequirementTy is not satisfied by the module's ftz state, we
1708 // can bail out now. (Notice that in the case that IID is not an NVVM
1709 // intrinsic, we don't have to look up any module metadata, as
1710 // FtzRequirementTy will be FTZ_Any.)
1711 if (Action.FtzRequirement != FTZ_Any) {
1713 II->getFunction()->getFnAttribute("nvptx-f32ftz").getValueAsString() ==
1716 if (FtzEnabled != (Action.FtzRequirement == FTZ_MustBeOn))
1720 // Simplify to target-generic intrinsic.
1722 SmallVector<Value *, 4> Args(II->arg_operands());
1723 // All the target-generic intrinsics currently of interest to us have one
1724 // type argument, equal to that of the nvvm intrinsic's argument.
1725 Type *Tys[] = {II->getArgOperand(0)->getType()};
1726 return CallInst::Create(
1727 Intrinsic::getDeclaration(II->getModule(), *Action.IID, Tys), Args);
1730 // Simplify to target-generic binary op.
1731 if (Action.BinaryOp)
1732 return BinaryOperator::Create(*Action.BinaryOp, II->getArgOperand(0),
1733 II->getArgOperand(1), II->getName());
1735 // Simplify to target-generic cast op.
1737 return CastInst::Create(*Action.CastOp, II->getArgOperand(0), II->getType(),
1740 // All that's left are the special cases.
1741 if (!Action.Special)
1744 switch (*Action.Special) {
1745 case SPC_Reciprocal:
1746 // Simplify reciprocal.
1747 return BinaryOperator::Create(
1748 Instruction::FDiv, ConstantFP::get(II->getArgOperand(0)->getType(), 1),
1749 II->getArgOperand(0), II->getName());
1751 llvm_unreachable("All SpecialCase enumerators should be handled in switch.");
1754 Instruction *InstCombiner::visitVAStartInst(VAStartInst &I) {
1755 removeTriviallyEmptyRange(I, Intrinsic::vastart, Intrinsic::vaend, *this);
1759 Instruction *InstCombiner::visitVACopyInst(VACopyInst &I) {
1760 removeTriviallyEmptyRange(I, Intrinsic::vacopy, Intrinsic::vaend, *this);
1764 static Instruction *canonicalizeConstantArg0ToArg1(CallInst &Call) {
1765 assert(Call.getNumArgOperands() > 1 && "Need at least 2 args to swap");
1766 Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
1767 if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
1768 Call.setArgOperand(0, Arg1);
1769 Call.setArgOperand(1, Arg0);
1775 Instruction *InstCombiner::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
1776 WithOverflowInst *WO = cast<WithOverflowInst>(II);
1777 Value *OperationResult = nullptr;
1778 Constant *OverflowResult = nullptr;
1779 if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
1780 WO->getRHS(), *WO, OperationResult, OverflowResult))
1781 return CreateOverflowTuple(WO, OperationResult, OverflowResult);
1785 /// CallInst simplification. This mostly only handles folding of intrinsic
1786 /// instructions. For normal calls, it allows visitCallBase to do the heavy
1788 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
1789 if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
1790 return replaceInstUsesWith(CI, V);
1792 if (isFreeCall(&CI, &TLI))
1793 return visitFree(CI);
1795 // If the caller function is nounwind, mark the call as nounwind, even if the
1797 if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
1798 CI.setDoesNotThrow();
1802 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1803 if (!II) return visitCallBase(CI);
1805 // Intrinsics cannot occur in an invoke or a callbr, so handle them here
1806 // instead of in visitCallBase.
1807 if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
1808 bool Changed = false;
1810 // memmove/cpy/set of zero bytes is a noop.
1811 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
1812 if (NumBytes->isNullValue())
1813 return eraseInstFromFunction(CI);
1815 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
1816 if (CI->getZExtValue() == 1) {
1817 // Replace the instruction with just byte operations. We would
1818 // transform other cases to loads/stores, but we don't know if
1819 // alignment is sufficient.
1823 // No other transformations apply to volatile transfers.
1824 if (auto *M = dyn_cast<MemIntrinsic>(MI))
1825 if (M->isVolatile())
1828 // If we have a memmove and the source operation is a constant global,
1829 // then the source and dest pointers can't alias, so we can change this
1830 // into a call to memcpy.
1831 if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
1832 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1833 if (GVSrc->isConstant()) {
1834 Module *M = CI.getModule();
1835 Intrinsic::ID MemCpyID =
1836 isa<AtomicMemMoveInst>(MMI)
1837 ? Intrinsic::memcpy_element_unordered_atomic
1838 : Intrinsic::memcpy;
1839 Type *Tys[3] = { CI.getArgOperand(0)->getType(),
1840 CI.getArgOperand(1)->getType(),
1841 CI.getArgOperand(2)->getType() };
1842 CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
1847 if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1848 // memmove(x,x,size) -> noop.
1849 if (MTI->getSource() == MTI->getDest())
1850 return eraseInstFromFunction(CI);
1853 // If we can determine a pointer alignment that is bigger than currently
1854 // set, update the alignment.
1855 if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1856 if (Instruction *I = SimplifyAnyMemTransfer(MTI))
1858 } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
1859 if (Instruction *I = SimplifyAnyMemSet(MSI))
1863 if (Changed) return II;
1866 // For vector result intrinsics, use the generic demanded vector support.
1867 if (II->getType()->isVectorTy()) {
1868 auto VWidth = II->getType()->getVectorNumElements();
1869 APInt UndefElts(VWidth, 0);
1870 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
1871 if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
1873 return replaceInstUsesWith(*II, V);
1878 if (Instruction *I = SimplifyNVVMIntrinsic(II, *this))
1881 auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width,
1882 unsigned DemandedWidth) {
1883 APInt UndefElts(Width, 0);
1884 APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
1885 return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
1888 Intrinsic::ID IID = II->getIntrinsicID();
1891 case Intrinsic::objectsize:
1892 if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
1893 return replaceInstUsesWith(CI, V);
1895 case Intrinsic::bswap: {
1896 Value *IIOperand = II->getArgOperand(0);
1899 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
1900 if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
1901 unsigned C = X->getType()->getPrimitiveSizeInBits() -
1902 IIOperand->getType()->getPrimitiveSizeInBits();
1903 Value *CV = ConstantInt::get(X->getType(), C);
1904 Value *V = Builder.CreateLShr(X, CV);
1905 return new TruncInst(V, IIOperand->getType());
1909 case Intrinsic::masked_load:
1910 if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
1911 return replaceInstUsesWith(CI, SimplifiedMaskedOp);
1913 case Intrinsic::masked_store:
1914 return simplifyMaskedStore(*II);
1915 case Intrinsic::masked_gather:
1916 return simplifyMaskedGather(*II);
1917 case Intrinsic::masked_scatter:
1918 return simplifyMaskedScatter(*II);
1919 case Intrinsic::launder_invariant_group:
1920 case Intrinsic::strip_invariant_group:
1921 if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
1922 return replaceInstUsesWith(*II, SkippedBarrier);
1924 case Intrinsic::powi:
1925 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
1926 // 0 and 1 are handled in instsimplify
1928 // powi(x, -1) -> 1/x
1929 if (Power->isMinusOne())
1930 return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
1931 II->getArgOperand(0));
1932 // powi(x, 2) -> x*x
1933 if (Power->equalsInt(2))
1934 return BinaryOperator::CreateFMul(II->getArgOperand(0),
1935 II->getArgOperand(0));
1939 case Intrinsic::cttz:
1940 case Intrinsic::ctlz:
1941 if (auto *I = foldCttzCtlz(*II, *this))
1945 case Intrinsic::ctpop:
1946 if (auto *I = foldCtpop(*II, *this))
1950 case Intrinsic::fshl:
1951 case Intrinsic::fshr: {
1952 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
1953 Type *Ty = II->getType();
1954 unsigned BitWidth = Ty->getScalarSizeInBits();
1956 if (match(II->getArgOperand(2), m_Constant(ShAmtC)) &&
1957 !isa<ConstantExpr>(ShAmtC) && !ShAmtC->containsConstantExpression()) {
1958 // Canonicalize a shift amount constant operand to modulo the bit-width.
1959 Constant *WidthC = ConstantInt::get(Ty, BitWidth);
1960 Constant *ModuloC = ConstantExpr::getURem(ShAmtC, WidthC);
1961 if (ModuloC != ShAmtC) {
1962 II->setArgOperand(2, ModuloC);
1965 assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) ==
1966 ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) &&
1967 "Shift amount expected to be modulo bitwidth");
1969 // Canonicalize funnel shift right by constant to funnel shift left. This
1970 // is not entirely arbitrary. For historical reasons, the backend may
1971 // recognize rotate left patterns but miss rotate right patterns.
1972 if (IID == Intrinsic::fshr) {
1973 // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
1974 Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
1975 Module *Mod = II->getModule();
1976 Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
1977 return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
1979 assert(IID == Intrinsic::fshl &&
1980 "All funnel shifts by simple constants should go left");
1982 // fshl(X, 0, C) --> shl X, C
1983 // fshl(X, undef, C) --> shl X, C
1984 if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
1985 return BinaryOperator::CreateShl(Op0, ShAmtC);
1987 // fshl(0, X, C) --> lshr X, (BW-C)
1988 // fshl(undef, X, C) --> lshr X, (BW-C)
1989 if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
1990 return BinaryOperator::CreateLShr(Op1,
1991 ConstantExpr::getSub(WidthC, ShAmtC));
1993 // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
1994 if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
1995 Module *Mod = II->getModule();
1996 Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
1997 return CallInst::Create(Bswap, { Op0 });
2001 // Left or right might be masked.
2002 if (SimplifyDemandedInstructionBits(*II))
2005 // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
2006 // so only the low bits of the shift amount are demanded if the bitwidth is
2008 if (!isPowerOf2_32(BitWidth))
2010 APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
2011 KnownBits Op2Known(BitWidth);
2012 if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
2016 case Intrinsic::uadd_with_overflow:
2017 case Intrinsic::sadd_with_overflow: {
2018 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2020 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2023 // Given 2 constant operands whose sum does not overflow:
2024 // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
2025 // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
2027 const APInt *C0, *C1;
2028 Value *Arg0 = II->getArgOperand(0);
2029 Value *Arg1 = II->getArgOperand(1);
2030 bool IsSigned = IID == Intrinsic::sadd_with_overflow;
2031 bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
2032 : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
2033 if (HasNWAdd && match(Arg1, m_APInt(C1))) {
2036 IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
2038 return replaceInstUsesWith(
2039 *II, Builder.CreateBinaryIntrinsic(
2040 IID, X, ConstantInt::get(Arg1->getType(), NewC)));
2045 case Intrinsic::umul_with_overflow:
2046 case Intrinsic::smul_with_overflow:
2047 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2051 case Intrinsic::usub_with_overflow:
2052 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2056 case Intrinsic::ssub_with_overflow: {
2057 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2061 Value *Arg0 = II->getArgOperand(0);
2062 Value *Arg1 = II->getArgOperand(1);
2063 // Given a constant C that is not the minimum signed value
2064 // for an integer of a given bit width:
2066 // ssubo X, C -> saddo X, -C
2067 if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
2068 Value *NegVal = ConstantExpr::getNeg(C);
2069 // Build a saddo call that is equivalent to the discovered
2071 return replaceInstUsesWith(
2072 *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
2079 case Intrinsic::uadd_sat:
2080 case Intrinsic::sadd_sat:
2081 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2084 case Intrinsic::usub_sat:
2085 case Intrinsic::ssub_sat: {
2086 SaturatingInst *SI = cast<SaturatingInst>(II);
2087 Type *Ty = SI->getType();
2088 Value *Arg0 = SI->getLHS();
2089 Value *Arg1 = SI->getRHS();
2091 // Make use of known overflow information.
2092 OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
2095 case OverflowResult::MayOverflow:
2097 case OverflowResult::NeverOverflows:
2099 return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
2101 return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
2102 case OverflowResult::AlwaysOverflowsLow: {
2103 unsigned BitWidth = Ty->getScalarSizeInBits();
2104 APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
2105 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
2107 case OverflowResult::AlwaysOverflowsHigh: {
2108 unsigned BitWidth = Ty->getScalarSizeInBits();
2109 APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
2110 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
2114 // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
2116 if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
2117 C->isNotMinSignedValue()) {
2118 Value *NegVal = ConstantExpr::getNeg(C);
2119 return replaceInstUsesWith(
2120 *II, Builder.CreateBinaryIntrinsic(
2121 Intrinsic::sadd_sat, Arg0, NegVal));
2124 // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
2125 // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
2126 // if Val and Val2 have the same sign
2127 if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
2129 const APInt *Val, *Val2;
2132 IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
2133 if (Other->getIntrinsicID() == IID &&
2134 match(Arg1, m_APInt(Val)) &&
2135 match(Other->getArgOperand(0), m_Value(X)) &&
2136 match(Other->getArgOperand(1), m_APInt(Val2))) {
2138 NewVal = Val->uadd_sat(*Val2);
2139 else if (Val->isNonNegative() == Val2->isNonNegative()) {
2141 NewVal = Val->sadd_ov(*Val2, Overflow);
2143 // Both adds together may add more than SignedMaxValue
2144 // without saturating the final result.
2148 // Cannot fold saturated addition with different signs.
2152 return replaceInstUsesWith(
2153 *II, Builder.CreateBinaryIntrinsic(
2154 IID, X, ConstantInt::get(II->getType(), NewVal)));
2160 case Intrinsic::minnum:
2161 case Intrinsic::maxnum:
2162 case Intrinsic::minimum:
2163 case Intrinsic::maximum: {
2164 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2166 Value *Arg0 = II->getArgOperand(0);
2167 Value *Arg1 = II->getArgOperand(1);
2169 if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
2170 (Arg0->hasOneUse() || Arg1->hasOneUse())) {
2171 // If both operands are negated, invert the call and negate the result:
2172 // min(-X, -Y) --> -(max(X, Y))
2173 // max(-X, -Y) --> -(min(X, Y))
2174 Intrinsic::ID NewIID;
2176 case Intrinsic::maxnum:
2177 NewIID = Intrinsic::minnum;
2179 case Intrinsic::minnum:
2180 NewIID = Intrinsic::maxnum;
2182 case Intrinsic::maximum:
2183 NewIID = Intrinsic::minimum;
2185 case Intrinsic::minimum:
2186 NewIID = Intrinsic::maximum;
2189 llvm_unreachable("unexpected intrinsic ID");
2191 Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
2192 Instruction *FNeg = BinaryOperator::CreateFNeg(NewCall);
2193 FNeg->copyIRFlags(II);
2197 // m(m(X, C2), C1) -> m(X, C)
2198 const APFloat *C1, *C2;
2199 if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
2200 if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
2201 ((match(M->getArgOperand(0), m_Value(X)) &&
2202 match(M->getArgOperand(1), m_APFloat(C2))) ||
2203 (match(M->getArgOperand(1), m_Value(X)) &&
2204 match(M->getArgOperand(0), m_APFloat(C2))))) {
2207 case Intrinsic::maxnum:
2208 Res = maxnum(*C1, *C2);
2210 case Intrinsic::minnum:
2211 Res = minnum(*C1, *C2);
2213 case Intrinsic::maximum:
2214 Res = maximum(*C1, *C2);
2216 case Intrinsic::minimum:
2217 Res = minimum(*C1, *C2);
2220 llvm_unreachable("unexpected intrinsic ID");
2222 Instruction *NewCall = Builder.CreateBinaryIntrinsic(
2223 IID, X, ConstantFP::get(Arg0->getType(), Res));
2224 NewCall->copyIRFlags(II);
2225 return replaceInstUsesWith(*II, NewCall);
2231 case Intrinsic::fmuladd: {
2232 // Canonicalize fast fmuladd to the separate fmul + fadd.
2234 BuilderTy::FastMathFlagGuard Guard(Builder);
2235 Builder.setFastMathFlags(II->getFastMathFlags());
2236 Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
2237 II->getArgOperand(1));
2238 Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
2240 return replaceInstUsesWith(*II, Add);
2243 // Try to simplify the underlying FMul.
2244 if (Value *V = SimplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
2245 II->getFastMathFlags(),
2246 SQ.getWithInstruction(II))) {
2247 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
2248 FAdd->copyFastMathFlags(II);
2254 case Intrinsic::fma: {
2255 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2258 // fma fneg(x), fneg(y), z -> fma x, y, z
2259 Value *Src0 = II->getArgOperand(0);
2260 Value *Src1 = II->getArgOperand(1);
2262 if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
2263 II->setArgOperand(0, X);
2264 II->setArgOperand(1, Y);
2268 // fma fabs(x), fabs(x), z -> fma x, x, z
2269 if (match(Src0, m_FAbs(m_Value(X))) &&
2270 match(Src1, m_FAbs(m_Specific(X)))) {
2271 II->setArgOperand(0, X);
2272 II->setArgOperand(1, X);
2276 // Try to simplify the underlying FMul. We can only apply simplifications
2277 // that do not require rounding.
2278 if (Value *V = SimplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1),
2279 II->getFastMathFlags(),
2280 SQ.getWithInstruction(II))) {
2281 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
2282 FAdd->copyFastMathFlags(II);
2288 case Intrinsic::copysign: {
2289 if (SignBitMustBeZero(II->getArgOperand(1), &TLI)) {
2290 // If we know that the sign argument is positive, reduce to FABS:
2291 // copysign X, Pos --> fabs X
2292 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs,
2293 II->getArgOperand(0), II);
2294 return replaceInstUsesWith(*II, Fabs);
2296 // TODO: There should be a ValueTracking sibling like SignBitMustBeOne.
2298 if (match(II->getArgOperand(1), m_APFloat(C)) && C->isNegative()) {
2299 // If we know that the sign argument is negative, reduce to FNABS:
2300 // copysign X, Neg --> fneg (fabs X)
2301 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs,
2302 II->getArgOperand(0), II);
2303 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II));
2306 // Propagate sign argument through nested calls:
2307 // copysign X, (copysign ?, SignArg) --> copysign X, SignArg
2309 if (match(II->getArgOperand(1),
2310 m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(SignArg)))) {
2311 II->setArgOperand(1, SignArg);
2317 case Intrinsic::fabs: {
2319 Constant *LHS, *RHS;
2320 if (match(II->getArgOperand(0),
2321 m_Select(m_Value(Cond), m_Constant(LHS), m_Constant(RHS)))) {
2322 CallInst *Call0 = Builder.CreateCall(II->getCalledFunction(), {LHS});
2323 CallInst *Call1 = Builder.CreateCall(II->getCalledFunction(), {RHS});
2324 return SelectInst::Create(Cond, Call0, Call1);
2329 case Intrinsic::ceil:
2330 case Intrinsic::floor:
2331 case Intrinsic::round:
2332 case Intrinsic::nearbyint:
2333 case Intrinsic::rint:
2334 case Intrinsic::trunc: {
2336 if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
2337 // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
2338 Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
2339 return new FPExtInst(NarrowII, II->getType());
2343 case Intrinsic::cos:
2344 case Intrinsic::amdgcn_cos: {
2346 Value *Src = II->getArgOperand(0);
2347 if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
2348 // cos(-x) -> cos(x)
2349 // cos(fabs(x)) -> cos(x)
2350 II->setArgOperand(0, X);
2355 case Intrinsic::sin: {
2357 if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
2358 // sin(-x) --> -sin(x)
2359 Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
2360 Instruction *FNeg = BinaryOperator::CreateFNeg(NewSin);
2361 FNeg->copyFastMathFlags(II);
2366 case Intrinsic::ppc_altivec_lvx:
2367 case Intrinsic::ppc_altivec_lvxl:
2368 // Turn PPC lvx -> load if the pointer is known aligned.
2369 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
2371 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2372 PointerType::getUnqual(II->getType()));
2373 return new LoadInst(II->getType(), Ptr);
2376 case Intrinsic::ppc_vsx_lxvw4x:
2377 case Intrinsic::ppc_vsx_lxvd2x: {
2378 // Turn PPC VSX loads into normal loads.
2379 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2380 PointerType::getUnqual(II->getType()));
2381 return new LoadInst(II->getType(), Ptr, Twine(""), false, Align::None());
2383 case Intrinsic::ppc_altivec_stvx:
2384 case Intrinsic::ppc_altivec_stvxl:
2385 // Turn stvx -> store if the pointer is known aligned.
2386 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
2389 PointerType::getUnqual(II->getArgOperand(0)->getType());
2390 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2391 return new StoreInst(II->getArgOperand(0), Ptr);
2394 case Intrinsic::ppc_vsx_stxvw4x:
2395 case Intrinsic::ppc_vsx_stxvd2x: {
2396 // Turn PPC VSX stores into normal stores.
2397 Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
2398 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2399 return new StoreInst(II->getArgOperand(0), Ptr, false, Align::None());
2401 case Intrinsic::ppc_qpx_qvlfs:
2402 // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
2403 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
2405 Type *VTy = VectorType::get(Builder.getFloatTy(),
2406 II->getType()->getVectorNumElements());
2407 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2408 PointerType::getUnqual(VTy));
2409 Value *Load = Builder.CreateLoad(VTy, Ptr);
2410 return new FPExtInst(Load, II->getType());
2413 case Intrinsic::ppc_qpx_qvlfd:
2414 // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
2415 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
2417 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2418 PointerType::getUnqual(II->getType()));
2419 return new LoadInst(II->getType(), Ptr);
2422 case Intrinsic::ppc_qpx_qvstfs:
2423 // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
2424 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
2426 Type *VTy = VectorType::get(Builder.getFloatTy(),
2427 II->getArgOperand(0)->getType()->getVectorNumElements());
2428 Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
2429 Type *OpPtrTy = PointerType::getUnqual(VTy);
2430 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2431 return new StoreInst(TOp, Ptr);
2434 case Intrinsic::ppc_qpx_qvstfd:
2435 // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
2436 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
2439 PointerType::getUnqual(II->getArgOperand(0)->getType());
2440 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2441 return new StoreInst(II->getArgOperand(0), Ptr);
2445 case Intrinsic::x86_bmi_bextr_32:
2446 case Intrinsic::x86_bmi_bextr_64:
2447 case Intrinsic::x86_tbm_bextri_u32:
2448 case Intrinsic::x86_tbm_bextri_u64:
2449 // If the RHS is a constant we can try some simplifications.
2450 if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2451 uint64_t Shift = C->getZExtValue();
2452 uint64_t Length = (Shift >> 8) & 0xff;
2454 unsigned BitWidth = II->getType()->getIntegerBitWidth();
2455 // If the length is 0 or the shift is out of range, replace with zero.
2456 if (Length == 0 || Shift >= BitWidth)
2457 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2458 // If the LHS is also a constant, we can completely constant fold this.
2459 if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2460 uint64_t Result = InC->getZExtValue() >> Shift;
2461 if (Length > BitWidth)
2463 Result &= maskTrailingOnes<uint64_t>(Length);
2464 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2466 // TODO should we turn this into 'and' if shift is 0? Or 'shl' if we
2467 // are only masking bits that a shift already cleared?
2471 case Intrinsic::x86_bmi_bzhi_32:
2472 case Intrinsic::x86_bmi_bzhi_64:
2473 // If the RHS is a constant we can try some simplifications.
2474 if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2475 uint64_t Index = C->getZExtValue() & 0xff;
2476 unsigned BitWidth = II->getType()->getIntegerBitWidth();
2477 if (Index >= BitWidth)
2478 return replaceInstUsesWith(CI, II->getArgOperand(0));
2480 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2481 // If the LHS is also a constant, we can completely constant fold this.
2482 if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2483 uint64_t Result = InC->getZExtValue();
2484 Result &= maskTrailingOnes<uint64_t>(Index);
2485 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2487 // TODO should we convert this to an AND if the RHS is constant?
2490 case Intrinsic::x86_bmi_pext_32:
2491 case Intrinsic::x86_bmi_pext_64:
2492 if (auto *MaskC = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2493 if (MaskC->isNullValue())
2494 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2495 if (MaskC->isAllOnesValue())
2496 return replaceInstUsesWith(CI, II->getArgOperand(0));
2498 if (auto *SrcC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2499 uint64_t Src = SrcC->getZExtValue();
2500 uint64_t Mask = MaskC->getZExtValue();
2501 uint64_t Result = 0;
2502 uint64_t BitToSet = 1;
2505 // Isolate lowest set bit.
2506 uint64_t BitToTest = Mask & -Mask;
2507 if (BitToTest & Src)
2511 // Clear lowest set bit.
2515 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2519 case Intrinsic::x86_bmi_pdep_32:
2520 case Intrinsic::x86_bmi_pdep_64:
2521 if (auto *MaskC = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2522 if (MaskC->isNullValue())
2523 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2524 if (MaskC->isAllOnesValue())
2525 return replaceInstUsesWith(CI, II->getArgOperand(0));
2527 if (auto *SrcC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2528 uint64_t Src = SrcC->getZExtValue();
2529 uint64_t Mask = MaskC->getZExtValue();
2530 uint64_t Result = 0;
2531 uint64_t BitToTest = 1;
2534 // Isolate lowest set bit.
2535 uint64_t BitToSet = Mask & -Mask;
2536 if (BitToTest & Src)
2540 // Clear lowest set bit;
2544 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2549 case Intrinsic::x86_vcvtph2ps_128:
2550 case Intrinsic::x86_vcvtph2ps_256: {
2551 auto Arg = II->getArgOperand(0);
2552 auto ArgType = cast<VectorType>(Arg->getType());
2553 auto RetType = cast<VectorType>(II->getType());
2554 unsigned ArgWidth = ArgType->getNumElements();
2555 unsigned RetWidth = RetType->getNumElements();
2556 assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
2557 assert(ArgType->isIntOrIntVectorTy() &&
2558 ArgType->getScalarSizeInBits() == 16 &&
2559 "CVTPH2PS input type should be 16-bit integer vector");
2560 assert(RetType->getScalarType()->isFloatTy() &&
2561 "CVTPH2PS output type should be 32-bit float vector");
2563 // Constant folding: Convert to generic half to single conversion.
2564 if (isa<ConstantAggregateZero>(Arg))
2565 return replaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
2567 if (isa<ConstantDataVector>(Arg)) {
2568 auto VectorHalfAsShorts = Arg;
2569 if (RetWidth < ArgWidth) {
2570 SmallVector<uint32_t, 8> SubVecMask;
2571 for (unsigned i = 0; i != RetWidth; ++i)
2572 SubVecMask.push_back((int)i);
2573 VectorHalfAsShorts = Builder.CreateShuffleVector(
2574 Arg, UndefValue::get(ArgType), SubVecMask);
2577 auto VectorHalfType =
2578 VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
2580 Builder.CreateBitCast(VectorHalfAsShorts, VectorHalfType);
2581 auto VectorFloats = Builder.CreateFPExt(VectorHalfs, RetType);
2582 return replaceInstUsesWith(*II, VectorFloats);
2585 // We only use the lowest lanes of the argument.
2586 if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
2587 II->setArgOperand(0, V);
2593 case Intrinsic::x86_sse_cvtss2si:
2594 case Intrinsic::x86_sse_cvtss2si64:
2595 case Intrinsic::x86_sse_cvttss2si:
2596 case Intrinsic::x86_sse_cvttss2si64:
2597 case Intrinsic::x86_sse2_cvtsd2si:
2598 case Intrinsic::x86_sse2_cvtsd2si64:
2599 case Intrinsic::x86_sse2_cvttsd2si:
2600 case Intrinsic::x86_sse2_cvttsd2si64:
2601 case Intrinsic::x86_avx512_vcvtss2si32:
2602 case Intrinsic::x86_avx512_vcvtss2si64:
2603 case Intrinsic::x86_avx512_vcvtss2usi32:
2604 case Intrinsic::x86_avx512_vcvtss2usi64:
2605 case Intrinsic::x86_avx512_vcvtsd2si32:
2606 case Intrinsic::x86_avx512_vcvtsd2si64:
2607 case Intrinsic::x86_avx512_vcvtsd2usi32:
2608 case Intrinsic::x86_avx512_vcvtsd2usi64:
2609 case Intrinsic::x86_avx512_cvttss2si:
2610 case Intrinsic::x86_avx512_cvttss2si64:
2611 case Intrinsic::x86_avx512_cvttss2usi:
2612 case Intrinsic::x86_avx512_cvttss2usi64:
2613 case Intrinsic::x86_avx512_cvttsd2si:
2614 case Intrinsic::x86_avx512_cvttsd2si64:
2615 case Intrinsic::x86_avx512_cvttsd2usi:
2616 case Intrinsic::x86_avx512_cvttsd2usi64: {
2617 // These intrinsics only demand the 0th element of their input vectors. If
2618 // we can simplify the input based on that, do so now.
2619 Value *Arg = II->getArgOperand(0);
2620 unsigned VWidth = Arg->getType()->getVectorNumElements();
2621 if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
2622 II->setArgOperand(0, V);
2628 case Intrinsic::x86_mmx_pmovmskb:
2629 case Intrinsic::x86_sse_movmsk_ps:
2630 case Intrinsic::x86_sse2_movmsk_pd:
2631 case Intrinsic::x86_sse2_pmovmskb_128:
2632 case Intrinsic::x86_avx_movmsk_pd_256:
2633 case Intrinsic::x86_avx_movmsk_ps_256:
2634 case Intrinsic::x86_avx2_pmovmskb:
2635 if (Value *V = simplifyX86movmsk(*II, Builder))
2636 return replaceInstUsesWith(*II, V);
2639 case Intrinsic::x86_sse_comieq_ss:
2640 case Intrinsic::x86_sse_comige_ss:
2641 case Intrinsic::x86_sse_comigt_ss:
2642 case Intrinsic::x86_sse_comile_ss:
2643 case Intrinsic::x86_sse_comilt_ss:
2644 case Intrinsic::x86_sse_comineq_ss:
2645 case Intrinsic::x86_sse_ucomieq_ss:
2646 case Intrinsic::x86_sse_ucomige_ss:
2647 case Intrinsic::x86_sse_ucomigt_ss:
2648 case Intrinsic::x86_sse_ucomile_ss:
2649 case Intrinsic::x86_sse_ucomilt_ss:
2650 case Intrinsic::x86_sse_ucomineq_ss:
2651 case Intrinsic::x86_sse2_comieq_sd:
2652 case Intrinsic::x86_sse2_comige_sd:
2653 case Intrinsic::x86_sse2_comigt_sd:
2654 case Intrinsic::x86_sse2_comile_sd:
2655 case Intrinsic::x86_sse2_comilt_sd:
2656 case Intrinsic::x86_sse2_comineq_sd:
2657 case Intrinsic::x86_sse2_ucomieq_sd:
2658 case Intrinsic::x86_sse2_ucomige_sd:
2659 case Intrinsic::x86_sse2_ucomigt_sd:
2660 case Intrinsic::x86_sse2_ucomile_sd:
2661 case Intrinsic::x86_sse2_ucomilt_sd:
2662 case Intrinsic::x86_sse2_ucomineq_sd:
2663 case Intrinsic::x86_avx512_vcomi_ss:
2664 case Intrinsic::x86_avx512_vcomi_sd:
2665 case Intrinsic::x86_avx512_mask_cmp_ss:
2666 case Intrinsic::x86_avx512_mask_cmp_sd: {
2667 // These intrinsics only demand the 0th element of their input vectors. If
2668 // we can simplify the input based on that, do so now.
2669 bool MadeChange = false;
2670 Value *Arg0 = II->getArgOperand(0);
2671 Value *Arg1 = II->getArgOperand(1);
2672 unsigned VWidth = Arg0->getType()->getVectorNumElements();
2673 if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
2674 II->setArgOperand(0, V);
2677 if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
2678 II->setArgOperand(1, V);
2685 case Intrinsic::x86_avx512_cmp_pd_128:
2686 case Intrinsic::x86_avx512_cmp_pd_256:
2687 case Intrinsic::x86_avx512_cmp_pd_512:
2688 case Intrinsic::x86_avx512_cmp_ps_128:
2689 case Intrinsic::x86_avx512_cmp_ps_256:
2690 case Intrinsic::x86_avx512_cmp_ps_512: {
2691 // Folding cmp(sub(a,b),0) -> cmp(a,b) and cmp(0,sub(a,b)) -> cmp(b,a)
2692 Value *Arg0 = II->getArgOperand(0);
2693 Value *Arg1 = II->getArgOperand(1);
2694 bool Arg0IsZero = match(Arg0, m_PosZeroFP());
2696 std::swap(Arg0, Arg1);
2698 // This fold requires only the NINF(not +/- inf) since inf minus
2700 // NSZ(No Signed Zeros) is not needed because zeros of any sign are
2701 // equal for both compares.
2702 // NNAN is not needed because nans compare the same for both compares.
2703 // The compare intrinsic uses the above assumptions and therefore
2704 // doesn't require additional flags.
2705 if ((match(Arg0, m_OneUse(m_FSub(m_Value(A), m_Value(B)))) &&
2706 match(Arg1, m_PosZeroFP()) && isa<Instruction>(Arg0) &&
2707 cast<Instruction>(Arg0)->getFastMathFlags().noInfs())) {
2710 II->setArgOperand(0, A);
2711 II->setArgOperand(1, B);
2717 case Intrinsic::x86_avx512_add_ps_512:
2718 case Intrinsic::x86_avx512_div_ps_512:
2719 case Intrinsic::x86_avx512_mul_ps_512:
2720 case Intrinsic::x86_avx512_sub_ps_512:
2721 case Intrinsic::x86_avx512_add_pd_512:
2722 case Intrinsic::x86_avx512_div_pd_512:
2723 case Intrinsic::x86_avx512_mul_pd_512:
2724 case Intrinsic::x86_avx512_sub_pd_512:
2725 // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2727 if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2728 if (R->getValue() == 4) {
2729 Value *Arg0 = II->getArgOperand(0);
2730 Value *Arg1 = II->getArgOperand(1);
2734 default: llvm_unreachable("Case stmts out of sync!");
2735 case Intrinsic::x86_avx512_add_ps_512:
2736 case Intrinsic::x86_avx512_add_pd_512:
2737 V = Builder.CreateFAdd(Arg0, Arg1);
2739 case Intrinsic::x86_avx512_sub_ps_512:
2740 case Intrinsic::x86_avx512_sub_pd_512:
2741 V = Builder.CreateFSub(Arg0, Arg1);
2743 case Intrinsic::x86_avx512_mul_ps_512:
2744 case Intrinsic::x86_avx512_mul_pd_512:
2745 V = Builder.CreateFMul(Arg0, Arg1);
2747 case Intrinsic::x86_avx512_div_ps_512:
2748 case Intrinsic::x86_avx512_div_pd_512:
2749 V = Builder.CreateFDiv(Arg0, Arg1);
2753 return replaceInstUsesWith(*II, V);
2758 case Intrinsic::x86_avx512_mask_add_ss_round:
2759 case Intrinsic::x86_avx512_mask_div_ss_round:
2760 case Intrinsic::x86_avx512_mask_mul_ss_round:
2761 case Intrinsic::x86_avx512_mask_sub_ss_round:
2762 case Intrinsic::x86_avx512_mask_add_sd_round:
2763 case Intrinsic::x86_avx512_mask_div_sd_round:
2764 case Intrinsic::x86_avx512_mask_mul_sd_round:
2765 case Intrinsic::x86_avx512_mask_sub_sd_round:
2766 // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2768 if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(4))) {
2769 if (R->getValue() == 4) {
2770 // Extract the element as scalars.
2771 Value *Arg0 = II->getArgOperand(0);
2772 Value *Arg1 = II->getArgOperand(1);
2773 Value *LHS = Builder.CreateExtractElement(Arg0, (uint64_t)0);
2774 Value *RHS = Builder.CreateExtractElement(Arg1, (uint64_t)0);
2778 default: llvm_unreachable("Case stmts out of sync!");
2779 case Intrinsic::x86_avx512_mask_add_ss_round:
2780 case Intrinsic::x86_avx512_mask_add_sd_round:
2781 V = Builder.CreateFAdd(LHS, RHS);
2783 case Intrinsic::x86_avx512_mask_sub_ss_round:
2784 case Intrinsic::x86_avx512_mask_sub_sd_round:
2785 V = Builder.CreateFSub(LHS, RHS);
2787 case Intrinsic::x86_avx512_mask_mul_ss_round:
2788 case Intrinsic::x86_avx512_mask_mul_sd_round:
2789 V = Builder.CreateFMul(LHS, RHS);
2791 case Intrinsic::x86_avx512_mask_div_ss_round:
2792 case Intrinsic::x86_avx512_mask_div_sd_round:
2793 V = Builder.CreateFDiv(LHS, RHS);
2797 // Handle the masking aspect of the intrinsic.
2798 Value *Mask = II->getArgOperand(3);
2799 auto *C = dyn_cast<ConstantInt>(Mask);
2800 // We don't need a select if we know the mask bit is a 1.
2801 if (!C || !C->getValue()[0]) {
2802 // Cast the mask to an i1 vector and then extract the lowest element.
2803 auto *MaskTy = VectorType::get(Builder.getInt1Ty(),
2804 cast<IntegerType>(Mask->getType())->getBitWidth());
2805 Mask = Builder.CreateBitCast(Mask, MaskTy);
2806 Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
2807 // Extract the lowest element from the passthru operand.
2808 Value *Passthru = Builder.CreateExtractElement(II->getArgOperand(2),
2810 V = Builder.CreateSelect(Mask, V, Passthru);
2813 // Insert the result back into the original argument 0.
2814 V = Builder.CreateInsertElement(Arg0, V, (uint64_t)0);
2816 return replaceInstUsesWith(*II, V);
2821 // Constant fold ashr( <A x Bi>, Ci ).
2822 // Constant fold lshr( <A x Bi>, Ci ).
2823 // Constant fold shl( <A x Bi>, Ci ).
2824 case Intrinsic::x86_sse2_psrai_d:
2825 case Intrinsic::x86_sse2_psrai_w:
2826 case Intrinsic::x86_avx2_psrai_d:
2827 case Intrinsic::x86_avx2_psrai_w:
2828 case Intrinsic::x86_avx512_psrai_q_128:
2829 case Intrinsic::x86_avx512_psrai_q_256:
2830 case Intrinsic::x86_avx512_psrai_d_512:
2831 case Intrinsic::x86_avx512_psrai_q_512:
2832 case Intrinsic::x86_avx512_psrai_w_512:
2833 case Intrinsic::x86_sse2_psrli_d:
2834 case Intrinsic::x86_sse2_psrli_q:
2835 case Intrinsic::x86_sse2_psrli_w:
2836 case Intrinsic::x86_avx2_psrli_d:
2837 case Intrinsic::x86_avx2_psrli_q:
2838 case Intrinsic::x86_avx2_psrli_w:
2839 case Intrinsic::x86_avx512_psrli_d_512:
2840 case Intrinsic::x86_avx512_psrli_q_512:
2841 case Intrinsic::x86_avx512_psrli_w_512:
2842 case Intrinsic::x86_sse2_pslli_d:
2843 case Intrinsic::x86_sse2_pslli_q:
2844 case Intrinsic::x86_sse2_pslli_w:
2845 case Intrinsic::x86_avx2_pslli_d:
2846 case Intrinsic::x86_avx2_pslli_q:
2847 case Intrinsic::x86_avx2_pslli_w:
2848 case Intrinsic::x86_avx512_pslli_d_512:
2849 case Intrinsic::x86_avx512_pslli_q_512:
2850 case Intrinsic::x86_avx512_pslli_w_512:
2851 if (Value *V = simplifyX86immShift(*II, Builder))
2852 return replaceInstUsesWith(*II, V);
2855 case Intrinsic::x86_sse2_psra_d:
2856 case Intrinsic::x86_sse2_psra_w:
2857 case Intrinsic::x86_avx2_psra_d:
2858 case Intrinsic::x86_avx2_psra_w:
2859 case Intrinsic::x86_avx512_psra_q_128:
2860 case Intrinsic::x86_avx512_psra_q_256:
2861 case Intrinsic::x86_avx512_psra_d_512:
2862 case Intrinsic::x86_avx512_psra_q_512:
2863 case Intrinsic::x86_avx512_psra_w_512:
2864 case Intrinsic::x86_sse2_psrl_d:
2865 case Intrinsic::x86_sse2_psrl_q:
2866 case Intrinsic::x86_sse2_psrl_w:
2867 case Intrinsic::x86_avx2_psrl_d:
2868 case Intrinsic::x86_avx2_psrl_q:
2869 case Intrinsic::x86_avx2_psrl_w:
2870 case Intrinsic::x86_avx512_psrl_d_512:
2871 case Intrinsic::x86_avx512_psrl_q_512:
2872 case Intrinsic::x86_avx512_psrl_w_512:
2873 case Intrinsic::x86_sse2_psll_d:
2874 case Intrinsic::x86_sse2_psll_q:
2875 case Intrinsic::x86_sse2_psll_w:
2876 case Intrinsic::x86_avx2_psll_d:
2877 case Intrinsic::x86_avx2_psll_q:
2878 case Intrinsic::x86_avx2_psll_w:
2879 case Intrinsic::x86_avx512_psll_d_512:
2880 case Intrinsic::x86_avx512_psll_q_512:
2881 case Intrinsic::x86_avx512_psll_w_512: {
2882 if (Value *V = simplifyX86immShift(*II, Builder))
2883 return replaceInstUsesWith(*II, V);
2885 // SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
2886 // operand to compute the shift amount.
2887 Value *Arg1 = II->getArgOperand(1);
2888 assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
2889 "Unexpected packed shift size");
2890 unsigned VWidth = Arg1->getType()->getVectorNumElements();
2892 if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
2893 II->setArgOperand(1, V);
2899 case Intrinsic::x86_avx2_psllv_d:
2900 case Intrinsic::x86_avx2_psllv_d_256:
2901 case Intrinsic::x86_avx2_psllv_q:
2902 case Intrinsic::x86_avx2_psllv_q_256:
2903 case Intrinsic::x86_avx512_psllv_d_512:
2904 case Intrinsic::x86_avx512_psllv_q_512:
2905 case Intrinsic::x86_avx512_psllv_w_128:
2906 case Intrinsic::x86_avx512_psllv_w_256:
2907 case Intrinsic::x86_avx512_psllv_w_512:
2908 case Intrinsic::x86_avx2_psrav_d:
2909 case Intrinsic::x86_avx2_psrav_d_256:
2910 case Intrinsic::x86_avx512_psrav_q_128:
2911 case Intrinsic::x86_avx512_psrav_q_256:
2912 case Intrinsic::x86_avx512_psrav_d_512:
2913 case Intrinsic::x86_avx512_psrav_q_512:
2914 case Intrinsic::x86_avx512_psrav_w_128:
2915 case Intrinsic::x86_avx512_psrav_w_256:
2916 case Intrinsic::x86_avx512_psrav_w_512:
2917 case Intrinsic::x86_avx2_psrlv_d:
2918 case Intrinsic::x86_avx2_psrlv_d_256:
2919 case Intrinsic::x86_avx2_psrlv_q:
2920 case Intrinsic::x86_avx2_psrlv_q_256:
2921 case Intrinsic::x86_avx512_psrlv_d_512:
2922 case Intrinsic::x86_avx512_psrlv_q_512:
2923 case Intrinsic::x86_avx512_psrlv_w_128:
2924 case Intrinsic::x86_avx512_psrlv_w_256:
2925 case Intrinsic::x86_avx512_psrlv_w_512:
2926 if (Value *V = simplifyX86varShift(*II, Builder))
2927 return replaceInstUsesWith(*II, V);
2930 case Intrinsic::x86_sse2_packssdw_128:
2931 case Intrinsic::x86_sse2_packsswb_128:
2932 case Intrinsic::x86_avx2_packssdw:
2933 case Intrinsic::x86_avx2_packsswb:
2934 case Intrinsic::x86_avx512_packssdw_512:
2935 case Intrinsic::x86_avx512_packsswb_512:
2936 if (Value *V = simplifyX86pack(*II, Builder, true))
2937 return replaceInstUsesWith(*II, V);
2940 case Intrinsic::x86_sse2_packuswb_128:
2941 case Intrinsic::x86_sse41_packusdw:
2942 case Intrinsic::x86_avx2_packusdw:
2943 case Intrinsic::x86_avx2_packuswb:
2944 case Intrinsic::x86_avx512_packusdw_512:
2945 case Intrinsic::x86_avx512_packuswb_512:
2946 if (Value *V = simplifyX86pack(*II, Builder, false))
2947 return replaceInstUsesWith(*II, V);
2950 case Intrinsic::x86_pclmulqdq:
2951 case Intrinsic::x86_pclmulqdq_256:
2952 case Intrinsic::x86_pclmulqdq_512: {
2953 if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2954 unsigned Imm = C->getZExtValue();
2956 bool MadeChange = false;
2957 Value *Arg0 = II->getArgOperand(0);
2958 Value *Arg1 = II->getArgOperand(1);
2959 unsigned VWidth = Arg0->getType()->getVectorNumElements();
2961 APInt UndefElts1(VWidth, 0);
2962 APInt DemandedElts1 = APInt::getSplat(VWidth,
2963 APInt(2, (Imm & 0x01) ? 2 : 1));
2964 if (Value *V = SimplifyDemandedVectorElts(Arg0, DemandedElts1,
2966 II->setArgOperand(0, V);
2970 APInt UndefElts2(VWidth, 0);
2971 APInt DemandedElts2 = APInt::getSplat(VWidth,
2972 APInt(2, (Imm & 0x10) ? 2 : 1));
2973 if (Value *V = SimplifyDemandedVectorElts(Arg1, DemandedElts2,
2975 II->setArgOperand(1, V);
2979 // If either input elements are undef, the result is zero.
2980 if (DemandedElts1.isSubsetOf(UndefElts1) ||
2981 DemandedElts2.isSubsetOf(UndefElts2))
2982 return replaceInstUsesWith(*II,
2983 ConstantAggregateZero::get(II->getType()));
2991 case Intrinsic::x86_sse41_insertps:
2992 if (Value *V = simplifyX86insertps(*II, Builder))
2993 return replaceInstUsesWith(*II, V);
2996 case Intrinsic::x86_sse4a_extrq: {
2997 Value *Op0 = II->getArgOperand(0);
2998 Value *Op1 = II->getArgOperand(1);
2999 unsigned VWidth0 = Op0->getType()->getVectorNumElements();
3000 unsigned VWidth1 = Op1->getType()->getVectorNumElements();
3001 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
3002 Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
3003 VWidth1 == 16 && "Unexpected operand sizes");
3005 // See if we're dealing with constant values.
3006 Constant *C1 = dyn_cast<Constant>(Op1);
3007 ConstantInt *CILength =
3008 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
3010 ConstantInt *CIIndex =
3011 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
3014 // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
3015 if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
3016 return replaceInstUsesWith(*II, V);
3018 // EXTRQ only uses the lowest 64-bits of the first 128-bit vector
3019 // operands and the lowest 16-bits of the second.
3020 bool MadeChange = false;
3021 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
3022 II->setArgOperand(0, V);
3025 if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
3026 II->setArgOperand(1, V);
3034 case Intrinsic::x86_sse4a_extrqi: {
3035 // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
3036 // bits of the lower 64-bits. The upper 64-bits are undefined.
3037 Value *Op0 = II->getArgOperand(0);
3038 unsigned VWidth = Op0->getType()->getVectorNumElements();
3039 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
3040 "Unexpected operand size");
3042 // See if we're dealing with constant values.
3043 ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(1));
3044 ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));
3046 // Attempt to simplify to a constant or shuffle vector.
3047 if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
3048 return replaceInstUsesWith(*II, V);
3050 // EXTRQI only uses the lowest 64-bits of the first 128-bit vector
3052 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
3053 II->setArgOperand(0, V);
3059 case Intrinsic::x86_sse4a_insertq: {
3060 Value *Op0 = II->getArgOperand(0);
3061 Value *Op1 = II->getArgOperand(1);
3062 unsigned VWidth = Op0->getType()->getVectorNumElements();
3063 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
3064 Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
3065 Op1->getType()->getVectorNumElements() == 2 &&
3066 "Unexpected operand size");
3068 // See if we're dealing with constant values.
3069 Constant *C1 = dyn_cast<Constant>(Op1);
3071 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
3074 // Attempt to simplify to a constant, shuffle vector or INSERTQI call.
3076 const APInt &V11 = CI11->getValue();
3077 APInt Len = V11.zextOrTrunc(6);
3078 APInt Idx = V11.lshr(8).zextOrTrunc(6);
3079 if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
3080 return replaceInstUsesWith(*II, V);
3083 // INSERTQ only uses the lowest 64-bits of the first 128-bit vector
3085 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
3086 II->setArgOperand(0, V);
3092 case Intrinsic::x86_sse4a_insertqi: {
3093 // INSERTQI: Extract lowest Length bits from lower half of second source and
3094 // insert over first source starting at Index bit. The upper 64-bits are
3096 Value *Op0 = II->getArgOperand(0);
3097 Value *Op1 = II->getArgOperand(1);
3098 unsigned VWidth0 = Op0->getType()->getVectorNumElements();
3099 unsigned VWidth1 = Op1->getType()->getVectorNumElements();
3100 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
3101 Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
3102 VWidth1 == 2 && "Unexpected operand sizes");
3104 // See if we're dealing with constant values.
3105 ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(2));
3106 ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3));
3108 // Attempt to simplify to a constant or shuffle vector.
3109 if (CILength && CIIndex) {
3110 APInt Len = CILength->getValue().zextOrTrunc(6);
3111 APInt Idx = CIIndex->getValue().zextOrTrunc(6);
3112 if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
3113 return replaceInstUsesWith(*II, V);
3116 // INSERTQI only uses the lowest 64-bits of the first two 128-bit vector
3118 bool MadeChange = false;
3119 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
3120 II->setArgOperand(0, V);
3123 if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
3124 II->setArgOperand(1, V);
3132 case Intrinsic::x86_sse41_pblendvb:
3133 case Intrinsic::x86_sse41_blendvps:
3134 case Intrinsic::x86_sse41_blendvpd:
3135 case Intrinsic::x86_avx_blendv_ps_256:
3136 case Intrinsic::x86_avx_blendv_pd_256:
3137 case Intrinsic::x86_avx2_pblendvb: {
3138 // fold (blend A, A, Mask) -> A
3139 Value *Op0 = II->getArgOperand(0);
3140 Value *Op1 = II->getArgOperand(1);
3141 Value *Mask = II->getArgOperand(2);
3143 return replaceInstUsesWith(CI, Op0);
3145 // Zero Mask - select 1st argument.
3146 if (isa<ConstantAggregateZero>(Mask))
3147 return replaceInstUsesWith(CI, Op0);
3149 // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
3150 if (auto *ConstantMask = dyn_cast<ConstantDataVector>(Mask)) {
3151 Constant *NewSelector = getNegativeIsTrueBoolVec(ConstantMask);
3152 return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
3155 // Convert to a vector select if we can bypass casts and find a boolean
3156 // vector condition value.
3158 Mask = peekThroughBitcast(Mask);
3159 if (match(Mask, m_SExt(m_Value(BoolVec))) &&
3160 BoolVec->getType()->isVectorTy() &&
3161 BoolVec->getType()->getScalarSizeInBits() == 1) {
3162 assert(Mask->getType()->getPrimitiveSizeInBits() ==
3163 II->getType()->getPrimitiveSizeInBits() &&
3164 "Not expecting mask and operands with different sizes");
3166 unsigned NumMaskElts = Mask->getType()->getVectorNumElements();
3167 unsigned NumOperandElts = II->getType()->getVectorNumElements();
3168 if (NumMaskElts == NumOperandElts)
3169 return SelectInst::Create(BoolVec, Op1, Op0);
3171 // If the mask has less elements than the operands, each mask bit maps to
3172 // multiple elements of the operands. Bitcast back and forth.
3173 if (NumMaskElts < NumOperandElts) {
3174 Value *CastOp0 = Builder.CreateBitCast(Op0, Mask->getType());
3175 Value *CastOp1 = Builder.CreateBitCast(Op1, Mask->getType());
3176 Value *Sel = Builder.CreateSelect(BoolVec, CastOp1, CastOp0);
3177 return new BitCastInst(Sel, II->getType());
3184 case Intrinsic::x86_ssse3_pshuf_b_128:
3185 case Intrinsic::x86_avx2_pshuf_b:
3186 case Intrinsic::x86_avx512_pshuf_b_512:
3187 if (Value *V = simplifyX86pshufb(*II, Builder))
3188 return replaceInstUsesWith(*II, V);
3191 case Intrinsic::x86_avx_vpermilvar_ps:
3192 case Intrinsic::x86_avx_vpermilvar_ps_256:
3193 case Intrinsic::x86_avx512_vpermilvar_ps_512:
3194 case Intrinsic::x86_avx_vpermilvar_pd:
3195 case Intrinsic::x86_avx_vpermilvar_pd_256:
3196 case Intrinsic::x86_avx512_vpermilvar_pd_512:
3197 if (Value *V = simplifyX86vpermilvar(*II, Builder))
3198 return replaceInstUsesWith(*II, V);
3201 case Intrinsic::x86_avx2_permd:
3202 case Intrinsic::x86_avx2_permps:
3203 case Intrinsic::x86_avx512_permvar_df_256:
3204 case Intrinsic::x86_avx512_permvar_df_512:
3205 case Intrinsic::x86_avx512_permvar_di_256:
3206 case Intrinsic::x86_avx512_permvar_di_512:
3207 case Intrinsic::x86_avx512_permvar_hi_128:
3208 case Intrinsic::x86_avx512_permvar_hi_256:
3209 case Intrinsic::x86_avx512_permvar_hi_512:
3210 case Intrinsic::x86_avx512_permvar_qi_128:
3211 case Intrinsic::x86_avx512_permvar_qi_256:
3212 case Intrinsic::x86_avx512_permvar_qi_512:
3213 case Intrinsic::x86_avx512_permvar_sf_512:
3214 case Intrinsic::x86_avx512_permvar_si_512:
3215 if (Value *V = simplifyX86vpermv(*II, Builder))
3216 return replaceInstUsesWith(*II, V);
3219 case Intrinsic::x86_avx_maskload_ps:
3220 case Intrinsic::x86_avx_maskload_pd:
3221 case Intrinsic::x86_avx_maskload_ps_256:
3222 case Intrinsic::x86_avx_maskload_pd_256:
3223 case Intrinsic::x86_avx2_maskload_d:
3224 case Intrinsic::x86_avx2_maskload_q:
3225 case Intrinsic::x86_avx2_maskload_d_256:
3226 case Intrinsic::x86_avx2_maskload_q_256:
3227 if (Instruction *I = simplifyX86MaskedLoad(*II, *this))
3231 case Intrinsic::x86_sse2_maskmov_dqu:
3232 case Intrinsic::x86_avx_maskstore_ps:
3233 case Intrinsic::x86_avx_maskstore_pd:
3234 case Intrinsic::x86_avx_maskstore_ps_256:
3235 case Intrinsic::x86_avx_maskstore_pd_256:
3236 case Intrinsic::x86_avx2_maskstore_d:
3237 case Intrinsic::x86_avx2_maskstore_q:
3238 case Intrinsic::x86_avx2_maskstore_d_256:
3239 case Intrinsic::x86_avx2_maskstore_q_256:
3240 if (simplifyX86MaskedStore(*II, *this))
3244 case Intrinsic::x86_addcarry_32:
3245 case Intrinsic::x86_addcarry_64:
3246 if (Value *V = simplifyX86addcarry(*II, Builder))
3247 return replaceInstUsesWith(*II, V);
3250 case Intrinsic::ppc_altivec_vperm:
3251 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
3252 // Note that ppc_altivec_vperm has a big-endian bias, so when creating
3253 // a vectorshuffle for little endian, we must undo the transformation
3254 // performed on vec_perm in altivec.h. That is, we must complement
3255 // the permutation mask with respect to 31 and reverse the order of
3257 if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
3258 assert(Mask->getType()->getVectorNumElements() == 16 &&
3259 "Bad type for intrinsic!");
3261 // Check that all of the elements are integer constants or undefs.
3262 bool AllEltsOk = true;
3263 for (unsigned i = 0; i != 16; ++i) {
3264 Constant *Elt = Mask->getAggregateElement(i);
3265 if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
3272 // Cast the input vectors to byte vectors.
3273 Value *Op0 = Builder.CreateBitCast(II->getArgOperand(0),
3275 Value *Op1 = Builder.CreateBitCast(II->getArgOperand(1),
3277 Value *Result = UndefValue::get(Op0->getType());
3279 // Only extract each element once.
3280 Value *ExtractedElts[32];
3281 memset(ExtractedElts, 0, sizeof(ExtractedElts));
3283 for (unsigned i = 0; i != 16; ++i) {
3284 if (isa<UndefValue>(Mask->getAggregateElement(i)))
3287 cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
3288 Idx &= 31; // Match the hardware behavior.
3289 if (DL.isLittleEndian())
3292 if (!ExtractedElts[Idx]) {
3293 Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
3294 Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
3295 ExtractedElts[Idx] =
3296 Builder.CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
3297 Builder.getInt32(Idx&15));
3300 // Insert this value into the result vector.
3301 Result = Builder.CreateInsertElement(Result, ExtractedElts[Idx],
3302 Builder.getInt32(i));
3304 return CastInst::Create(Instruction::BitCast, Result, CI.getType());
3309 case Intrinsic::arm_neon_vld1: {
3310 unsigned MemAlign = getKnownAlignment(II->getArgOperand(0),
3312 if (Value *V = simplifyNeonVld1(*II, MemAlign, Builder))
3313 return replaceInstUsesWith(*II, V);
3317 case Intrinsic::arm_neon_vld2:
3318 case Intrinsic::arm_neon_vld3:
3319 case Intrinsic::arm_neon_vld4:
3320 case Intrinsic::arm_neon_vld2lane:
3321 case Intrinsic::arm_neon_vld3lane:
3322 case Intrinsic::arm_neon_vld4lane:
3323 case Intrinsic::arm_neon_vst1:
3324 case Intrinsic::arm_neon_vst2:
3325 case Intrinsic::arm_neon_vst3:
3326 case Intrinsic::arm_neon_vst4:
3327 case Intrinsic::arm_neon_vst2lane:
3328 case Intrinsic::arm_neon_vst3lane:
3329 case Intrinsic::arm_neon_vst4lane: {
3331 getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
3332 unsigned AlignArg = II->getNumArgOperands() - 1;
3333 ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
3334 if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
3335 II->setArgOperand(AlignArg,
3336 ConstantInt::get(Type::getInt32Ty(II->getContext()),
3343 case Intrinsic::arm_neon_vtbl1:
3344 case Intrinsic::aarch64_neon_tbl1:
3345 if (Value *V = simplifyNeonTbl1(*II, Builder))
3346 return replaceInstUsesWith(*II, V);
3349 case Intrinsic::arm_neon_vmulls:
3350 case Intrinsic::arm_neon_vmullu:
3351 case Intrinsic::aarch64_neon_smull:
3352 case Intrinsic::aarch64_neon_umull: {
3353 Value *Arg0 = II->getArgOperand(0);
3354 Value *Arg1 = II->getArgOperand(1);
3356 // Handle mul by zero first:
3357 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
3358 return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
3361 // Check for constant LHS & RHS - in this case we just simplify.
3362 bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
3363 IID == Intrinsic::aarch64_neon_umull);
3364 VectorType *NewVT = cast<VectorType>(II->getType());
3365 if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
3366 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
3367 CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
3368 CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
3370 return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
3373 // Couldn't simplify - canonicalize constant to the RHS.
3374 std::swap(Arg0, Arg1);
3377 // Handle mul by one:
3378 if (Constant *CV1 = dyn_cast<Constant>(Arg1))
3379 if (ConstantInt *Splat =
3380 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
3382 return CastInst::CreateIntegerCast(Arg0, II->getType(),
3383 /*isSigned=*/!Zext);
3387 case Intrinsic::arm_neon_aesd:
3388 case Intrinsic::arm_neon_aese:
3389 case Intrinsic::aarch64_crypto_aesd:
3390 case Intrinsic::aarch64_crypto_aese: {
3391 Value *DataArg = II->getArgOperand(0);
3392 Value *KeyArg = II->getArgOperand(1);
3394 // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
3396 if (match(KeyArg, m_ZeroInt()) &&
3397 match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
3398 II->setArgOperand(0, Data);
3399 II->setArgOperand(1, Key);
3404 case Intrinsic::arm_mve_pred_i2v: {
3405 Value *Arg = II->getArgOperand(0);
3407 if (match(Arg, m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(m_Value(ArgArg))) &&
3408 II->getType() == ArgArg->getType())
3409 return replaceInstUsesWith(*II, ArgArg);
3412 m_Xor(m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(m_Value(ArgArg)),
3413 m_Constant(XorMask))) &&
3414 II->getType() == ArgArg->getType()) {
3415 if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
3416 if (CI->getValue().trunc(16).isAllOnesValue()) {
3417 auto TrueVector = Builder.CreateVectorSplat(
3418 II->getType()->getVectorNumElements(), Builder.getTrue());
3419 return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
3423 KnownBits ScalarKnown(32);
3424 if (SimplifyDemandedBits(II, 0, APInt::getLowBitsSet(32, 16),
3429 case Intrinsic::arm_mve_pred_v2i: {
3430 Value *Arg = II->getArgOperand(0);
3432 if (match(Arg, m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(m_Value(ArgArg))))
3433 return replaceInstUsesWith(*II, ArgArg);
3434 if (!II->getMetadata(LLVMContext::MD_range)) {
3435 Type *IntTy32 = Type::getInt32Ty(II->getContext());
3437 ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
3438 ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0xFFFF))
3440 II->setMetadata(LLVMContext::MD_range, MDNode::get(II->getContext(), M));
3445 case Intrinsic::arm_mve_vadc:
3446 case Intrinsic::arm_mve_vadc_predicated: {
3448 (II->getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
3449 assert(II->getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
3450 "Bad type for intrinsic!");
3452 KnownBits CarryKnown(32);
3453 if (SimplifyDemandedBits(II, CarryOp, APInt::getOneBitSet(32, 29),
3458 case Intrinsic::amdgcn_rcp: {
3459 Value *Src = II->getArgOperand(0);
3461 // TODO: Move to ConstantFolding/InstSimplify?
3462 if (isa<UndefValue>(Src))
3463 return replaceInstUsesWith(CI, Src);
3465 if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
3466 const APFloat &ArgVal = C->getValueAPF();
3467 APFloat Val(ArgVal.getSemantics(), 1);
3468 APFloat::opStatus Status = Val.divide(ArgVal,
3469 APFloat::rmNearestTiesToEven);
3470 // Only do this if it was exact and therefore not dependent on the
3472 if (Status == APFloat::opOK)
3473 return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
3478 case Intrinsic::amdgcn_rsq: {
3479 Value *Src = II->getArgOperand(0);
3481 // TODO: Move to ConstantFolding/InstSimplify?
3482 if (isa<UndefValue>(Src))
3483 return replaceInstUsesWith(CI, Src);
3486 case Intrinsic::amdgcn_frexp_mant:
3487 case Intrinsic::amdgcn_frexp_exp: {
3488 Value *Src = II->getArgOperand(0);
3489 if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
3491 APFloat Significand = frexp(C->getValueAPF(), Exp,
3492 APFloat::rmNearestTiesToEven);
3494 if (IID == Intrinsic::amdgcn_frexp_mant) {
3495 return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(),
3499 // Match instruction special case behavior.
3500 if (Exp == APFloat::IEK_NaN || Exp == APFloat::IEK_Inf)
3503 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Exp));
3506 if (isa<UndefValue>(Src))
3507 return replaceInstUsesWith(CI, UndefValue::get(II->getType()));
3511 case Intrinsic::amdgcn_class: {
3513 S_NAN = 1 << 0, // Signaling NaN
3514 Q_NAN = 1 << 1, // Quiet NaN
3515 N_INFINITY = 1 << 2, // Negative infinity
3516 N_NORMAL = 1 << 3, // Negative normal
3517 N_SUBNORMAL = 1 << 4, // Negative subnormal
3518 N_ZERO = 1 << 5, // Negative zero
3519 P_ZERO = 1 << 6, // Positive zero
3520 P_SUBNORMAL = 1 << 7, // Positive subnormal
3521 P_NORMAL = 1 << 8, // Positive normal
3522 P_INFINITY = 1 << 9 // Positive infinity
3525 const uint32_t FullMask = S_NAN | Q_NAN | N_INFINITY | N_NORMAL |
3526 N_SUBNORMAL | N_ZERO | P_ZERO | P_SUBNORMAL | P_NORMAL | P_INFINITY;
3528 Value *Src0 = II->getArgOperand(0);
3529 Value *Src1 = II->getArgOperand(1);
3530 const ConstantInt *CMask = dyn_cast<ConstantInt>(Src1);
3532 if (isa<UndefValue>(Src0))
3533 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3535 if (isa<UndefValue>(Src1))
3536 return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
3540 uint32_t Mask = CMask->getZExtValue();
3542 // If all tests are made, it doesn't matter what the value is.
3543 if ((Mask & FullMask) == FullMask)
3544 return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), true));
3546 if ((Mask & FullMask) == 0)
3547 return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
3549 if (Mask == (S_NAN | Q_NAN)) {
3550 // Equivalent of isnan. Replace with standard fcmp.
3551 Value *FCmp = Builder.CreateFCmpUNO(Src0, Src0);
3553 return replaceInstUsesWith(*II, FCmp);
3556 if (Mask == (N_ZERO | P_ZERO)) {
3557 // Equivalent of == 0.
3558 Value *FCmp = Builder.CreateFCmpOEQ(
3559 Src0, ConstantFP::get(Src0->getType(), 0.0));
3562 return replaceInstUsesWith(*II, FCmp);
3565 // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other
3566 if (((Mask & S_NAN) || (Mask & Q_NAN)) && isKnownNeverNaN(Src0, &TLI)) {
3567 II->setArgOperand(1, ConstantInt::get(Src1->getType(),
3568 Mask & ~(S_NAN | Q_NAN)));
3572 const ConstantFP *CVal = dyn_cast<ConstantFP>(Src0);
3574 if (isa<UndefValue>(Src0))
3575 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3577 // Clamp mask to used bits
3578 if ((Mask & FullMask) != Mask) {
3579 CallInst *NewCall = Builder.CreateCall(II->getCalledFunction(),
3580 { Src0, ConstantInt::get(Src1->getType(), Mask & FullMask) }
3583 NewCall->takeName(II);
3584 return replaceInstUsesWith(*II, NewCall);
3590 const APFloat &Val = CVal->getValueAPF();
3593 ((Mask & S_NAN) && Val.isNaN() && Val.isSignaling()) ||
3594 ((Mask & Q_NAN) && Val.isNaN() && !Val.isSignaling()) ||
3595 ((Mask & N_INFINITY) && Val.isInfinity() && Val.isNegative()) ||
3596 ((Mask & N_NORMAL) && Val.isNormal() && Val.isNegative()) ||
3597 ((Mask & N_SUBNORMAL) && Val.isDenormal() && Val.isNegative()) ||
3598 ((Mask & N_ZERO) && Val.isZero() && Val.isNegative()) ||
3599 ((Mask & P_ZERO) && Val.isZero() && !Val.isNegative()) ||
3600 ((Mask & P_SUBNORMAL) && Val.isDenormal() && !Val.isNegative()) ||
3601 ((Mask & P_NORMAL) && Val.isNormal() && !Val.isNegative()) ||
3602 ((Mask & P_INFINITY) && Val.isInfinity() && !Val.isNegative());
3604 return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), Result));
3606 case Intrinsic::amdgcn_cvt_pkrtz: {
3607 Value *Src0 = II->getArgOperand(0);
3608 Value *Src1 = II->getArgOperand(1);
3609 if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3610 if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3611 const fltSemantics &HalfSem
3612 = II->getType()->getScalarType()->getFltSemantics();
3614 APFloat Val0 = C0->getValueAPF();
3615 APFloat Val1 = C1->getValueAPF();
3616 Val0.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
3617 Val1.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
3619 Constant *Folded = ConstantVector::get({
3620 ConstantFP::get(II->getContext(), Val0),
3621 ConstantFP::get(II->getContext(), Val1) });
3622 return replaceInstUsesWith(*II, Folded);
3626 if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3627 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3631 case Intrinsic::amdgcn_cvt_pknorm_i16:
3632 case Intrinsic::amdgcn_cvt_pknorm_u16:
3633 case Intrinsic::amdgcn_cvt_pk_i16:
3634 case Intrinsic::amdgcn_cvt_pk_u16: {
3635 Value *Src0 = II->getArgOperand(0);
3636 Value *Src1 = II->getArgOperand(1);
3638 if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3639 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3643 case Intrinsic::amdgcn_ubfe:
3644 case Intrinsic::amdgcn_sbfe: {
3645 // Decompose simple cases into standard shifts.
3646 Value *Src = II->getArgOperand(0);
3647 if (isa<UndefValue>(Src))
3648 return replaceInstUsesWith(*II, Src);
3651 Type *Ty = II->getType();
3652 unsigned IntSize = Ty->getIntegerBitWidth();
3654 ConstantInt *CWidth = dyn_cast<ConstantInt>(II->getArgOperand(2));
3656 Width = CWidth->getZExtValue();
3657 if ((Width & (IntSize - 1)) == 0)
3658 return replaceInstUsesWith(*II, ConstantInt::getNullValue(Ty));
3660 if (Width >= IntSize) {
3661 // Hardware ignores high bits, so remove those.
3662 II->setArgOperand(2, ConstantInt::get(CWidth->getType(),
3663 Width & (IntSize - 1)));
3669 ConstantInt *COffset = dyn_cast<ConstantInt>(II->getArgOperand(1));
3671 Offset = COffset->getZExtValue();
3672 if (Offset >= IntSize) {
3673 II->setArgOperand(1, ConstantInt::get(COffset->getType(),
3674 Offset & (IntSize - 1)));
3679 bool Signed = IID == Intrinsic::amdgcn_sbfe;
3681 if (!CWidth || !COffset)
3684 // The case of Width == 0 is handled above, which makes this tranformation
3685 // safe. If Width == 0, then the ashr and lshr instructions become poison
3686 // value since the shift amount would be equal to the bit size.
3689 // TODO: This allows folding to undef when the hardware has specific
3691 if (Offset + Width < IntSize) {
3692 Value *Shl = Builder.CreateShl(Src, IntSize - Offset - Width);
3693 Value *RightShift = Signed ? Builder.CreateAShr(Shl, IntSize - Width)
3694 : Builder.CreateLShr(Shl, IntSize - Width);
3695 RightShift->takeName(II);
3696 return replaceInstUsesWith(*II, RightShift);
3699 Value *RightShift = Signed ? Builder.CreateAShr(Src, Offset)
3700 : Builder.CreateLShr(Src, Offset);
3702 RightShift->takeName(II);
3703 return replaceInstUsesWith(*II, RightShift);
3705 case Intrinsic::amdgcn_exp:
3706 case Intrinsic::amdgcn_exp_compr: {
3707 ConstantInt *En = cast<ConstantInt>(II->getArgOperand(1));
3708 unsigned EnBits = En->getZExtValue();
3710 break; // All inputs enabled.
3712 bool IsCompr = IID == Intrinsic::amdgcn_exp_compr;
3713 bool Changed = false;
3714 for (int I = 0; I < (IsCompr ? 2 : 4); ++I) {
3715 if ((!IsCompr && (EnBits & (1 << I)) == 0) ||
3716 (IsCompr && ((EnBits & (0x3 << (2 * I))) == 0))) {
3717 Value *Src = II->getArgOperand(I + 2);
3718 if (!isa<UndefValue>(Src)) {
3719 II->setArgOperand(I + 2, UndefValue::get(Src->getType()));
3730 case Intrinsic::amdgcn_fmed3: {
3731 // Note this does not preserve proper sNaN behavior if IEEE-mode is enabled
3734 Value *Src0 = II->getArgOperand(0);
3735 Value *Src1 = II->getArgOperand(1);
3736 Value *Src2 = II->getArgOperand(2);
3738 // Checking for NaN before canonicalization provides better fidelity when
3739 // mapping other operations onto fmed3 since the order of operands is
3741 CallInst *NewCall = nullptr;
3742 if (match(Src0, m_NaN()) || isa<UndefValue>(Src0)) {
3743 NewCall = Builder.CreateMinNum(Src1, Src2);
3744 } else if (match(Src1, m_NaN()) || isa<UndefValue>(Src1)) {
3745 NewCall = Builder.CreateMinNum(Src0, Src2);
3746 } else if (match(Src2, m_NaN()) || isa<UndefValue>(Src2)) {
3747 NewCall = Builder.CreateMaxNum(Src0, Src1);
3751 NewCall->copyFastMathFlags(II);
3752 NewCall->takeName(II);
3753 return replaceInstUsesWith(*II, NewCall);
3757 // Canonicalize constants to RHS operands.
3759 // fmed3(c0, x, c1) -> fmed3(x, c0, c1)
3760 if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3761 std::swap(Src0, Src1);
3765 if (isa<Constant>(Src1) && !isa<Constant>(Src2)) {
3766 std::swap(Src1, Src2);
3770 if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3771 std::swap(Src0, Src1);
3776 II->setArgOperand(0, Src0);
3777 II->setArgOperand(1, Src1);
3778 II->setArgOperand(2, Src2);
3782 if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3783 if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3784 if (const ConstantFP *C2 = dyn_cast<ConstantFP>(Src2)) {
3785 APFloat Result = fmed3AMDGCN(C0->getValueAPF(), C1->getValueAPF(),
3787 return replaceInstUsesWith(*II,
3788 ConstantFP::get(Builder.getContext(), Result));
3795 case Intrinsic::amdgcn_icmp:
3796 case Intrinsic::amdgcn_fcmp: {
3797 const ConstantInt *CC = cast<ConstantInt>(II->getArgOperand(2));
3798 // Guard against invalid arguments.
3799 int64_t CCVal = CC->getZExtValue();
3800 bool IsInteger = IID == Intrinsic::amdgcn_icmp;
3801 if ((IsInteger && (CCVal < CmpInst::FIRST_ICMP_PREDICATE ||
3802 CCVal > CmpInst::LAST_ICMP_PREDICATE)) ||
3803 (!IsInteger && (CCVal < CmpInst::FIRST_FCMP_PREDICATE ||
3804 CCVal > CmpInst::LAST_FCMP_PREDICATE)))
3807 Value *Src0 = II->getArgOperand(0);
3808 Value *Src1 = II->getArgOperand(1);
3810 if (auto *CSrc0 = dyn_cast<Constant>(Src0)) {
3811 if (auto *CSrc1 = dyn_cast<Constant>(Src1)) {
3812 Constant *CCmp = ConstantExpr::getCompare(CCVal, CSrc0, CSrc1);
3813 if (CCmp->isNullValue()) {
3814 return replaceInstUsesWith(
3815 *II, ConstantExpr::getSExt(CCmp, II->getType()));
3818 // The result of V_ICMP/V_FCMP assembly instructions (which this
3819 // intrinsic exposes) is one bit per thread, masked with the EXEC
3820 // register (which contains the bitmask of live threads). So a
3821 // comparison that always returns true is the same as a read of the
3823 Function *NewF = Intrinsic::getDeclaration(
3824 II->getModule(), Intrinsic::read_register, II->getType());
3825 Metadata *MDArgs[] = {MDString::get(II->getContext(), "exec")};
3826 MDNode *MD = MDNode::get(II->getContext(), MDArgs);
3827 Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)};
3828 CallInst *NewCall = Builder.CreateCall(NewF, Args);
3829 NewCall->addAttribute(AttributeList::FunctionIndex,
3830 Attribute::Convergent);
3831 NewCall->takeName(II);
3832 return replaceInstUsesWith(*II, NewCall);
3835 // Canonicalize constants to RHS.
3836 CmpInst::Predicate SwapPred
3837 = CmpInst::getSwappedPredicate(static_cast<CmpInst::Predicate>(CCVal));
3838 II->setArgOperand(0, Src1);
3839 II->setArgOperand(1, Src0);
3840 II->setArgOperand(2, ConstantInt::get(CC->getType(),
3841 static_cast<int>(SwapPred)));
3845 if (CCVal != CmpInst::ICMP_EQ && CCVal != CmpInst::ICMP_NE)
3848 // Canonicalize compare eq with true value to compare != 0
3849 // llvm.amdgcn.icmp(zext (i1 x), 1, eq)
3850 // -> llvm.amdgcn.icmp(zext (i1 x), 0, ne)
3851 // llvm.amdgcn.icmp(sext (i1 x), -1, eq)
3852 // -> llvm.amdgcn.icmp(sext (i1 x), 0, ne)
3854 if (CCVal == CmpInst::ICMP_EQ &&
3855 ((match(Src1, m_One()) && match(Src0, m_ZExt(m_Value(ExtSrc)))) ||
3856 (match(Src1, m_AllOnes()) && match(Src0, m_SExt(m_Value(ExtSrc))))) &&
3857 ExtSrc->getType()->isIntegerTy(1)) {
3858 II->setArgOperand(1, ConstantInt::getNullValue(Src1->getType()));
3859 II->setArgOperand(2, ConstantInt::get(CC->getType(), CmpInst::ICMP_NE));
3863 CmpInst::Predicate SrcPred;
3867 // Fold compare eq/ne with 0 from a compare result as the predicate to the
3868 // intrinsic. The typical use is a wave vote function in the library, which
3869 // will be fed from a user code condition compared with 0. Fold in the
3870 // redundant compare.
3872 // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, ne)
3873 // -> llvm.amdgcn.[if]cmp(a, b, pred)
3875 // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, eq)
3876 // -> llvm.amdgcn.[if]cmp(a, b, inv pred)
3877 if (match(Src1, m_Zero()) &&
3879 m_ZExtOrSExt(m_Cmp(SrcPred, m_Value(SrcLHS), m_Value(SrcRHS))))) {
3880 if (CCVal == CmpInst::ICMP_EQ)
3881 SrcPred = CmpInst::getInversePredicate(SrcPred);
3883 Intrinsic::ID NewIID = CmpInst::isFPPredicate(SrcPred) ?
3884 Intrinsic::amdgcn_fcmp : Intrinsic::amdgcn_icmp;
3886 Type *Ty = SrcLHS->getType();
3887 if (auto *CmpType = dyn_cast<IntegerType>(Ty)) {
3888 // Promote to next legal integer type.
3889 unsigned Width = CmpType->getBitWidth();
3890 unsigned NewWidth = Width;
3892 // Don't do anything for i1 comparisons.
3898 else if (Width <= 32)
3900 else if (Width <= 64)
3902 else if (Width > 64)
3903 break; // Can't handle this.
3905 if (Width != NewWidth) {
3906 IntegerType *CmpTy = Builder.getIntNTy(NewWidth);
3907 if (CmpInst::isSigned(SrcPred)) {
3908 SrcLHS = Builder.CreateSExt(SrcLHS, CmpTy);
3909 SrcRHS = Builder.CreateSExt(SrcRHS, CmpTy);
3911 SrcLHS = Builder.CreateZExt(SrcLHS, CmpTy);
3912 SrcRHS = Builder.CreateZExt(SrcRHS, CmpTy);
3915 } else if (!Ty->isFloatTy() && !Ty->isDoubleTy() && !Ty->isHalfTy())
3919 Intrinsic::getDeclaration(II->getModule(), NewIID,
3921 SrcLHS->getType() });
3922 Value *Args[] = { SrcLHS, SrcRHS,
3923 ConstantInt::get(CC->getType(), SrcPred) };
3924 CallInst *NewCall = Builder.CreateCall(NewF, Args);
3925 NewCall->takeName(II);
3926 return replaceInstUsesWith(*II, NewCall);
3931 case Intrinsic::amdgcn_wqm_vote: {
3932 // wqm_vote is identity when the argument is constant.
3933 if (!isa<Constant>(II->getArgOperand(0)))
3936 return replaceInstUsesWith(*II, II->getArgOperand(0));
3938 case Intrinsic::amdgcn_kill: {
3939 const ConstantInt *C = dyn_cast<ConstantInt>(II->getArgOperand(0));
3940 if (!C || !C->getZExtValue())
3943 // amdgcn.kill(i1 1) is a no-op
3944 return eraseInstFromFunction(CI);
3946 case Intrinsic::amdgcn_update_dpp: {
3947 Value *Old = II->getArgOperand(0);
3949 auto BC = cast<ConstantInt>(II->getArgOperand(5));
3950 auto RM = cast<ConstantInt>(II->getArgOperand(3));
3951 auto BM = cast<ConstantInt>(II->getArgOperand(4));
3952 if (BC->isZeroValue() ||
3953 RM->getZExtValue() != 0xF ||
3954 BM->getZExtValue() != 0xF ||
3955 isa<UndefValue>(Old))
3958 // If bound_ctrl = 1, row mask = bank mask = 0xf we can omit old value.
3959 II->setOperand(0, UndefValue::get(Old->getType()));
3962 case Intrinsic::amdgcn_readfirstlane:
3963 case Intrinsic::amdgcn_readlane: {
3964 // A constant value is trivially uniform.
3965 if (Constant *C = dyn_cast<Constant>(II->getArgOperand(0)))
3966 return replaceInstUsesWith(*II, C);
3968 // The rest of these may not be safe if the exec may not be the same between
3970 Value *Src = II->getArgOperand(0);
3971 Instruction *SrcInst = dyn_cast<Instruction>(Src);
3972 if (SrcInst && SrcInst->getParent() != II->getParent())
3975 // readfirstlane (readfirstlane x) -> readfirstlane x
3976 // readlane (readfirstlane x), y -> readfirstlane x
3977 if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readfirstlane>()))
3978 return replaceInstUsesWith(*II, Src);
3980 if (IID == Intrinsic::amdgcn_readfirstlane) {
3981 // readfirstlane (readlane x, y) -> readlane x, y
3982 if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readlane>()))
3983 return replaceInstUsesWith(*II, Src);
3985 // readlane (readlane x, y), y -> readlane x, y
3986 if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readlane>(
3987 m_Value(), m_Specific(II->getArgOperand(1)))))
3988 return replaceInstUsesWith(*II, Src);
3993 case Intrinsic::stackrestore: {
3994 // If the save is right next to the restore, remove the restore. This can
3995 // happen when variable allocas are DCE'd.
3996 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
3997 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
3998 // Skip over debug info.
3999 if (SS->getNextNonDebugInstruction() == II) {
4000 return eraseInstFromFunction(CI);
4005 // Scan down this block to see if there is another stack restore in the
4006 // same block without an intervening call/alloca.
4007 BasicBlock::iterator BI(II);
4008 Instruction *TI = II->getParent()->getTerminator();
4009 bool CannotRemove = false;
4010 for (++BI; &*BI != TI; ++BI) {
4011 if (isa<AllocaInst>(BI)) {
4012 CannotRemove = true;
4015 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
4016 if (auto *II2 = dyn_cast<IntrinsicInst>(BCI)) {
4017 // If there is a stackrestore below this one, remove this one.
4018 if (II2->getIntrinsicID() == Intrinsic::stackrestore)
4019 return eraseInstFromFunction(CI);
4021 // Bail if we cross over an intrinsic with side effects, such as
4022 // llvm.stacksave, or llvm.read_register.
4023 if (II2->mayHaveSideEffects()) {
4024 CannotRemove = true;
4028 // If we found a non-intrinsic call, we can't remove the stack
4030 CannotRemove = true;
4036 // If the stack restore is in a return, resume, or unwind block and if there
4037 // are no allocas or calls between the restore and the return, nuke the
4039 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
4040 return eraseInstFromFunction(CI);
4043 case Intrinsic::lifetime_start:
4044 // Asan needs to poison memory to detect invalid access which is possible
4045 // even for empty lifetime range.
4046 if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
4047 II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
4048 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
4051 if (removeTriviallyEmptyRange(*II, Intrinsic::lifetime_start,
4052 Intrinsic::lifetime_end, *this))
4055 case Intrinsic::assume: {
4056 Value *IIOperand = II->getArgOperand(0);
4057 // Remove an assume if it is followed by an identical assume.
4058 // TODO: Do we need this? Unless there are conflicting assumptions, the
4059 // computeKnownBits(IIOperand) below here eliminates redundant assumes.
4060 Instruction *Next = II->getNextNonDebugInstruction();
4061 if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
4062 return eraseInstFromFunction(CI);
4064 // Canonicalize assume(a && b) -> assume(a); assume(b);
4065 // Note: New assumption intrinsics created here are registered by
4066 // the InstCombineIRInserter object.
4067 FunctionType *AssumeIntrinsicTy = II->getFunctionType();
4068 Value *AssumeIntrinsic = II->getCalledValue();
4070 if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
4071 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, II->getName());
4072 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
4073 return eraseInstFromFunction(*II);
4075 // assume(!(a || b)) -> assume(!a); assume(!b);
4076 if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
4077 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
4078 Builder.CreateNot(A), II->getName());
4079 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
4080 Builder.CreateNot(B), II->getName());
4081 return eraseInstFromFunction(*II);
4084 // assume( (load addr) != null ) -> add 'nonnull' metadata to load
4085 // (if assume is valid at the load)
4086 CmpInst::Predicate Pred;
4088 if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
4089 Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
4090 LHS->getType()->isPointerTy() &&
4091 isValidAssumeForContext(II, LHS, &DT)) {
4092 MDNode *MD = MDNode::get(II->getContext(), None);
4093 LHS->setMetadata(LLVMContext::MD_nonnull, MD);
4094 return eraseInstFromFunction(*II);
4096 // TODO: apply nonnull return attributes to calls and invokes
4097 // TODO: apply range metadata for range check patterns?
4100 // If there is a dominating assume with the same condition as this one,
4101 // then this one is redundant, and should be removed.
4103 computeKnownBits(IIOperand, Known, 0, II);
4104 if (Known.isAllOnes())
4105 return eraseInstFromFunction(*II);
4107 // Update the cache of affected values for this assumption (we might be
4108 // here because we just simplified the condition).
4109 AC.updateAffectedValues(II);
4112 case Intrinsic::experimental_gc_relocate: {
4113 auto &GCR = *cast<GCRelocateInst>(II);
4115 // If we have two copies of the same pointer in the statepoint argument
4116 // list, canonicalize to one. This may let us common gc.relocates.
4117 if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
4118 GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
4119 auto *OpIntTy = GCR.getOperand(2)->getType();
4120 II->setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
4124 // Translate facts known about a pointer before relocating into
4125 // facts about the relocate value, while being careful to
4126 // preserve relocation semantics.
4127 Value *DerivedPtr = GCR.getDerivedPtr();
4129 // Remove the relocation if unused, note that this check is required
4130 // to prevent the cases below from looping forever.
4131 if (II->use_empty())
4132 return eraseInstFromFunction(*II);
4134 // Undef is undef, even after relocation.
4135 // TODO: provide a hook for this in GCStrategy. This is clearly legal for
4136 // most practical collectors, but there was discussion in the review thread
4137 // about whether it was legal for all possible collectors.
4138 if (isa<UndefValue>(DerivedPtr))
4139 // Use undef of gc_relocate's type to replace it.
4140 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
4142 if (auto *PT = dyn_cast<PointerType>(II->getType())) {
4143 // The relocation of null will be null for most any collector.
4144 // TODO: provide a hook for this in GCStrategy. There might be some
4145 // weird collector this property does not hold for.
4146 if (isa<ConstantPointerNull>(DerivedPtr))
4147 // Use null-pointer of gc_relocate's type to replace it.
4148 return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
4150 // isKnownNonNull -> nonnull attribute
4151 if (!II->hasRetAttr(Attribute::NonNull) &&
4152 isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT)) {
4153 II->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
4158 // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
4159 // Canonicalize on the type from the uses to the defs
4161 // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
4165 case Intrinsic::experimental_guard: {
4166 // Is this guard followed by another guard? We scan forward over a small
4167 // fixed window of instructions to handle common cases with conditions
4168 // computed between guards.
4169 Instruction *NextInst = II->getNextNonDebugInstruction();
4170 for (unsigned i = 0; i < GuardWideningWindow; i++) {
4171 // Note: Using context-free form to avoid compile time blow up
4172 if (!isSafeToSpeculativelyExecute(NextInst))
4174 NextInst = NextInst->getNextNonDebugInstruction();
4176 Value *NextCond = nullptr;
4178 m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
4179 Value *CurrCond = II->getArgOperand(0);
4181 // Remove a guard that it is immediately preceded by an identical guard.
4182 // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
4183 if (CurrCond != NextCond) {
4184 Instruction *MoveI = II->getNextNonDebugInstruction();
4185 while (MoveI != NextInst) {
4187 MoveI = MoveI->getNextNonDebugInstruction();
4188 Temp->moveBefore(II);
4190 II->setArgOperand(0, Builder.CreateAnd(CurrCond, NextCond));
4192 eraseInstFromFunction(*NextInst);
4198 return visitCallBase(*II);
4201 // Fence instruction simplification
4202 Instruction *InstCombiner::visitFenceInst(FenceInst &FI) {
4203 // Remove identical consecutive fences.
4204 Instruction *Next = FI.getNextNonDebugInstruction();
4205 if (auto *NFI = dyn_cast<FenceInst>(Next))
4206 if (FI.isIdenticalTo(NFI))
4207 return eraseInstFromFunction(FI);
4211 // InvokeInst simplification
4212 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
4213 return visitCallBase(II);
4216 // CallBrInst simplification
4217 Instruction *InstCombiner::visitCallBrInst(CallBrInst &CBI) {
4218 return visitCallBase(CBI);
4221 /// If this cast does not affect the value passed through the varargs area, we
4222 /// can eliminate the use of the cast.
4223 static bool isSafeToEliminateVarargsCast(const CallBase &Call,
4224 const DataLayout &DL,
4225 const CastInst *const CI,
4227 if (!CI->isLosslessCast())
4230 // If this is a GC intrinsic, avoid munging types. We need types for
4231 // statepoint reconstruction in SelectionDAG.
4232 // TODO: This is probably something which should be expanded to all
4233 // intrinsics since the entire point of intrinsics is that
4234 // they are understandable by the optimizer.
4235 if (isStatepoint(&Call) || isGCRelocate(&Call) || isGCResult(&Call))
4238 // The size of ByVal or InAlloca arguments is derived from the type, so we
4239 // can't change to a type with a different size. If the size were
4240 // passed explicitly we could avoid this check.
4241 if (!Call.isByValOrInAllocaArgument(ix))
4245 cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
4246 Type *DstTy = Call.isByValArgument(ix)
4247 ? Call.getParamByValType(ix)
4248 : cast<PointerType>(CI->getType())->getElementType();
4249 if (!SrcTy->isSized() || !DstTy->isSized())
4251 if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
4256 Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
4257 if (!CI->getCalledFunction()) return nullptr;
4259 auto InstCombineRAUW = [this](Instruction *From, Value *With) {
4260 replaceInstUsesWith(*From, With);
4262 auto InstCombineErase = [this](Instruction *I) {
4263 eraseInstFromFunction(*I);
4265 LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW,
4267 if (Value *With = Simplifier.optimizeCall(CI)) {
4269 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
4275 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
4276 // Strip off at most one level of pointer casts, looking for an alloca. This
4277 // is good enough in practice and simpler than handling any number of casts.
4278 Value *Underlying = TrampMem->stripPointerCasts();
4279 if (Underlying != TrampMem &&
4280 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
4282 if (!isa<AllocaInst>(Underlying))
4285 IntrinsicInst *InitTrampoline = nullptr;
4286 for (User *U : TrampMem->users()) {
4287 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4290 if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
4292 // More than one init_trampoline writes to this value. Give up.
4294 InitTrampoline = II;
4297 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
4298 // Allow any number of calls to adjust.trampoline.
4303 // No call to init.trampoline found.
4304 if (!InitTrampoline)
4307 // Check that the alloca is being used in the expected way.
4308 if (InitTrampoline->getOperand(0) != TrampMem)
4311 return InitTrampoline;
4314 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
4316 // Visit all the previous instructions in the basic block, and try to find a
4317 // init.trampoline which has a direct path to the adjust.trampoline.
4318 for (BasicBlock::iterator I = AdjustTramp->getIterator(),
4319 E = AdjustTramp->getParent()->begin();
4321 Instruction *Inst = &*--I;
4322 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
4323 if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
4324 II->getOperand(0) == TrampMem)
4326 if (Inst->mayWriteToMemory())
4332 // Given a call to llvm.adjust.trampoline, find and return the corresponding
4333 // call to llvm.init.trampoline if the call to the trampoline can be optimized
4334 // to a direct call to a function. Otherwise return NULL.
4335 static IntrinsicInst *findInitTrampoline(Value *Callee) {
4336 Callee = Callee->stripPointerCasts();
4337 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
4339 AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
4342 Value *TrampMem = AdjustTramp->getOperand(0);
4344 if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
4346 if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
4351 static void annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) {
4352 unsigned NumArgs = Call.getNumArgOperands();
4353 ConstantInt *Op0C = dyn_cast<ConstantInt>(Call.getOperand(0));
4355 (NumArgs == 1) ? nullptr : dyn_cast<ConstantInt>(Call.getOperand(1));
4356 // Bail out if the allocation size is zero.
4357 if ((Op0C && Op0C->isNullValue()) || (Op1C && Op1C->isNullValue()))
4360 if (isMallocLikeFn(&Call, TLI) && Op0C) {
4361 if (isOpNewLikeFn(&Call, TLI))
4362 Call.addAttribute(AttributeList::ReturnIndex,
4363 Attribute::getWithDereferenceableBytes(
4364 Call.getContext(), Op0C->getZExtValue()));
4366 Call.addAttribute(AttributeList::ReturnIndex,
4367 Attribute::getWithDereferenceableOrNullBytes(
4368 Call.getContext(), Op0C->getZExtValue()));
4369 } else if (isReallocLikeFn(&Call, TLI) && Op1C) {
4370 Call.addAttribute(AttributeList::ReturnIndex,
4371 Attribute::getWithDereferenceableOrNullBytes(
4372 Call.getContext(), Op1C->getZExtValue()));
4373 } else if (isCallocLikeFn(&Call, TLI) && Op0C && Op1C) {
4375 const APInt &N = Op0C->getValue();
4376 APInt Size = N.umul_ov(Op1C->getValue(), Overflow);
4378 Call.addAttribute(AttributeList::ReturnIndex,
4379 Attribute::getWithDereferenceableOrNullBytes(
4380 Call.getContext(), Size.getZExtValue()));
4381 } else if (isStrdupLikeFn(&Call, TLI)) {
4382 uint64_t Len = GetStringLength(Call.getOperand(0));
4386 Call.addAttribute(AttributeList::ReturnIndex,
4387 Attribute::getWithDereferenceableOrNullBytes(
4388 Call.getContext(), Len));
4390 else if (NumArgs == 2 && Op1C)
4392 AttributeList::ReturnIndex,
4393 Attribute::getWithDereferenceableOrNullBytes(
4394 Call.getContext(), std::min(Len, Op1C->getZExtValue() + 1)));
4399 /// Improvements for call, callbr and invoke instructions.
4400 Instruction *InstCombiner::visitCallBase(CallBase &Call) {
4401 if (isAllocationFn(&Call, &TLI))
4402 annotateAnyAllocSite(Call, &TLI);
4404 bool Changed = false;
4406 // Mark any parameters that are known to be non-null with the nonnull
4407 // attribute. This is helpful for inlining calls to functions with null
4408 // checks on their arguments.
4409 SmallVector<unsigned, 4> ArgNos;
4412 for (Value *V : Call.args()) {
4413 if (V->getType()->isPointerTy() &&
4414 !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
4415 isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
4416 ArgNos.push_back(ArgNo);
4420 assert(ArgNo == Call.arg_size() && "sanity check");
4422 if (!ArgNos.empty()) {
4423 AttributeList AS = Call.getAttributes();
4424 LLVMContext &Ctx = Call.getContext();
4425 AS = AS.addParamAttribute(Ctx, ArgNos,
4426 Attribute::get(Ctx, Attribute::NonNull));
4427 Call.setAttributes(AS);
4431 // If the callee is a pointer to a function, attempt to move any casts to the
4432 // arguments of the call/callbr/invoke.
4433 Value *Callee = Call.getCalledValue();
4434 if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
4437 if (Function *CalleeF = dyn_cast<Function>(Callee)) {
4438 // Remove the convergent attr on calls when the callee is not convergent.
4439 if (Call.isConvergent() && !CalleeF->isConvergent() &&
4440 !CalleeF->isIntrinsic()) {
4441 LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
4443 Call.setNotConvergent();
4447 // If the call and callee calling conventions don't match, this call must
4448 // be unreachable, as the call is undefined.
4449 if (CalleeF->getCallingConv() != Call.getCallingConv() &&
4450 // Only do this for calls to a function with a body. A prototype may
4451 // not actually end up matching the implementation's calling conv for a
4452 // variety of reasons (e.g. it may be written in assembly).
4453 !CalleeF->isDeclaration()) {
4454 Instruction *OldCall = &Call;
4455 CreateNonTerminatorUnreachable(OldCall);
4456 // If OldCall does not return void then replaceAllUsesWith undef.
4457 // This allows ValueHandlers and custom metadata to adjust itself.
4458 if (!OldCall->getType()->isVoidTy())
4459 replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
4460 if (isa<CallInst>(OldCall))
4461 return eraseInstFromFunction(*OldCall);
4463 // We cannot remove an invoke or a callbr, because it would change thexi
4464 // CFG, just change the callee to a null pointer.
4465 cast<CallBase>(OldCall)->setCalledFunction(
4466 CalleeF->getFunctionType(),
4467 Constant::getNullValue(CalleeF->getType()));
4472 if ((isa<ConstantPointerNull>(Callee) &&
4473 !NullPointerIsDefined(Call.getFunction())) ||
4474 isa<UndefValue>(Callee)) {
4475 // If Call does not return void then replaceAllUsesWith undef.
4476 // This allows ValueHandlers and custom metadata to adjust itself.
4477 if (!Call.getType()->isVoidTy())
4478 replaceInstUsesWith(Call, UndefValue::get(Call.getType()));
4480 if (Call.isTerminator()) {
4481 // Can't remove an invoke or callbr because we cannot change the CFG.
4485 // This instruction is not reachable, just remove it.
4486 CreateNonTerminatorUnreachable(&Call);
4487 return eraseInstFromFunction(Call);
4490 if (IntrinsicInst *II = findInitTrampoline(Callee))
4491 return transformCallThroughTrampoline(Call, *II);
4493 PointerType *PTy = cast<PointerType>(Callee->getType());
4494 FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
4495 if (FTy->isVarArg()) {
4496 int ix = FTy->getNumParams();
4497 // See if we can optimize any arguments passed through the varargs area of
4499 for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
4500 I != E; ++I, ++ix) {
4501 CastInst *CI = dyn_cast<CastInst>(*I);
4502 if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
4503 *I = CI->getOperand(0);
4505 // Update the byval type to match the argument type.
4506 if (Call.isByValArgument(ix)) {
4507 Call.removeParamAttr(ix, Attribute::ByVal);
4509 ix, Attribute::getWithByValType(
4511 CI->getOperand(0)->getType()->getPointerElementType()));
4518 if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
4519 // Inline asm calls cannot throw - mark them 'nounwind'.
4520 Call.setDoesNotThrow();
4524 // Try to optimize the call if possible, we require DataLayout for most of
4525 // this. None of these calls are seen as possibly dead so go ahead and
4526 // delete the instruction now.
4527 if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
4528 Instruction *I = tryOptimizeCall(CI);
4529 // If we changed something return the result, etc. Otherwise let
4530 // the fallthrough check.
4531 if (I) return eraseInstFromFunction(*I);
4534 if (isAllocLikeFn(&Call, &TLI))
4535 return visitAllocSite(Call);
4537 return Changed ? &Call : nullptr;
4540 /// If the callee is a constexpr cast of a function, attempt to move the cast to
4541 /// the arguments of the call/callbr/invoke.
4542 bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
4543 auto *Callee = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
4547 // If this is a call to a thunk function, don't remove the cast. Thunks are
4548 // used to transparently forward all incoming parameters and outgoing return
4549 // values, so it's important to leave the cast in place.
4550 if (Callee->hasFnAttribute("thunk"))
4553 // If this is a musttail call, the callee's prototype must match the caller's
4554 // prototype with the exception of pointee types. The code below doesn't
4555 // implement that, so we can't do this transform.
4556 // TODO: Do the transform if it only requires adding pointer casts.
4557 if (Call.isMustTailCall())
4560 Instruction *Caller = &Call;
4561 const AttributeList &CallerPAL = Call.getAttributes();
4563 // Okay, this is a cast from a function to a different type. Unless doing so
4564 // would cause a type conversion of one of our arguments, change this call to
4565 // be a direct call with arguments casted to the appropriate types.
4566 FunctionType *FT = Callee->getFunctionType();
4567 Type *OldRetTy = Caller->getType();
4568 Type *NewRetTy = FT->getReturnType();
4570 // Check to see if we are changing the return type...
4571 if (OldRetTy != NewRetTy) {
4573 if (NewRetTy->isStructTy())
4574 return false; // TODO: Handle multiple return values.
4576 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
4577 if (Callee->isDeclaration())
4578 return false; // Cannot transform this return value.
4580 if (!Caller->use_empty() &&
4581 // void -> non-void is handled specially
4582 !NewRetTy->isVoidTy())
4583 return false; // Cannot transform this return value.
4586 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
4587 AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
4588 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
4589 return false; // Attribute not compatible with transformed value.
4592 // If the callbase is an invoke/callbr instruction, and the return value is
4593 // used by a PHI node in a successor, we cannot change the return type of
4594 // the call because there is no place to put the cast instruction (without
4595 // breaking the critical edge). Bail out in this case.
4596 if (!Caller->use_empty()) {
4597 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
4598 for (User *U : II->users())
4599 if (PHINode *PN = dyn_cast<PHINode>(U))
4600 if (PN->getParent() == II->getNormalDest() ||
4601 PN->getParent() == II->getUnwindDest())
4603 // FIXME: Be conservative for callbr to avoid a quadratic search.
4604 if (isa<CallBrInst>(Caller))
4609 unsigned NumActualArgs = Call.arg_size();
4610 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
4612 // Prevent us turning:
4613 // declare void @takes_i32_inalloca(i32* inalloca)
4614 // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
4617 // call void @takes_i32_inalloca(i32* null)
4619 // Similarly, avoid folding away bitcasts of byval calls.
4620 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
4621 Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
4624 auto AI = Call.arg_begin();
4625 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
4626 Type *ParamTy = FT->getParamType(i);
4627 Type *ActTy = (*AI)->getType();
4629 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
4630 return false; // Cannot transform this parameter value.
4632 if (AttrBuilder(CallerPAL.getParamAttributes(i))
4633 .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
4634 return false; // Attribute not compatible with transformed value.
4636 if (Call.isInAllocaArgument(i))
4637 return false; // Cannot transform to and from inalloca.
4639 // If the parameter is passed as a byval argument, then we have to have a
4640 // sized type and the sized type has to have the same size as the old type.
4641 if (ParamTy != ActTy && CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
4642 PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
4643 if (!ParamPTy || !ParamPTy->getElementType()->isSized())
4646 Type *CurElTy = Call.getParamByValType(i);
4647 if (DL.getTypeAllocSize(CurElTy) !=
4648 DL.getTypeAllocSize(ParamPTy->getElementType()))
4653 if (Callee->isDeclaration()) {
4654 // Do not delete arguments unless we have a function body.
4655 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
4658 // If the callee is just a declaration, don't change the varargsness of the
4659 // call. We don't want to introduce a varargs call where one doesn't
4661 PointerType *APTy = cast<PointerType>(Call.getCalledValue()->getType());
4662 if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
4665 // If both the callee and the cast type are varargs, we still have to make
4666 // sure the number of fixed parameters are the same or we have the same
4667 // ABI issues as if we introduce a varargs call.
4668 if (FT->isVarArg() &&
4669 cast<FunctionType>(APTy->getElementType())->isVarArg() &&
4670 FT->getNumParams() !=
4671 cast<FunctionType>(APTy->getElementType())->getNumParams())
4675 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
4676 !CallerPAL.isEmpty()) {
4677 // In this case we have more arguments than the new function type, but we
4678 // won't be dropping them. Check that these extra arguments have attributes
4679 // that are compatible with being a vararg call argument.
4681 if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
4682 SRetIdx > FT->getNumParams())
4686 // Okay, we decided that this is a safe thing to do: go ahead and start
4687 // inserting cast instructions as necessary.
4688 SmallVector<Value *, 8> Args;
4689 SmallVector<AttributeSet, 8> ArgAttrs;
4690 Args.reserve(NumActualArgs);
4691 ArgAttrs.reserve(NumActualArgs);
4693 // Get any return attributes.
4694 AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
4696 // If the return value is not being used, the type may not be compatible
4697 // with the existing attributes. Wipe out any problematic attributes.
4698 RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
4700 LLVMContext &Ctx = Call.getContext();
4701 AI = Call.arg_begin();
4702 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
4703 Type *ParamTy = FT->getParamType(i);
4705 Value *NewArg = *AI;
4706 if ((*AI)->getType() != ParamTy)
4707 NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
4708 Args.push_back(NewArg);
4710 // Add any parameter attributes.
4711 if (CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
4712 AttrBuilder AB(CallerPAL.getParamAttributes(i));
4713 AB.addByValAttr(NewArg->getType()->getPointerElementType());
4714 ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
4716 ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
4719 // If the function takes more arguments than the call was taking, add them
4721 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
4722 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
4723 ArgAttrs.push_back(AttributeSet());
4726 // If we are removing arguments to the function, emit an obnoxious warning.
4727 if (FT->getNumParams() < NumActualArgs) {
4728 // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
4729 if (FT->isVarArg()) {
4730 // Add all of the arguments in their promoted form to the arg list.
4731 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
4732 Type *PTy = getPromotedType((*AI)->getType());
4733 Value *NewArg = *AI;
4734 if (PTy != (*AI)->getType()) {
4735 // Must promote to pass through va_arg area!
4736 Instruction::CastOps opcode =
4737 CastInst::getCastOpcode(*AI, false, PTy, false);
4738 NewArg = Builder.CreateCast(opcode, *AI, PTy);
4740 Args.push_back(NewArg);
4742 // Add any parameter attributes.
4743 ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
4748 AttributeSet FnAttrs = CallerPAL.getFnAttributes();
4750 if (NewRetTy->isVoidTy())
4751 Caller->setName(""); // Void type should not have a name.
4753 assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
4754 "missing argument attributes");
4755 AttributeList NewCallerPAL = AttributeList::get(
4756 Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
4758 SmallVector<OperandBundleDef, 1> OpBundles;
4759 Call.getOperandBundlesAsDefs(OpBundles);
4762 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4763 NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
4764 II->getUnwindDest(), Args, OpBundles);
4765 } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
4766 NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
4767 CBI->getIndirectDests(), Args, OpBundles);
4769 NewCall = Builder.CreateCall(Callee, Args, OpBundles);
4770 cast<CallInst>(NewCall)->setTailCallKind(
4771 cast<CallInst>(Caller)->getTailCallKind());
4773 NewCall->takeName(Caller);
4774 NewCall->setCallingConv(Call.getCallingConv());
4775 NewCall->setAttributes(NewCallerPAL);
4777 // Preserve the weight metadata for the new call instruction. The metadata
4778 // is used by SamplePGO to check callsite's hotness.
4780 if (Caller->extractProfTotalWeight(W))
4781 NewCall->setProfWeight(W);
4783 // Insert a cast of the return type as necessary.
4784 Instruction *NC = NewCall;
4786 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
4787 if (!NV->getType()->isVoidTy()) {
4788 NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
4789 NC->setDebugLoc(Caller->getDebugLoc());
4791 // If this is an invoke/callbr instruction, we should insert it after the
4792 // first non-phi instruction in the normal successor block.
4793 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4794 BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
4795 InsertNewInstBefore(NC, *I);
4796 } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
4797 BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
4798 InsertNewInstBefore(NC, *I);
4800 // Otherwise, it's a call, just insert cast right after the call.
4801 InsertNewInstBefore(NC, *Caller);
4803 Worklist.AddUsersToWorkList(*Caller);
4805 NV = UndefValue::get(Caller->getType());
4809 if (!Caller->use_empty())
4810 replaceInstUsesWith(*Caller, NV);
4811 else if (Caller->hasValueHandle()) {
4812 if (OldRetTy == NV->getType())
4813 ValueHandleBase::ValueIsRAUWd(Caller, NV);
4815 // We cannot call ValueIsRAUWd with a different type, and the
4816 // actual tracked value will disappear.
4817 ValueHandleBase::ValueIsDeleted(Caller);
4820 eraseInstFromFunction(*Caller);
4824 /// Turn a call to a function created by init_trampoline / adjust_trampoline
4825 /// intrinsic pair into a direct call to the underlying function.
4827 InstCombiner::transformCallThroughTrampoline(CallBase &Call,
4828 IntrinsicInst &Tramp) {
4829 Value *Callee = Call.getCalledValue();
4830 Type *CalleeTy = Callee->getType();
4831 FunctionType *FTy = Call.getFunctionType();
4832 AttributeList Attrs = Call.getAttributes();
4834 // If the call already has the 'nest' attribute somewhere then give up -
4835 // otherwise 'nest' would occur twice after splicing in the chain.
4836 if (Attrs.hasAttrSomewhere(Attribute::Nest))
4839 Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
4840 FunctionType *NestFTy = NestF->getFunctionType();
4842 AttributeList NestAttrs = NestF->getAttributes();
4843 if (!NestAttrs.isEmpty()) {
4844 unsigned NestArgNo = 0;
4845 Type *NestTy = nullptr;
4846 AttributeSet NestAttr;
4848 // Look for a parameter marked with the 'nest' attribute.
4849 for (FunctionType::param_iterator I = NestFTy->param_begin(),
4850 E = NestFTy->param_end();
4851 I != E; ++NestArgNo, ++I) {
4852 AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
4853 if (AS.hasAttribute(Attribute::Nest)) {
4854 // Record the parameter type and any other attributes.
4862 std::vector<Value*> NewArgs;
4863 std::vector<AttributeSet> NewArgAttrs;
4864 NewArgs.reserve(Call.arg_size() + 1);
4865 NewArgAttrs.reserve(Call.arg_size());
4867 // Insert the nest argument into the call argument list, which may
4868 // mean appending it. Likewise for attributes.
4872 auto I = Call.arg_begin(), E = Call.arg_end();
4874 if (ArgNo == NestArgNo) {
4875 // Add the chain argument and attributes.
4876 Value *NestVal = Tramp.getArgOperand(2);
4877 if (NestVal->getType() != NestTy)
4878 NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
4879 NewArgs.push_back(NestVal);
4880 NewArgAttrs.push_back(NestAttr);
4886 // Add the original argument and attributes.
4887 NewArgs.push_back(*I);
4888 NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
4895 // The trampoline may have been bitcast to a bogus type (FTy).
4896 // Handle this by synthesizing a new function type, equal to FTy
4897 // with the chain parameter inserted.
4899 std::vector<Type*> NewTypes;
4900 NewTypes.reserve(FTy->getNumParams()+1);
4902 // Insert the chain's type into the list of parameter types, which may
4903 // mean appending it.
4906 FunctionType::param_iterator I = FTy->param_begin(),
4907 E = FTy->param_end();
4910 if (ArgNo == NestArgNo)
4911 // Add the chain's type.
4912 NewTypes.push_back(NestTy);
4917 // Add the original type.
4918 NewTypes.push_back(*I);
4925 // Replace the trampoline call with a direct call. Let the generic
4926 // code sort out any function type mismatches.
4927 FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
4929 Constant *NewCallee =
4930 NestF->getType() == PointerType::getUnqual(NewFTy) ?
4931 NestF : ConstantExpr::getBitCast(NestF,
4932 PointerType::getUnqual(NewFTy));
4933 AttributeList NewPAL =
4934 AttributeList::get(FTy->getContext(), Attrs.getFnAttributes(),
4935 Attrs.getRetAttributes(), NewArgAttrs);
4937 SmallVector<OperandBundleDef, 1> OpBundles;
4938 Call.getOperandBundlesAsDefs(OpBundles);
4940 Instruction *NewCaller;
4941 if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
4942 NewCaller = InvokeInst::Create(NewFTy, NewCallee,
4943 II->getNormalDest(), II->getUnwindDest(),
4944 NewArgs, OpBundles);
4945 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
4946 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
4947 } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
4949 CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
4950 CBI->getIndirectDests(), NewArgs, OpBundles);
4951 cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
4952 cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
4954 NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
4955 cast<CallInst>(NewCaller)->setTailCallKind(
4956 cast<CallInst>(Call).getTailCallKind());
4957 cast<CallInst>(NewCaller)->setCallingConv(
4958 cast<CallInst>(Call).getCallingConv());
4959 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
4961 NewCaller->setDebugLoc(Call.getDebugLoc());
4967 // Replace the trampoline call with a direct call. Since there is no 'nest'
4968 // parameter, there is no need to adjust the argument list. Let the generic
4969 // code sort out any function type mismatches.
4970 Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
4971 Call.setCalledFunction(FTy, NewCallee);