1 //===- InstCombineCalls.cpp -----------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the visitCall and visitInvoke functions.
12 //===----------------------------------------------------------------------===//
14 #include "InstCombineInternal.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/ADT/Twine.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Transforms/Utils/Local.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/IR/Attributes.h"
30 #include "llvm/IR/BasicBlock.h"
31 #include "llvm/IR/CallSite.h"
32 #include "llvm/IR/Constant.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/DerivedTypes.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/GlobalVariable.h"
38 #include "llvm/IR/InstrTypes.h"
39 #include "llvm/IR/Instruction.h"
40 #include "llvm/IR/Instructions.h"
41 #include "llvm/IR/IntrinsicInst.h"
42 #include "llvm/IR/Intrinsics.h"
43 #include "llvm/IR/LLVMContext.h"
44 #include "llvm/IR/Metadata.h"
45 #include "llvm/IR/PatternMatch.h"
46 #include "llvm/IR/Statepoint.h"
47 #include "llvm/IR/Type.h"
48 #include "llvm/IR/User.h"
49 #include "llvm/IR/Value.h"
50 #include "llvm/IR/ValueHandle.h"
51 #include "llvm/Support/AtomicOrdering.h"
52 #include "llvm/Support/Casting.h"
53 #include "llvm/Support/CommandLine.h"
54 #include "llvm/Support/Compiler.h"
55 #include "llvm/Support/Debug.h"
56 #include "llvm/Support/ErrorHandling.h"
57 #include "llvm/Support/KnownBits.h"
58 #include "llvm/Support/MathExtras.h"
59 #include "llvm/Support/raw_ostream.h"
60 #include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
61 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
70 using namespace PatternMatch;
72 #define DEBUG_TYPE "instcombine"
74 STATISTIC(NumSimplified, "Number of library calls simplified");
76 static cl::opt<unsigned> GuardWideningWindow(
77 "instcombine-guard-widening-window",
79 cl::desc("How wide an instruction window to bypass looking for "
82 /// Return the specified type promoted as it would be to pass though a va_arg
84 static Type *getPromotedType(Type *Ty) {
85 if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
86 if (ITy->getBitWidth() < 32)
87 return Type::getInt32Ty(Ty->getContext());
92 /// Return a constant boolean vector that has true elements in all positions
93 /// where the input constant data vector has an element with the sign bit set.
94 static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
95 SmallVector<Constant *, 32> BoolVec;
96 IntegerType *BoolTy = Type::getInt1Ty(V->getContext());
97 for (unsigned I = 0, E = V->getNumElements(); I != E; ++I) {
98 Constant *Elt = V->getElementAsConstant(I);
99 assert((isa<ConstantInt>(Elt) || isa<ConstantFP>(Elt)) &&
100 "Unexpected constant data vector element type");
101 bool Sign = V->getElementType()->isIntegerTy()
102 ? cast<ConstantInt>(Elt)->isNegative()
103 : cast<ConstantFP>(Elt)->isNegative();
104 BoolVec.push_back(ConstantInt::get(BoolTy, Sign));
106 return ConstantVector::get(BoolVec);
109 Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
110 unsigned DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
111 unsigned CopyDstAlign = MI->getDestAlignment();
112 if (CopyDstAlign < DstAlign){
113 MI->setDestAlignment(DstAlign);
117 unsigned SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
118 unsigned CopySrcAlign = MI->getSourceAlignment();
119 if (CopySrcAlign < SrcAlign) {
120 MI->setSourceAlignment(SrcAlign);
124 // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
126 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
127 if (!MemOpLength) return nullptr;
129 // Source and destination pointer types are always "i8*" for intrinsic. See
130 // if the size is something we can handle with a single primitive load/store.
131 // A single load+store correctly handles overlapping memory in the memmove
133 uint64_t Size = MemOpLength->getLimitedValue();
134 assert(Size && "0-sized memory transferring should be removed already.");
136 if (Size > 8 || (Size&(Size-1)))
137 return nullptr; // If not 1/2/4/8 bytes, exit.
139 // If it is an atomic and alignment is less than the size then we will
140 // introduce the unaligned memory access which will be later transformed
141 // into libcall in CodeGen. This is not evident performance gain so disable
143 if (isa<AtomicMemTransferInst>(MI))
144 if (CopyDstAlign < Size || CopySrcAlign < Size)
147 // Use an integer load+store unless we can find something better.
149 cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
151 cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
153 IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
154 Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
155 Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
157 // If the memcpy has metadata describing the members, see if we can get the
158 // TBAA tag describing our copy.
159 MDNode *CopyMD = nullptr;
160 if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
162 } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
163 if (M->getNumOperands() == 3 && M->getOperand(0) &&
164 mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
165 mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
167 mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
168 mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
170 M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
171 CopyMD = cast<MDNode>(M->getOperand(2));
174 Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
175 Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
176 LoadInst *L = Builder.CreateLoad(Src);
177 // Alignment from the mem intrinsic will be better, so use it.
178 L->setAlignment(CopySrcAlign);
180 L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
181 MDNode *LoopMemParallelMD =
182 MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
183 if (LoopMemParallelMD)
184 L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
185 MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
187 L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
189 StoreInst *S = Builder.CreateStore(L, Dest);
190 // Alignment from the mem intrinsic will be better, so use it.
191 S->setAlignment(CopyDstAlign);
193 S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
194 if (LoopMemParallelMD)
195 S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
197 S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
199 if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
200 // non-atomics can be volatile
201 L->setVolatile(MT->isVolatile());
202 S->setVolatile(MT->isVolatile());
204 if (isa<AtomicMemTransferInst>(MI)) {
205 // atomics have to be unordered
206 L->setOrdering(AtomicOrdering::Unordered);
207 S->setOrdering(AtomicOrdering::Unordered);
210 // Set the size of the copy to 0, it will be deleted on the next iteration.
211 MI->setLength(Constant::getNullValue(MemOpLength->getType()));
215 Instruction *InstCombiner::SimplifyAnyMemSet(AnyMemSetInst *MI) {
216 unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
217 if (MI->getDestAlignment() < Alignment) {
218 MI->setDestAlignment(Alignment);
222 // Extract the length and alignment and fill if they are constant.
223 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
224 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
225 if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
227 uint64_t Len = LenC->getLimitedValue();
228 Alignment = MI->getDestAlignment();
229 assert(Len && "0-sized memory setting should be removed already.");
231 // Alignment 0 is identity for alignment 1 for memset, but not store.
235 // If it is an atomic and alignment is less than the size then we will
236 // introduce the unaligned memory access which will be later transformed
237 // into libcall in CodeGen. This is not evident performance gain so disable
239 if (isa<AtomicMemSetInst>(MI))
243 // memset(s,c,n) -> store s, c (for n=1,2,4,8)
244 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
245 Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
247 Value *Dest = MI->getDest();
248 unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
249 Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
250 Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);
252 // Extract the fill value and store.
253 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
254 StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
256 S->setAlignment(Alignment);
257 if (isa<AtomicMemSetInst>(MI))
258 S->setOrdering(AtomicOrdering::Unordered);
260 // Set the size of the copy to 0, it will be deleted on the next iteration.
261 MI->setLength(Constant::getNullValue(LenC->getType()));
268 static Value *simplifyX86immShift(const IntrinsicInst &II,
269 InstCombiner::BuilderTy &Builder) {
270 bool LogicalShift = false;
271 bool ShiftLeft = false;
273 switch (II.getIntrinsicID()) {
274 default: llvm_unreachable("Unexpected intrinsic!");
275 case Intrinsic::x86_sse2_psra_d:
276 case Intrinsic::x86_sse2_psra_w:
277 case Intrinsic::x86_sse2_psrai_d:
278 case Intrinsic::x86_sse2_psrai_w:
279 case Intrinsic::x86_avx2_psra_d:
280 case Intrinsic::x86_avx2_psra_w:
281 case Intrinsic::x86_avx2_psrai_d:
282 case Intrinsic::x86_avx2_psrai_w:
283 case Intrinsic::x86_avx512_psra_q_128:
284 case Intrinsic::x86_avx512_psrai_q_128:
285 case Intrinsic::x86_avx512_psra_q_256:
286 case Intrinsic::x86_avx512_psrai_q_256:
287 case Intrinsic::x86_avx512_psra_d_512:
288 case Intrinsic::x86_avx512_psra_q_512:
289 case Intrinsic::x86_avx512_psra_w_512:
290 case Intrinsic::x86_avx512_psrai_d_512:
291 case Intrinsic::x86_avx512_psrai_q_512:
292 case Intrinsic::x86_avx512_psrai_w_512:
293 LogicalShift = false; ShiftLeft = false;
295 case Intrinsic::x86_sse2_psrl_d:
296 case Intrinsic::x86_sse2_psrl_q:
297 case Intrinsic::x86_sse2_psrl_w:
298 case Intrinsic::x86_sse2_psrli_d:
299 case Intrinsic::x86_sse2_psrli_q:
300 case Intrinsic::x86_sse2_psrli_w:
301 case Intrinsic::x86_avx2_psrl_d:
302 case Intrinsic::x86_avx2_psrl_q:
303 case Intrinsic::x86_avx2_psrl_w:
304 case Intrinsic::x86_avx2_psrli_d:
305 case Intrinsic::x86_avx2_psrli_q:
306 case Intrinsic::x86_avx2_psrli_w:
307 case Intrinsic::x86_avx512_psrl_d_512:
308 case Intrinsic::x86_avx512_psrl_q_512:
309 case Intrinsic::x86_avx512_psrl_w_512:
310 case Intrinsic::x86_avx512_psrli_d_512:
311 case Intrinsic::x86_avx512_psrli_q_512:
312 case Intrinsic::x86_avx512_psrli_w_512:
313 LogicalShift = true; ShiftLeft = false;
315 case Intrinsic::x86_sse2_psll_d:
316 case Intrinsic::x86_sse2_psll_q:
317 case Intrinsic::x86_sse2_psll_w:
318 case Intrinsic::x86_sse2_pslli_d:
319 case Intrinsic::x86_sse2_pslli_q:
320 case Intrinsic::x86_sse2_pslli_w:
321 case Intrinsic::x86_avx2_psll_d:
322 case Intrinsic::x86_avx2_psll_q:
323 case Intrinsic::x86_avx2_psll_w:
324 case Intrinsic::x86_avx2_pslli_d:
325 case Intrinsic::x86_avx2_pslli_q:
326 case Intrinsic::x86_avx2_pslli_w:
327 case Intrinsic::x86_avx512_psll_d_512:
328 case Intrinsic::x86_avx512_psll_q_512:
329 case Intrinsic::x86_avx512_psll_w_512:
330 case Intrinsic::x86_avx512_pslli_d_512:
331 case Intrinsic::x86_avx512_pslli_q_512:
332 case Intrinsic::x86_avx512_pslli_w_512:
333 LogicalShift = true; ShiftLeft = true;
336 assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");
338 // Simplify if count is constant.
339 auto Arg1 = II.getArgOperand(1);
340 auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
341 auto CDV = dyn_cast<ConstantDataVector>(Arg1);
342 auto CInt = dyn_cast<ConstantInt>(Arg1);
343 if (!CAZ && !CDV && !CInt)
348 // SSE2/AVX2 uses all the first 64-bits of the 128-bit vector
349 // operand to compute the shift amount.
350 auto VT = cast<VectorType>(CDV->getType());
351 unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
352 assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
353 unsigned NumSubElts = 64 / BitWidth;
355 // Concatenate the sub-elements to create the 64-bit value.
356 for (unsigned i = 0; i != NumSubElts; ++i) {
357 unsigned SubEltIdx = (NumSubElts - 1) - i;
358 auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
360 Count |= SubElt->getValue().zextOrTrunc(64);
364 Count = CInt->getValue();
366 auto Vec = II.getArgOperand(0);
367 auto VT = cast<VectorType>(Vec->getType());
368 auto SVT = VT->getElementType();
369 unsigned VWidth = VT->getNumElements();
370 unsigned BitWidth = SVT->getPrimitiveSizeInBits();
372 // If shift-by-zero then just return the original value.
373 if (Count.isNullValue())
376 // Handle cases when Shift >= BitWidth.
377 if (Count.uge(BitWidth)) {
378 // If LogicalShift - just return zero.
380 return ConstantAggregateZero::get(VT);
382 // If ArithmeticShift - clamp Shift to (BitWidth - 1).
383 Count = APInt(64, BitWidth - 1);
386 // Get a constant vector of the same type as the first operand.
387 auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
388 auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);
391 return Builder.CreateShl(Vec, ShiftVec);
394 return Builder.CreateLShr(Vec, ShiftVec);
396 return Builder.CreateAShr(Vec, ShiftVec);
399 // Attempt to simplify AVX2 per-element shift intrinsics to a generic IR shift.
400 // Unlike the generic IR shifts, the intrinsics have defined behaviour for out
401 // of range shift amounts (logical - set to zero, arithmetic - splat sign bit).
402 static Value *simplifyX86varShift(const IntrinsicInst &II,
403 InstCombiner::BuilderTy &Builder) {
404 bool LogicalShift = false;
405 bool ShiftLeft = false;
407 switch (II.getIntrinsicID()) {
408 default: llvm_unreachable("Unexpected intrinsic!");
409 case Intrinsic::x86_avx2_psrav_d:
410 case Intrinsic::x86_avx2_psrav_d_256:
411 case Intrinsic::x86_avx512_psrav_q_128:
412 case Intrinsic::x86_avx512_psrav_q_256:
413 case Intrinsic::x86_avx512_psrav_d_512:
414 case Intrinsic::x86_avx512_psrav_q_512:
415 case Intrinsic::x86_avx512_psrav_w_128:
416 case Intrinsic::x86_avx512_psrav_w_256:
417 case Intrinsic::x86_avx512_psrav_w_512:
418 LogicalShift = false;
421 case Intrinsic::x86_avx2_psrlv_d:
422 case Intrinsic::x86_avx2_psrlv_d_256:
423 case Intrinsic::x86_avx2_psrlv_q:
424 case Intrinsic::x86_avx2_psrlv_q_256:
425 case Intrinsic::x86_avx512_psrlv_d_512:
426 case Intrinsic::x86_avx512_psrlv_q_512:
427 case Intrinsic::x86_avx512_psrlv_w_128:
428 case Intrinsic::x86_avx512_psrlv_w_256:
429 case Intrinsic::x86_avx512_psrlv_w_512:
433 case Intrinsic::x86_avx2_psllv_d:
434 case Intrinsic::x86_avx2_psllv_d_256:
435 case Intrinsic::x86_avx2_psllv_q:
436 case Intrinsic::x86_avx2_psllv_q_256:
437 case Intrinsic::x86_avx512_psllv_d_512:
438 case Intrinsic::x86_avx512_psllv_q_512:
439 case Intrinsic::x86_avx512_psllv_w_128:
440 case Intrinsic::x86_avx512_psllv_w_256:
441 case Intrinsic::x86_avx512_psllv_w_512:
446 assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");
448 // Simplify if all shift amounts are constant/undef.
449 auto *CShift = dyn_cast<Constant>(II.getArgOperand(1));
453 auto Vec = II.getArgOperand(0);
454 auto VT = cast<VectorType>(II.getType());
455 auto SVT = VT->getVectorElementType();
456 int NumElts = VT->getNumElements();
457 int BitWidth = SVT->getIntegerBitWidth();
459 // Collect each element's shift amount.
460 // We also collect special cases: UNDEF = -1, OUT-OF-RANGE = BitWidth.
461 bool AnyOutOfRange = false;
462 SmallVector<int, 8> ShiftAmts;
463 for (int I = 0; I < NumElts; ++I) {
464 auto *CElt = CShift->getAggregateElement(I);
465 if (CElt && isa<UndefValue>(CElt)) {
466 ShiftAmts.push_back(-1);
470 auto *COp = dyn_cast_or_null<ConstantInt>(CElt);
474 // Handle out of range shifts.
475 // If LogicalShift - set to BitWidth (special case).
476 // If ArithmeticShift - set to (BitWidth - 1) (sign splat).
477 APInt ShiftVal = COp->getValue();
478 if (ShiftVal.uge(BitWidth)) {
479 AnyOutOfRange = LogicalShift;
480 ShiftAmts.push_back(LogicalShift ? BitWidth : BitWidth - 1);
484 ShiftAmts.push_back((int)ShiftVal.getZExtValue());
487 // If all elements out of range or UNDEF, return vector of zeros/undefs.
488 // ArithmeticShift should only hit this if they are all UNDEF.
489 auto OutOfRange = [&](int Idx) { return (Idx < 0) || (BitWidth <= Idx); };
490 if (llvm::all_of(ShiftAmts, OutOfRange)) {
491 SmallVector<Constant *, 8> ConstantVec;
492 for (int Idx : ShiftAmts) {
494 ConstantVec.push_back(UndefValue::get(SVT));
496 assert(LogicalShift && "Logical shift expected");
497 ConstantVec.push_back(ConstantInt::getNullValue(SVT));
500 return ConstantVector::get(ConstantVec);
503 // We can't handle only some out of range values with generic logical shifts.
507 // Build the shift amount constant vector.
508 SmallVector<Constant *, 8> ShiftVecAmts;
509 for (int Idx : ShiftAmts) {
511 ShiftVecAmts.push_back(UndefValue::get(SVT));
513 ShiftVecAmts.push_back(ConstantInt::get(SVT, Idx));
515 auto ShiftVec = ConstantVector::get(ShiftVecAmts);
518 return Builder.CreateShl(Vec, ShiftVec);
521 return Builder.CreateLShr(Vec, ShiftVec);
523 return Builder.CreateAShr(Vec, ShiftVec);
526 static Value *simplifyX86pack(IntrinsicInst &II, bool IsSigned) {
527 Value *Arg0 = II.getArgOperand(0);
528 Value *Arg1 = II.getArgOperand(1);
529 Type *ResTy = II.getType();
531 // Fast all undef handling.
532 if (isa<UndefValue>(Arg0) && isa<UndefValue>(Arg1))
533 return UndefValue::get(ResTy);
535 Type *ArgTy = Arg0->getType();
536 unsigned NumLanes = ResTy->getPrimitiveSizeInBits() / 128;
537 unsigned NumDstElts = ResTy->getVectorNumElements();
538 unsigned NumSrcElts = ArgTy->getVectorNumElements();
539 assert(NumDstElts == (2 * NumSrcElts) && "Unexpected packing types");
541 unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
542 unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
543 unsigned DstScalarSizeInBits = ResTy->getScalarSizeInBits();
544 assert(ArgTy->getScalarSizeInBits() == (2 * DstScalarSizeInBits) &&
545 "Unexpected packing types");
548 auto *Cst0 = dyn_cast<Constant>(Arg0);
549 auto *Cst1 = dyn_cast<Constant>(Arg1);
553 SmallVector<Constant *, 32> Vals;
554 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
555 for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
556 unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
557 auto *Cst = (Elt >= NumSrcEltsPerLane) ? Cst1 : Cst0;
558 auto *COp = Cst->getAggregateElement(SrcIdx);
559 if (COp && isa<UndefValue>(COp)) {
560 Vals.push_back(UndefValue::get(ResTy->getScalarType()));
564 auto *CInt = dyn_cast_or_null<ConstantInt>(COp);
568 APInt Val = CInt->getValue();
569 assert(Val.getBitWidth() == ArgTy->getScalarSizeInBits() &&
570 "Unexpected constant bitwidth");
573 // PACKSS: Truncate signed value with signed saturation.
574 // Source values less than dst minint are saturated to minint.
575 // Source values greater than dst maxint are saturated to maxint.
576 if (Val.isSignedIntN(DstScalarSizeInBits))
577 Val = Val.trunc(DstScalarSizeInBits);
578 else if (Val.isNegative())
579 Val = APInt::getSignedMinValue(DstScalarSizeInBits);
581 Val = APInt::getSignedMaxValue(DstScalarSizeInBits);
583 // PACKUS: Truncate signed value with unsigned saturation.
584 // Source values less than zero are saturated to zero.
585 // Source values greater than dst maxuint are saturated to maxuint.
586 if (Val.isIntN(DstScalarSizeInBits))
587 Val = Val.trunc(DstScalarSizeInBits);
588 else if (Val.isNegative())
589 Val = APInt::getNullValue(DstScalarSizeInBits);
591 Val = APInt::getAllOnesValue(DstScalarSizeInBits);
594 Vals.push_back(ConstantInt::get(ResTy->getScalarType(), Val));
598 return ConstantVector::get(Vals);
601 // Replace X86-specific intrinsics with generic floor-ceil where applicable.
602 static Value *simplifyX86round(IntrinsicInst &II,
603 InstCombiner::BuilderTy &Builder) {
604 ConstantInt *Arg = nullptr;
605 Intrinsic::ID IntrinsicID = II.getIntrinsicID();
607 if (IntrinsicID == Intrinsic::x86_sse41_round_ss ||
608 IntrinsicID == Intrinsic::x86_sse41_round_sd)
609 Arg = dyn_cast<ConstantInt>(II.getArgOperand(2));
610 else if (IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ss ||
611 IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_sd)
612 Arg = dyn_cast<ConstantInt>(II.getArgOperand(4));
614 Arg = dyn_cast<ConstantInt>(II.getArgOperand(1));
617 unsigned RoundControl = Arg->getZExtValue();
621 if (IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ps_512 ||
622 IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_pd_512)
623 Arg = dyn_cast<ConstantInt>(II.getArgOperand(4));
624 else if (IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ss ||
625 IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_sd)
626 Arg = dyn_cast<ConstantInt>(II.getArgOperand(5));
632 SAE = Arg->getZExtValue();
635 if (SAE != 4 || (RoundControl != 2 /*ceil*/ && RoundControl != 1 /*floor*/))
638 Value *Src, *Dst, *Mask;
639 bool IsScalar = false;
640 if (IntrinsicID == Intrinsic::x86_sse41_round_ss ||
641 IntrinsicID == Intrinsic::x86_sse41_round_sd ||
642 IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ss ||
643 IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_sd) {
645 if (IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ss ||
646 IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_sd) {
647 Mask = II.getArgOperand(3);
648 Value *Zero = Constant::getNullValue(Mask->getType());
649 Mask = Builder.CreateAnd(Mask, 1);
650 Mask = Builder.CreateICmp(ICmpInst::ICMP_NE, Mask, Zero);
651 Dst = II.getArgOperand(2);
653 Dst = II.getArgOperand(0);
654 Src = Builder.CreateExtractElement(II.getArgOperand(1), (uint64_t)0);
656 Src = II.getArgOperand(0);
657 if (IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ps_128 ||
658 IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ps_256 ||
659 IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ps_512 ||
660 IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_pd_128 ||
661 IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_pd_256 ||
662 IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_pd_512) {
663 Dst = II.getArgOperand(2);
664 Mask = II.getArgOperand(3);
667 Mask = ConstantInt::getAllOnesValue(
668 Builder.getIntNTy(Src->getType()->getVectorNumElements()));
672 Intrinsic::ID ID = (RoundControl == 2) ? Intrinsic::ceil : Intrinsic::floor;
673 Value *Res = Builder.CreateUnaryIntrinsic(ID, Src, &II);
675 if (auto *C = dyn_cast<Constant>(Mask))
676 if (C->isAllOnesValue())
678 auto *MaskTy = VectorType::get(
679 Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
680 Mask = Builder.CreateBitCast(Mask, MaskTy);
681 unsigned Width = Src->getType()->getVectorNumElements();
682 if (MaskTy->getVectorNumElements() > Width) {
684 for (unsigned i = 0; i != Width; ++i)
686 Mask = Builder.CreateShuffleVector(Mask, Mask,
687 makeArrayRef(Indices, Width));
689 return Builder.CreateSelect(Mask, Res, Dst);
691 if (IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ss ||
692 IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_sd) {
693 Dst = Builder.CreateExtractElement(Dst, (uint64_t)0);
694 Res = Builder.CreateSelect(Mask, Res, Dst);
695 Dst = II.getArgOperand(0);
697 return Builder.CreateInsertElement(Dst, Res, (uint64_t)0);
700 static Value *simplifyX86movmsk(const IntrinsicInst &II,
701 InstCombiner::BuilderTy &Builder) {
702 Value *Arg = II.getArgOperand(0);
703 Type *ResTy = II.getType();
704 Type *ArgTy = Arg->getType();
706 // movmsk(undef) -> zero as we must ensure the upper bits are zero.
707 if (isa<UndefValue>(Arg))
708 return Constant::getNullValue(ResTy);
710 // We can't easily peek through x86_mmx types.
711 if (!ArgTy->isVectorTy())
714 if (auto *C = dyn_cast<Constant>(Arg)) {
715 // Extract signbits of the vector input and pack into integer result.
716 APInt Result(ResTy->getPrimitiveSizeInBits(), 0);
717 for (unsigned I = 0, E = ArgTy->getVectorNumElements(); I != E; ++I) {
718 auto *COp = C->getAggregateElement(I);
721 if (isa<UndefValue>(COp))
724 auto *CInt = dyn_cast<ConstantInt>(COp);
725 auto *CFp = dyn_cast<ConstantFP>(COp);
729 if ((CInt && CInt->isNegative()) || (CFp && CFp->isNegative()))
732 return Constant::getIntegerValue(ResTy, Result);
735 // Look for a sign-extended boolean source vector as the argument to this
736 // movmsk. If the argument is bitcast, look through that, but make sure the
737 // source of that bitcast is still a vector with the same number of elements.
738 // TODO: We can also convert a bitcast with wider elements, but that requires
739 // duplicating the bool source sign bits to match the number of elements
740 // expected by the movmsk call.
741 Arg = peekThroughBitcast(Arg);
743 if (Arg->getType()->isVectorTy() &&
744 Arg->getType()->getVectorNumElements() == ArgTy->getVectorNumElements() &&
745 match(Arg, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
746 // call iM movmsk(sext <N x i1> X) --> zext (bitcast <N x i1> X to iN) to iM
747 unsigned NumElts = X->getType()->getVectorNumElements();
748 Type *ScalarTy = Type::getIntNTy(Arg->getContext(), NumElts);
749 Value *BC = Builder.CreateBitCast(X, ScalarTy);
750 return Builder.CreateZExtOrTrunc(BC, ResTy);
756 static Value *simplifyX86insertps(const IntrinsicInst &II,
757 InstCombiner::BuilderTy &Builder) {
758 auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
762 VectorType *VecTy = cast<VectorType>(II.getType());
763 assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");
765 // The immediate permute control byte looks like this:
766 // [3:0] - zero mask for each 32-bit lane
767 // [5:4] - select one 32-bit destination lane
768 // [7:6] - select one 32-bit source lane
770 uint8_t Imm = CInt->getZExtValue();
771 uint8_t ZMask = Imm & 0xf;
772 uint8_t DestLane = (Imm >> 4) & 0x3;
773 uint8_t SourceLane = (Imm >> 6) & 0x3;
775 ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);
777 // If all zero mask bits are set, this was just a weird way to
778 // generate a zero vector.
782 // Initialize by passing all of the first source bits through.
783 uint32_t ShuffleMask[4] = { 0, 1, 2, 3 };
785 // We may replace the second operand with the zero vector.
786 Value *V1 = II.getArgOperand(1);
789 // If the zero mask is being used with a single input or the zero mask
790 // overrides the destination lane, this is a shuffle with the zero vector.
791 if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
792 (ZMask & (1 << DestLane))) {
794 // We may still move 32-bits of the first source vector from one lane
796 ShuffleMask[DestLane] = SourceLane;
797 // The zero mask may override the previous insert operation.
798 for (unsigned i = 0; i < 4; ++i)
799 if ((ZMask >> i) & 0x1)
800 ShuffleMask[i] = i + 4;
802 // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
806 // Replace the selected destination lane with the selected source lane.
807 ShuffleMask[DestLane] = SourceLane + 4;
810 return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
813 /// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
814 /// or conversion to a shuffle vector.
815 static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
816 ConstantInt *CILength, ConstantInt *CIIndex,
817 InstCombiner::BuilderTy &Builder) {
818 auto LowConstantHighUndef = [&](uint64_t Val) {
819 Type *IntTy64 = Type::getInt64Ty(II.getContext());
820 Constant *Args[] = {ConstantInt::get(IntTy64, Val),
821 UndefValue::get(IntTy64)};
822 return ConstantVector::get(Args);
825 // See if we're dealing with constant values.
826 Constant *C0 = dyn_cast<Constant>(Op0);
828 C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
831 // Attempt to constant fold.
832 if (CILength && CIIndex) {
833 // From AMD documentation: "The bit index and field length are each six
834 // bits in length other bits of the field are ignored."
835 APInt APIndex = CIIndex->getValue().zextOrTrunc(6);
836 APInt APLength = CILength->getValue().zextOrTrunc(6);
838 unsigned Index = APIndex.getZExtValue();
840 // From AMD documentation: "a value of zero in the field length is
841 // defined as length of 64".
842 unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();
844 // From AMD documentation: "If the sum of the bit index + length field
845 // is greater than 64, the results are undefined".
846 unsigned End = Index + Length;
848 // Note that both field index and field length are 8-bit quantities.
849 // Since variables 'Index' and 'Length' are unsigned values
850 // obtained from zero-extending field index and field length
851 // respectively, their sum should never wrap around.
853 return UndefValue::get(II.getType());
855 // If we are inserting whole bytes, we can convert this to a shuffle.
856 // Lowering can recognize EXTRQI shuffle masks.
857 if ((Length % 8) == 0 && (Index % 8) == 0) {
858 // Convert bit indices to byte indices.
862 Type *IntTy8 = Type::getInt8Ty(II.getContext());
863 Type *IntTy32 = Type::getInt32Ty(II.getContext());
864 VectorType *ShufTy = VectorType::get(IntTy8, 16);
866 SmallVector<Constant *, 16> ShuffleMask;
867 for (int i = 0; i != (int)Length; ++i)
868 ShuffleMask.push_back(
869 Constant::getIntegerValue(IntTy32, APInt(32, i + Index)));
870 for (int i = Length; i != 8; ++i)
871 ShuffleMask.push_back(
872 Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
873 for (int i = 8; i != 16; ++i)
874 ShuffleMask.push_back(UndefValue::get(IntTy32));
876 Value *SV = Builder.CreateShuffleVector(
877 Builder.CreateBitCast(Op0, ShufTy),
878 ConstantAggregateZero::get(ShufTy), ConstantVector::get(ShuffleMask));
879 return Builder.CreateBitCast(SV, II.getType());
882 // Constant Fold - shift Index'th bit to lowest position and mask off
885 APInt Elt = CI0->getValue();
886 Elt.lshrInPlace(Index);
887 Elt = Elt.zextOrTrunc(Length);
888 return LowConstantHighUndef(Elt.getZExtValue());
891 // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
892 if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
893 Value *Args[] = {Op0, CILength, CIIndex};
894 Module *M = II.getModule();
895 Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
896 return Builder.CreateCall(F, Args);
900 // Constant Fold - extraction from zero is always {zero, undef}.
901 if (CI0 && CI0->isZero())
902 return LowConstantHighUndef(0);
907 /// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
908 /// folding or conversion to a shuffle vector.
909 static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
910 APInt APLength, APInt APIndex,
911 InstCombiner::BuilderTy &Builder) {
912 // From AMD documentation: "The bit index and field length are each six bits
913 // in length other bits of the field are ignored."
914 APIndex = APIndex.zextOrTrunc(6);
915 APLength = APLength.zextOrTrunc(6);
917 // Attempt to constant fold.
918 unsigned Index = APIndex.getZExtValue();
920 // From AMD documentation: "a value of zero in the field length is
921 // defined as length of 64".
922 unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();
924 // From AMD documentation: "If the sum of the bit index + length field
925 // is greater than 64, the results are undefined".
926 unsigned End = Index + Length;
928 // Note that both field index and field length are 8-bit quantities.
929 // Since variables 'Index' and 'Length' are unsigned values
930 // obtained from zero-extending field index and field length
931 // respectively, their sum should never wrap around.
933 return UndefValue::get(II.getType());
935 // If we are inserting whole bytes, we can convert this to a shuffle.
936 // Lowering can recognize INSERTQI shuffle masks.
937 if ((Length % 8) == 0 && (Index % 8) == 0) {
938 // Convert bit indices to byte indices.
942 Type *IntTy8 = Type::getInt8Ty(II.getContext());
943 Type *IntTy32 = Type::getInt32Ty(II.getContext());
944 VectorType *ShufTy = VectorType::get(IntTy8, 16);
946 SmallVector<Constant *, 16> ShuffleMask;
947 for (int i = 0; i != (int)Index; ++i)
948 ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
949 for (int i = 0; i != (int)Length; ++i)
950 ShuffleMask.push_back(
951 Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
952 for (int i = Index + Length; i != 8; ++i)
953 ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
954 for (int i = 8; i != 16; ++i)
955 ShuffleMask.push_back(UndefValue::get(IntTy32));
957 Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy),
958 Builder.CreateBitCast(Op1, ShufTy),
959 ConstantVector::get(ShuffleMask));
960 return Builder.CreateBitCast(SV, II.getType());
963 // See if we're dealing with constant values.
964 Constant *C0 = dyn_cast<Constant>(Op0);
965 Constant *C1 = dyn_cast<Constant>(Op1);
967 C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
970 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
973 // Constant Fold - insert bottom Length bits starting at the Index'th bit.
975 APInt V00 = CI00->getValue();
976 APInt V10 = CI10->getValue();
977 APInt Mask = APInt::getLowBitsSet(64, Length).shl(Index);
979 V10 = V10.zextOrTrunc(Length).zextOrTrunc(64).shl(Index);
980 APInt Val = V00 | V10;
981 Type *IntTy64 = Type::getInt64Ty(II.getContext());
982 Constant *Args[] = {ConstantInt::get(IntTy64, Val.getZExtValue()),
983 UndefValue::get(IntTy64)};
984 return ConstantVector::get(Args);
987 // If we were an INSERTQ call, we'll save demanded elements if we convert to
989 if (II.getIntrinsicID() == Intrinsic::x86_sse4a_insertq) {
990 Type *IntTy8 = Type::getInt8Ty(II.getContext());
991 Constant *CILength = ConstantInt::get(IntTy8, Length, false);
992 Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);
994 Value *Args[] = {Op0, Op1, CILength, CIIndex};
995 Module *M = II.getModule();
996 Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
997 return Builder.CreateCall(F, Args);
1003 /// Attempt to convert pshufb* to shufflevector if the mask is constant.
1004 static Value *simplifyX86pshufb(const IntrinsicInst &II,
1005 InstCombiner::BuilderTy &Builder) {
1006 Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
1010 auto *VecTy = cast<VectorType>(II.getType());
1011 auto *MaskEltTy = Type::getInt32Ty(II.getContext());
1012 unsigned NumElts = VecTy->getNumElements();
1013 assert((NumElts == 16 || NumElts == 32 || NumElts == 64) &&
1014 "Unexpected number of elements in shuffle mask!");
1016 // Construct a shuffle mask from constant integers or UNDEFs.
1017 Constant *Indexes[64] = {nullptr};
1019 // Each byte in the shuffle control mask forms an index to permute the
1020 // corresponding byte in the destination operand.
1021 for (unsigned I = 0; I < NumElts; ++I) {
1022 Constant *COp = V->getAggregateElement(I);
1023 if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
1026 if (isa<UndefValue>(COp)) {
1027 Indexes[I] = UndefValue::get(MaskEltTy);
1031 int8_t Index = cast<ConstantInt>(COp)->getValue().getZExtValue();
1033 // If the most significant bit (bit[7]) of each byte of the shuffle
1034 // control mask is set, then zero is written in the result byte.
1035 // The zero vector is in the right-hand side of the resulting
1038 // The value of each index for the high 128-bit lane is the least
1039 // significant 4 bits of the respective shuffle control byte.
1040 Index = ((Index < 0) ? NumElts : Index & 0x0F) + (I & 0xF0);
1041 Indexes[I] = ConstantInt::get(MaskEltTy, Index);
1044 auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
1045 auto V1 = II.getArgOperand(0);
1046 auto V2 = Constant::getNullValue(VecTy);
1047 return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1050 /// Attempt to convert vpermilvar* to shufflevector if the mask is constant.
1051 static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
1052 InstCombiner::BuilderTy &Builder) {
1053 Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
1057 auto *VecTy = cast<VectorType>(II.getType());
1058 auto *MaskEltTy = Type::getInt32Ty(II.getContext());
1059 unsigned NumElts = VecTy->getVectorNumElements();
1060 bool IsPD = VecTy->getScalarType()->isDoubleTy();
1061 unsigned NumLaneElts = IsPD ? 2 : 4;
1062 assert(NumElts == 16 || NumElts == 8 || NumElts == 4 || NumElts == 2);
1064 // Construct a shuffle mask from constant integers or UNDEFs.
1065 Constant *Indexes[16] = {nullptr};
1067 // The intrinsics only read one or two bits, clear the rest.
1068 for (unsigned I = 0; I < NumElts; ++I) {
1069 Constant *COp = V->getAggregateElement(I);
1070 if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
1073 if (isa<UndefValue>(COp)) {
1074 Indexes[I] = UndefValue::get(MaskEltTy);
1078 APInt Index = cast<ConstantInt>(COp)->getValue();
1079 Index = Index.zextOrTrunc(32).getLoBits(2);
1081 // The PD variants uses bit 1 to select per-lane element index, so
1082 // shift down to convert to generic shuffle mask index.
1084 Index.lshrInPlace(1);
1086 // The _256 variants are a bit trickier since the mask bits always index
1087 // into the corresponding 128 half. In order to convert to a generic
1088 // shuffle, we have to make that explicit.
1089 Index += APInt(32, (I / NumLaneElts) * NumLaneElts);
1091 Indexes[I] = ConstantInt::get(MaskEltTy, Index);
1094 auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
1095 auto V1 = II.getArgOperand(0);
1096 auto V2 = UndefValue::get(V1->getType());
1097 return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1100 /// Attempt to convert vpermd/vpermps to shufflevector if the mask is constant.
1101 static Value *simplifyX86vpermv(const IntrinsicInst &II,
1102 InstCombiner::BuilderTy &Builder) {
1103 auto *V = dyn_cast<Constant>(II.getArgOperand(1));
1107 auto *VecTy = cast<VectorType>(II.getType());
1108 auto *MaskEltTy = Type::getInt32Ty(II.getContext());
1109 unsigned Size = VecTy->getNumElements();
1110 assert((Size == 4 || Size == 8 || Size == 16 || Size == 32 || Size == 64) &&
1111 "Unexpected shuffle mask size");
1113 // Construct a shuffle mask from constant integers or UNDEFs.
1114 Constant *Indexes[64] = {nullptr};
1116 for (unsigned I = 0; I < Size; ++I) {
1117 Constant *COp = V->getAggregateElement(I);
1118 if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
1121 if (isa<UndefValue>(COp)) {
1122 Indexes[I] = UndefValue::get(MaskEltTy);
1126 uint32_t Index = cast<ConstantInt>(COp)->getZExtValue();
1128 Indexes[I] = ConstantInt::get(MaskEltTy, Index);
1131 auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, Size));
1132 auto V1 = II.getArgOperand(0);
1133 auto V2 = UndefValue::get(VecTy);
1134 return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1137 /// Decode XOP integer vector comparison intrinsics.
1138 static Value *simplifyX86vpcom(const IntrinsicInst &II,
1139 InstCombiner::BuilderTy &Builder,
1141 if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
1142 uint64_t Imm = CInt->getZExtValue() & 0x7;
1143 VectorType *VecTy = cast<VectorType>(II.getType());
1144 CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
1148 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1151 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
1154 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1157 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
1160 Pred = ICmpInst::ICMP_EQ; break;
1162 Pred = ICmpInst::ICMP_NE; break;
1164 return ConstantInt::getSigned(VecTy, 0); // FALSE
1166 return ConstantInt::getSigned(VecTy, -1); // TRUE
1169 if (Value *Cmp = Builder.CreateICmp(Pred, II.getArgOperand(0),
1170 II.getArgOperand(1)))
1171 return Builder.CreateSExtOrTrunc(Cmp, VecTy);
1176 static bool maskIsAllOneOrUndef(Value *Mask) {
1177 auto *ConstMask = dyn_cast<Constant>(Mask);
1180 if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1182 for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
1184 if (auto *MaskElt = ConstMask->getAggregateElement(I))
1185 if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1192 static Value *simplifyMaskedLoad(const IntrinsicInst &II,
1193 InstCombiner::BuilderTy &Builder) {
1194 // If the mask is all ones or undefs, this is a plain vector load of the 1st
1196 if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
1197 Value *LoadPtr = II.getArgOperand(0);
1198 unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
1199 return Builder.CreateAlignedLoad(LoadPtr, Alignment, "unmaskedload");
1205 static Instruction *simplifyMaskedStore(IntrinsicInst &II, InstCombiner &IC) {
1206 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
1210 // If the mask is all zeros, this instruction does nothing.
1211 if (ConstMask->isNullValue())
1212 return IC.eraseInstFromFunction(II);
1214 // If the mask is all ones, this is a plain vector store of the 1st argument.
1215 if (ConstMask->isAllOnesValue()) {
1216 Value *StorePtr = II.getArgOperand(1);
1217 unsigned Alignment = cast<ConstantInt>(II.getArgOperand(2))->getZExtValue();
1218 return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
1224 static Instruction *simplifyMaskedGather(IntrinsicInst &II, InstCombiner &IC) {
1225 // If the mask is all zeros, return the "passthru" argument of the gather.
1226 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
1227 if (ConstMask && ConstMask->isNullValue())
1228 return IC.replaceInstUsesWith(II, II.getArgOperand(3));
1233 /// This function transforms launder.invariant.group and strip.invariant.group
1235 /// launder(launder(%x)) -> launder(%x) (the result is not the argument)
1236 /// launder(strip(%x)) -> launder(%x)
1237 /// strip(strip(%x)) -> strip(%x) (the result is not the argument)
1238 /// strip(launder(%x)) -> strip(%x)
1239 /// This is legal because it preserves the most recent information about
1240 /// the presence or absence of invariant.group.
1241 static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
1243 auto *Arg = II.getArgOperand(0);
1244 auto *StrippedArg = Arg->stripPointerCasts();
1245 auto *StrippedInvariantGroupsArg = Arg->stripPointerCastsAndInvariantGroups();
1246 if (StrippedArg == StrippedInvariantGroupsArg)
1247 return nullptr; // No launders/strips to remove.
1249 Value *Result = nullptr;
1251 if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
1252 Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
1253 else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
1254 Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
1257 "simplifyInvariantGroupIntrinsic only handles launder and strip");
1258 if (Result->getType()->getPointerAddressSpace() !=
1259 II.getType()->getPointerAddressSpace())
1260 Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
1261 if (Result->getType() != II.getType())
1262 Result = IC.Builder.CreateBitCast(Result, II.getType());
1264 return cast<Instruction>(Result);
1267 static Instruction *simplifyMaskedScatter(IntrinsicInst &II, InstCombiner &IC) {
1268 // If the mask is all zeros, a scatter does nothing.
1269 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
1270 if (ConstMask && ConstMask->isNullValue())
1271 return IC.eraseInstFromFunction(II);
1276 static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
1277 assert((II.getIntrinsicID() == Intrinsic::cttz ||
1278 II.getIntrinsicID() == Intrinsic::ctlz) &&
1279 "Expected cttz or ctlz intrinsic");
1280 Value *Op0 = II.getArgOperand(0);
1282 KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
1284 // Create a mask for bits above (ctlz) or below (cttz) the first known one.
1285 bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
1286 unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
1287 : Known.countMaxLeadingZeros();
1288 unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
1289 : Known.countMinLeadingZeros();
1291 // If all bits above (ctlz) or below (cttz) the first known one are known
1292 // zero, this value is constant.
1293 // FIXME: This should be in InstSimplify because we're replacing an
1294 // instruction with a constant.
1295 if (PossibleZeros == DefiniteZeros) {
1296 auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
1297 return IC.replaceInstUsesWith(II, C);
1300 // If the input to cttz/ctlz is known to be non-zero,
1301 // then change the 'ZeroIsUndef' parameter to 'true'
1302 // because we know the zero behavior can't affect the result.
1303 if (!Known.One.isNullValue() ||
1304 isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
1305 &IC.getDominatorTree())) {
1306 if (!match(II.getArgOperand(1), m_One())) {
1307 II.setOperand(1, IC.Builder.getTrue());
1312 // Add range metadata since known bits can't completely reflect what we know.
1313 // TODO: Handle splat vectors.
1314 auto *IT = dyn_cast<IntegerType>(Op0->getType());
1315 if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
1316 Metadata *LowAndHigh[] = {
1317 ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
1318 ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
1319 II.setMetadata(LLVMContext::MD_range,
1320 MDNode::get(II.getContext(), LowAndHigh));
1327 static Instruction *foldCtpop(IntrinsicInst &II, InstCombiner &IC) {
1328 assert(II.getIntrinsicID() == Intrinsic::ctpop &&
1329 "Expected ctpop intrinsic");
1330 Value *Op0 = II.getArgOperand(0);
1331 // FIXME: Try to simplify vectors of integers.
1332 auto *IT = dyn_cast<IntegerType>(Op0->getType());
1336 unsigned BitWidth = IT->getBitWidth();
1337 KnownBits Known(BitWidth);
1338 IC.computeKnownBits(Op0, Known, 0, &II);
1340 unsigned MinCount = Known.countMinPopulation();
1341 unsigned MaxCount = Known.countMaxPopulation();
1343 // Add range metadata since known bits can't completely reflect what we know.
1344 if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
1345 Metadata *LowAndHigh[] = {
1346 ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
1347 ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
1348 II.setMetadata(LLVMContext::MD_range,
1349 MDNode::get(II.getContext(), LowAndHigh));
1356 // TODO: If the x86 backend knew how to convert a bool vector mask back to an
1357 // XMM register mask efficiently, we could transform all x86 masked intrinsics
1358 // to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
1359 static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
1360 Value *Ptr = II.getOperand(0);
1361 Value *Mask = II.getOperand(1);
1362 Constant *ZeroVec = Constant::getNullValue(II.getType());
1364 // Special case a zero mask since that's not a ConstantDataVector.
1365 // This masked load instruction creates a zero vector.
1366 if (isa<ConstantAggregateZero>(Mask))
1367 return IC.replaceInstUsesWith(II, ZeroVec);
1369 auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
1373 // The mask is constant. Convert this x86 intrinsic to the LLVM instrinsic
1374 // to allow target-independent optimizations.
1376 // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
1377 // the LLVM intrinsic definition for the pointer argument.
1378 unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
1379 PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace);
1380 Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
1382 // Second, convert the x86 XMM integer vector mask to a vector of bools based
1383 // on each element's most significant bit (the sign bit).
1384 Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
1386 // The pass-through vector for an x86 masked load is a zero vector.
1387 CallInst *NewMaskedLoad =
1388 IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
1389 return IC.replaceInstUsesWith(II, NewMaskedLoad);
1392 // TODO: If the x86 backend knew how to convert a bool vector mask back to an
1393 // XMM register mask efficiently, we could transform all x86 masked intrinsics
1394 // to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
1395 static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
1396 Value *Ptr = II.getOperand(0);
1397 Value *Mask = II.getOperand(1);
1398 Value *Vec = II.getOperand(2);
1400 // Special case a zero mask since that's not a ConstantDataVector:
1401 // this masked store instruction does nothing.
1402 if (isa<ConstantAggregateZero>(Mask)) {
1403 IC.eraseInstFromFunction(II);
1407 // The SSE2 version is too weird (eg, unaligned but non-temporal) to do
1408 // anything else at this level.
1409 if (II.getIntrinsicID() == Intrinsic::x86_sse2_maskmov_dqu)
1412 auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
1416 // The mask is constant. Convert this x86 intrinsic to the LLVM instrinsic
1417 // to allow target-independent optimizations.
1419 // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
1420 // the LLVM intrinsic definition for the pointer argument.
1421 unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
1422 PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace);
1423 Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
1425 // Second, convert the x86 XMM integer vector mask to a vector of bools based
1426 // on each element's most significant bit (the sign bit).
1427 Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
1429 IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);
1431 // 'Replace uses' doesn't work for stores. Erase the original masked store.
1432 IC.eraseInstFromFunction(II);
1436 // Constant fold llvm.amdgcn.fmed3 intrinsics for standard inputs.
1438 // A single NaN input is folded to minnum, so we rely on that folding for
1440 static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
1441 const APFloat &Src2) {
1442 APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);
1444 APFloat::cmpResult Cmp0 = Max3.compare(Src0);
1445 assert(Cmp0 != APFloat::cmpUnordered && "nans handled separately");
1446 if (Cmp0 == APFloat::cmpEqual)
1447 return maxnum(Src1, Src2);
1449 APFloat::cmpResult Cmp1 = Max3.compare(Src1);
1450 assert(Cmp1 != APFloat::cmpUnordered && "nans handled separately");
1451 if (Cmp1 == APFloat::cmpEqual)
1452 return maxnum(Src0, Src2);
1454 return maxnum(Src0, Src1);
1457 /// Convert a table lookup to shufflevector if the mask is constant.
1458 /// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
1459 /// which case we could lower the shufflevector with rev64 instructions
1460 /// as it's actually a byte reverse.
1461 static Value *simplifyNeonTbl1(const IntrinsicInst &II,
1462 InstCombiner::BuilderTy &Builder) {
1463 // Bail out if the mask is not a constant.
1464 auto *C = dyn_cast<Constant>(II.getArgOperand(1));
1468 auto *VecTy = cast<VectorType>(II.getType());
1469 unsigned NumElts = VecTy->getNumElements();
1471 // Only perform this transformation for <8 x i8> vector types.
1472 if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
1475 uint32_t Indexes[8];
1477 for (unsigned I = 0; I < NumElts; ++I) {
1478 Constant *COp = C->getAggregateElement(I);
1480 if (!COp || !isa<ConstantInt>(COp))
1483 Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();
1485 // Make sure the mask indices are in range.
1486 if (Indexes[I] >= NumElts)
1490 auto *ShuffleMask = ConstantDataVector::get(II.getContext(),
1491 makeArrayRef(Indexes));
1492 auto *V1 = II.getArgOperand(0);
1493 auto *V2 = Constant::getNullValue(V1->getType());
1494 return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
1497 /// Convert a vector load intrinsic into a simple llvm load instruction.
1498 /// This is beneficial when the underlying object being addressed comes
1499 /// from a constant, since we get constant-folding for free.
1500 static Value *simplifyNeonVld1(const IntrinsicInst &II,
1502 InstCombiner::BuilderTy &Builder) {
1503 auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
1508 unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign ?
1509 MemAlign : IntrAlign->getLimitedValue();
1511 if (!isPowerOf2_32(Alignment))
1514 auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
1515 PointerType::get(II.getType(), 0));
1516 return Builder.CreateAlignedLoad(BCastInst, Alignment);
1519 // Returns true iff the 2 intrinsics have the same operands, limiting the
1520 // comparison to the first NumOperands.
1521 static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
1522 unsigned NumOperands) {
1523 assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
1524 assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
1525 for (unsigned i = 0; i < NumOperands; i++)
1526 if (I.getArgOperand(i) != E.getArgOperand(i))
1531 // Remove trivially empty start/end intrinsic ranges, i.e. a start
1532 // immediately followed by an end (ignoring debuginfo or other
1533 // start/end intrinsics in between). As this handles only the most trivial
1534 // cases, tracking the nesting level is not needed:
1536 // call @llvm.foo.start(i1 0) ; &I
1537 // call @llvm.foo.start(i1 0)
1538 // call @llvm.foo.end(i1 0) ; This one will not be skipped: it will be removed
1539 // call @llvm.foo.end(i1 0)
1540 static bool removeTriviallyEmptyRange(IntrinsicInst &I, unsigned StartID,
1541 unsigned EndID, InstCombiner &IC) {
1542 assert(I.getIntrinsicID() == StartID &&
1543 "Start intrinsic does not have expected ID");
1544 BasicBlock::iterator BI(I), BE(I.getParent()->end());
1545 for (++BI; BI != BE; ++BI) {
1546 if (auto *E = dyn_cast<IntrinsicInst>(BI)) {
1547 if (isa<DbgInfoIntrinsic>(E) || E->getIntrinsicID() == StartID)
1549 if (E->getIntrinsicID() == EndID &&
1550 haveSameOperands(I, *E, E->getNumArgOperands())) {
1551 IC.eraseInstFromFunction(*E);
1552 IC.eraseInstFromFunction(I);
1562 // Convert NVVM intrinsics to target-generic LLVM code where possible.
1563 static Instruction *SimplifyNVVMIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
1564 // Each NVVM intrinsic we can simplify can be replaced with one of:
1566 // * an LLVM intrinsic,
1567 // * an LLVM cast operation,
1568 // * an LLVM binary operation, or
1569 // * ad-hoc LLVM IR for the particular operation.
1571 // Some transformations are only valid when the module's
1572 // flush-denormals-to-zero (ftz) setting is true/false, whereas other
1573 // transformations are valid regardless of the module's ftz setting.
1574 enum FtzRequirementTy {
1575 FTZ_Any, // Any ftz setting is ok.
1576 FTZ_MustBeOn, // Transformation is valid only if ftz is on.
1577 FTZ_MustBeOff, // Transformation is valid only if ftz is off.
1579 // Classes of NVVM intrinsics that can't be replaced one-to-one with a
1580 // target-generic intrinsic, cast op, or binary op but that we can nonetheless
1586 // SimplifyAction is a poor-man's variant (plus an additional flag) that
1587 // represents how to replace an NVVM intrinsic with target-generic LLVM IR.
1588 struct SimplifyAction {
1589 // Invariant: At most one of these Optionals has a value.
1590 Optional<Intrinsic::ID> IID;
1591 Optional<Instruction::CastOps> CastOp;
1592 Optional<Instruction::BinaryOps> BinaryOp;
1593 Optional<SpecialCase> Special;
1595 FtzRequirementTy FtzRequirement = FTZ_Any;
1597 SimplifyAction() = default;
1599 SimplifyAction(Intrinsic::ID IID, FtzRequirementTy FtzReq)
1600 : IID(IID), FtzRequirement(FtzReq) {}
1602 // Cast operations don't have anything to do with FTZ, so we skip that
1604 SimplifyAction(Instruction::CastOps CastOp) : CastOp(CastOp) {}
1606 SimplifyAction(Instruction::BinaryOps BinaryOp, FtzRequirementTy FtzReq)
1607 : BinaryOp(BinaryOp), FtzRequirement(FtzReq) {}
1609 SimplifyAction(SpecialCase Special, FtzRequirementTy FtzReq)
1610 : Special(Special), FtzRequirement(FtzReq) {}
1613 // Try to generate a SimplifyAction describing how to replace our
1614 // IntrinsicInstr with target-generic LLVM IR.
1615 const SimplifyAction Action = [II]() -> SimplifyAction {
1616 switch (II->getIntrinsicID()) {
1617 // NVVM intrinsics that map directly to LLVM intrinsics.
1618 case Intrinsic::nvvm_ceil_d:
1619 return {Intrinsic::ceil, FTZ_Any};
1620 case Intrinsic::nvvm_ceil_f:
1621 return {Intrinsic::ceil, FTZ_MustBeOff};
1622 case Intrinsic::nvvm_ceil_ftz_f:
1623 return {Intrinsic::ceil, FTZ_MustBeOn};
1624 case Intrinsic::nvvm_fabs_d:
1625 return {Intrinsic::fabs, FTZ_Any};
1626 case Intrinsic::nvvm_fabs_f:
1627 return {Intrinsic::fabs, FTZ_MustBeOff};
1628 case Intrinsic::nvvm_fabs_ftz_f:
1629 return {Intrinsic::fabs, FTZ_MustBeOn};
1630 case Intrinsic::nvvm_floor_d:
1631 return {Intrinsic::floor, FTZ_Any};
1632 case Intrinsic::nvvm_floor_f:
1633 return {Intrinsic::floor, FTZ_MustBeOff};
1634 case Intrinsic::nvvm_floor_ftz_f:
1635 return {Intrinsic::floor, FTZ_MustBeOn};
1636 case Intrinsic::nvvm_fma_rn_d:
1637 return {Intrinsic::fma, FTZ_Any};
1638 case Intrinsic::nvvm_fma_rn_f:
1639 return {Intrinsic::fma, FTZ_MustBeOff};
1640 case Intrinsic::nvvm_fma_rn_ftz_f:
1641 return {Intrinsic::fma, FTZ_MustBeOn};
1642 case Intrinsic::nvvm_fmax_d:
1643 return {Intrinsic::maxnum, FTZ_Any};
1644 case Intrinsic::nvvm_fmax_f:
1645 return {Intrinsic::maxnum, FTZ_MustBeOff};
1646 case Intrinsic::nvvm_fmax_ftz_f:
1647 return {Intrinsic::maxnum, FTZ_MustBeOn};
1648 case Intrinsic::nvvm_fmin_d:
1649 return {Intrinsic::minnum, FTZ_Any};
1650 case Intrinsic::nvvm_fmin_f:
1651 return {Intrinsic::minnum, FTZ_MustBeOff};
1652 case Intrinsic::nvvm_fmin_ftz_f:
1653 return {Intrinsic::minnum, FTZ_MustBeOn};
1654 case Intrinsic::nvvm_round_d:
1655 return {Intrinsic::round, FTZ_Any};
1656 case Intrinsic::nvvm_round_f:
1657 return {Intrinsic::round, FTZ_MustBeOff};
1658 case Intrinsic::nvvm_round_ftz_f:
1659 return {Intrinsic::round, FTZ_MustBeOn};
1660 case Intrinsic::nvvm_sqrt_rn_d:
1661 return {Intrinsic::sqrt, FTZ_Any};
1662 case Intrinsic::nvvm_sqrt_f:
1663 // nvvm_sqrt_f is a special case. For most intrinsics, foo_ftz_f is the
1664 // ftz version, and foo_f is the non-ftz version. But nvvm_sqrt_f adopts
1665 // the ftz-ness of the surrounding code. sqrt_rn_f and sqrt_rn_ftz_f are
1666 // the versions with explicit ftz-ness.
1667 return {Intrinsic::sqrt, FTZ_Any};
1668 case Intrinsic::nvvm_sqrt_rn_f:
1669 return {Intrinsic::sqrt, FTZ_MustBeOff};
1670 case Intrinsic::nvvm_sqrt_rn_ftz_f:
1671 return {Intrinsic::sqrt, FTZ_MustBeOn};
1672 case Intrinsic::nvvm_trunc_d:
1673 return {Intrinsic::trunc, FTZ_Any};
1674 case Intrinsic::nvvm_trunc_f:
1675 return {Intrinsic::trunc, FTZ_MustBeOff};
1676 case Intrinsic::nvvm_trunc_ftz_f:
1677 return {Intrinsic::trunc, FTZ_MustBeOn};
1679 // NVVM intrinsics that map to LLVM cast operations.
1681 // Note that llvm's target-generic conversion operators correspond to the rz
1682 // (round to zero) versions of the nvvm conversion intrinsics, even though
1683 // most everything else here uses the rn (round to nearest even) nvvm ops.
1684 case Intrinsic::nvvm_d2i_rz:
1685 case Intrinsic::nvvm_f2i_rz:
1686 case Intrinsic::nvvm_d2ll_rz:
1687 case Intrinsic::nvvm_f2ll_rz:
1688 return {Instruction::FPToSI};
1689 case Intrinsic::nvvm_d2ui_rz:
1690 case Intrinsic::nvvm_f2ui_rz:
1691 case Intrinsic::nvvm_d2ull_rz:
1692 case Intrinsic::nvvm_f2ull_rz:
1693 return {Instruction::FPToUI};
1694 case Intrinsic::nvvm_i2d_rz:
1695 case Intrinsic::nvvm_i2f_rz:
1696 case Intrinsic::nvvm_ll2d_rz:
1697 case Intrinsic::nvvm_ll2f_rz:
1698 return {Instruction::SIToFP};
1699 case Intrinsic::nvvm_ui2d_rz:
1700 case Intrinsic::nvvm_ui2f_rz:
1701 case Intrinsic::nvvm_ull2d_rz:
1702 case Intrinsic::nvvm_ull2f_rz:
1703 return {Instruction::UIToFP};
1705 // NVVM intrinsics that map to LLVM binary ops.
1706 case Intrinsic::nvvm_add_rn_d:
1707 return {Instruction::FAdd, FTZ_Any};
1708 case Intrinsic::nvvm_add_rn_f:
1709 return {Instruction::FAdd, FTZ_MustBeOff};
1710 case Intrinsic::nvvm_add_rn_ftz_f:
1711 return {Instruction::FAdd, FTZ_MustBeOn};
1712 case Intrinsic::nvvm_mul_rn_d:
1713 return {Instruction::FMul, FTZ_Any};
1714 case Intrinsic::nvvm_mul_rn_f:
1715 return {Instruction::FMul, FTZ_MustBeOff};
1716 case Intrinsic::nvvm_mul_rn_ftz_f:
1717 return {Instruction::FMul, FTZ_MustBeOn};
1718 case Intrinsic::nvvm_div_rn_d:
1719 return {Instruction::FDiv, FTZ_Any};
1720 case Intrinsic::nvvm_div_rn_f:
1721 return {Instruction::FDiv, FTZ_MustBeOff};
1722 case Intrinsic::nvvm_div_rn_ftz_f:
1723 return {Instruction::FDiv, FTZ_MustBeOn};
1725 // The remainder of cases are NVVM intrinsics that map to LLVM idioms, but
1726 // need special handling.
1728 // We seem to be missing intrinsics for rcp.approx.{ftz.}f32, which is just
1729 // as well.
1730 case Intrinsic::nvvm_rcp_rn_d:
1731 return {SPC_Reciprocal, FTZ_Any};
1732 case Intrinsic::nvvm_rcp_rn_f:
1733 return {SPC_Reciprocal, FTZ_MustBeOff};
1734 case Intrinsic::nvvm_rcp_rn_ftz_f:
1735 return {SPC_Reciprocal, FTZ_MustBeOn};
1737 // We do not currently simplify intrinsics that give an approximate answer.
1738 // These include:
1739 //
1740 // - nvvm_cos_approx_{f,ftz_f}
1741 // - nvvm_ex2_approx_{d,f,ftz_f}
1742 // - nvvm_lg2_approx_{d,f,ftz_f}
1743 // - nvvm_sin_approx_{f,ftz_f}
1744 // - nvvm_sqrt_approx_{f,ftz_f}
1745 // - nvvm_rsqrt_approx_{d,f,ftz_f}
1746 // - nvvm_div_approx_{ftz_d,ftz_f,f}
1747 // - nvvm_rcp_approx_ftz_d
1749 // Ideally we'd encode them as e.g. "fast call @llvm.cos", where "fast"
1750 // means that fastmath is enabled in the intrinsic. Unfortunately only
1751 // binary operators (currently) have a fastmath bit in SelectionDAG, so this
1752 // information gets lost and we can't select on it.
1754 // TODO: div and rcp are lowered to a binary op, so in theory we could
1755 // lower them to "fast fdiv".
1756
1757 default:
1758 return {};
1759 }
1760 }();
1762 // If Action.FtzRequirementTy is not satisfied by the module's ftz state, we
1763 // can bail out now. (Notice that in the case that IID is not an NVVM
1764 // intrinsic, we don't have to look up any module metadata, as
1765 // FtzRequirementTy will be FTZ_Any.)
1766 if (Action.FtzRequirement != FTZ_Any) {
1767 bool FtzEnabled =
1768 II->getFunction()->getFnAttribute("nvptx-f32ftz").getValueAsString() ==
1769 "true";
1770
1771 if (FtzEnabled != (Action.FtzRequirement == FTZ_MustBeOn))
1772 return nullptr;
1773 }
1775 // Simplify to target-generic intrinsic.
1776 if (Action.IID) {
1777 SmallVector<Value *, 4> Args(II->arg_operands());
1778 // All the target-generic intrinsics currently of interest to us have one
1779 // type argument, equal to that of the nvvm intrinsic's argument.
1780 Type *Tys[] = {II->getArgOperand(0)->getType()};
1781 return CallInst::Create(
1782 Intrinsic::getDeclaration(II->getModule(), *Action.IID, Tys), Args);
1783 }
1785 // Simplify to target-generic binary op.
1786 if (Action.BinaryOp)
1787 return BinaryOperator::Create(*Action.BinaryOp, II->getArgOperand(0),
1788 II->getArgOperand(1), II->getName());
1790 // Simplify to target-generic cast op.
1791 if (Action.CastOp)
1792 return CastInst::Create(*Action.CastOp, II->getArgOperand(0), II->getType(),
1793 II->getName());
1795 // All that's left are the special cases.
1796 if (!Action.Special)
1797 return nullptr;
1799 switch (*Action.Special) {
1800 case SPC_Reciprocal:
1801 // Simplify reciprocal.
1802 return BinaryOperator::Create(
1803 Instruction::FDiv, ConstantFP::get(II->getArgOperand(0)->getType(), 1),
1804 II->getArgOperand(0), II->getName());
1805 }
1806 llvm_unreachable("All SpecialCase enumerators should be handled in switch.");
1807 }
1809 Instruction *InstCombiner::visitVAStartInst(VAStartInst &I) {
1810 removeTriviallyEmptyRange(I, Intrinsic::vastart, Intrinsic::vaend, *this);
1811 return nullptr;
1812 }
1814 Instruction *InstCombiner::visitVACopyInst(VACopyInst &I) {
1815 removeTriviallyEmptyRange(I, Intrinsic::vacopy, Intrinsic::vaend, *this);
1816 return nullptr;
1817 }
1819 static Instruction *canonicalizeConstantArg0ToArg1(CallInst &Call) {
1820 assert(Call.getNumArgOperands() > 1 && "Need at least 2 args to swap");
1821 Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
1822 if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
1823 Call.setArgOperand(0, Arg1);
1824 Call.setArgOperand(1, Arg0);
1825 return &Call;
1826 }
1827 return nullptr;
1828 }
1830 /// CallInst simplification. This mostly only handles folding of intrinsic
1831 /// instructions. For normal calls, it allows visitCallSite to do the heavy
1832 /// lifting.
1833 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
1834 if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
1835 return replaceInstUsesWith(CI, V);
1837 if (isFreeCall(&CI, &TLI))
1838 return visitFree(CI);
1840 // If the caller function is nounwind, mark the call as nounwind, even if the
1841 // callee isn't.
1842 if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
1843 CI.setDoesNotThrow();
1844 return &CI;
1845 }
1847 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1848 if (!II) return visitCallSite(&CI);
1850 // Intrinsics cannot occur in an invoke, so handle them here instead of in
1851 // visitCallSite.
1852 if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
1853 bool Changed = false;
1855 // memmove/cpy/set of zero bytes is a noop.
1856 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
1857 if (NumBytes->isNullValue())
1858 return eraseInstFromFunction(CI);
1860 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
1861 if (CI->getZExtValue() == 1) {
1862 // Replace the instruction with just byte operations. We would
1863 // transform other cases to loads/stores, but we don't know if
1864 // alignment is sufficient.
1865 }
1866 }
1868 // No other transformations apply to volatile transfers.
1869 if (auto *M = dyn_cast<MemIntrinsic>(MI))
1870 if (M->isVolatile())
1871 return nullptr;
1873 // If we have a memmove and the source operation is a constant global,
1874 // then the source and dest pointers can't alias, so we can change this
1875 // into a call to memcpy.
1876 if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
1877 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1878 if (GVSrc->isConstant()) {
1879 Module *M = CI.getModule();
1880 Intrinsic::ID MemCpyID =
1881 isa<AtomicMemMoveInst>(MMI)
1882 ? Intrinsic::memcpy_element_unordered_atomic
1883 : Intrinsic::memcpy;
1884 Type *Tys[3] = { CI.getArgOperand(0)->getType(),
1885 CI.getArgOperand(1)->getType(),
1886 CI.getArgOperand(2)->getType() };
1887 CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
1888 Changed = true;
1889 }
1890 }
1892 if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1893 // memmove(x,x,size) -> noop.
1894 if (MTI->getSource() == MTI->getDest())
1895 return eraseInstFromFunction(CI);
1896 }
1898 // If we can determine a pointer alignment that is bigger than currently
1899 // set, update the alignment.
1900 if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1901 if (Instruction *I = SimplifyAnyMemTransfer(MTI))
1902 return I;
1903 } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
1904 if (Instruction *I = SimplifyAnyMemSet(MSI))
1905 return I;
1906 }
1908 if (Changed) return II;
1909 }
1911 if (Instruction *I = SimplifyNVVMIntrinsic(II, *this))
1912 return I;
1914 auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width,
1915 unsigned DemandedWidth) {
1916 APInt UndefElts(Width, 0);
1917 APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
1918 return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
1919 };
1921 switch (II->getIntrinsicID()) {
1922 default: break;
1923 case Intrinsic::objectsize:
1924 if (ConstantInt *N =
1925 lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
1926 return replaceInstUsesWith(CI, N);
1927 break;
1928 case Intrinsic::bswap: {
1929 Value *IIOperand = II->getArgOperand(0);
1930 Value *X = nullptr;
1932 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
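// For example, with x : i32 truncated to i16, C is 16, so the fold produces
// trunc(lshr(x, 16)) -- the high half of x, which is exactly what swapping,
// truncating, and swapping again would have returned.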
1933 if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
1934 unsigned C = X->getType()->getPrimitiveSizeInBits() -
1935 IIOperand->getType()->getPrimitiveSizeInBits();
1936 Value *CV = ConstantInt::get(X->getType(), C);
1937 Value *V = Builder.CreateLShr(X, CV);
1938 return new TruncInst(V, IIOperand->getType());
1939 }
1940 break;
1941 }
1942 case Intrinsic::masked_load:
1943 if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II, Builder))
1944 return replaceInstUsesWith(CI, SimplifiedMaskedOp);
1945 break;
1946 case Intrinsic::masked_store:
1947 return simplifyMaskedStore(*II, *this);
1948 case Intrinsic::masked_gather:
1949 return simplifyMaskedGather(*II, *this);
1950 case Intrinsic::masked_scatter:
1951 return simplifyMaskedScatter(*II, *this);
1952 case Intrinsic::launder_invariant_group:
1953 case Intrinsic::strip_invariant_group:
1954 if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
1955 return replaceInstUsesWith(*II, SkippedBarrier);
1957 case Intrinsic::powi:
1958 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
1959 // 0 and 1 are handled in instsimplify
1961 // powi(x, -1) -> 1/x
1962 if (Power->isMinusOne())
1963 return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
1964 II->getArgOperand(0));
1965 // powi(x, 2) -> x*x
1966 if (Power->equalsInt(2))
1967 return BinaryOperator::CreateFMul(II->getArgOperand(0),
1968 II->getArgOperand(0));
1969 }
1970 break;
1972 case Intrinsic::cttz:
1973 case Intrinsic::ctlz:
1974 if (auto *I = foldCttzCtlz(*II, *this))
1975 return I;
1976 break;
1978 case Intrinsic::ctpop:
1979 if (auto *I = foldCtpop(*II, *this))
1980 return I;
1981 break;
1983 case Intrinsic::fshl:
1984 case Intrinsic::fshr: {
1985 const APInt *SA;
1986 if (match(II->getArgOperand(2), m_APInt(SA))) {
1987 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
1988 unsigned BitWidth = SA->getBitWidth();
1989 uint64_t ShiftAmt = SA->urem(BitWidth);
1990 assert(ShiftAmt != 0 && "SimplifyCall should have handled zero shift");
1991 // Normalize to funnel shift left.
1992 if (II->getIntrinsicID() == Intrinsic::fshr)
1993 ShiftAmt = BitWidth - ShiftAmt;
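// E.g. for i8 operands, fshr(X, Y, 3) selects the same bits as fshl(X, Y, 5),
// so the folds below only need to reason about the left-funnel form.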
1995 // fshl(X, 0, C) -> shl X, C
1996 // fshl(X, undef, C) -> shl X, C
1997 if (match(Op1, m_Zero()) || match(Op1, m_Undef()))
1998 return BinaryOperator::CreateShl(
1999 Op0, ConstantInt::get(II->getType(), ShiftAmt));
2001 // fshl(0, X, C) -> lshr X, (BW-C)
2002 // fshl(undef, X, C) -> lshr X, (BW-C)
2003 if (match(Op0, m_Zero()) || match(Op0, m_Undef()))
2004 return BinaryOperator::CreateLShr(
2005 Op1, ConstantInt::get(II->getType(), BitWidth - ShiftAmt));
2006 }
2008 // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
2009 // so only the low bits of the shift amount are demanded if the bitwidth is
2010 // a power-of-2.
2011 unsigned BitWidth = II->getType()->getScalarSizeInBits();
2012 if (!isPowerOf2_32(BitWidth))
2013 break;
2014 APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
2015 KnownBits Op2Known(BitWidth);
2016 if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
2017 return &CI;
2018 break;
2019 }
2020 case Intrinsic::uadd_with_overflow:
2021 case Intrinsic::sadd_with_overflow:
2022 case Intrinsic::umul_with_overflow:
2023 case Intrinsic::smul_with_overflow:
2024 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2025 return I;
2026 LLVM_FALLTHROUGH;
2028 case Intrinsic::usub_with_overflow:
2029 case Intrinsic::ssub_with_overflow: {
2030 OverflowCheckFlavor OCF =
2031 IntrinsicIDToOverflowCheckFlavor(II->getIntrinsicID());
2032 assert(OCF != OCF_INVALID && "unexpected!");
2034 Value *OperationResult = nullptr;
2035 Constant *OverflowResult = nullptr;
2036 if (OptimizeOverflowCheck(OCF, II->getArgOperand(0), II->getArgOperand(1),
2037 *II, OperationResult, OverflowResult))
2038 return CreateOverflowTuple(II, OperationResult, OverflowResult);
2039 break;
2040 }
2043 case Intrinsic::uadd_sat:
2044 case Intrinsic::sadd_sat:
2045 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2046 return I;
2047 LLVM_FALLTHROUGH;
2048 case Intrinsic::usub_sat:
2049 case Intrinsic::ssub_sat: {
2050 Value *Arg0 = II->getArgOperand(0);
2051 Value *Arg1 = II->getArgOperand(1);
2052 Intrinsic::ID IID = II->getIntrinsicID();
2054 // Make use of known overflow information.
2055 OverflowResult OR;
2056 switch (IID) {
2057 default:
2058 llvm_unreachable("Unexpected intrinsic!");
2059 case Intrinsic::uadd_sat:
2060 OR = computeOverflowForUnsignedAdd(Arg0, Arg1, II);
2061 if (OR == OverflowResult::NeverOverflows)
2062 return BinaryOperator::CreateNUWAdd(Arg0, Arg1);
2063 if (OR == OverflowResult::AlwaysOverflows)
2064 return replaceInstUsesWith(*II,
2065 ConstantInt::getAllOnesValue(II->getType()));
2066 break;
2067 case Intrinsic::usub_sat:
2068 OR = computeOverflowForUnsignedSub(Arg0, Arg1, II);
2069 if (OR == OverflowResult::NeverOverflows)
2070 return BinaryOperator::CreateNUWSub(Arg0, Arg1);
2071 if (OR == OverflowResult::AlwaysOverflows)
2072 return replaceInstUsesWith(*II,
2073 ConstantInt::getNullValue(II->getType()));
2074 break;
2075 case Intrinsic::sadd_sat:
2076 if (willNotOverflowSignedAdd(Arg0, Arg1, *II))
2077 return BinaryOperator::CreateNSWAdd(Arg0, Arg1);
2078 break;
2079 case Intrinsic::ssub_sat:
2080 if (willNotOverflowSignedSub(Arg0, Arg1, *II))
2081 return BinaryOperator::CreateNSWSub(Arg0, Arg1);
2082 break;
2083 }
2085 // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
2086 Constant *C;
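// C == signed minimum is excluded because its negation is not representable,
// so -C would not denote the intended value.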
2087 if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
2088 C->isNotMinSignedValue()) {
2089 Value *NegVal = ConstantExpr::getNeg(C);
2090 return replaceInstUsesWith(
2091 *II, Builder.CreateBinaryIntrinsic(
2092 Intrinsic::sadd_sat, Arg0, NegVal));
2093 }
2095 // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
2096 // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
2097 // if Val and Val2 have the same sign
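// E.g. uadd.sat(uadd.sat(X, 10), 20) becomes uadd.sat(X, 30).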
2098 if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
2099 Value *X;
2100 const APInt *Val, *Val2;
2101 APInt NewVal;
2102 bool IsUnsigned =
2103 IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
2104 if (Other->getIntrinsicID() == II->getIntrinsicID() &&
2105 match(Arg1, m_APInt(Val)) &&
2106 match(Other->getArgOperand(0), m_Value(X)) &&
2107 match(Other->getArgOperand(1), m_APInt(Val2))) {
2108 if (IsUnsigned)
2109 NewVal = Val->uadd_sat(*Val2);
2110 else if (Val->isNonNegative() == Val2->isNonNegative()) {
2111 bool Overflow;
2112 NewVal = Val->sadd_ov(*Val2, Overflow);
2113 if (Overflow) {
2114 // Both adds together may add more than SignedMaxValue
2115 // without saturating the final result.
2116 break;
2117 }
2118 } else {
2119 // Cannot fold saturated addition with different signs.
2120 break;
2121 }
2123 return replaceInstUsesWith(
2124 *II, Builder.CreateBinaryIntrinsic(
2125 IID, X, ConstantInt::get(II->getType(), NewVal)));
2126 }
2127 }
2128 break;
2129 }
2131 case Intrinsic::minnum:
2132 case Intrinsic::maxnum:
2133 case Intrinsic::minimum:
2134 case Intrinsic::maximum: {
2135 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2136 return I;
2137 Value *Arg0 = II->getArgOperand(0);
2138 Value *Arg1 = II->getArgOperand(1);
2139 Intrinsic::ID IID = II->getIntrinsicID();
2140 Value *X, *Y;
2141 if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
2142 (Arg0->hasOneUse() || Arg1->hasOneUse())) {
2143 // If both operands are negated, invert the call and negate the result:
2144 // min(-X, -Y) --> -(max(X, Y))
2145 // max(-X, -Y) --> -(min(X, Y))
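// Requiring one fneg to have a single use guarantees at least one of the
// original fnegs becomes dead, so the rewrite never adds net instructions.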
2146 Intrinsic::ID NewIID;
2147 switch (IID) {
2148 case Intrinsic::maxnum:
2149 NewIID = Intrinsic::minnum;
2150 break;
2151 case Intrinsic::minnum:
2152 NewIID = Intrinsic::maxnum;
2153 break;
2154 case Intrinsic::maximum:
2155 NewIID = Intrinsic::minimum;
2156 break;
2157 case Intrinsic::minimum:
2158 NewIID = Intrinsic::maximum;
2159 break;
2160 default:
2161 llvm_unreachable("unexpected intrinsic ID");
2162 }
2163 Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
2164 Instruction *FNeg = BinaryOperator::CreateFNeg(NewCall);
2165 FNeg->copyIRFlags(II);
2166 return FNeg;
2167 }
2169 // m(m(X, C2), C1) -> m(X, C)
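// E.g. maxnum(maxnum(X, 2.0), 4.0) becomes maxnum(X, 4.0): the inner and
// outer constants fold with the same min/max operation.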
2170 const APFloat *C1, *C2;
2171 if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
2172 if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
2173 ((match(M->getArgOperand(0), m_Value(X)) &&
2174 match(M->getArgOperand(1), m_APFloat(C2))) ||
2175 (match(M->getArgOperand(1), m_Value(X)) &&
2176 match(M->getArgOperand(0), m_APFloat(C2))))) {
2177 APFloat Res(0.0);
2178 switch (IID) {
2179 case Intrinsic::maxnum:
2180 Res = maxnum(*C1, *C2);
2181 break;
2182 case Intrinsic::minnum:
2183 Res = minnum(*C1, *C2);
2184 break;
2185 case Intrinsic::maximum:
2186 Res = maximum(*C1, *C2);
2187 break;
2188 case Intrinsic::minimum:
2189 Res = minimum(*C1, *C2);
2190 break;
2191 default:
2192 llvm_unreachable("unexpected intrinsic ID");
2193 }
2194 Instruction *NewCall = Builder.CreateBinaryIntrinsic(
2195 IID, X, ConstantFP::get(Arg0->getType(), Res));
2196 NewCall->copyIRFlags(II);
2197 return replaceInstUsesWith(*II, NewCall);
2198 }
2199 }
2200
2201 break;
2202 }
2203 case Intrinsic::fmuladd: {
2204 // Canonicalize fast fmuladd to the separate fmul + fadd.
2205 if (II->isFast()) {
2206 BuilderTy::FastMathFlagGuard Guard(Builder);
2207 Builder.setFastMathFlags(II->getFastMathFlags());
2208 Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
2209 II->getArgOperand(1));
2210 Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
2211 Add->takeName(II);
2212 return replaceInstUsesWith(*II, Add);
2213 }
2214
2215 LLVM_FALLTHROUGH;
2216 }
2217 case Intrinsic::fma: {
2218 if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2219 return I;
2221 // fma fneg(x), fneg(y), z -> fma x, y, z
2222 Value *Src0 = II->getArgOperand(0);
2223 Value *Src1 = II->getArgOperand(1);
2224 Value *X, *Y;
2225 if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
2226 II->setArgOperand(0, X);
2227 II->setArgOperand(1, Y);
2228 return II;
2229 }
2231 // fma fabs(x), fabs(x), z -> fma x, x, z
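// fabs(x) * fabs(x) computes the same value as x * x for every input, so the
// fabs calls add nothing when both multiplicands are the same value.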
2232 if (match(Src0, m_FAbs(m_Value(X))) &&
2233 match(Src1, m_FAbs(m_Specific(X)))) {
2234 II->setArgOperand(0, X);
2235 II->setArgOperand(1, X);
2236 return II;
2237 }
2239 // fma x, 1, z -> fadd x, z
2240 if (match(Src1, m_FPOne())) {
2241 auto *FAdd = BinaryOperator::CreateFAdd(Src0, II->getArgOperand(2));
2242 FAdd->copyFastMathFlags(II);
2243 return FAdd;
2244 }
2245
2246 break;
2247 }
2248 case Intrinsic::fabs: {
2249 Value *Cond;
2250 Constant *LHS, *RHS;
2251 if (match(II->getArgOperand(0),
2252 m_Select(m_Value(Cond), m_Constant(LHS), m_Constant(RHS)))) {
2253 CallInst *Call0 = Builder.CreateCall(II->getCalledFunction(), {LHS});
2254 CallInst *Call1 = Builder.CreateCall(II->getCalledFunction(), {RHS});
2255 return SelectInst::Create(Cond, Call0, Call1);
2256 }
2257
2258 LLVM_FALLTHROUGH;
2259 }
2260 case Intrinsic::ceil:
2261 case Intrinsic::floor:
2262 case Intrinsic::round:
2263 case Intrinsic::nearbyint:
2264 case Intrinsic::rint:
2265 case Intrinsic::trunc: {
2266 Value *ExtSrc;
2267 if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
2268 // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
2269 Value *NarrowII =
2270 Builder.CreateUnaryIntrinsic(II->getIntrinsicID(), ExtSrc, II);
2271 return new FPExtInst(NarrowII, II->getType());
2272 }
2273 break;
2274 }
2275 case Intrinsic::cos:
2276 case Intrinsic::amdgcn_cos: {
2277 Value *X;
2278 Value *Src = II->getArgOperand(0);
2279 if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
2280 // cos(-x) -> cos(x)
2281 // cos(fabs(x)) -> cos(x)
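// Cosine is an even function, so neither negating the argument nor taking its
// absolute value can change the result.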
2282 II->setArgOperand(0, X);
2283 return II;
2284 }
2285 break;
2286 }
2287 case Intrinsic::sin: {
2288 Value *X;
2289 if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
2290 // sin(-x) --> -sin(x)
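// Sine is an odd function, so the negation can be hoisted out of the call.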
2291 Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
2292 Instruction *FNeg = BinaryOperator::CreateFNeg(NewSin);
2293 FNeg->copyFastMathFlags(II);
2294 return FNeg;
2295 }
2296 break;
2297 }
2298 case Intrinsic::ppc_altivec_lvx:
2299 case Intrinsic::ppc_altivec_lvxl:
2300 // Turn PPC lvx -> load if the pointer is known aligned.
2301 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
2302 &DT) >= 16) {
2303 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2304 PointerType::getUnqual(II->getType()));
2305 return new LoadInst(Ptr);
2308 case Intrinsic::ppc_vsx_lxvw4x:
2309 case Intrinsic::ppc_vsx_lxvd2x: {
2310 // Turn PPC VSX loads into normal loads.
2311 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2312 PointerType::getUnqual(II->getType()));
2313 return new LoadInst(Ptr, Twine(""), false, 1);
2315 case Intrinsic::ppc_altivec_stvx:
2316 case Intrinsic::ppc_altivec_stvxl:
2317 // Turn stvx -> store if the pointer is known aligned.
2318 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
2319 &DT) >= 16) {
2320 Type *OpPtrTy =
2321 PointerType::getUnqual(II->getArgOperand(0)->getType());
2322 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2323 return new StoreInst(II->getArgOperand(0), Ptr);
2326 case Intrinsic::ppc_vsx_stxvw4x:
2327 case Intrinsic::ppc_vsx_stxvd2x: {
2328 // Turn PPC VSX stores into normal stores.
2329 Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
2330 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2331 return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
2333 case Intrinsic::ppc_qpx_qvlfs:
2334 // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
2335 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
2336 &DT) >= 16) {
2337 Type *VTy = VectorType::get(Builder.getFloatTy(),
2338 II->getType()->getVectorNumElements());
2339 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2340 PointerType::getUnqual(VTy));
2341 Value *Load = Builder.CreateLoad(Ptr);
2342 return new FPExtInst(Load, II->getType());
2345 case Intrinsic::ppc_qpx_qvlfd:
2346 // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
2347 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
2348 &DT) >= 32) {
2349 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2350 PointerType::getUnqual(II->getType()));
2351 return new LoadInst(Ptr);
2354 case Intrinsic::ppc_qpx_qvstfs:
2355 // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
2356 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
2357 &DT) >= 16) {
2358 Type *VTy = VectorType::get(Builder.getFloatTy(),
2359 II->getArgOperand(0)->getType()->getVectorNumElements());
2360 Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
2361 Type *OpPtrTy = PointerType::getUnqual(VTy);
2362 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2363 return new StoreInst(TOp, Ptr);
2366 case Intrinsic::ppc_qpx_qvstfd:
2367 // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
2368 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
2369 &DT) >= 32) {
2370 Type *OpPtrTy =
2371 PointerType::getUnqual(II->getArgOperand(0)->getType());
2372 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2373 return new StoreInst(II->getArgOperand(0), Ptr);
2377 case Intrinsic::x86_bmi_bextr_32:
2378 case Intrinsic::x86_bmi_bextr_64:
2379 case Intrinsic::x86_tbm_bextri_u32:
2380 case Intrinsic::x86_tbm_bextri_u64:
2381 // If the RHS is a constant we can try some simplifications.
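// The BEXTR/BEXTRI control operand packs the starting bit position in bits
// [7:0] and the extraction length in bits [15:8]; the masking below decodes it.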
2382 if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2383 uint64_t Shift = C->getZExtValue();
2384 uint64_t Length = (Shift >> 8) & 0xff;
2385 Shift &= 0xff;
2386 unsigned BitWidth = II->getType()->getIntegerBitWidth();
2387 // If the length is 0 or the shift is out of range, replace with zero.
2388 if (Length == 0 || Shift >= BitWidth)
2389 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2390 // If the LHS is also a constant, we can completely constant fold this.
2391 if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2392 uint64_t Result = InC->getZExtValue() >> Shift;
2393 if (Length > BitWidth)
2394 Length = BitWidth;
2395 Result &= maskTrailingOnes<uint64_t>(Length);
2396 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2398 // TODO should we turn this into 'and' if shift is 0? Or 'shl' if we
2399 // are only masking bits that a shift already cleared?
2403 case Intrinsic::x86_bmi_bzhi_32:
2404 case Intrinsic::x86_bmi_bzhi_64:
2405 // If the RHS is a constant we can try some simplifications.
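// BZHI keeps bits [Index-1:0] of the first operand and zeroes everything
// above them; the index comes from the low 8 bits of the second operand.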
2406 if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2407 uint64_t Index = C->getZExtValue() & 0xff;
2408 unsigned BitWidth = II->getType()->getIntegerBitWidth();
2409 if (Index >= BitWidth)
2410 return replaceInstUsesWith(CI, II->getArgOperand(0));
2411 if (Index == 0)
2412 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2413 // If the LHS is also a constant, we can completely constant fold this.
2414 if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2415 uint64_t Result = InC->getZExtValue();
2416 Result &= maskTrailingOnes<uint64_t>(Index);
2417 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2419 // TODO should we convert this to an AND if the RHS is constant?
2423 case Intrinsic::x86_vcvtph2ps_128:
2424 case Intrinsic::x86_vcvtph2ps_256: {
2425 auto Arg = II->getArgOperand(0);
2426 auto ArgType = cast<VectorType>(Arg->getType());
2427 auto RetType = cast<VectorType>(II->getType());
2428 unsigned ArgWidth = ArgType->getNumElements();
2429 unsigned RetWidth = RetType->getNumElements();
2430 assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
2431 assert(ArgType->isIntOrIntVectorTy() &&
2432 ArgType->getScalarSizeInBits() == 16 &&
2433 "CVTPH2PS input type should be 16-bit integer vector");
2434 assert(RetType->getScalarType()->isFloatTy() &&
2435 "CVTPH2PS output type should be 32-bit float vector");
2437 // Constant folding: Convert to generic half to single conversion.
2438 if (isa<ConstantAggregateZero>(Arg))
2439 return replaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
2441 if (isa<ConstantDataVector>(Arg)) {
2442 auto VectorHalfAsShorts = Arg;
2443 if (RetWidth < ArgWidth) {
2444 SmallVector<uint32_t, 8> SubVecMask;
2445 for (unsigned i = 0; i != RetWidth; ++i)
2446 SubVecMask.push_back((int)i);
2447 VectorHalfAsShorts = Builder.CreateShuffleVector(
2448 Arg, UndefValue::get(ArgType), SubVecMask);
2451 auto VectorHalfType =
2452 VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
2453 auto VectorHalfs =
2454 Builder.CreateBitCast(VectorHalfAsShorts, VectorHalfType);
2455 auto VectorFloats = Builder.CreateFPExt(VectorHalfs, RetType);
2456 return replaceInstUsesWith(*II, VectorFloats);
2459 // We only use the lowest lanes of the argument.
2460 if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
2461 II->setArgOperand(0, V);
2467 case Intrinsic::x86_sse_cvtss2si:
2468 case Intrinsic::x86_sse_cvtss2si64:
2469 case Intrinsic::x86_sse_cvttss2si:
2470 case Intrinsic::x86_sse_cvttss2si64:
2471 case Intrinsic::x86_sse2_cvtsd2si:
2472 case Intrinsic::x86_sse2_cvtsd2si64:
2473 case Intrinsic::x86_sse2_cvttsd2si:
2474 case Intrinsic::x86_sse2_cvttsd2si64:
2475 case Intrinsic::x86_avx512_vcvtss2si32:
2476 case Intrinsic::x86_avx512_vcvtss2si64:
2477 case Intrinsic::x86_avx512_vcvtss2usi32:
2478 case Intrinsic::x86_avx512_vcvtss2usi64:
2479 case Intrinsic::x86_avx512_vcvtsd2si32:
2480 case Intrinsic::x86_avx512_vcvtsd2si64:
2481 case Intrinsic::x86_avx512_vcvtsd2usi32:
2482 case Intrinsic::x86_avx512_vcvtsd2usi64:
2483 case Intrinsic::x86_avx512_cvttss2si:
2484 case Intrinsic::x86_avx512_cvttss2si64:
2485 case Intrinsic::x86_avx512_cvttss2usi:
2486 case Intrinsic::x86_avx512_cvttss2usi64:
2487 case Intrinsic::x86_avx512_cvttsd2si:
2488 case Intrinsic::x86_avx512_cvttsd2si64:
2489 case Intrinsic::x86_avx512_cvttsd2usi:
2490 case Intrinsic::x86_avx512_cvttsd2usi64: {
2491 // These intrinsics only demand the 0th element of their input vectors. If
2492 // we can simplify the input based on that, do so now.
2493 Value *Arg = II->getArgOperand(0);
2494 unsigned VWidth = Arg->getType()->getVectorNumElements();
2495 if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
2496 II->setArgOperand(0, V);
2502 case Intrinsic::x86_sse41_round_ps:
2503 case Intrinsic::x86_sse41_round_pd:
2504 case Intrinsic::x86_avx_round_ps_256:
2505 case Intrinsic::x86_avx_round_pd_256:
2506 case Intrinsic::x86_avx512_mask_rndscale_ps_128:
2507 case Intrinsic::x86_avx512_mask_rndscale_ps_256:
2508 case Intrinsic::x86_avx512_mask_rndscale_ps_512:
2509 case Intrinsic::x86_avx512_mask_rndscale_pd_128:
2510 case Intrinsic::x86_avx512_mask_rndscale_pd_256:
2511 case Intrinsic::x86_avx512_mask_rndscale_pd_512:
2512 case Intrinsic::x86_avx512_mask_rndscale_ss:
2513 case Intrinsic::x86_avx512_mask_rndscale_sd:
2514 if (Value *V = simplifyX86round(*II, Builder))
2515 return replaceInstUsesWith(*II, V);
2518 case Intrinsic::x86_mmx_pmovmskb:
2519 case Intrinsic::x86_sse_movmsk_ps:
2520 case Intrinsic::x86_sse2_movmsk_pd:
2521 case Intrinsic::x86_sse2_pmovmskb_128:
2522 case Intrinsic::x86_avx_movmsk_pd_256:
2523 case Intrinsic::x86_avx_movmsk_ps_256:
2524 case Intrinsic::x86_avx2_pmovmskb:
2525 if (Value *V = simplifyX86movmsk(*II, Builder))
2526 return replaceInstUsesWith(*II, V);
2529 case Intrinsic::x86_sse_comieq_ss:
2530 case Intrinsic::x86_sse_comige_ss:
2531 case Intrinsic::x86_sse_comigt_ss:
2532 case Intrinsic::x86_sse_comile_ss:
2533 case Intrinsic::x86_sse_comilt_ss:
2534 case Intrinsic::x86_sse_comineq_ss:
2535 case Intrinsic::x86_sse_ucomieq_ss:
2536 case Intrinsic::x86_sse_ucomige_ss:
2537 case Intrinsic::x86_sse_ucomigt_ss:
2538 case Intrinsic::x86_sse_ucomile_ss:
2539 case Intrinsic::x86_sse_ucomilt_ss:
2540 case Intrinsic::x86_sse_ucomineq_ss:
2541 case Intrinsic::x86_sse2_comieq_sd:
2542 case Intrinsic::x86_sse2_comige_sd:
2543 case Intrinsic::x86_sse2_comigt_sd:
2544 case Intrinsic::x86_sse2_comile_sd:
2545 case Intrinsic::x86_sse2_comilt_sd:
2546 case Intrinsic::x86_sse2_comineq_sd:
2547 case Intrinsic::x86_sse2_ucomieq_sd:
2548 case Intrinsic::x86_sse2_ucomige_sd:
2549 case Intrinsic::x86_sse2_ucomigt_sd:
2550 case Intrinsic::x86_sse2_ucomile_sd:
2551 case Intrinsic::x86_sse2_ucomilt_sd:
2552 case Intrinsic::x86_sse2_ucomineq_sd:
2553 case Intrinsic::x86_avx512_vcomi_ss:
2554 case Intrinsic::x86_avx512_vcomi_sd:
2555 case Intrinsic::x86_avx512_mask_cmp_ss:
2556 case Intrinsic::x86_avx512_mask_cmp_sd: {
2557 // These intrinsics only demand the 0th element of their input vectors. If
2558 // we can simplify the input based on that, do so now.
2559 bool MadeChange = false;
2560 Value *Arg0 = II->getArgOperand(0);
2561 Value *Arg1 = II->getArgOperand(1);
2562 unsigned VWidth = Arg0->getType()->getVectorNumElements();
2563 if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
2564 II->setArgOperand(0, V);
2567 if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
2568 II->setArgOperand(1, V);
2575 case Intrinsic::x86_avx512_cmp_pd_128:
2576 case Intrinsic::x86_avx512_cmp_pd_256:
2577 case Intrinsic::x86_avx512_cmp_pd_512:
2578 case Intrinsic::x86_avx512_cmp_ps_128:
2579 case Intrinsic::x86_avx512_cmp_ps_256:
2580 case Intrinsic::x86_avx512_cmp_ps_512: {
2581 // Folding cmp(sub(a,b),0) -> cmp(a,b) and cmp(0,sub(a,b)) -> cmp(b,a)
2582 Value *Arg0 = II->getArgOperand(0);
2583 Value *Arg1 = II->getArgOperand(1);
2584 bool Arg0IsZero = match(Arg0, m_PosZeroFP());
2585 if (Arg0IsZero)
2586 std::swap(Arg0, Arg1);
2587 Value *A, *B;
2588 // This fold requires only the NINF(not +/- inf) since inf minus
2589 // inf is nan.
2590 // NSZ(No Signed Zeros) is not needed because zeros of any sign are
2591 // equal for both compares.
2592 // NNAN is not needed because nans compare the same for both compares.
2593 // The compare intrinsic uses the above assumptions and therefore
2594 // doesn't require additional flags.
2595 if ((match(Arg0, m_OneUse(m_FSub(m_Value(A), m_Value(B)))) &&
2596 match(Arg1, m_PosZeroFP()) && isa<Instruction>(Arg0) &&
2597 cast<Instruction>(Arg0)->getFastMathFlags().noInfs())) {
2598 if (Arg0IsZero)
2599 std::swap(A, B);
2600 II->setArgOperand(0, A);
2601 II->setArgOperand(1, B);
2607 case Intrinsic::x86_avx512_add_ps_512:
2608 case Intrinsic::x86_avx512_div_ps_512:
2609 case Intrinsic::x86_avx512_mul_ps_512:
2610 case Intrinsic::x86_avx512_sub_ps_512:
2611 case Intrinsic::x86_avx512_add_pd_512:
2612 case Intrinsic::x86_avx512_div_pd_512:
2613 case Intrinsic::x86_avx512_mul_pd_512:
2614 case Intrinsic::x86_avx512_sub_pd_512:
2615 // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2616 // fp operations.
2617 if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2618 if (R->getValue() == 4) {
2619 Value *Arg0 = II->getArgOperand(0);
2620 Value *Arg1 = II->getArgOperand(1);
2622 Value *V;
2623 switch (II->getIntrinsicID()) {
2624 default: llvm_unreachable("Case stmts out of sync!");
2625 case Intrinsic::x86_avx512_add_ps_512:
2626 case Intrinsic::x86_avx512_add_pd_512:
2627 V = Builder.CreateFAdd(Arg0, Arg1);
2628 break;
2629 case Intrinsic::x86_avx512_sub_ps_512:
2630 case Intrinsic::x86_avx512_sub_pd_512:
2631 V = Builder.CreateFSub(Arg0, Arg1);
2632 break;
2633 case Intrinsic::x86_avx512_mul_ps_512:
2634 case Intrinsic::x86_avx512_mul_pd_512:
2635 V = Builder.CreateFMul(Arg0, Arg1);
2636 break;
2637 case Intrinsic::x86_avx512_div_ps_512:
2638 case Intrinsic::x86_avx512_div_pd_512:
2639 V = Builder.CreateFDiv(Arg0, Arg1);
2640 break;
2641 }
2642
2643 return replaceInstUsesWith(*II, V);
2644 }
2645 }
2646 break;
2648 case Intrinsic::x86_avx512_mask_add_ss_round:
2649 case Intrinsic::x86_avx512_mask_div_ss_round:
2650 case Intrinsic::x86_avx512_mask_mul_ss_round:
2651 case Intrinsic::x86_avx512_mask_sub_ss_round:
2652 case Intrinsic::x86_avx512_mask_add_sd_round:
2653 case Intrinsic::x86_avx512_mask_div_sd_round:
2654 case Intrinsic::x86_avx512_mask_mul_sd_round:
2655 case Intrinsic::x86_avx512_mask_sub_sd_round:
2656 // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2658 if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(4))) {
2659 if (R->getValue() == 4) {
2660 // Extract the element as scalars.
2661 Value *Arg0 = II->getArgOperand(0);
2662 Value *Arg1 = II->getArgOperand(1);
2663 Value *LHS = Builder.CreateExtractElement(Arg0, (uint64_t)0);
2664 Value *RHS = Builder.CreateExtractElement(Arg1, (uint64_t)0);
2667 switch (II->getIntrinsicID()) {
2668 default: llvm_unreachable("Case stmts out of sync!");
2669 case Intrinsic::x86_avx512_mask_add_ss_round:
2670 case Intrinsic::x86_avx512_mask_add_sd_round:
2671 V = Builder.CreateFAdd(LHS, RHS);
2673 case Intrinsic::x86_avx512_mask_sub_ss_round:
2674 case Intrinsic::x86_avx512_mask_sub_sd_round:
2675 V = Builder.CreateFSub(LHS, RHS);
2677 case Intrinsic::x86_avx512_mask_mul_ss_round:
2678 case Intrinsic::x86_avx512_mask_mul_sd_round:
2679 V = Builder.CreateFMul(LHS, RHS);
2681 case Intrinsic::x86_avx512_mask_div_ss_round:
2682 case Intrinsic::x86_avx512_mask_div_sd_round:
2683 V = Builder.CreateFDiv(LHS, RHS);
2687 // Handle the masking aspect of the intrinsic.
2688 Value *Mask = II->getArgOperand(3);
2689 auto *C = dyn_cast<ConstantInt>(Mask);
2690 // We don't need a select if we know the mask bit is a 1.
2691 if (!C || !C->getValue()[0]) {
2692 // Cast the mask to an i1 vector and then extract the lowest element.
2693 auto *MaskTy = VectorType::get(Builder.getInt1Ty(),
2694 cast<IntegerType>(Mask->getType())->getBitWidth());
2695 Mask = Builder.CreateBitCast(Mask, MaskTy);
2696 Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
2697 // Extract the lowest element from the passthru operand.
2698 Value *Passthru = Builder.CreateExtractElement(II->getArgOperand(2),
2700 V = Builder.CreateSelect(Mask, V, Passthru);
2703 // Insert the result back into the original argument 0.
2704 V = Builder.CreateInsertElement(Arg0, V, (uint64_t)0);
2706 return replaceInstUsesWith(*II, V);
2711 // X86 scalar intrinsics simplified with SimplifyDemandedVectorElts.
2712 case Intrinsic::x86_avx512_mask_max_ss_round:
2713 case Intrinsic::x86_avx512_mask_min_ss_round:
2714 case Intrinsic::x86_avx512_mask_max_sd_round:
2715 case Intrinsic::x86_avx512_mask_min_sd_round:
2716 case Intrinsic::x86_sse_cmp_ss:
2717 case Intrinsic::x86_sse_min_ss:
2718 case Intrinsic::x86_sse_max_ss:
2719 case Intrinsic::x86_sse2_cmp_sd:
2720 case Intrinsic::x86_sse2_min_sd:
2721 case Intrinsic::x86_sse2_max_sd:
2722 case Intrinsic::x86_xop_vfrcz_ss:
2723 case Intrinsic::x86_xop_vfrcz_sd: {
2724 unsigned VWidth = II->getType()->getVectorNumElements();
2725 APInt UndefElts(VWidth, 0);
2726 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
2727 if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
2728 if (V != II)
2729 return replaceInstUsesWith(*II, V);
2730 return II;
2731 }
2732 break;
2733 }
2734 case Intrinsic::x86_sse41_round_ss:
2735 case Intrinsic::x86_sse41_round_sd: {
2736 unsigned VWidth = II->getType()->getVectorNumElements();
2737 APInt UndefElts(VWidth, 0);
2738 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
2739 if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
2741 return replaceInstUsesWith(*II, V);
2743 } else if (Value *V = simplifyX86round(*II, Builder))
2744 return replaceInstUsesWith(*II, V);
2748 // Constant fold ashr( <A x Bi>, Ci ).
2749 // Constant fold lshr( <A x Bi>, Ci ).
2750 // Constant fold shl( <A x Bi>, Ci ).
2751 case Intrinsic::x86_sse2_psrai_d:
2752 case Intrinsic::x86_sse2_psrai_w:
2753 case Intrinsic::x86_avx2_psrai_d:
2754 case Intrinsic::x86_avx2_psrai_w:
2755 case Intrinsic::x86_avx512_psrai_q_128:
2756 case Intrinsic::x86_avx512_psrai_q_256:
2757 case Intrinsic::x86_avx512_psrai_d_512:
2758 case Intrinsic::x86_avx512_psrai_q_512:
2759 case Intrinsic::x86_avx512_psrai_w_512:
2760 case Intrinsic::x86_sse2_psrli_d:
2761 case Intrinsic::x86_sse2_psrli_q:
2762 case Intrinsic::x86_sse2_psrli_w:
2763 case Intrinsic::x86_avx2_psrli_d:
2764 case Intrinsic::x86_avx2_psrli_q:
2765 case Intrinsic::x86_avx2_psrli_w:
2766 case Intrinsic::x86_avx512_psrli_d_512:
2767 case Intrinsic::x86_avx512_psrli_q_512:
2768 case Intrinsic::x86_avx512_psrli_w_512:
2769 case Intrinsic::x86_sse2_pslli_d:
2770 case Intrinsic::x86_sse2_pslli_q:
2771 case Intrinsic::x86_sse2_pslli_w:
2772 case Intrinsic::x86_avx2_pslli_d:
2773 case Intrinsic::x86_avx2_pslli_q:
2774 case Intrinsic::x86_avx2_pslli_w:
2775 case Intrinsic::x86_avx512_pslli_d_512:
2776 case Intrinsic::x86_avx512_pslli_q_512:
2777 case Intrinsic::x86_avx512_pslli_w_512:
2778 if (Value *V = simplifyX86immShift(*II, Builder))
2779 return replaceInstUsesWith(*II, V);
2782 case Intrinsic::x86_sse2_psra_d:
2783 case Intrinsic::x86_sse2_psra_w:
2784 case Intrinsic::x86_avx2_psra_d:
2785 case Intrinsic::x86_avx2_psra_w:
2786 case Intrinsic::x86_avx512_psra_q_128:
2787 case Intrinsic::x86_avx512_psra_q_256:
2788 case Intrinsic::x86_avx512_psra_d_512:
2789 case Intrinsic::x86_avx512_psra_q_512:
2790 case Intrinsic::x86_avx512_psra_w_512:
2791 case Intrinsic::x86_sse2_psrl_d:
2792 case Intrinsic::x86_sse2_psrl_q:
2793 case Intrinsic::x86_sse2_psrl_w:
2794 case Intrinsic::x86_avx2_psrl_d:
2795 case Intrinsic::x86_avx2_psrl_q:
2796 case Intrinsic::x86_avx2_psrl_w:
2797 case Intrinsic::x86_avx512_psrl_d_512:
2798 case Intrinsic::x86_avx512_psrl_q_512:
2799 case Intrinsic::x86_avx512_psrl_w_512:
2800 case Intrinsic::x86_sse2_psll_d:
2801 case Intrinsic::x86_sse2_psll_q:
2802 case Intrinsic::x86_sse2_psll_w:
2803 case Intrinsic::x86_avx2_psll_d:
2804 case Intrinsic::x86_avx2_psll_q:
2805 case Intrinsic::x86_avx2_psll_w:
2806 case Intrinsic::x86_avx512_psll_d_512:
2807 case Intrinsic::x86_avx512_psll_q_512:
2808 case Intrinsic::x86_avx512_psll_w_512: {
2809 if (Value *V = simplifyX86immShift(*II, Builder))
2810 return replaceInstUsesWith(*II, V);
2812 // SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
2813 // operand to compute the shift amount.
2814 Value *Arg1 = II->getArgOperand(1);
2815 assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
2816 "Unexpected packed shift size");
2817 unsigned VWidth = Arg1->getType()->getVectorNumElements();
2819 if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
2820 II->setArgOperand(1, V);
2826 case Intrinsic::x86_avx2_psllv_d:
2827 case Intrinsic::x86_avx2_psllv_d_256:
2828 case Intrinsic::x86_avx2_psllv_q:
2829 case Intrinsic::x86_avx2_psllv_q_256:
2830 case Intrinsic::x86_avx512_psllv_d_512:
2831 case Intrinsic::x86_avx512_psllv_q_512:
2832 case Intrinsic::x86_avx512_psllv_w_128:
2833 case Intrinsic::x86_avx512_psllv_w_256:
2834 case Intrinsic::x86_avx512_psllv_w_512:
2835 case Intrinsic::x86_avx2_psrav_d:
2836 case Intrinsic::x86_avx2_psrav_d_256:
2837 case Intrinsic::x86_avx512_psrav_q_128:
2838 case Intrinsic::x86_avx512_psrav_q_256:
2839 case Intrinsic::x86_avx512_psrav_d_512:
2840 case Intrinsic::x86_avx512_psrav_q_512:
2841 case Intrinsic::x86_avx512_psrav_w_128:
2842 case Intrinsic::x86_avx512_psrav_w_256:
2843 case Intrinsic::x86_avx512_psrav_w_512:
2844 case Intrinsic::x86_avx2_psrlv_d:
2845 case Intrinsic::x86_avx2_psrlv_d_256:
2846 case Intrinsic::x86_avx2_psrlv_q:
2847 case Intrinsic::x86_avx2_psrlv_q_256:
2848 case Intrinsic::x86_avx512_psrlv_d_512:
2849 case Intrinsic::x86_avx512_psrlv_q_512:
2850 case Intrinsic::x86_avx512_psrlv_w_128:
2851 case Intrinsic::x86_avx512_psrlv_w_256:
2852 case Intrinsic::x86_avx512_psrlv_w_512:
2853 if (Value *V = simplifyX86varShift(*II, Builder))
2854 return replaceInstUsesWith(*II, V);
2857 case Intrinsic::x86_sse2_packssdw_128:
2858 case Intrinsic::x86_sse2_packsswb_128:
2859 case Intrinsic::x86_avx2_packssdw:
2860 case Intrinsic::x86_avx2_packsswb:
2861 case Intrinsic::x86_avx512_packssdw_512:
2862 case Intrinsic::x86_avx512_packsswb_512:
2863 if (Value *V = simplifyX86pack(*II, true))
2864 return replaceInstUsesWith(*II, V);
2867 case Intrinsic::x86_sse2_packuswb_128:
2868 case Intrinsic::x86_sse41_packusdw:
2869 case Intrinsic::x86_avx2_packusdw:
2870 case Intrinsic::x86_avx2_packuswb:
2871 case Intrinsic::x86_avx512_packusdw_512:
2872 case Intrinsic::x86_avx512_packuswb_512:
2873 if (Value *V = simplifyX86pack(*II, false))
2874 return replaceInstUsesWith(*II, V);
2877 case Intrinsic::x86_pclmulqdq:
2878 case Intrinsic::x86_pclmulqdq_256:
2879 case Intrinsic::x86_pclmulqdq_512: {
2880 if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2881 unsigned Imm = C->getZExtValue();
2883 bool MadeChange = false;
2884 Value *Arg0 = II->getArgOperand(0);
2885 Value *Arg1 = II->getArgOperand(1);
2886 unsigned VWidth = Arg0->getType()->getVectorNumElements();
2888 APInt UndefElts1(VWidth, 0);
2889 APInt DemandedElts1 = APInt::getSplat(VWidth,
2890 APInt(2, (Imm & 0x01) ? 2 : 1));
2891 if (Value *V = SimplifyDemandedVectorElts(Arg0, DemandedElts1,
2893 II->setArgOperand(0, V);
2897 APInt UndefElts2(VWidth, 0);
2898 APInt DemandedElts2 = APInt::getSplat(VWidth,
2899 APInt(2, (Imm & 0x10) ? 2 : 1));
2900 if (Value *V = SimplifyDemandedVectorElts(Arg1, DemandedElts2,
2902 II->setArgOperand(1, V);
2906 // If either input elements are undef, the result is zero.
2907 if (DemandedElts1.isSubsetOf(UndefElts1) ||
2908 DemandedElts2.isSubsetOf(UndefElts2))
2909 return replaceInstUsesWith(*II,
2910 ConstantAggregateZero::get(II->getType()));
2918 case Intrinsic::x86_sse41_insertps:
2919 if (Value *V = simplifyX86insertps(*II, Builder))
2920 return replaceInstUsesWith(*II, V);
2923 case Intrinsic::x86_sse4a_extrq: {
2924 Value *Op0 = II->getArgOperand(0);
2925 Value *Op1 = II->getArgOperand(1);
2926 unsigned VWidth0 = Op0->getType()->getVectorNumElements();
2927 unsigned VWidth1 = Op1->getType()->getVectorNumElements();
2928 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2929 Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
2930 VWidth1 == 16 && "Unexpected operand sizes");
2932 // See if we're dealing with constant values.
2933 Constant *C1 = dyn_cast<Constant>(Op1);
2934 ConstantInt *CILength =
2935 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
2937 ConstantInt *CIIndex =
2938 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
2941 // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
2942 if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
2943 return replaceInstUsesWith(*II, V);
2945 // EXTRQ only uses the lowest 64-bits of the first 128-bit vector
2946 // operands and the lowest 16-bits of the second.
2947 bool MadeChange = false;
2948 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
2949 II->setArgOperand(0, V);
2952 if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
2953 II->setArgOperand(1, V);
2961 case Intrinsic::x86_sse4a_extrqi: {
2962 // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
2963 // bits of the lower 64-bits. The upper 64-bits are undefined.
2964 Value *Op0 = II->getArgOperand(0);
2965 unsigned VWidth = Op0->getType()->getVectorNumElements();
2966 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
2967 "Unexpected operand size");
2969 // See if we're dealing with constant values.
2970 ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(1));
2971 ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));
2973 // Attempt to simplify to a constant or shuffle vector.
2974 if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
2975 return replaceInstUsesWith(*II, V);
2977 // EXTRQI only uses the lowest 64-bits of the first 128-bit vector
2978 // operand.
2979 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
2980 II->setArgOperand(0, V);
2986 case Intrinsic::x86_sse4a_insertq: {
2987 Value *Op0 = II->getArgOperand(0);
2988 Value *Op1 = II->getArgOperand(1);
2989 unsigned VWidth = Op0->getType()->getVectorNumElements();
2990 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2991 Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
2992 Op1->getType()->getVectorNumElements() == 2 &&
2993 "Unexpected operand size");
2995 // See if we're dealing with constant values.
2996 Constant *C1 = dyn_cast<Constant>(Op1);
2997 ConstantInt *CI11 =
2998 C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
2999 : nullptr;
3001 // Attempt to simplify to a constant, shuffle vector or INSERTQI call.
3002 if (CI11) {
3003 const APInt &V11 = CI11->getValue();
3004 APInt Len = V11.zextOrTrunc(6);
3005 APInt Idx = V11.lshr(8).zextOrTrunc(6);
3006 if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
3007 return replaceInstUsesWith(*II, V);
3010 // INSERTQ only uses the lowest 64-bits of the first 128-bit vector
3011 // operand.
3012 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
3013 II->setArgOperand(0, V);
3019 case Intrinsic::x86_sse4a_insertqi: {
3020 // INSERTQI: Extract lowest Length bits from lower half of second source and
3021 // insert over first source starting at Index bit. The upper 64-bits are
3022 // undefined.
3023 Value *Op0 = II->getArgOperand(0);
3024 Value *Op1 = II->getArgOperand(1);
3025 unsigned VWidth0 = Op0->getType()->getVectorNumElements();
3026 unsigned VWidth1 = Op1->getType()->getVectorNumElements();
3027 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
3028 Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
3029 VWidth1 == 2 && "Unexpected operand sizes");
3031 // See if we're dealing with constant values.
3032 ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(2));
3033 ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3));
3035 // Attempt to simplify to a constant or shuffle vector.
3036 if (CILength && CIIndex) {
3037 APInt Len = CILength->getValue().zextOrTrunc(6);
3038 APInt Idx = CIIndex->getValue().zextOrTrunc(6);
3039 if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
3040 return replaceInstUsesWith(*II, V);
3043 // INSERTQI only uses the lowest 64-bits of the first two 128-bit vector
3044 // operands.
3045 bool MadeChange = false;
3046 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
3047 II->setArgOperand(0, V);
3050 if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
3051 II->setArgOperand(1, V);
3059 case Intrinsic::x86_sse41_pblendvb:
3060 case Intrinsic::x86_sse41_blendvps:
3061 case Intrinsic::x86_sse41_blendvpd:
3062 case Intrinsic::x86_avx_blendv_ps_256:
3063 case Intrinsic::x86_avx_blendv_pd_256:
3064 case Intrinsic::x86_avx2_pblendvb: {
3065 // fold (blend A, A, Mask) -> A
3066 Value *Op0 = II->getArgOperand(0);
3067 Value *Op1 = II->getArgOperand(1);
3068 Value *Mask = II->getArgOperand(2);
3069 if (Op0 == Op1)
3070 return replaceInstUsesWith(CI, Op0);
3072 // Zero Mask - select 1st argument.
3073 if (isa<ConstantAggregateZero>(Mask))
3074 return replaceInstUsesWith(CI, Op0);
3076 // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
3077 if (auto *ConstantMask = dyn_cast<ConstantDataVector>(Mask)) {
3078 Constant *NewSelector = getNegativeIsTrueBoolVec(ConstantMask);
3079 return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
3082 // Convert to a vector select if we can bypass casts and find a boolean
3083 // vector condition value.
3084 Value *BoolVec;
3085 Mask = peekThroughBitcast(Mask);
3086 if (match(Mask, m_SExt(m_Value(BoolVec))) &&
3087 BoolVec->getType()->isVectorTy() &&
3088 BoolVec->getType()->getScalarSizeInBits() == 1) {
3089 assert(Mask->getType()->getPrimitiveSizeInBits() ==
3090 II->getType()->getPrimitiveSizeInBits() &&
3091 "Not expecting mask and operands with different sizes");
3093 unsigned NumMaskElts = Mask->getType()->getVectorNumElements();
3094 unsigned NumOperandElts = II->getType()->getVectorNumElements();
3095 if (NumMaskElts == NumOperandElts)
3096 return SelectInst::Create(BoolVec, Op1, Op0);
3098 // If the mask has fewer elements than the operands, each mask bit maps to
3099 // multiple elements of the operands. Bitcast back and forth.
3100 if (NumMaskElts < NumOperandElts) {
3101 Value *CastOp0 = Builder.CreateBitCast(Op0, Mask->getType());
3102 Value *CastOp1 = Builder.CreateBitCast(Op1, Mask->getType());
3103 Value *Sel = Builder.CreateSelect(BoolVec, CastOp1, CastOp0);
3104 return new BitCastInst(Sel, II->getType());
3111 case Intrinsic::x86_ssse3_pshuf_b_128:
3112 case Intrinsic::x86_avx2_pshuf_b:
3113 case Intrinsic::x86_avx512_pshuf_b_512:
3114 if (Value *V = simplifyX86pshufb(*II, Builder))
3115 return replaceInstUsesWith(*II, V);
3118 case Intrinsic::x86_avx_vpermilvar_ps:
3119 case Intrinsic::x86_avx_vpermilvar_ps_256:
3120 case Intrinsic::x86_avx512_vpermilvar_ps_512:
3121 case Intrinsic::x86_avx_vpermilvar_pd:
3122 case Intrinsic::x86_avx_vpermilvar_pd_256:
3123 case Intrinsic::x86_avx512_vpermilvar_pd_512:
3124 if (Value *V = simplifyX86vpermilvar(*II, Builder))
3125 return replaceInstUsesWith(*II, V);
3128 case Intrinsic::x86_avx2_permd:
3129 case Intrinsic::x86_avx2_permps:
3130 case Intrinsic::x86_avx512_permvar_df_256:
3131 case Intrinsic::x86_avx512_permvar_df_512:
3132 case Intrinsic::x86_avx512_permvar_di_256:
3133 case Intrinsic::x86_avx512_permvar_di_512:
3134 case Intrinsic::x86_avx512_permvar_hi_128:
3135 case Intrinsic::x86_avx512_permvar_hi_256:
3136 case Intrinsic::x86_avx512_permvar_hi_512:
3137 case Intrinsic::x86_avx512_permvar_qi_128:
3138 case Intrinsic::x86_avx512_permvar_qi_256:
3139 case Intrinsic::x86_avx512_permvar_qi_512:
3140 case Intrinsic::x86_avx512_permvar_sf_512:
3141 case Intrinsic::x86_avx512_permvar_si_512:
3142 if (Value *V = simplifyX86vpermv(*II, Builder))
3143 return replaceInstUsesWith(*II, V);
3146 case Intrinsic::x86_avx_maskload_ps:
3147 case Intrinsic::x86_avx_maskload_pd:
3148 case Intrinsic::x86_avx_maskload_ps_256:
3149 case Intrinsic::x86_avx_maskload_pd_256:
3150 case Intrinsic::x86_avx2_maskload_d:
3151 case Intrinsic::x86_avx2_maskload_q:
3152 case Intrinsic::x86_avx2_maskload_d_256:
3153 case Intrinsic::x86_avx2_maskload_q_256:
3154 if (Instruction *I = simplifyX86MaskedLoad(*II, *this))
3158 case Intrinsic::x86_sse2_maskmov_dqu:
3159 case Intrinsic::x86_avx_maskstore_ps:
3160 case Intrinsic::x86_avx_maskstore_pd:
3161 case Intrinsic::x86_avx_maskstore_ps_256:
3162 case Intrinsic::x86_avx_maskstore_pd_256:
3163 case Intrinsic::x86_avx2_maskstore_d:
3164 case Intrinsic::x86_avx2_maskstore_q:
3165 case Intrinsic::x86_avx2_maskstore_d_256:
3166 case Intrinsic::x86_avx2_maskstore_q_256:
3167 if (simplifyX86MaskedStore(*II, *this))
3171 case Intrinsic::x86_xop_vpcomb:
3172 case Intrinsic::x86_xop_vpcomd:
3173 case Intrinsic::x86_xop_vpcomq:
3174 case Intrinsic::x86_xop_vpcomw:
3175 if (Value *V = simplifyX86vpcom(*II, Builder, true))
3176 return replaceInstUsesWith(*II, V);
3179 case Intrinsic::x86_xop_vpcomub:
3180 case Intrinsic::x86_xop_vpcomud:
3181 case Intrinsic::x86_xop_vpcomuq:
3182 case Intrinsic::x86_xop_vpcomuw:
3183 if (Value *V = simplifyX86vpcom(*II, Builder, false))
3184 return replaceInstUsesWith(*II, V);
3187 case Intrinsic::ppc_altivec_vperm:
3188 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
3189 // Note that ppc_altivec_vperm has a big-endian bias, so when creating
3190 // a vectorshuffle for little endian, we must undo the transformation
3191 // performed on vec_perm in altivec.h. That is, we must complement
3192 // the permutation mask with respect to 31 and reverse the order of
3193 // the input vectors.
3194 if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
3195 assert(Mask->getType()->getVectorNumElements() == 16 &&
3196 "Bad type for intrinsic!");
3198 // Check that all of the elements are integer constants or undefs.
3199 bool AllEltsOk = true;
3200 for (unsigned i = 0; i != 16; ++i) {
3201 Constant *Elt = Mask->getAggregateElement(i);
3202 if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
3203 AllEltsOk = false;
3204 break;
3205 }
3206 }
3207
3208 if (AllEltsOk) {
3209 // Cast the input vectors to byte vectors.
3210 Value *Op0 = Builder.CreateBitCast(II->getArgOperand(0),
3212 Value *Op1 = Builder.CreateBitCast(II->getArgOperand(1),
3214 Value *Result = UndefValue::get(Op0->getType());
3216 // Only extract each element once.
3217 Value *ExtractedElts[32];
3218 memset(ExtractedElts, 0, sizeof(ExtractedElts));
3220 for (unsigned i = 0; i != 16; ++i) {
3221 if (isa<UndefValue>(Mask->getAggregateElement(i)))
3222 continue;
3223 unsigned Idx =
3224 cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
3225 Idx &= 31; // Match the hardware behavior.
3226 if (DL.isLittleEndian())
3227 Idx = 31 - Idx;
3229 if (!ExtractedElts[Idx]) {
3230 Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
3231 Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
3232 ExtractedElts[Idx] =
3233 Builder.CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
3234 Builder.getInt32(Idx&15));
3237 // Insert this value into the result vector.
3238 Result = Builder.CreateInsertElement(Result, ExtractedElts[Idx],
3239 Builder.getInt32(i));
3241 return CastInst::Create(Instruction::BitCast, Result, CI.getType());
3246 case Intrinsic::arm_neon_vld1: {
3247 unsigned MemAlign = getKnownAlignment(II->getArgOperand(0),
3249 if (Value *V = simplifyNeonVld1(*II, MemAlign, Builder))
3250 return replaceInstUsesWith(*II, V);
3254 case Intrinsic::arm_neon_vld2:
3255 case Intrinsic::arm_neon_vld3:
3256 case Intrinsic::arm_neon_vld4:
3257 case Intrinsic::arm_neon_vld2lane:
3258 case Intrinsic::arm_neon_vld3lane:
3259 case Intrinsic::arm_neon_vld4lane:
3260 case Intrinsic::arm_neon_vst1:
3261 case Intrinsic::arm_neon_vst2:
3262 case Intrinsic::arm_neon_vst3:
3263 case Intrinsic::arm_neon_vst4:
3264 case Intrinsic::arm_neon_vst2lane:
3265 case Intrinsic::arm_neon_vst3lane:
3266 case Intrinsic::arm_neon_vst4lane: {
3267 unsigned MemAlign =
3268 getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
3269 unsigned AlignArg = II->getNumArgOperands() - 1;
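// The NEON load/store intrinsics pass their alignment as the trailing
// argument; if a larger alignment can be proven from the pointer, raise it.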
3270 ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
3271 if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
3272 II->setArgOperand(AlignArg,
3273 ConstantInt::get(Type::getInt32Ty(II->getContext()),
3274 MemAlign, false));
3275 return II;
3276 }
3277 break;
3280 case Intrinsic::arm_neon_vtbl1:
3281 case Intrinsic::aarch64_neon_tbl1:
3282 if (Value *V = simplifyNeonTbl1(*II, Builder))
3283 return replaceInstUsesWith(*II, V);
3286 case Intrinsic::arm_neon_vmulls:
3287 case Intrinsic::arm_neon_vmullu:
3288 case Intrinsic::aarch64_neon_smull:
3289 case Intrinsic::aarch64_neon_umull: {
3290 Value *Arg0 = II->getArgOperand(0);
3291 Value *Arg1 = II->getArgOperand(1);
3293 // Handle mul by zero first:
3294 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
3295 return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
3298 // Check for constant LHS & RHS - in this case we just simplify.
3299 bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
3300 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
3301 VectorType *NewVT = cast<VectorType>(II->getType());
3302 if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
3303 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
3304 CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
3305 CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
3307 return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
3310 // Couldn't simplify - canonicalize constant to the RHS.
3311 std::swap(Arg0, Arg1);
3314 // Handle mul by one:
3315 if (Constant *CV1 = dyn_cast<Constant>(Arg1))
3316 if (ConstantInt *Splat =
3317 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
3318 if (Splat->isOne())
3319 return CastInst::CreateIntegerCast(Arg0, II->getType(),
3320 /*isSigned=*/!Zext);
3324 case Intrinsic::arm_neon_aesd:
3325 case Intrinsic::arm_neon_aese:
3326 case Intrinsic::aarch64_crypto_aesd:
3327 case Intrinsic::aarch64_crypto_aese: {
3328 Value *DataArg = II->getArgOperand(0);
3329 Value *KeyArg = II->getArgOperand(1);
3331 // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
3332 Value *Data, *Key;
3333 if (match(KeyArg, m_ZeroInt()) &&
3334 match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
3335 II->setArgOperand(0, Data);
3336 II->setArgOperand(1, Key);
3341 case Intrinsic::amdgcn_rcp: {
3342 Value *Src = II->getArgOperand(0);
3344 // TODO: Move to ConstantFolding/InstSimplify?
3345 if (isa<UndefValue>(Src))
3346 return replaceInstUsesWith(CI, Src);
3348 if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
3349 const APFloat &ArgVal = C->getValueAPF();
3350 APFloat Val(ArgVal.getSemantics(), 1.0);
3351 APFloat::opStatus Status = Val.divide(ArgVal,
3352 APFloat::rmNearestTiesToEven);
3353 // Only do this if it was exact and therefore not dependent on the
3354 // rounding mode.
3355 if (Status == APFloat::opOK)
3356 return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
3361 case Intrinsic::amdgcn_rsq: {
3362 Value *Src = II->getArgOperand(0);
3364 // TODO: Move to ConstantFolding/InstSimplify?
3365 if (isa<UndefValue>(Src))
3366 return replaceInstUsesWith(CI, Src);
3369 case Intrinsic::amdgcn_frexp_mant:
3370 case Intrinsic::amdgcn_frexp_exp: {
3371 Value *Src = II->getArgOperand(0);
3372 if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
3373 int Exp;
3374 APFloat Significand = frexp(C->getValueAPF(), Exp,
3375 APFloat::rmNearestTiesToEven);
3377 if (II->getIntrinsicID() == Intrinsic::amdgcn_frexp_mant) {
3378 return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(),
3379 Significand));
3380 }
3382 // Match instruction special case behavior.
3383 if (Exp == APFloat::IEK_NaN || Exp == APFloat::IEK_Inf)
3384 Exp = 0;
3386 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Exp));
3389 if (isa<UndefValue>(Src))
3390 return replaceInstUsesWith(CI, UndefValue::get(II->getType()));
3394 case Intrinsic::amdgcn_class: {
3395 enum {
3396 S_NAN = 1 << 0, // Signaling NaN
3397 Q_NAN = 1 << 1, // Quiet NaN
3398 N_INFINITY = 1 << 2, // Negative infinity
3399 N_NORMAL = 1 << 3, // Negative normal
3400 N_SUBNORMAL = 1 << 4, // Negative subnormal
3401 N_ZERO = 1 << 5, // Negative zero
3402 P_ZERO = 1 << 6, // Positive zero
3403 P_SUBNORMAL = 1 << 7, // Positive subnormal
3404 P_NORMAL = 1 << 8, // Positive normal
3405 P_INFINITY = 1 << 9 // Positive infinity
3406 };
3408 const uint32_t FullMask = S_NAN | Q_NAN | N_INFINITY | N_NORMAL |
3409 N_SUBNORMAL | N_ZERO | P_ZERO | P_SUBNORMAL | P_NORMAL | P_INFINITY;
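// FullMask covers every class-test bit the intrinsic defines; the code below
// treats bits outside this set as unused.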
3411 Value *Src0 = II->getArgOperand(0);
3412 Value *Src1 = II->getArgOperand(1);
3413 const ConstantInt *CMask = dyn_cast<ConstantInt>(Src1);
3414 if (!CMask) {
3415 if (isa<UndefValue>(Src0))
3416 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3418 if (isa<UndefValue>(Src1))
3419 return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
3420 break;
3421 }
3423 uint32_t Mask = CMask->getZExtValue();
3425 // If all tests are made, it doesn't matter what the value is.
3426 if ((Mask & FullMask) == FullMask)
3427 return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), true));
3429 if ((Mask & FullMask) == 0)
3430 return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
3432 if (Mask == (S_NAN | Q_NAN)) {
3433 // Equivalent of isnan. Replace with standard fcmp.
3434 Value *FCmp = Builder.CreateFCmpUNO(Src0, Src0);
3436 return replaceInstUsesWith(*II, FCmp);
3439 if (Mask == (N_ZERO | P_ZERO)) {
3440 // Equivalent of == 0.
3441 Value *FCmp = Builder.CreateFCmpOEQ(
3442 Src0, ConstantFP::get(Src0->getType(), 0.0));
3445 return replaceInstUsesWith(*II, FCmp);
3448 // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other
3449 if (((Mask & S_NAN) || (Mask & Q_NAN)) && isKnownNeverNaN(Src0, &TLI)) {
3450 II->setArgOperand(1, ConstantInt::get(Src1->getType(),
3451 Mask & ~(S_NAN | Q_NAN)));
3455 const ConstantFP *CVal = dyn_cast<ConstantFP>(Src0);
3457 if (isa<UndefValue>(Src0))
3458 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3460 // Clamp mask to used bits
3461 if ((Mask & FullMask) != Mask) {
3462 CallInst *NewCall = Builder.CreateCall(II->getCalledFunction(),
3463 { Src0, ConstantInt::get(Src1->getType(), Mask & FullMask) }
3464 );
3466 NewCall->takeName(II);
3467 return replaceInstUsesWith(*II, NewCall);
3468 }
3470 break;
3471 }
3473 const APFloat &Val = CVal->getValueAPF();
3475 bool Result =
3476 ((Mask & S_NAN) && Val.isNaN() && Val.isSignaling()) ||
3477 ((Mask & Q_NAN) && Val.isNaN() && !Val.isSignaling()) ||
3478 ((Mask & N_INFINITY) && Val.isInfinity() && Val.isNegative()) ||
3479 ((Mask & N_NORMAL) && Val.isNormal() && Val.isNegative()) ||
3480 ((Mask & N_SUBNORMAL) && Val.isDenormal() && Val.isNegative()) ||
3481 ((Mask & N_ZERO) && Val.isZero() && Val.isNegative()) ||
3482 ((Mask & P_ZERO) && Val.isZero() && !Val.isNegative()) ||
3483 ((Mask & P_SUBNORMAL) && Val.isDenormal() && !Val.isNegative()) ||
3484 ((Mask & P_NORMAL) && Val.isNormal() && !Val.isNegative()) ||
3485 ((Mask & P_INFINITY) && Val.isInfinity() && !Val.isNegative());
3487 return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), Result));
3489 case Intrinsic::amdgcn_cvt_pkrtz: {
3490 Value *Src0 = II->getArgOperand(0);
3491 Value *Src1 = II->getArgOperand(1);
3492 if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3493 if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3494 const fltSemantics &HalfSem
3495 = II->getType()->getScalarType()->getFltSemantics();
3496 bool LosesInfo;
3497 APFloat Val0 = C0->getValueAPF();
3498 APFloat Val1 = C1->getValueAPF();
3499 Val0.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
3500 Val1.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
3502 Constant *Folded = ConstantVector::get({
3503 ConstantFP::get(II->getContext(), Val0),
3504 ConstantFP::get(II->getContext(), Val1) });
3505 return replaceInstUsesWith(*II, Folded);
3509 if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3510 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3514 case Intrinsic::amdgcn_cvt_pknorm_i16:
3515 case Intrinsic::amdgcn_cvt_pknorm_u16:
3516 case Intrinsic::amdgcn_cvt_pk_i16:
3517 case Intrinsic::amdgcn_cvt_pk_u16: {
3518 Value *Src0 = II->getArgOperand(0);
3519 Value *Src1 = II->getArgOperand(1);
3521 if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3522 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3526 case Intrinsic::amdgcn_ubfe:
3527 case Intrinsic::amdgcn_sbfe: {
3528 // Decompose simple cases into standard shifts.
3529 Value *Src = II->getArgOperand(0);
3530 if (isa<UndefValue>(Src))
3531 return replaceInstUsesWith(*II, Src);
3533 unsigned Width;
3534 Type *Ty = II->getType();
3535 unsigned IntSize = Ty->getIntegerBitWidth();
3537 ConstantInt *CWidth = dyn_cast<ConstantInt>(II->getArgOperand(2));
3538 if (CWidth) {
3539 Width = CWidth->getZExtValue();
3540 if ((Width & (IntSize - 1)) == 0)
3541 return replaceInstUsesWith(*II, ConstantInt::getNullValue(Ty));
3543 if (Width >= IntSize) {
3544 // Hardware ignores high bits, so remove those.
3545 II->setArgOperand(2, ConstantInt::get(CWidth->getType(),
3546 Width & (IntSize - 1)));
3547 return II;
3548 }
3549 }
3551 unsigned Offset;
3552 ConstantInt *COffset = dyn_cast<ConstantInt>(II->getArgOperand(1));
3553 if (COffset) {
3554 Offset = COffset->getZExtValue();
3555 if (Offset >= IntSize) {
3556 II->setArgOperand(1, ConstantInt::get(COffset->getType(),
3557 Offset & (IntSize - 1)));
3562 bool Signed = II->getIntrinsicID() == Intrinsic::amdgcn_sbfe;
3564 if (!CWidth || !COffset)
3565 break;
3567 // The case of Width == 0 is handled above, which makes this transformation
3568 // safe. If Width == 0, then the ashr and lshr instructions would become
3569 // poison values since the shift amount would be equal to the bit size.
3572 // TODO: This allows folding to undef when the hardware has specific
3573 // behavior.
3574 if (Offset + Width < IntSize) {
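// Shift the field up against the sign bit, then shift back down; an
// arithmetic shift gives the sign-extending (sbfe) form and a logical shift
// the zero-extending (ubfe) form.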
3575 Value *Shl = Builder.CreateShl(Src, IntSize - Offset - Width);
3576 Value *RightShift = Signed ? Builder.CreateAShr(Shl, IntSize - Width)
3577 : Builder.CreateLShr(Shl, IntSize - Width);
3578 RightShift->takeName(II);
3579 return replaceInstUsesWith(*II, RightShift);
3582 Value *RightShift = Signed ? Builder.CreateAShr(Src, Offset)
3583 : Builder.CreateLShr(Src, Offset);
3585 RightShift->takeName(II);
3586 return replaceInstUsesWith(*II, RightShift);
3588 case Intrinsic::amdgcn_exp:
3589 case Intrinsic::amdgcn_exp_compr: {
3590 ConstantInt *En = dyn_cast<ConstantInt>(II->getArgOperand(1));
3591 if (!En) // Illegal.
3592 break;
3594 unsigned EnBits = En->getZExtValue();
3595 if (EnBits == 0xf)
3596 break; // All inputs enabled.
3598 bool IsCompr = II->getIntrinsicID() == Intrinsic::amdgcn_exp_compr;
3599 bool Changed = false;
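// Compressed exports pack two 16-bit values per source operand, so two enable
// bits map to each operand; normal exports use one enable bit per operand.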
3600 for (int I = 0; I < (IsCompr ? 2 : 4); ++I) {
3601 if ((!IsCompr && (EnBits & (1 << I)) == 0) ||
3602 (IsCompr && ((EnBits & (0x3 << (2 * I))) == 0))) {
3603 Value *Src = II->getArgOperand(I + 2);
3604 if (!isa<UndefValue>(Src)) {
3605 II->setArgOperand(I + 2, UndefValue::get(Src->getType()));
3616 case Intrinsic::amdgcn_fmed3: {
3617 // Note this does not preserve proper sNaN behavior if IEEE-mode is enabled
3620 Value *Src0 = II->getArgOperand(0);
3621 Value *Src1 = II->getArgOperand(1);
3622 Value *Src2 = II->getArgOperand(2);
3624 // Checking for NaN before canonicalization provides better fidelity when
3625 // mapping other operations onto fmed3 since the order of operands is
3626 // unchanged.
3627 CallInst *NewCall = nullptr;
3628 if (match(Src0, m_NaN()) || isa<UndefValue>(Src0)) {
3629 NewCall = Builder.CreateMinNum(Src1, Src2);
3630 } else if (match(Src1, m_NaN()) || isa<UndefValue>(Src1)) {
3631 NewCall = Builder.CreateMinNum(Src0, Src2);
3632 } else if (match(Src2, m_NaN()) || isa<UndefValue>(Src2)) {
3633 NewCall = Builder.CreateMaxNum(Src0, Src1);
3634 }
3636 if (NewCall) {
3637 NewCall->copyFastMathFlags(II);
3638 NewCall->takeName(II);
3639 return replaceInstUsesWith(*II, NewCall);
3643 // Canonicalize constants to RHS operands.
3645 // fmed3(c0, x, c1) -> fmed3(x, c0, c1)
3646 if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3647 std::swap(Src0, Src1);
3651 if (isa<Constant>(Src1) && !isa<Constant>(Src2)) {
3652 std::swap(Src1, Src2);
3656 if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3657 std::swap(Src0, Src1);
3662 II->setArgOperand(0, Src0);
3663 II->setArgOperand(1, Src1);
3664 II->setArgOperand(2, Src2);
3668 if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3669 if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3670 if (const ConstantFP *C2 = dyn_cast<ConstantFP>(Src2)) {
3671 APFloat Result = fmed3AMDGCN(C0->getValueAPF(), C1->getValueAPF(),
3672 C2->getValueAPF());
3673 return replaceInstUsesWith(*II,
3674 ConstantFP::get(Builder.getContext(), Result));
3681 case Intrinsic::amdgcn_icmp:
3682 case Intrinsic::amdgcn_fcmp: {
3683 const ConstantInt *CC = dyn_cast<ConstantInt>(II->getArgOperand(2));
3684 if (!CC)
3685 break;
3687 // Guard against invalid arguments.
3688 int64_t CCVal = CC->getZExtValue();
3689 bool IsInteger = II->getIntrinsicID() == Intrinsic::amdgcn_icmp;
3690 if ((IsInteger && (CCVal < CmpInst::FIRST_ICMP_PREDICATE ||
3691 CCVal > CmpInst::LAST_ICMP_PREDICATE)) ||
3692 (!IsInteger && (CCVal < CmpInst::FIRST_FCMP_PREDICATE ||
3693 CCVal > CmpInst::LAST_FCMP_PREDICATE)))
3694 break;
3696 Value *Src0 = II->getArgOperand(0);
3697 Value *Src1 = II->getArgOperand(1);
3699 if (auto *CSrc0 = dyn_cast<Constant>(Src0)) {
3700 if (auto *CSrc1 = dyn_cast<Constant>(Src1)) {
3701 Constant *CCmp = ConstantExpr::getCompare(CCVal, CSrc0, CSrc1);
3702 if (CCmp->isNullValue()) {
3703 return replaceInstUsesWith(
3704 *II, ConstantExpr::getSExt(CCmp, II->getType()));
3707 // The result of V_ICMP/V_FCMP assembly instructions (which this
3708 // intrinsic exposes) is one bit per thread, masked with the EXEC
3709 // register (which contains the bitmask of live threads). So a
3710 // comparison that always returns true is the same as a read of the
3711 // EXEC register.
3712 Value *NewF = Intrinsic::getDeclaration(
3713 II->getModule(), Intrinsic::read_register, II->getType());
3714 Metadata *MDArgs[] = {MDString::get(II->getContext(), "exec")};
3715 MDNode *MD = MDNode::get(II->getContext(), MDArgs);
3716 Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)};
3717 CallInst *NewCall = Builder.CreateCall(NewF, Args);
3718 NewCall->addAttribute(AttributeList::FunctionIndex,
3719 Attribute::Convergent);
3720 NewCall->takeName(II);
3721 return replaceInstUsesWith(*II, NewCall);
3724 // Canonicalize constants to RHS.
3725 CmpInst::Predicate SwapPred
3726 = CmpInst::getSwappedPredicate(static_cast<CmpInst::Predicate>(CCVal));
3727 II->setArgOperand(0, Src1);
3728 II->setArgOperand(1, Src0);
3729 II->setArgOperand(2, ConstantInt::get(CC->getType(),
3730 static_cast<int>(SwapPred)));
3734 if (CCVal != CmpInst::ICMP_EQ && CCVal != CmpInst::ICMP_NE)
3735 break;
3737 // Canonicalize compare eq with true value to compare != 0
3738 // llvm.amdgcn.icmp(zext (i1 x), 1, eq)
3739 // -> llvm.amdgcn.icmp(zext (i1 x), 0, ne)
3740 // llvm.amdgcn.icmp(sext (i1 x), -1, eq)
3741 // -> llvm.amdgcn.icmp(sext (i1 x), 0, ne)
3742 Value *ExtSrc;
3743 if (CCVal == CmpInst::ICMP_EQ &&
3744 ((match(Src1, m_One()) && match(Src0, m_ZExt(m_Value(ExtSrc)))) ||
3745 (match(Src1, m_AllOnes()) && match(Src0, m_SExt(m_Value(ExtSrc))))) &&
3746 ExtSrc->getType()->isIntegerTy(1)) {
3747 II->setArgOperand(1, ConstantInt::getNullValue(Src1->getType()));
3748 II->setArgOperand(2, ConstantInt::get(CC->getType(), CmpInst::ICMP_NE));
3752 CmpInst::Predicate SrcPred;
3753 Value *SrcLHS;
3754 Value *SrcRHS;
3756 // Fold compare eq/ne with 0 from a compare result as the predicate to the
3757 // intrinsic. The typical use is a wave vote function in the library, which
3758 // will be fed from a user code condition compared with 0. Fold in the
3759 // redundant compare.
3761 // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, ne)
3762 // -> llvm.amdgcn.[if]cmp(a, b, pred)
3764 // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, eq)
3765 // -> llvm.amdgcn.[if]cmp(a, b, inv pred)
3766 if (match(Src1, m_Zero()) &&
3767 match(Src0,
3768 m_ZExtOrSExt(m_Cmp(SrcPred, m_Value(SrcLHS), m_Value(SrcRHS))))) {
3769 if (CCVal == CmpInst::ICMP_EQ)
3770 SrcPred = CmpInst::getInversePredicate(SrcPred);
3772 Intrinsic::ID NewIID = CmpInst::isFPPredicate(SrcPred) ?
3773 Intrinsic::amdgcn_fcmp : Intrinsic::amdgcn_icmp;
3775 Type *Ty = SrcLHS->getType();
3776 if (auto *CmpType = dyn_cast<IntegerType>(Ty)) {
3777 // Promote to next legal integer type.
3778 unsigned Width = CmpType->getBitWidth();
3779 unsigned NewWidth = Width;
3781 // Don't do anything for i1 comparisons.
3782 if (Width == 1)
3783 break;
3785 if (Width <= 16)
3786 NewWidth = 16;
3787 else if (Width <= 32)
3788 NewWidth = 32;
3789 else if (Width <= 64)
3790 NewWidth = 64;
3791 else if (Width > 64)
3792 break; // Can't handle this.
3794 if (Width != NewWidth) {
3795 IntegerType *CmpTy = Builder.getIntNTy(NewWidth);
3796 if (CmpInst::isSigned(SrcPred)) {
3797 SrcLHS = Builder.CreateSExt(SrcLHS, CmpTy);
3798 SrcRHS = Builder.CreateSExt(SrcRHS, CmpTy);
3799 } else {
3800 SrcLHS = Builder.CreateZExt(SrcLHS, CmpTy);
3801 SrcRHS = Builder.CreateZExt(SrcRHS, CmpTy);
3804 } else if (!Ty->isFloatTy() && !Ty->isDoubleTy() && !Ty->isHalfTy())
3805 break;
3807 Value *NewF = Intrinsic::getDeclaration(II->getModule(), NewIID,
3808 SrcLHS->getType());
3809 Value *Args[] = { SrcLHS, SrcRHS,
3810 ConstantInt::get(CC->getType(), SrcPred) };
3811 CallInst *NewCall = Builder.CreateCall(NewF, Args);
3812 NewCall->takeName(II);
3813 return replaceInstUsesWith(*II, NewCall);
3818 case Intrinsic::amdgcn_wqm_vote: {
3819 // wqm_vote is identity when the argument is constant.
3820 if (!isa<Constant>(II->getArgOperand(0)))
3821 break;
3823 return replaceInstUsesWith(*II, II->getArgOperand(0));
3825 case Intrinsic::amdgcn_kill: {
3826 const ConstantInt *C = dyn_cast<ConstantInt>(II->getArgOperand(0));
3827 if (!C || !C->getZExtValue())
3828 break;
3830 // amdgcn.kill(i1 1) is a no-op
3831 return eraseInstFromFunction(CI);
3833 case Intrinsic::amdgcn_update_dpp: {
3834 Value *Old = II->getArgOperand(0);
3836 auto BC = dyn_cast<ConstantInt>(II->getArgOperand(5));
3837 auto RM = dyn_cast<ConstantInt>(II->getArgOperand(3));
3838 auto BM = dyn_cast<ConstantInt>(II->getArgOperand(4));
3839 if (!BC || !RM || !BM ||
3840 BC->isZeroValue() ||
3841 RM->getZExtValue() != 0xF ||
3842 BM->getZExtValue() != 0xF ||
3843 isa<UndefValue>(Old))
3844 break;
3846 // If bound_ctrl = 1 and row mask = bank mask = 0xf, we can omit the old value.
3847 II->setOperand(0, UndefValue::get(Old->getType()));
3848 return II;
3849 }
3850 case Intrinsic::stackrestore: {
3851 // If the save is right next to the restore, remove the restore. This can
3852 // happen when variable allocas are DCE'd.
3853 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
3854 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
3855 // Skip over debug info.
3856 if (SS->getNextNonDebugInstruction() == II) {
3857 return eraseInstFromFunction(CI);
3862 // Scan down this block to see if there is another stack restore in the
3863 // same block without an intervening call/alloca.
3864 BasicBlock::iterator BI(II);
3865 Instruction *TI = II->getParent()->getTerminator();
3866 bool CannotRemove = false;
3867 for (++BI; &*BI != TI; ++BI) {
3868 if (isa<AllocaInst>(BI)) {
3869 CannotRemove = true;
3872 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
3873 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
3874 // If there is a stackrestore below this one, remove this one.
3875 if (II->getIntrinsicID() == Intrinsic::stackrestore)
3876 return eraseInstFromFunction(CI);
3878 // Bail if we cross over an intrinsic with side effects, such as
3879 // llvm.stacksave, llvm.read_register, or llvm.setjmp.
3880 if (II->mayHaveSideEffects()) {
3881 CannotRemove = true;
3885 // If we found a non-intrinsic call, we can't remove the stack
3886 // restore.
3887 CannotRemove = true;
3893 // If the stack restore is in a return, resume, or unwind block and if there
3894 // are no allocas or calls between the restore and the return, nuke the
3895 // restore.
3896 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
3897 return eraseInstFromFunction(CI);
3900 case Intrinsic::lifetime_start:
3901 // Asan needs to poison memory to detect invalid access which is possible
3902 // even for empty lifetime range.
3903 if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3904 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3905 break;
3907 if (removeTriviallyEmptyRange(*II, Intrinsic::lifetime_start,
3908 Intrinsic::lifetime_end, *this))
3909 return nullptr;
3910 break;
3911 case Intrinsic::assume: {
3912 Value *IIOperand = II->getArgOperand(0);
3913 // Remove an assume if it is followed by an identical assume.
3914 // TODO: Do we need this? Unless there are conflicting assumptions, the
3915 // computeKnownBits(IIOperand) below here eliminates redundant assumes.
3916 Instruction *Next = II->getNextNonDebugInstruction();
3917 if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
3918 return eraseInstFromFunction(CI);
3920 // Canonicalize assume(a && b) -> assume(a); assume(b);
3921 // Note: New assumption intrinsics created here are registered by
3922 // the InstCombineIRInserter object.
3923 Value *AssumeIntrinsic = II->getCalledValue(), *A, *B;
3924 if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
3925 Builder.CreateCall(AssumeIntrinsic, A, II->getName());
3926 Builder.CreateCall(AssumeIntrinsic, B, II->getName());
3927 return eraseInstFromFunction(*II);
3929 // assume(!(a || b)) -> assume(!a); assume(!b);
3930 if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
3931 Builder.CreateCall(AssumeIntrinsic, Builder.CreateNot(A), II->getName());
3932 Builder.CreateCall(AssumeIntrinsic, Builder.CreateNot(B), II->getName());
3933 return eraseInstFromFunction(*II);
3936 // assume( (load addr) != null ) -> add 'nonnull' metadata to load
3937 // (if assume is valid at the load)
3938 CmpInst::Predicate Pred;
3939 Instruction *LHS;
3940 if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
3941 Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
3942 LHS->getType()->isPointerTy() &&
3943 isValidAssumeForContext(II, LHS, &DT)) {
3944 MDNode *MD = MDNode::get(II->getContext(), None);
3945 LHS->setMetadata(LLVMContext::MD_nonnull, MD);
3946 return eraseInstFromFunction(*II);
3948 // TODO: apply nonnull return attributes to calls and invokes
3949 // TODO: apply range metadata for range check patterns?
3952 // If there is a dominating assume with the same condition as this one,
3953 // then this one is redundant, and should be removed.
3954 KnownBits Known(1);
3955 computeKnownBits(IIOperand, Known, 0, II);
3956 if (Known.isAllOnes())
3957 return eraseInstFromFunction(*II);
3959 // Update the cache of affected values for this assumption (we might be
3960 // here because we just simplified the condition).
3961 AC.updateAffectedValues(II);
3964 case Intrinsic::experimental_gc_relocate: {
3965 // Translate facts known about a pointer before relocating into
3966 // facts about the relocate value, while being careful to
3967 // preserve relocation semantics.
3968 Value *DerivedPtr = cast<GCRelocateInst>(II)->getDerivedPtr();
3970 // Remove the relocation if unused, note that this check is required
3971 // to prevent the cases below from looping forever.
3972 if (II->use_empty())
3973 return eraseInstFromFunction(*II);
3975 // Undef is undef, even after relocation.
3976 // TODO: provide a hook for this in GCStrategy. This is clearly legal for
3977 // most practical collectors, but there was discussion in the review thread
3978 // about whether it was legal for all possible collectors.
3979 if (isa<UndefValue>(DerivedPtr))
3980 // Use undef of gc_relocate's type to replace it.
3981 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3983 if (auto *PT = dyn_cast<PointerType>(II->getType())) {
3984 // The relocation of null will be null for most any collector.
3985 // TODO: provide a hook for this in GCStrategy. There might be some
3986 // weird collector this property does not hold for.
3987 if (isa<ConstantPointerNull>(DerivedPtr))
3988 // Use null-pointer of gc_relocate's type to replace it.
3989 return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
3991 // isKnownNonNull -> nonnull attribute
3992 if (!II->hasRetAttr(Attribute::NonNull) &&
3993 isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT)) {
3994 II->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
3999 // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
4000 // Canonicalize on the type from the uses to the defs
4002 // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
4006 case Intrinsic::experimental_guard: {
4007 // Is this guard followed by another guard? We scan forward over a small
4008 // fixed window of instructions to handle common cases with conditions
4009 // computed between guards.
4010 Instruction *NextInst = II->getNextNode();
4011 for (unsigned i = 0; i < GuardWideningWindow; i++) {
4012 // Note: Using context-free form to avoid compile time blow up
4013 if (!isSafeToSpeculativelyExecute(NextInst))
4014 break;
4015 NextInst = NextInst->getNextNode();
4017 Value *NextCond = nullptr;
4018 if (match(NextInst,
4019 m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
4020 Value *CurrCond = II->getArgOperand(0);
4022 // Remove a guard that is immediately preceded by an identical guard.
4023 if (CurrCond == NextCond)
4024 return eraseInstFromFunction(*NextInst);
4026 // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
4027 Instruction* MoveI = II->getNextNode();
4028 while (MoveI != NextInst) {
4029 auto *Temp = MoveI;
4030 MoveI = MoveI->getNextNode();
4031 Temp->moveBefore(II);
4033 II->setArgOperand(0, Builder.CreateAnd(CurrCond, NextCond));
4034 return eraseInstFromFunction(*NextInst);
4039 return visitCallSite(II);
4042 // Fence instruction simplification
4043 Instruction *InstCombiner::visitFenceInst(FenceInst &FI) {
4044 // Remove identical consecutive fences.
4045 Instruction *Next = FI.getNextNonDebugInstruction();
4046 if (auto *NFI = dyn_cast<FenceInst>(Next))
4047 if (FI.isIdenticalTo(NFI))
4048 return eraseInstFromFunction(FI);
4049 return nullptr;
4050 }
4052 // InvokeInst simplification
4053 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
4054 return visitCallSite(&II);
4057 /// If this cast does not affect the value passed through the varargs area, we
4058 /// can eliminate the use of the cast.
4059 static bool isSafeToEliminateVarargsCast(const CallSite CS,
4060 const DataLayout &DL,
4061 const CastInst *const CI,
4062 const int ix) {
4063 if (!CI->isLosslessCast())
4064 return false;
4066 // If this is a GC intrinsic, avoid munging types. We need types for
4067 // statepoint reconstruction in SelectionDAG.
4068 // TODO: This is probably something which should be expanded to all
4069 // intrinsics since the entire point of intrinsics is that
4070 // they are understandable by the optimizer.
4071 if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
4074 // The size of ByVal or InAlloca arguments is derived from the type, so we
4075 // can't change to a type with a different size. If the size were
4076 // passed explicitly we could avoid this check.
4077 if (!CS.isByValOrInAllocaArgument(ix))
4078 return false;
4080 Type* SrcTy =
4081 cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
4082 Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
4083 if (!SrcTy->isSized() || !DstTy->isSized())
4084 return false;
4085 if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
4086 return false;
4087 return true;
4088 }
4090 Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
4091 if (!CI->getCalledFunction()) return nullptr;
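// Route replacements and erasures requested by the library-call simplifier
// back through InstCombine so the worklist stays consistent.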
4093 auto InstCombineRAUW = [this](Instruction *From, Value *With) {
4094 replaceInstUsesWith(*From, With);
4095 };
4096 auto InstCombineErase = [this](Instruction *I) {
4097 eraseInstFromFunction(*I);
4098 };
4099 LibCallSimplifier Simplifier(DL, &TLI, ORE, InstCombineRAUW,
4100 InstCombineErase);
4101 if (Value *With = Simplifier.optimizeCall(CI)) {
4102 ++NumSimplified;
4103 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
4104 }
4106 return nullptr;
4107 }
4109 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
4110 // Strip off at most one level of pointer casts, looking for an alloca. This
4111 // is good enough in practice and simpler than handling any number of casts.
4112 Value *Underlying = TrampMem->stripPointerCasts();
4113 if (Underlying != TrampMem &&
4114 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
4116 if (!isa<AllocaInst>(Underlying))
4119 IntrinsicInst *InitTrampoline = nullptr;
4120 for (User *U : TrampMem->users()) {
4121 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4122 if (!II)
4123 return nullptr;
4124 if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
4125 if (InitTrampoline)
4126 // More than one init_trampoline writes to this value. Give up.
4127 return nullptr;
4128 InitTrampoline = II;
4129 continue;
4130 }
4131 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
4132 // Allow any number of calls to adjust.trampoline.
4133 continue;
4134 return nullptr;
4135 }
4137 // No call to init.trampoline found.
4138 if (!InitTrampoline)
4139 return nullptr;
4141 // Check that the alloca is being used in the expected way.
4142 if (InitTrampoline->getOperand(0) != TrampMem)
4143 return nullptr;
4145 return InitTrampoline;
4148 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
4149 Value *TrampMem) {
4150 // Visit all the previous instructions in the basic block, and try to find a
4151 // init.trampoline which has a direct path to the adjust.trampoline.
4152 for (BasicBlock::iterator I = AdjustTramp->getIterator(),
4153 E = AdjustTramp->getParent()->begin();
4154 I != E;) {
4155 Instruction *Inst = &*--I;
4156 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
4157 if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
4158 II->getOperand(0) == TrampMem)
4159 return II;
4160 if (Inst->mayWriteToMemory())
4161 return nullptr;
4162 }
4164 return nullptr;
4165 }
4166 // Given a call to llvm.adjust.trampoline, find and return the corresponding
4167 // call to llvm.init.trampoline if the call to the trampoline can be optimized
4168 // to a direct call to a function. Otherwise return NULL.
4169 static IntrinsicInst *findInitTrampoline(Value *Callee) {
4170 Callee = Callee->stripPointerCasts();
4171 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
4172 if (!AdjustTramp ||
4173 AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
4174 return nullptr;
4176 Value *TrampMem = AdjustTramp->getOperand(0);
4178 if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
4179 return IT;
4180 if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
4181 return IT;
4182 return nullptr;
4183 }
4185 /// Improvements for call and invoke instructions.
4186 Instruction *InstCombiner::visitCallSite(CallSite CS) {
4187 if (isAllocLikeFn(CS.getInstruction(), &TLI))
4188 return visitAllocSite(*CS.getInstruction());
4190 bool Changed = false;
4192 // Mark any parameters that are known to be non-null with the nonnull
4193 // attribute. This is helpful for inlining calls to functions with null
4194 // checks on their arguments.
4195 SmallVector<unsigned, 4> ArgNos;
4196 unsigned ArgNo = 0;
4198 for (Value *V : CS.args()) {
4199 if (V->getType()->isPointerTy() &&
4200 !CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
4201 isKnownNonZero(V, DL, 0, &AC, CS.getInstruction(), &DT))
4202 ArgNos.push_back(ArgNo);
4203 ArgNo++;
4204 }
4206 assert(ArgNo == CS.arg_size() && "sanity check");
4208 if (!ArgNos.empty()) {
4209 AttributeList AS = CS.getAttributes();
4210 LLVMContext &Ctx = CS.getInstruction()->getContext();
4211 AS = AS.addParamAttribute(Ctx, ArgNos,
4212 Attribute::get(Ctx, Attribute::NonNull));
4213 CS.setAttributes(AS);
4217 // If the callee is a pointer to a function, attempt to move any casts to the
4218 // arguments of the call/invoke.
4219 Value *Callee = CS.getCalledValue();
4220 if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
4221 return nullptr;
4223 if (Function *CalleeF = dyn_cast<Function>(Callee)) {
4224 // Remove the convergent attr on calls when the callee is not convergent.
4225 if (CS.isConvergent() && !CalleeF->isConvergent() &&
4226 !CalleeF->isIntrinsic()) {
4227 LLVM_DEBUG(dbgs() << "Removing convergent attr from instr "
4228 << CS.getInstruction() << "\n");
4229 CS.setNotConvergent();
4230 return CS.getInstruction();
4233 // If the call and callee calling conventions don't match, this call must
4234 // be unreachable, as the call is undefined.
4235 if (CalleeF->getCallingConv() != CS.getCallingConv() &&
4236 // Only do this for calls to a function with a body. A prototype may
4237 // not actually end up matching the implementation's calling conv for a
4238 // variety of reasons (e.g. it may be written in assembly).
4239 !CalleeF->isDeclaration()) {
4240 Instruction *OldCall = CS.getInstruction();
4241 new StoreInst(ConstantInt::getTrue(Callee->getContext()),
4242 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
4243 OldCall);
4244 // If OldCall does not return void then replaceAllUsesWith undef.
4245 // This allows ValueHandlers and custom metadata to adjust itself.
4246 if (!OldCall->getType()->isVoidTy())
4247 replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
4248 if (isa<CallInst>(OldCall))
4249 return eraseInstFromFunction(*OldCall);
4251 // We cannot remove an invoke, because it would change the CFG, just
4252 // change the callee to a null pointer.
4253 cast<InvokeInst>(OldCall)->setCalledFunction(
4254 Constant::getNullValue(CalleeF->getType()));
4259 if ((isa<ConstantPointerNull>(Callee) &&
4260 !NullPointerIsDefined(CS.getInstruction()->getFunction())) ||
4261 isa<UndefValue>(Callee)) {
4262 // If CS does not return void then replaceAllUsesWith undef.
4263 // This allows ValueHandlers and custom metadata to adjust itself.
4264 if (!CS.getInstruction()->getType()->isVoidTy())
4265 replaceInstUsesWith(*CS.getInstruction(),
4266 UndefValue::get(CS.getInstruction()->getType()));
4268 if (isa<InvokeInst>(CS.getInstruction())) {
4269 // Can't remove an invoke because we cannot change the CFG.
4270 return nullptr;
4271 }
4273 // This instruction is not reachable, just remove it. We insert a store to
4274 // undef so that we know that this code is not reachable, despite the fact
4275 // that we can't modify the CFG here.
4276 new StoreInst(ConstantInt::getTrue(Callee->getContext()),
4277 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
4278 CS.getInstruction());
4280 return eraseInstFromFunction(*CS.getInstruction());
4283 if (IntrinsicInst *II = findInitTrampoline(Callee))
4284 return transformCallThroughTrampoline(CS, II);
4286 PointerType *PTy = cast<PointerType>(Callee->getType());
4287 FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
4288 if (FTy->isVarArg()) {
4289 int ix = FTy->getNumParams();
4290 // See if we can optimize any arguments passed through the varargs area of
4291 // the call.
4292 for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
4293 E = CS.arg_end(); I != E; ++I, ++ix) {
4294 CastInst *CI = dyn_cast<CastInst>(*I);
4295 if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
4296 *I = CI->getOperand(0);
4302 if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
4303 // Inline asm calls cannot throw - mark them 'nounwind'.
4304 CS.setDoesNotThrow();
4308 // Try to optimize the call if possible, we require DataLayout for most of
4309 // this. None of these calls are seen as possibly dead so go ahead and
4310 // delete the instruction now.
4311 if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
4312 Instruction *I = tryOptimizeCall(CI);
4313 // If we changed something return the result, etc. Otherwise let
4314 // the fallthrough check.
4315 if (I) return eraseInstFromFunction(*I);
4318 return Changed ? CS.getInstruction() : nullptr;
4321 /// If the callee is a constexpr cast of a function, attempt to move the cast to
4322 /// the arguments of the call/invoke.
4323 bool InstCombiner::transformConstExprCastCall(CallSite CS) {
4324 auto *Callee = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
4325 if (!Callee)
4326 return false;
4328 // If this is a call to a thunk function, don't remove the cast. Thunks are
4329 // used to transparently forward all incoming parameters and outgoing return
4330 // values, so it's important to leave the cast in place.
4331 if (Callee->hasFnAttribute("thunk"))
4332 return false;
4334 // If this is a musttail call, the callee's prototype must match the caller's
4335 // prototype with the exception of pointee types. The code below doesn't
4336 // implement that, so we can't do this transform.
4337 // TODO: Do the transform if it only requires adding pointer casts.
4338 if (CS.isMustTailCall())
4339 return false;
4341 Instruction *Caller = CS.getInstruction();
4342 const AttributeList &CallerPAL = CS.getAttributes();
4344 // Okay, this is a cast from a function to a different type. Unless doing so
4345 // would cause a type conversion of one of our arguments, change this call to
4346 // be a direct call with arguments casted to the appropriate types.
4347 FunctionType *FT = Callee->getFunctionType();
4348 Type *OldRetTy = Caller->getType();
4349 Type *NewRetTy = FT->getReturnType();
4351 // Check to see if we are changing the return type...
4352 if (OldRetTy != NewRetTy) {
4354 if (NewRetTy->isStructTy())
4355 return false; // TODO: Handle multiple return values.
4357 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
4358 if (Callee->isDeclaration())
4359 return false; // Cannot transform this return value.
4361 if (!Caller->use_empty() &&
4362 // void -> non-void is handled specially
4363 !NewRetTy->isVoidTy())
4364 return false; // Cannot transform this return value.
4367 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
4368 AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
4369 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
4370 return false; // Attribute not compatible with transformed value.
4373 // If the callsite is an invoke instruction, and the return value is used by
4374 // a PHI node in a successor, we cannot change the return type of the call
4375 // because there is no place to put the cast instruction (without breaking
4376 // the critical edge). Bail out in this case.
4377 if (!Caller->use_empty())
4378 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
4379 for (User *U : II->users())
4380 if (PHINode *PN = dyn_cast<PHINode>(U))
4381 if (PN->getParent() == II->getNormalDest() ||
4382 PN->getParent() == II->getUnwindDest())
4383 return false;
4384 }
4386 unsigned NumActualArgs = CS.arg_size();
4387 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
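// Only arguments present in both the call and the callee's prototype are
// type-checked below; any extra caller arguments are dealt with afterwards.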
4389 // Prevent us turning:
4390 // declare void @takes_i32_inalloca(i32* inalloca)
4391 // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
4392 //
4393 // into:
4394 // call void @takes_i32_inalloca(i32* null)
4396 // Similarly, avoid folding away bitcasts of byval calls.
4397 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
4398 Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
4399 return false;
4401 CallSite::arg_iterator AI = CS.arg_begin();
4402 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
4403 Type *ParamTy = FT->getParamType(i);
4404 Type *ActTy = (*AI)->getType();
4406 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
4407 return false; // Cannot transform this parameter value.
4409 if (AttrBuilder(CallerPAL.getParamAttributes(i))
4410 .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
4411 return false; // Attribute not compatible with transformed value.
4413 if (CS.isInAllocaArgument(i))
4414 return false; // Cannot transform to and from inalloca.
4416 // If the parameter is passed as a byval argument, then we have to have a
4417 // sized type and the sized type has to have the same size as the old type.
4418 if (ParamTy != ActTy && CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
4419 PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
4420 if (!ParamPTy || !ParamPTy->getElementType()->isSized())
4421 return false;
4423 Type *CurElTy = ActTy->getPointerElementType();
4424 if (DL.getTypeAllocSize(CurElTy) !=
4425 DL.getTypeAllocSize(ParamPTy->getElementType()))
4426 return false;
4427 }
4428 }
4430 if (Callee->isDeclaration()) {
4431 // Do not delete arguments unless we have a function body.
4432 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
4433 return false;
4435 // If the callee is just a declaration, don't change the varargsness of the
4436 // call. We don't want to introduce a varargs call where one doesn't
4437 // already exist.
4438 PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
4439 if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
4440 return false;
4442 // If both the callee and the cast type are varargs, we still have to make
4443 // sure the number of fixed parameters are the same or we have the same
4444 // ABI issues as if we introduce a varargs call.
4445 if (FT->isVarArg() &&
4446 cast<FunctionType>(APTy->getElementType())->isVarArg() &&
4447 FT->getNumParams() !=
4448 cast<FunctionType>(APTy->getElementType())->getNumParams())
4449 return false;
4450 }
4452 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
4453 !CallerPAL.isEmpty()) {
4454 // In this case we have more arguments than the new function type, but we
4455 // won't be dropping them. Check that these extra arguments have attributes
4456 // that are compatible with being a vararg call argument.
4457 unsigned SRetIdx;
4458 if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
4459 SRetIdx > FT->getNumParams())
4460 return false;
4461 }
4463 // Okay, we decided that this is a safe thing to do: go ahead and start
4464 // inserting cast instructions as necessary.
4465 SmallVector<Value *, 8> Args;
4466 SmallVector<AttributeSet, 8> ArgAttrs;
4467 Args.reserve(NumActualArgs);
4468 ArgAttrs.reserve(NumActualArgs);
4470 // Get any return attributes.
4471 AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
4473 // If the return value is not being used, the type may not be compatible
4474 // with the existing attributes. Wipe out any problematic attributes.
4475 RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
4477 AI = CS.arg_begin();
4478 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
4479 Type *ParamTy = FT->getParamType(i);
4481 Value *NewArg = *AI;
4482 if ((*AI)->getType() != ParamTy)
4483 NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
4484 Args.push_back(NewArg);
4486 // Add any parameter attributes.
4487 ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
4490 // If the function takes more arguments than the call was taking, add them
4491 // now.
4492 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
4493 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
4494 ArgAttrs.push_back(AttributeSet());
4497 // If we are removing arguments to the function, emit an obnoxious warning.
4498 if (FT->getNumParams() < NumActualArgs) {
4499 // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
4500 if (FT->isVarArg()) {
4501 // Add all of the arguments in their promoted form to the arg list.
4502 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
4503 Type *PTy = getPromotedType((*AI)->getType());
4504 Value *NewArg = *AI;
4505 if (PTy != (*AI)->getType()) {
4506 // Must promote to pass through va_arg area!
4507 Instruction::CastOps opcode =
4508 CastInst::getCastOpcode(*AI, false, PTy, false);
4509 NewArg = Builder.CreateCast(opcode, *AI, PTy);
4511 Args.push_back(NewArg);
4513 // Add any parameter attributes.
4514 ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
4519 AttributeSet FnAttrs = CallerPAL.getFnAttributes();
4521 if (NewRetTy->isVoidTy())
4522 Caller->setName(""); // Void type should not have a name.
4524 assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
4525 "missing argument attributes");
4526 LLVMContext &Ctx = Callee->getContext();
4527 AttributeList NewCallerPAL = AttributeList::get(
4528 Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
4530 SmallVector<OperandBundleDef, 1> OpBundles;
4531 CS.getOperandBundlesAsDefs(OpBundles);
4533 CallSite NewCS;
4534 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4535 NewCS = Builder.CreateInvoke(Callee, II->getNormalDest(),
4536 II->getUnwindDest(), Args, OpBundles);
4537 } else {
4538 NewCS = Builder.CreateCall(Callee, Args, OpBundles);
4539 cast<CallInst>(NewCS.getInstruction())
4540 ->setTailCallKind(cast<CallInst>(Caller)->getTailCallKind());
4542 NewCS->takeName(Caller);
4543 NewCS.setCallingConv(CS.getCallingConv());
4544 NewCS.setAttributes(NewCallerPAL);
4546 // Preserve the weight metadata for the new call instruction. The metadata
4547 // is used by SamplePGO to check callsite's hotness.
4548 uint64_t W;
4549 if (Caller->extractProfTotalWeight(W))
4550 NewCS->setProfWeight(W);
4552 // Insert a cast of the return type as necessary.
4553 Instruction *NC = NewCS.getInstruction();
4554 Value *NV = NC;
4555 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
4556 if (!NV->getType()->isVoidTy()) {
4557 NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
4558 NC->setDebugLoc(Caller->getDebugLoc());
4560 // If this is an invoke instruction, we should insert it after the first
4561 // non-phi instruction in the normal successor block.
4562 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4563 BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
4564 InsertNewInstBefore(NC, *I);
4565 } else {
4566 // Otherwise, it's a call, just insert cast right after the call.
4567 InsertNewInstBefore(NC, *Caller);
4569 Worklist.AddUsersToWorkList(*Caller);
4570 } else {
4571 NV = UndefValue::get(Caller->getType());
4575 if (!Caller->use_empty())
4576 replaceInstUsesWith(*Caller, NV);
4577 else if (Caller->hasValueHandle()) {
4578 if (OldRetTy == NV->getType())
4579 ValueHandleBase::ValueIsRAUWd(Caller, NV);
4580 else
4581 // We cannot call ValueIsRAUWd with a different type, and the
4582 // actual tracked value will disappear.
4583 ValueHandleBase::ValueIsDeleted(Caller);
4586 eraseInstFromFunction(*Caller);
4587 return true;
4588 }
4590 /// Turn a call to a function created by init_trampoline / adjust_trampoline
4591 /// intrinsic pair into a direct call to the underlying function.
4592 Instruction *
4593 InstCombiner::transformCallThroughTrampoline(CallSite CS,
4594 IntrinsicInst *Tramp) {
4595 Value *Callee = CS.getCalledValue();
4596 PointerType *PTy = cast<PointerType>(Callee->getType());
4597 FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
4598 AttributeList Attrs = CS.getAttributes();
4600 // If the call already has the 'nest' attribute somewhere then give up -
4601 // otherwise 'nest' would occur twice after splicing in the chain.
4602 if (Attrs.hasAttrSomewhere(Attribute::Nest))
4603 return nullptr;
4605 assert(Tramp &&
4606 "transformCallThroughTrampoline called with incorrect CallSite.");
4608 Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
4609 FunctionType *NestFTy = cast<FunctionType>(NestF->getValueType());
4611 AttributeList NestAttrs = NestF->getAttributes();
4612 if (!NestAttrs.isEmpty()) {
4613 unsigned NestArgNo = 0;
4614 Type *NestTy = nullptr;
4615 AttributeSet NestAttr;
4617 // Look for a parameter marked with the 'nest' attribute.
4618 for (FunctionType::param_iterator I = NestFTy->param_begin(),
4619 E = NestFTy->param_end();
4620 I != E; ++NestArgNo, ++I) {
4621 AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
4622 if (AS.hasAttribute(Attribute::Nest)) {
4623 // Record the parameter type and any other attributes.
4624 NestTy = *I;
4625 NestAttr = AS;
4626 break;
4627 }
4628 }
4630 if (NestTy) {
4631 Instruction *Caller = CS.getInstruction();
4632 std::vector<Value*> NewArgs;
4633 std::vector<AttributeSet> NewArgAttrs;
4634 NewArgs.reserve(CS.arg_size() + 1);
4635 NewArgAttrs.reserve(CS.arg_size());
4637 // Insert the nest argument into the call argument list, which may
4638 // mean appending it. Likewise for attributes.
4640 {
4641 unsigned ArgNo = 0;
4642 CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
4643 do {
4644 if (ArgNo == NestArgNo) {
4645 // Add the chain argument and attributes.
4646 Value *NestVal = Tramp->getArgOperand(2);
4647 if (NestVal->getType() != NestTy)
4648 NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
4649 NewArgs.push_back(NestVal);
4650 NewArgAttrs.push_back(NestAttr);
4651 }
4653 if (I == E)
4654 break;
4656 // Add the original argument and attributes.
4657 NewArgs.push_back(*I);
4658 NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
4660 ++ArgNo;
4661 ++I;
4662 } while (true);
4663 }
4665 // The trampoline may have been bitcast to a bogus type (FTy).
4666 // Handle this by synthesizing a new function type, equal to FTy
4667 // with the chain parameter inserted.
4669 std::vector<Type*> NewTypes;
4670 NewTypes.reserve(FTy->getNumParams()+1);
4672 // Insert the chain's type into the list of parameter types, which may
4673 // mean appending it.
4674 {
4675 unsigned ArgNo = 0;
4676 FunctionType::param_iterator I = FTy->param_begin(),
4677 E = FTy->param_end();
4679 do {
4680 if (ArgNo == NestArgNo)
4681 // Add the chain's type.
4682 NewTypes.push_back(NestTy);
4684 if (I == E)
4685 break;
4687 // Add the original type.
4688 NewTypes.push_back(*I);
4690 ++ArgNo;
4691 ++I;
4692 } while (true);
4693 }
4695 // Replace the trampoline call with a direct call. Let the generic
4696 // code sort out any function type mismatches.
4697 FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
4698 FTy->isVarArg());
4699 Constant *NewCallee =
4700 NestF->getType() == PointerType::getUnqual(NewFTy) ?
4701 NestF : ConstantExpr::getBitCast(NestF,
4702 PointerType::getUnqual(NewFTy));
4703 AttributeList NewPAL =
4704 AttributeList::get(FTy->getContext(), Attrs.getFnAttributes(),
4705 Attrs.getRetAttributes(), NewArgAttrs);
4707 SmallVector<OperandBundleDef, 1> OpBundles;
4708 CS.getOperandBundlesAsDefs(OpBundles);
4710 Instruction *NewCaller;
4711 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4712 NewCaller = InvokeInst::Create(NewCallee,
4713 II->getNormalDest(), II->getUnwindDest(),
4714 NewArgs, OpBundles);
4715 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
4716 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
4717 } else {
4718 NewCaller = CallInst::Create(NewCallee, NewArgs, OpBundles);
4719 cast<CallInst>(NewCaller)->setTailCallKind(
4720 cast<CallInst>(Caller)->getTailCallKind());
4721 cast<CallInst>(NewCaller)->setCallingConv(
4722 cast<CallInst>(Caller)->getCallingConv());
4723 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
4725 NewCaller->setDebugLoc(Caller->getDebugLoc());
4727 return NewCaller;
4728 }
4729 }
4731 // Replace the trampoline call with a direct call. Since there is no 'nest'
4732 // parameter, there is no need to adjust the argument list. Let the generic
4733 // code sort out any function type mismatches.
4734 Constant *NewCallee =
4735 NestF->getType() == PTy ? NestF :
4736 ConstantExpr::getBitCast(NestF, PTy);
4737 CS.setCalledFunction(NewCallee);
4738 return CS.getInstruction();
4739 }