//===- InstCombineVectorOps.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements instcombine for ExtractElement, InsertElement and
// ShuffleVector.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace PatternMatch;
STATISTIC(NumAggregateReconstructionsSimplified,
          "Number of aggregate reconstructions turned into reuse of the "
          "original aggregate");
/// Return true if the value is cheaper to scalarize than it is to leave as a
/// vector operation. If the extract index \p EI is a constant integer then
/// some operations may be cheap to scalarize.
///
/// FIXME: It's possible to create more instructions than previously existed.
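///
/// For example (illustrative IR), with a constant extract index, a one-use
/// binop such as:
///   %b = add <4 x i32> %x, %y
///   %e = extractelement <4 x i32> %b, i64 1
/// may be cheaper as two lane-1 extracts feeding a scalar add, assuming %b
/// has no other uses.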
static bool cheapToScalarize(Value *V, Value *EI) {
  ConstantInt *CEI = dyn_cast<ConstantInt>(EI);

  // If we can pick a scalar constant value out of a vector, that is free.
  if (auto *C = dyn_cast<Constant>(V))
    return CEI || C->getSplatValue();

  if (CEI && match(V, m_Intrinsic<Intrinsic::experimental_stepvector>())) {
    ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
    // The index needs to be lower than the minimum size of the vector, because
    // for a scalable vector the actual size is only known at run time.
    return CEI->getValue().ult(EC.getKnownMinValue());
  }

  // An insertelement to the same constant index as our extract will simplify
  // to the scalar inserted element. An insertelement to a different constant
  // index is irrelevant to our extract.
  if (match(V, m_InsertElt(m_Value(), m_Value(), m_ConstantInt())))
    return CEI;

  if (match(V, m_OneUse(m_Load(m_Value()))))
    return true;

  if (match(V, m_OneUse(m_UnOp())))
    return true;

  Value *V0, *V1;
  if (match(V, m_OneUse(m_BinOp(m_Value(V0), m_Value(V1)))))
    if (cheapToScalarize(V0, EI) || cheapToScalarize(V1, EI))
      return true;

  CmpInst::Predicate UnusedPred;
  if (match(V, m_OneUse(m_Cmp(UnusedPred, m_Value(V0), m_Value(V1)))))
    if (cheapToScalarize(V0, EI) || cheapToScalarize(V1, EI))
      return true;

  return false;
}
// If we have a PHI node with a vector type that is only used to feed
// itself and be an operand of extractelement at a constant location,
// try to replace the PHI of the vector type with a PHI of a scalar type.
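// For example (illustrative IR), a loop-carried vector PHI whose only other
// use is a binop feeding back into it:
//   %v = phi <4 x float> [ %init, %entry ], [ %v.next, %loop ]
//   %v.next = fadd <4 x float> %v, %step
//   %s = extractelement <4 x float> %v, i64 0
// can be rewritten as a scalar PHI over lane 0.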
Instruction *InstCombinerImpl::scalarizePHI(ExtractElementInst &EI,
                                            PHINode *PN) {
  SmallVector<Instruction *, 2> Extracts;
  // The users we want the PHI to have are:
  // 1) The EI ExtractElement (we already know this)
  // 2) Possibly more ExtractElements with the same index.
  // 3) Another operand, which will feed back into the PHI.
  Instruction *PHIUser = nullptr;
  for (auto U : PN->users()) {
    if (ExtractElementInst *EU = dyn_cast<ExtractElementInst>(U)) {
      if (EI.getIndexOperand() == EU->getIndexOperand())
        Extracts.push_back(EU);
      else
        return nullptr;
    } else if (!PHIUser) {
      PHIUser = cast<Instruction>(U);
    } else {
      return nullptr;
    }
  }

  if (!PHIUser)
    return nullptr;

  // Verify that this PHI user has one use, which is the PHI itself,
  // and that it is a binary operation which is cheap to scalarize.
  // Otherwise return nullptr.
  if (!PHIUser->hasOneUse() || !(PHIUser->user_back() == PN) ||
      !(isa<BinaryOperator>(PHIUser)) ||
      !cheapToScalarize(PHIUser, EI.getIndexOperand()))
    return nullptr;

  // Create a scalar PHI node that will replace the vector PHI node
  // just before the current PHI node.
  PHINode *scalarPHI = cast<PHINode>(InsertNewInstWith(
      PHINode::Create(EI.getType(), PN->getNumIncomingValues(), ""), *PN));
  // Scalarize each PHI operand.
  for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
    Value *PHIInVal = PN->getIncomingValue(i);
    BasicBlock *inBB = PN->getIncomingBlock(i);
    Value *Elt = EI.getIndexOperand();
    // If the operand is the PHI induction variable:
    if (PHIInVal == PHIUser) {
      // Scalarize the binary operation. Its first operand is the
      // scalar PHI, and the second operand is extracted from the other
      // vector operand.
      BinaryOperator *B0 = cast<BinaryOperator>(PHIUser);
      unsigned opId = (B0->getOperand(0) == PN) ? 1 : 0;
      Value *Op = InsertNewInstWith(
          ExtractElementInst::Create(B0->getOperand(opId), Elt,
                                     B0->getOperand(opId)->getName() + ".Elt"),
          *B0);
      Value *newPHIUser = InsertNewInstWith(
          BinaryOperator::CreateWithCopiedFlags(B0->getOpcode(),
                                                scalarPHI, Op, B0), *B0);
      scalarPHI->addIncoming(newPHIUser, inBB);
    } else {
      // Scalarize PHI input:
      Instruction *newEI = ExtractElementInst::Create(PHIInVal, Elt, "");
      // Insert the new instruction into the predecessor basic block.
      Instruction *pos = dyn_cast<Instruction>(PHIInVal);
      BasicBlock::iterator InsertPos;
      if (pos && !isa<PHINode>(pos)) {
        InsertPos = ++pos->getIterator();
      } else {
        InsertPos = inBB->getFirstInsertionPt();
      }

      InsertNewInstWith(newEI, *InsertPos);

      scalarPHI->addIncoming(newEI, inBB);
    }
  }

  for (auto E : Extracts)
    replaceInstUsesWith(*E, scalarPHI);

  return &EI;
}
Instruction *InstCombinerImpl::foldBitcastExtElt(ExtractElementInst &Ext) {
  Value *X;
  uint64_t ExtIndexC;
  if (!match(Ext.getVectorOperand(), m_BitCast(m_Value(X))) ||
      !match(Ext.getIndexOperand(), m_ConstantInt(ExtIndexC)))
    return nullptr;

  ElementCount NumElts =
      cast<VectorType>(Ext.getVectorOperandType())->getElementCount();
  Type *DestTy = Ext.getType();
  bool IsBigEndian = DL.isBigEndian();

  // If we are casting an integer to a vector and extracting a portion, that is
  // a shift-right and truncate.
  // TODO: Allow FP dest type by casting the trunc to FP?
  if (X->getType()->isIntegerTy() && DestTy->isIntegerTy() &&
      isDesirableIntType(X->getType()->getPrimitiveSizeInBits())) {
    assert(isa<FixedVectorType>(Ext.getVectorOperand()->getType()) &&
           "Expected fixed vector type for bitcast from scalar integer");

    // Big endian requires adjusting the extract index since MSB is at index 0.
    // LittleEndian: extelt (bitcast i32 X to v4i8), 0 -> trunc i32 X to i8
    // BigEndian: extelt (bitcast i32 X to v4i8), 0 -> trunc i32 (X >> 24) to i8
    if (IsBigEndian)
      ExtIndexC = NumElts.getKnownMinValue() - 1 - ExtIndexC;
    unsigned ShiftAmountC = ExtIndexC * DestTy->getPrimitiveSizeInBits();
    if (!ShiftAmountC || Ext.getVectorOperand()->hasOneUse()) {
      Value *Lshr = Builder.CreateLShr(X, ShiftAmountC, "extelt.offset");
      return new TruncInst(Lshr, DestTy);
    }
  }

  if (!X->getType()->isVectorTy())
    return nullptr;

  // If this extractelement is using a bitcast from a vector of the same number
  // of elements, see if we can find the source element from the source vector:
  // extelt (bitcast VecX), IndexC --> bitcast X[IndexC]
  auto *SrcTy = cast<VectorType>(X->getType());
  ElementCount NumSrcElts = SrcTy->getElementCount();
  if (NumSrcElts == NumElts)
    if (Value *Elt = findScalarElement(X, ExtIndexC))
      return new BitCastInst(Elt, DestTy);

  assert(NumSrcElts.isScalable() == NumElts.isScalable() &&
         "Src and Dst must be the same sort of vector type");

  // If the source elements are wider than the destination, try to shift and
  // truncate a subset of scalar bits of an insert op.
  if (NumSrcElts.getKnownMinValue() < NumElts.getKnownMinValue()) {
    Value *Scalar;
    uint64_t InsIndexC;
    if (!match(X, m_InsertElt(m_Value(), m_Value(Scalar),
                              m_ConstantInt(InsIndexC))))
      return nullptr;

    // The extract must be from the subset of vector elements that we inserted
    // into. Example: if we inserted element 1 of a <2 x i64> and we are
    // extracting an i16 (narrowing ratio = 4), then this extract must be from 1
    // of elements 4-7 of the bitcasted vector.
    unsigned NarrowingRatio =
        NumElts.getKnownMinValue() / NumSrcElts.getKnownMinValue();
    if (ExtIndexC / NarrowingRatio != InsIndexC)
      return nullptr;

    // We are extracting part of the original scalar. How that scalar is
    // inserted into the vector depends on the endian-ness. Example:
    //              Vector Byte Elt Index:    0  1  2  3  4  5  6  7
    //                                       +--+--+--+--+--+--+--+--+
    // inselt <2 x i32> V, <i32> S, 1:       |V0|V1|V2|V3|S0|S1|S2|S3|
    // extelt <4 x i16> V', 3:               |                 |S2|S3|
    //                                       +--+--+--+--+--+--+--+--+
    // If this is little-endian, S2|S3 are the MSB of the 32-bit 'S' value.
    // If this is big-endian, S2|S3 are the LSB of the 32-bit 'S' value.
    // In this example, we must right-shift little-endian. Big-endian is just a
    // truncate.
    unsigned Chunk = ExtIndexC % NarrowingRatio;
    if (IsBigEndian)
      Chunk = NarrowingRatio - 1 - Chunk;

    // Bail out if this is an FP vector to FP vector sequence. That would take
    // more instructions than we started with unless there is no shift, and it
    // may not be handled as well in the backend.
    bool NeedSrcBitcast = SrcTy->getScalarType()->isFloatingPointTy();
    bool NeedDestBitcast = DestTy->isFloatingPointTy();
    if (NeedSrcBitcast && NeedDestBitcast)
      return nullptr;

    unsigned SrcWidth = SrcTy->getScalarSizeInBits();
    unsigned DestWidth = DestTy->getPrimitiveSizeInBits();
    unsigned ShAmt = Chunk * DestWidth;

    // TODO: This limitation is more strict than necessary. We could sum the
    // number of new instructions and subtract the number eliminated to know if
    // we end up with fewer instructions than we started with.
    if (!X->hasOneUse() || !Ext.getVectorOperand()->hasOneUse())
      if (NeedSrcBitcast || NeedDestBitcast)
        return nullptr;

    if (NeedSrcBitcast) {
      Type *SrcIntTy = IntegerType::getIntNTy(Scalar->getContext(), SrcWidth);
      Scalar = Builder.CreateBitCast(Scalar, SrcIntTy);
    }

    if (ShAmt) {
      // Bail out if we could end up with more instructions than we started
      // with.
      if (!Ext.getVectorOperand()->hasOneUse())
        return nullptr;
      Scalar = Builder.CreateLShr(Scalar, ShAmt);
    }

    if (NeedDestBitcast) {
      Type *DestIntTy = IntegerType::getIntNTy(Scalar->getContext(), DestWidth);
      return new BitCastInst(Builder.CreateTrunc(Scalar, DestIntTy), DestTy);
    }
    return new TruncInst(Scalar, DestTy);
  }

  return nullptr;
}
/// Find elements of V demanded by UserInstr.
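/// For example (illustrative IR), 'extractelement <4 x i32> %v, i64 2'
/// demands only element 2 of %v, i.e. the bit mask 0b0100.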
static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) {
  unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();

  // Conservatively assume that all elements are needed.
  APInt UsedElts(APInt::getAllOnes(VWidth));

  switch (UserInstr->getOpcode()) {
  case Instruction::ExtractElement: {
    ExtractElementInst *EEI = cast<ExtractElementInst>(UserInstr);
    assert(EEI->getVectorOperand() == V);
    ConstantInt *EEIIndexC = dyn_cast<ConstantInt>(EEI->getIndexOperand());
    if (EEIIndexC && EEIIndexC->getValue().ult(VWidth)) {
      UsedElts = APInt::getOneBitSet(VWidth, EEIIndexC->getZExtValue());
    }
    break;
  }
  case Instruction::ShuffleVector: {
    ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(UserInstr);
    unsigned MaskNumElts =
        cast<FixedVectorType>(UserInstr->getType())->getNumElements();

    UsedElts = APInt(VWidth, 0);
    for (unsigned i = 0; i < MaskNumElts; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u || MaskVal >= 2 * VWidth)
        continue;
      if (Shuffle->getOperand(0) == V && (MaskVal < VWidth))
        UsedElts.setBit(MaskVal);
      if (Shuffle->getOperand(1) == V &&
          ((MaskVal >= VWidth) && (MaskVal < 2 * VWidth)))
        UsedElts.setBit(MaskVal - VWidth);
    }
    break;
  }
  default:
    break;
  }
  return UsedElts;
}
/// Find the union of elements of V demanded by all its users.
/// If it is known by querying findDemandedEltsBySingleUser that
/// no user demands an element of V, then the corresponding bit
/// remains unset in the returned value.
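/// For example (illustrative IR), if <4 x i32> %v is used only by
/// 'extractelement %v, i64 0' and 'extractelement %v, i64 3', this
/// returns the bit mask 0b1001.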
static APInt findDemandedEltsByAllUsers(Value *V) {
  unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();

  APInt UnionUsedElts(VWidth, 0);
  for (const Use &U : V->uses()) {
    if (Instruction *I = dyn_cast<Instruction>(U.getUser())) {
      UnionUsedElts |= findDemandedEltsBySingleUser(V, I);
    } else {
      UnionUsedElts = APInt::getAllOnes(VWidth);
      break;
    }

    if (UnionUsedElts.isAllOnes())
      break;
  }

  return UnionUsedElts;
}
/// Given a constant index for an extractelement or insertelement instruction,
/// return it with the canonical type if it isn't already canonical. We
/// arbitrarily pick 64 bit as our canonical type. The actual bitwidth doesn't
/// matter, we just want a consistent type to simplify CSE.
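/// For example (illustrative IR):
///   extractelement <4 x i32> %v, i32 1 --> extractelement <4 x i32> %v, i64 1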
ConstantInt *getPreferredVectorIndex(ConstantInt *IndexC) {
  const unsigned IndexBW = IndexC->getType()->getBitWidth();
  if (IndexBW == 64 || IndexC->getValue().getActiveBits() > 64)
    return nullptr;
  return ConstantInt::get(IndexC->getContext(),
                          IndexC->getValue().zextOrTrunc(64));
}
Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
  Value *SrcVec = EI.getVectorOperand();
  Value *Index = EI.getIndexOperand();
  if (Value *V = SimplifyExtractElementInst(SrcVec, Index,
                                            SQ.getWithInstruction(&EI)))
    return replaceInstUsesWith(EI, V);

  // If extracting a specified index from the vector, see if we can recursively
  // find a previously computed scalar that was inserted into the vector.
  auto *IndexC = dyn_cast<ConstantInt>(Index);
  if (IndexC) {
    // Canonicalize type of constant indices to i64 to simplify CSE.
    if (auto *NewIdx = getPreferredVectorIndex(IndexC))
      return replaceOperand(EI, 1, NewIdx);

    ElementCount EC = EI.getVectorOperandType()->getElementCount();
    unsigned NumElts = EC.getKnownMinValue();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(SrcVec)) {
      Intrinsic::ID IID = II->getIntrinsicID();
      // The index needs to be lower than the minimum size of the vector,
      // because for a scalable vector the actual size is only known at run
      // time.
      if (IID == Intrinsic::experimental_stepvector &&
          IndexC->getValue().ult(NumElts)) {
        Type *Ty = EI.getType();
        unsigned BitWidth = Ty->getIntegerBitWidth();
        Value *Idx;
        // Return the index when its value does not exceed the allowed limit
        // for the element type of the vector, otherwise return undefined.
        if (IndexC->getValue().getActiveBits() <= BitWidth)
          Idx = ConstantInt::get(Ty, IndexC->getValue().zextOrTrunc(BitWidth));
        else
          Idx = UndefValue::get(Ty);
        return replaceInstUsesWith(EI, Idx);
      }
    }

    // InstSimplify should handle cases where the index is invalid.
    // For fixed-length vectors, it's invalid to extract an out-of-range
    // element.
    if (!EC.isScalable() && IndexC->getValue().uge(NumElts))
      return nullptr;

    if (Instruction *I = foldBitcastExtElt(EI))
      return I;

    // If there's a vector PHI feeding a scalar use through this extractelement
    // instruction, try to scalarize the PHI.
    if (auto *Phi = dyn_cast<PHINode>(SrcVec))
      if (Instruction *ScalarPHI = scalarizePHI(EI, Phi))
        return ScalarPHI;
  }

  // TODO: Come up with an n-ary matcher that subsumes both unary and
  // binary matchers.
  UnaryOperator *UO;
  if (match(SrcVec, m_UnOp(UO)) && cheapToScalarize(SrcVec, Index)) {
    // extelt (unop X), Index --> unop (extelt X, Index)
    Value *X = UO->getOperand(0);
    Value *E = Builder.CreateExtractElement(X, Index);
    return UnaryOperator::CreateWithCopiedFlags(UO->getOpcode(), E, UO);
  }

  BinaryOperator *BO;
  if (match(SrcVec, m_BinOp(BO)) && cheapToScalarize(SrcVec, Index)) {
    // extelt (binop X, Y), Index --> binop (extelt X, Index), (extelt Y, Index)
    Value *X = BO->getOperand(0), *Y = BO->getOperand(1);
    Value *E0 = Builder.CreateExtractElement(X, Index);
    Value *E1 = Builder.CreateExtractElement(Y, Index);
    return BinaryOperator::CreateWithCopiedFlags(BO->getOpcode(), E0, E1, BO);
  }

  Value *X, *Y;
  CmpInst::Predicate Pred;
  if (match(SrcVec, m_Cmp(Pred, m_Value(X), m_Value(Y))) &&
      cheapToScalarize(SrcVec, Index)) {
    // extelt (cmp X, Y), Index --> cmp (extelt X, Index), (extelt Y, Index)
    Value *E0 = Builder.CreateExtractElement(X, Index);
    Value *E1 = Builder.CreateExtractElement(Y, Index);
    return CmpInst::Create(cast<CmpInst>(SrcVec)->getOpcode(), Pred, E0, E1);
  }

  if (auto *I = dyn_cast<Instruction>(SrcVec)) {
    if (auto *IE = dyn_cast<InsertElementInst>(I)) {
      // InstSimplify has already handled the case where the indices are
      // constants with equal values, so if both indices are constants here,
      // they must differ; extract from the pre-inserted vector instead.
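      // For example (illustrative IR):
      //   extelt (inselt %vec, %s, 1), 0 --> extelt %vec, 0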
      if (isa<Constant>(IE->getOperand(2)) && IndexC)
        return replaceOperand(EI, 0, IE->getOperand(0));
    } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
      auto *VecType = cast<VectorType>(GEP->getType());
      ElementCount EC = VecType->getElementCount();
      uint64_t IdxVal = IndexC ? IndexC->getZExtValue() : 0;
      if (IndexC && IdxVal < EC.getKnownMinValue() && GEP->hasOneUse()) {
        // Find out why we have a vector result - these are a few examples:
        //  1. We have a scalar pointer and a vector of indices, or
        //  2. We have a vector of pointers and a scalar index, or
        //  3. We have a vector of pointers and a vector of indices, etc.
        // Here we only consider combining when there is exactly one vector
        // operand, since the optimization is less obviously a win due to
        // needing more than one extractelement.

        unsigned VectorOps =
            llvm::count_if(GEP->operands(), [](const Value *V) {
              return isa<VectorType>(V->getType());
            });
        if (VectorOps == 1) {
          Value *NewPtr = GEP->getPointerOperand();
          if (isa<VectorType>(NewPtr->getType()))
            NewPtr = Builder.CreateExtractElement(NewPtr, IndexC);

          SmallVector<Value *> NewOps;
          for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {
            Value *Op = GEP->getOperand(I);
            if (isa<VectorType>(Op->getType()))
              NewOps.push_back(Builder.CreateExtractElement(Op, IndexC));
            else
              NewOps.push_back(Op);
          }

          GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
              GEP->getSourceElementType(), NewPtr, NewOps);
          NewGEP->setIsInBounds(GEP->isInBounds());
          return NewGEP;
        }
      }
    } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
      // If this is extracting an element from a shufflevector, figure out
      // where it came from and extract from the appropriate input element
      // instead. Restrict the following transformation to fixed-length
      // vectors.
      if (isa<FixedVectorType>(SVI->getType()) && isa<ConstantInt>(Index)) {
        int SrcIdx =
            SVI->getMaskValue(cast<ConstantInt>(Index)->getZExtValue());
        Value *Src;
        unsigned LHSWidth = cast<FixedVectorType>(SVI->getOperand(0)->getType())
                                ->getNumElements();

        if (SrcIdx < 0)
          return replaceInstUsesWith(EI, UndefValue::get(EI.getType()));
        if (SrcIdx < (int)LHSWidth)
          Src = SVI->getOperand(0);
        else {
          SrcIdx -= LHSWidth;
          Src = SVI->getOperand(1);
        }
        Type *Int32Ty = Type::getInt32Ty(EI.getContext());
        return ExtractElementInst::Create(
            Src, ConstantInt::get(Int32Ty, SrcIdx, false));
      }
    } else if (auto *CI = dyn_cast<CastInst>(I)) {
      // Canonicalize extractelement(cast) -> cast(extractelement).
      // Bitcasts can change the number of vector elements, and they cost
      // nothing.
      if (CI->hasOneUse() && (CI->getOpcode() != Instruction::BitCast)) {
        Value *EE = Builder.CreateExtractElement(CI->getOperand(0), Index);
        return CastInst::Create(CI->getOpcode(), EE, EI.getType());
      }
    }
  }

  // Run demanded elements after other transforms, as this can drop flags on
  // binops. If there are two paths to the same final result, we prefer the
  // one which doesn't force us to drop flags.
  if (IndexC) {
    ElementCount EC = EI.getVectorOperandType()->getElementCount();
    unsigned NumElts = EC.getKnownMinValue();
    // This instruction only demands the single element from the input vector.
    // Skip for scalable types; the number of elements is unknown at
    // compile-time.
    if (!EC.isScalable() && NumElts != 1) {
      // If the input vector has a single use, simplify it based on this use
      // property.
      if (SrcVec->hasOneUse()) {
        APInt UndefElts(NumElts, 0);
        APInt DemandedElts(NumElts, 0);
        DemandedElts.setBit(IndexC->getZExtValue());
        if (Value *V =
                SimplifyDemandedVectorElts(SrcVec, DemandedElts, UndefElts))
          return replaceOperand(EI, 0, V);
      } else {
        // If the input vector has multiple uses, simplify it based on a union
        // of all elements used.
        APInt DemandedElts = findDemandedEltsByAllUsers(SrcVec);
        if (!DemandedElts.isAllOnes()) {
          APInt UndefElts(NumElts, 0);
          if (Value *V = SimplifyDemandedVectorElts(
                  SrcVec, DemandedElts, UndefElts, 0 /* Depth */,
                  true /* AllowMultipleUsers */)) {
            if (V != SrcVec) {
              SrcVec->replaceAllUsesWith(V);
              return &EI;
            }
          }
        }
      }
    }
  }
  return nullptr;
}
/// If V is a shuffle of values that ONLY returns elements from either LHS or
/// RHS, return the shuffle mask and true. Otherwise, return false.
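///
/// For example (illustrative IR), inserting an element extracted from LHS:
///   %e = extractelement <4 x i32> %LHS, i64 0
///   %v = insertelement <4 x i32> %RHS, i32 %e, i64 2
/// yields the single-shuffle mask <4, 5, 0, 7>.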
static bool collectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
                                         SmallVectorImpl<int> &Mask) {
  assert(LHS->getType() == RHS->getType() &&
         "Invalid CollectSingleShuffleElements");
  unsigned NumElts = cast<FixedVectorType>(V->getType())->getNumElements();

  if (match(V, m_Undef())) {
    Mask.assign(NumElts, -1);
    return true;
  }

  if (V == LHS) {
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(i);
    return true;
  }

  if (V == RHS) {
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(i + NumElts);
    return true;
  }

  if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp = IEI->getOperand(2);

    if (!isa<ConstantInt>(IdxOp))
      return false;
    unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

    if (isa<UndefValue>(ScalarOp)) { // inserting undef into vector.
      // We can handle this if the vector we are inserting into is
      // transitively ok.
      if (collectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
        // If so, update the mask to reflect the inserted undef.
        Mask[InsertedIdx] = -1;
        return true;
      }
    } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
      if (isa<ConstantInt>(EI->getOperand(1))) {
        unsigned ExtractedIdx =
            cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
        unsigned NumLHSElts =
            cast<FixedVectorType>(LHS->getType())->getNumElements();

        // This must be extracting from either LHS or RHS.
        if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
          // We can handle this if the vector we are inserting into is
          // transitively ok.
          if (collectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
            // If so, update the mask to reflect the inserted value.
            if (EI->getOperand(0) == LHS) {
              Mask[InsertedIdx % NumElts] = ExtractedIdx;
            } else {
              assert(EI->getOperand(0) == RHS);
              Mask[InsertedIdx % NumElts] = ExtractedIdx + NumLHSElts;
            }
            return true;
          }
        }
      }
    }
  }

  return false;
}
/// If we have insertion into a vector that is wider than the vector that we
/// are extracting from, try to widen the source vector to allow a single
/// shufflevector to replace one or more insert/extract pairs.
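///
/// For example (illustrative IR), widening a <2 x i8> source to match a
/// <4 x i8> destination:
///   %wide = shufflevector <2 x i8> %src, <2 x i8> poison,
///                         <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
/// so later extracts can be rewritten to read from %wide instead of %src.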
static void replaceExtractElements(InsertElementInst *InsElt,
                                   ExtractElementInst *ExtElt,
                                   InstCombinerImpl &IC) {
  auto *InsVecType = cast<FixedVectorType>(InsElt->getType());
  auto *ExtVecType = cast<FixedVectorType>(ExtElt->getVectorOperandType());
  unsigned NumInsElts = InsVecType->getNumElements();
  unsigned NumExtElts = ExtVecType->getNumElements();

  // The inserted-to vector must be wider than the extracted-from vector.
  if (InsVecType->getElementType() != ExtVecType->getElementType() ||
      NumExtElts >= NumInsElts)
    return;

  // Create a shuffle mask to widen the extracted-from vector using poison
  // values. The mask selects all of the values of the original vector followed
  // by as many poison values as needed to create a vector of the same length
  // as the inserted-to vector.
  SmallVector<int, 16> ExtendMask;
  for (unsigned i = 0; i < NumExtElts; ++i)
    ExtendMask.push_back(i);
  for (unsigned i = NumExtElts; i < NumInsElts; ++i)
    ExtendMask.push_back(-1);

  Value *ExtVecOp = ExtElt->getVectorOperand();
  auto *ExtVecOpInst = dyn_cast<Instruction>(ExtVecOp);
  BasicBlock *InsertionBlock = (ExtVecOpInst && !isa<PHINode>(ExtVecOpInst))
                                   ? ExtVecOpInst->getParent()
                                   : ExtElt->getParent();

  // TODO: This restriction matches the basic block check below when creating
  // new extractelement instructions. If that limitation is removed, this one
  // could also be removed. But for now, we just bail out to ensure that we
  // will replace the extractelement instruction that is feeding our
  // insertelement instruction. This allows the insertelement to then be
  // replaced by a shufflevector. If the insertelement is not replaced, we can
  // induce infinite looping because there's an optimization for extractelement
  // that will delete our widening shuffle. This would trigger another attempt
  // here to create that shuffle, and we spin forever.
  if (InsertionBlock != InsElt->getParent())
    return;

  // TODO: This restriction matches the check in visitInsertElementInst() and
  // prevents an infinite loop caused by not turning the extract/insert pair
  // into a shuffle. We really should not need either check, but we're lacking
  // folds for shufflevectors because we're afraid to generate shuffle masks
  // that the backend can't handle.
  if (InsElt->hasOneUse() && isa<InsertElementInst>(InsElt->user_back()))
    return;

  auto *WideVec = new ShuffleVectorInst(ExtVecOp, ExtendMask);

  // Insert the new shuffle after the vector operand of the extract is defined
  // (as long as it's not a PHI) or at the start of the basic block of the
  // extract, so any subsequent extracts in the same basic block can use it.
  // TODO: Insert before the earliest ExtractElementInst that is replaced.
  if (ExtVecOpInst && !isa<PHINode>(ExtVecOpInst))
    WideVec->insertAfter(ExtVecOpInst);
  else
    IC.InsertNewInstWith(WideVec, *ExtElt->getParent()->getFirstInsertionPt());

  // Replace extracts from the original narrow vector with extracts from the
  // new wide vector.
  for (User *U : ExtVecOp->users()) {
    ExtractElementInst *OldExt = dyn_cast<ExtractElementInst>(U);
    if (!OldExt || OldExt->getParent() != WideVec->getParent())
      continue;
    auto *NewExt = ExtractElementInst::Create(WideVec, OldExt->getOperand(1));
    NewExt->insertAfter(OldExt);
    IC.replaceInstUsesWith(*OldExt, NewExt);
  }
}
/// We are building a shuffle to create V, which is a sequence of insertelement,
/// extractelement pairs. If PermittedRHS is set, then we must either use it or
/// not rely on the second vector source. Return a std::pair containing the
/// left and right vectors of the proposed shuffle (or 0), and set the Mask
/// parameter as required.
///
/// Note: we intentionally don't try to fold earlier shuffles since they have
/// often been chosen carefully to be efficiently implementable on the target.
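///
/// For example (illustrative IR), a chain like:
///   %i0 = insertelement <4 x i32> poison, i32 %e0, i64 0
///   %i1 = insertelement <4 x i32> %i0, i32 %e1, i64 1
/// where %e0 and %e1 are constant-index extracts may collapse into a single
/// shufflevector of the two source vectors.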
using ShuffleOps = std::pair<Value *, Value *>;

static ShuffleOps collectShuffleElements(Value *V, SmallVectorImpl<int> &Mask,
                                         Value *PermittedRHS,
                                         InstCombinerImpl &IC) {
  assert(V->getType()->isVectorTy() && "Invalid shuffle!");
  unsigned NumElts = cast<FixedVectorType>(V->getType())->getNumElements();

  if (match(V, m_Undef())) {
    Mask.assign(NumElts, -1);
    return std::make_pair(
        PermittedRHS ? UndefValue::get(PermittedRHS->getType()) : V, nullptr);
  }

  if (isa<ConstantAggregateZero>(V)) {
    Mask.assign(NumElts, 0);
    return std::make_pair(V, nullptr);
  }

  if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp = IEI->getOperand(2);

    if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
      if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp)) {
        unsigned ExtractedIdx =
            cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
        unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

        // Either the extracted-from or inserted-into vector must be RHSVec,
        // otherwise we'd end up with a shuffle of three inputs.
        if (EI->getOperand(0) == PermittedRHS || PermittedRHS == nullptr) {
          Value *RHS = EI->getOperand(0);
          ShuffleOps LR = collectShuffleElements(VecOp, Mask, RHS, IC);
          assert(LR.second == nullptr || LR.second == RHS);

          if (LR.first->getType() != RHS->getType()) {
            // Although we are giving up for now, see if we can create extracts
            // that match the inserts for another round of combining.
            replaceExtractElements(IEI, EI, IC);

            // We tried our best, but we can't find anything compatible with
            // RHS further up the chain. Return a trivial shuffle.
            for (unsigned i = 0; i < NumElts; ++i)
              Mask[i] = i;
            return std::make_pair(V, nullptr);
          }

          unsigned NumLHSElts =
              cast<FixedVectorType>(RHS->getType())->getNumElements();
          Mask[InsertedIdx % NumElts] = NumLHSElts + ExtractedIdx;
          return std::make_pair(LR.first, RHS);
        }

        if (VecOp == PermittedRHS) {
          // We've gone as far as we can: anything on the other side of the
          // extractelement will already have been converted into a shuffle.
          unsigned NumLHSElts =
              cast<FixedVectorType>(EI->getOperand(0)->getType())
                  ->getNumElements();
          for (unsigned i = 0; i != NumElts; ++i)
            Mask.push_back(i == InsertedIdx ? ExtractedIdx : NumLHSElts + i);
          return std::make_pair(EI->getOperand(0), PermittedRHS);
        }

        // If this insertelement is a chain that comes from exactly these two
        // vectors, return the vector and the effective shuffle.
        if (EI->getOperand(0)->getType() == PermittedRHS->getType() &&
            collectSingleShuffleElements(IEI, EI->getOperand(0), PermittedRHS,
                                         Mask))
          return std::make_pair(EI->getOperand(0), PermittedRHS);
      }
    }
  }

  // Otherwise, we can't do anything fancy. Return an identity vector.
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(i);
  return std::make_pair(V, nullptr);
}
/// Look for a chain of insertvalue's that fully define an aggregate, and trace
/// back the values inserted, see if they all were extractvalue'd from
/// the same source aggregate from the exact same element indexes.
/// If they were, just reuse the source aggregate.
/// This potentially deals with PHI indirections.
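///
/// For example (illustrative IR):
///   %e0 = extractvalue { i32, i64 } %agg, 0
///   %e1 = extractvalue { i32, i64 } %agg, 1
///   %i0 = insertvalue { i32, i64 } undef, i32 %e0, 0
///   %i1 = insertvalue { i32, i64 } %i0, i64 %e1, 1
/// can be replaced with %agg itself.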
Instruction *InstCombinerImpl::foldAggregateConstructionIntoAggregateReuse(
    InsertValueInst &OrigIVI) {
  Type *AggTy = OrigIVI.getType();
  unsigned NumAggElts;
  switch (AggTy->getTypeID()) {
  case Type::StructTyID:
    NumAggElts = AggTy->getStructNumElements();
    break;
  case Type::ArrayTyID:
    NumAggElts = AggTy->getArrayNumElements();
    break;
  default:
    llvm_unreachable("Unhandled aggregate type?");
  }

  // Arbitrary aggregate size cut-off. Motivation for limit of 2 is to be able
  // to handle clang C++ exception struct (which is hardcoded as {i8*, i32}).
  // FIXME: any interesting patterns to be caught with larger limit?
  assert(NumAggElts > 0 && "Aggregate should have elements.");
  if (NumAggElts > 2)
    return nullptr;

  static constexpr auto NotFound = None;
  static constexpr auto FoundMismatch = nullptr;

  // Try to find a value of each element of an aggregate.
  // FIXME: deal with more complex, not one-dimensional, aggregate types
  SmallVector<Optional<Instruction *>, 2> AggElts(NumAggElts, NotFound);

  // Do we know values for each element of the aggregate?
  auto KnowAllElts = [&AggElts]() {
    return all_of(AggElts,
                  [](Optional<Instruction *> Elt) { return Elt != NotFound; });
  };

  int Depth = 0;

  // Arbitrary `insertvalue` visitation depth limit. Let's be okay with
  // every element being overwritten twice, which should never happen.
  static const int DepthLimit = 2 * NumAggElts;

  // Recurse up the chain of `insertvalue` aggregate operands until either we've
  // reconstructed the full initializer or we can't visit any more
  // `insertvalue`'s.
  for (InsertValueInst *CurrIVI = &OrigIVI;
       Depth < DepthLimit && CurrIVI && !KnowAllElts();
       CurrIVI = dyn_cast<InsertValueInst>(CurrIVI->getAggregateOperand()),
                       ++Depth) {
    auto *InsertedValue =
        dyn_cast<Instruction>(CurrIVI->getInsertedValueOperand());
    if (!InsertedValue)
      return nullptr; // Inserted value must be produced by an instruction.

    ArrayRef<unsigned int> Indices = CurrIVI->getIndices();

    // Don't bother with more than single-level aggregates.
    if (Indices.size() != 1)
      return nullptr; // FIXME: deal with more complex aggregates?

    // Now, we may have already previously recorded the value for this element
    // of an aggregate. If we did, that means the CurrIVI will later be
    // overwritten with the already-recorded value. But if not, let's record it!
    Optional<Instruction *> &Elt = AggElts[Indices.front()];
    Elt = Elt.getValueOr(InsertedValue);

    // FIXME: should we handle chain-terminating undef base operand?
  }

  // Was that sufficient to deduce the full initializer for the aggregate?
  if (!KnowAllElts())
    return nullptr; // Give up then.

  // We now want to find the source[s] of the aggregate elements we've found.
  // And with "source" we mean the original aggregate[s] from which
  // the inserted elements were extracted. This may require PHI translation.

  enum class AggregateDescription {
    /// When analyzing the value that was inserted into an aggregate, we did
    /// not manage to find a defining `extractvalue` instruction to analyze.
    NotFound,
    /// When analyzing the value that was inserted into an aggregate, we did
    /// manage to find defining `extractvalue` instruction[s], and everything
    /// matched perfectly - aggregate type, element insertion/extraction index.
    Found,
    /// When analyzing the value that was inserted into an aggregate, we did
    /// manage to find a defining `extractvalue` instruction, but there was
    /// a mismatch: either the source type from which the extraction was didn't
    /// match the aggregate type into which the insertion was,
    /// or the extraction/insertion channels mismatched,
    /// or different elements had different source aggregates.
    FoundMismatch
  };
  auto Describe = [](Optional<Value *> SourceAggregate) {
    if (SourceAggregate == NotFound)
      return AggregateDescription::NotFound;
    if (*SourceAggregate == FoundMismatch)
      return AggregateDescription::FoundMismatch;
    return AggregateDescription::Found;
  };

  // Given the value \p Elt that was being inserted into element \p EltIdx of an
  // aggregate AggTy, see if \p Elt was originally defined by an
  // appropriate extractvalue (same element index, same aggregate type).
  // If found, return the source aggregate from which the extraction was.
  // If \p PredBB is provided, does PHI translation of \p Elt first.
  auto FindSourceAggregate =
      [&](Instruction *Elt, unsigned EltIdx, Optional<BasicBlock *> UseBB,
          Optional<BasicBlock *> PredBB) -> Optional<Value *> {
    // For now(?), only deal with, at most, a single level of PHI indirection.
    if (UseBB && PredBB)
      Elt = dyn_cast<Instruction>(Elt->DoPHITranslation(*UseBB, *PredBB));
    // FIXME: deal with multiple levels of PHI indirection?

    // Did we find an extraction?
    auto *EVI = dyn_cast_or_null<ExtractValueInst>(Elt);
    if (!EVI)
      return NotFound;

    Value *SourceAggregate = EVI->getAggregateOperand();

    // Is the extraction from the same type into which the insertion was?
    if (SourceAggregate->getType() != AggTy)
      return FoundMismatch;
    // And the element index doesn't change between extraction and insertion?
    if (EVI->getNumIndices() != 1 || EltIdx != EVI->getIndices().front())
      return FoundMismatch;

    return SourceAggregate; // AggregateDescription::Found
  };

  // Given elements AggElts that were constructing an aggregate OrigIVI,
  // see if we can find an appropriate source aggregate for each of the
  // elements, and see that it's the same aggregate for each element. If so,
  // return it.
  auto FindCommonSourceAggregate =
      [&](Optional<BasicBlock *> UseBB,
          Optional<BasicBlock *> PredBB) -> Optional<Value *> {
    Optional<Value *> SourceAggregate;

    for (auto I : enumerate(AggElts)) {
      assert(Describe(SourceAggregate) != AggregateDescription::FoundMismatch &&
             "We don't store nullptr in SourceAggregate!");
      assert((Describe(SourceAggregate) == AggregateDescription::Found) ==
                 (I.index() != 0) &&
             "SourceAggregate should be valid after the first element.");

      // For this element, is there a plausible source aggregate?
      // FIXME: we could special-case undef element, IFF we know that in the
      // source aggregate said element isn't poison.
      Optional<Value *> SourceAggregateForElement =
          FindSourceAggregate(*I.value(), I.index(), UseBB, PredBB);

      // Okay, what have we found? Does that correlate with previous findings?

      // Regardless of whether or not we have previously found source
      // aggregate for previous elements (if any), if we didn't find one for
      // this element, passthrough whatever we have just found.
      if (Describe(SourceAggregateForElement) != AggregateDescription::Found)
        return SourceAggregateForElement;

      // Okay, we have found source aggregate for this element.
      // Let's see what we already know from previous elements, if any.
      switch (Describe(SourceAggregate)) {
      case AggregateDescription::NotFound:
        // This is apparently the first element that we have examined.
        SourceAggregate = SourceAggregateForElement; // Record the aggregate!
        continue; // Great, now look at next element.
      case AggregateDescription::Found:
        // We have previously already successfully examined other elements.
        // Is this the same source aggregate we've found for other elements?
        if (*SourceAggregateForElement != *SourceAggregate)
          return FoundMismatch;
        continue; // Still the same aggregate, look at next element.
      case AggregateDescription::FoundMismatch:
        llvm_unreachable("Can't happen. We would have early-exited then.");
      }
    }

    assert(Describe(SourceAggregate) == AggregateDescription::Found &&
           "Must be a valid Value");
    return *SourceAggregate;
  };

  Optional<Value *> SourceAggregate;

  // Can we find the source aggregate without looking at predecessors?
  SourceAggregate = FindCommonSourceAggregate(/*UseBB=*/None, /*PredBB=*/None);
  if (Describe(SourceAggregate) != AggregateDescription::NotFound) {
    if (Describe(SourceAggregate) == AggregateDescription::FoundMismatch)
      return nullptr; // Conflicting source aggregates!
    ++NumAggregateReconstructionsSimplified;
    return replaceInstUsesWith(OrigIVI, *SourceAggregate);
  }

  // Okay, apparently we need to look at predecessors.

  // We should be smart about picking the "use" basic block, which will be the
  // merge point for aggregate, where we'll insert the final PHI that will be
  // used instead of OrigIVI. Basic block of OrigIVI is *not* the right choice.
  // We should look in which blocks each of the AggElts is being defined,
  // they all should be defined in the same basic block.
  BasicBlock *UseBB = nullptr;

  for (const Optional<Instruction *> &I : AggElts) {
    BasicBlock *BB = (*I)->getParent();
    // If it's the first instruction we've encountered, record the basic block.
    if (!UseBB) {
      UseBB = BB;
      continue;
    }
    // Otherwise, this must be the same basic block we've seen previously.
    if (UseBB != BB)
      return nullptr;
  }

  // If *all* of the elements are basic-block-independent, meaning they are
  // either function arguments, or constant expressions, then if we didn't
  // handle them without predecessor-aware handling, we won't handle them now.
  if (!UseBB)
    return nullptr;

  // If we didn't manage to find the source aggregate without looking at
  // predecessors, and there are no predecessors to look at, then we're done.
  if (pred_empty(UseBB))
    return nullptr;

  // Arbitrary predecessor count limit.
  static const int PredCountLimit = 64;

  // Cache the (non-uniqified!) list of predecessors in a vector,
  // checking the limit at the same time for efficiency.
  SmallVector<BasicBlock *, 4> Preds; // May have duplicates!
  for (BasicBlock *Pred : predecessors(UseBB)) {
    // Don't bother if there are too many predecessors.
    if (Preds.size() >= PredCountLimit) // FIXME: only count duplicates once?
      return nullptr;
    Preds.emplace_back(Pred);
  }

  // For each predecessor, what is the source aggregate
  // from which all the elements were originally extracted?
  // Note that we want the map to have a stable iteration order!
  SmallDenseMap<BasicBlock *, Value *, 4> SourceAggregates;
  for (BasicBlock *Pred : Preds) {
    std::pair<decltype(SourceAggregates)::iterator, bool> IV =
        SourceAggregates.insert({Pred, nullptr});
    // Did we already evaluate this predecessor?
    if (!IV.second)
      continue;

    // Let's hope that when coming from predecessor Pred, all elements of the
    // aggregate produced by OrigIVI must have been originally extracted from
    // the same aggregate. Is that so? Can we find said original aggregate?
    SourceAggregate = FindCommonSourceAggregate(UseBB, Pred);
    if (Describe(SourceAggregate) != AggregateDescription::Found)
      return nullptr; // Give up.
    IV.first->second = *SourceAggregate;
  }

  // All good! Now we just need to thread the source aggregates here.
  // Note that we have to insert the new PHI here, ourselves, because we can't
  // rely on InstCombinerImpl::run() inserting it into the right basic block.
  // Note that the same block can be a predecessor more than once,
  // and we need to preserve that invariant for the PHI node.
  BuilderTy::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(UseBB->getFirstNonPHI());
  auto *PHI =
      Builder.CreatePHI(AggTy, Preds.size(), OrigIVI.getName() + ".merged");
  for (BasicBlock *Pred : Preds)
    PHI->addIncoming(SourceAggregates[Pred], Pred);

  ++NumAggregateReconstructionsSimplified;
  return replaceInstUsesWith(OrigIVI, PHI);
}
/// Try to find redundant insertvalue instructions, like the following ones:
///  %0 = insertvalue { i8, i32 } undef, i8 %x, 0
///  %1 = insertvalue { i8, i32 } %0,    i8 %y, 0
/// Here the second instruction inserts values at the same indices as the
/// first one, making the first one redundant.
/// It should be transformed to:
///  %0 = insertvalue { i8, i32 } undef, i8 %y, 0
Instruction *InstCombinerImpl::visitInsertValueInst(InsertValueInst &I) {
  bool IsRedundant = false;
  ArrayRef<unsigned int> FirstIndices = I.getIndices();

  // If there is a chain of insertvalue instructions (each of them except the
  // last one has only one use and it's another insertvalue insn from this
  // chain), check if any of the 'children' uses the same indices as the first
  // instruction. In this case, the first one is redundant.
  Value *V = &I;
  unsigned Depth = 0;
  while (V->hasOneUse() && Depth < 10) {
    User *U = V->user_back();
    auto UserInsInst = dyn_cast<InsertValueInst>(U);
    if (!UserInsInst || U->getOperand(0) != V)
      break;
    if (UserInsInst->getIndices() == FirstIndices) {
      IsRedundant = true;
      break;
    }
    V = UserInsInst;
    Depth++;
  }

  if (IsRedundant)
    return replaceInstUsesWith(I, I.getOperand(0));

  if (Instruction *NewI = foldAggregateConstructionIntoAggregateReuse(I))
    return NewI;

  return nullptr;
}
static bool isShuffleEquivalentToSelect(ShuffleVectorInst &Shuf) {
  // Cannot analyze a scalable type; the number of elements is not a
  // compile-time constant.
  if (isa<ScalableVectorType>(Shuf.getOperand(0)->getType()))
    return false;

  int MaskSize = Shuf.getShuffleMask().size();
  int VecSize =
      cast<FixedVectorType>(Shuf.getOperand(0)->getType())->getNumElements();

  // A vector select does not change the size of the operands.
  if (MaskSize != VecSize)
    return false;

  // Each mask element must be undefined or choose a vector element from one of
  // the source operands without crossing vector lanes.
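  // For example, with 4-element operands, mask <0, 5, 2, 7> is a select
  // (each lane i reads lane i of one operand), while <1, 5, 2, 7> is not,
  // because lane 0 would read lane 1.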
  for (int i = 0; i != MaskSize; ++i) {
    int Elt = Shuf.getMaskValue(i);
    if (Elt != -1 && Elt != i && Elt != i + VecSize)
      return false;
  }

  return true;
}
/// Turn a chain of inserts that splats a value into an insert + shuffle:
/// insertelt(insertelt(insertelt(insertelt X, %k, 0), %k, 1), %k, 2) ... ->
/// shufflevector(insertelt(X, %k, 0), poison, zero)
static Instruction *foldInsSequenceIntoSplat(InsertElementInst &InsElt) {
  // We are interested in the last insert in a chain. So if this insert has a
  // single user and that user is an insert, bail.
  if (InsElt.hasOneUse() && isa<InsertElementInst>(InsElt.user_back()))
    return nullptr;

  VectorType *VecTy = InsElt.getType();
  // Cannot handle a scalable type; the number of elements is not a
  // compile-time constant.
  if (isa<ScalableVectorType>(VecTy))
    return nullptr;
  unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();

  // Do not try to do this for a one-element vector, since that's a nop,
  // and will cause an inf-loop.
  if (NumElements == 1)
    return nullptr;

  Value *SplatVal = InsElt.getOperand(1);
  InsertElementInst *CurrIE = &InsElt;
  SmallBitVector ElementPresent(NumElements, false);
  InsertElementInst *FirstIE = nullptr;

  // Walk the chain backwards, keeping track of which indices we inserted into,
  // until we hit something that isn't an insert of the splatted value.
  while (CurrIE) {
    auto *Idx = dyn_cast<ConstantInt>(CurrIE->getOperand(2));
    if (!Idx || CurrIE->getOperand(1) != SplatVal)
      return nullptr;

    auto *NextIE = dyn_cast<InsertElementInst>(CurrIE->getOperand(0));
    // Check that none of the intermediate steps have any additional uses,
    // except for the root insertelement instruction, which can be re-used, if
    // it inserts at position 0.
    if (CurrIE != &InsElt &&
        (!CurrIE->hasOneUse() && (NextIE != nullptr || !Idx->isZero())))
      return nullptr;

    ElementPresent[Idx->getZExtValue()] = true;
    FirstIE = CurrIE;
    CurrIE = NextIE;
  }

  // If this is just a single insertelement (not a sequence), we are done.
  if (FirstIE == &InsElt)
    return nullptr;

  // If we are not inserting into an undef vector, make sure we've seen an
  // insert into every element.
  // TODO: If the base vector is not undef, it might be better to create a splat
  //       and then a select-shuffle (blend) with the base vector.
  if (!match(FirstIE->getOperand(0), m_Undef()))
    if (!ElementPresent.all())
      return nullptr;

  // Create the insert + shuffle.
  Type *Int32Ty = Type::getInt32Ty(InsElt.getContext());
  PoisonValue *PoisonVec = PoisonValue::get(VecTy);
  Constant *Zero = ConstantInt::get(Int32Ty, 0);
  if (!cast<ConstantInt>(FirstIE->getOperand(2))->isZero())
    FirstIE = InsertElementInst::Create(PoisonVec, SplatVal, Zero, "", &InsElt);

  // Splat from element 0, but replace absent elements with undef in the mask.
  SmallVector<int, 16> Mask(NumElements, 0);
  for (unsigned i = 0; i != NumElements; ++i)
    if (!ElementPresent[i])
      Mask[i] = -1;

  return new ShuffleVectorInst(FirstIE, Mask);
}
/// Try to fold an insert element into an existing splat shuffle by changing
/// the shuffle's mask to include the index of this insert element.
static Instruction *foldInsEltIntoSplat(InsertElementInst &InsElt) {
  // Check if the vector operand of this insert is a canonical splat shuffle.
  auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0));
  if (!Shuf || !Shuf->isZeroEltSplat())
    return nullptr;

  // Bail out early if the shuffle has a scalable type. The number of elements
  // in the shuffle mask is unknown at compile-time.
  if (isa<ScalableVectorType>(Shuf->getType()))
    return nullptr;

  // Check for a constant insertion index.
  uint64_t IdxC;
  if (!match(InsElt.getOperand(2), m_ConstantInt(IdxC)))
    return nullptr;

  // Check if the splat shuffle's input is the same as this insert's scalar op.
  Value *X = InsElt.getOperand(1);
  Value *Op0 = Shuf->getOperand(0);
  if (!match(Op0, m_InsertElt(m_Undef(), m_Specific(X), m_ZeroInt())))
    return nullptr;

  // Replace the shuffle mask element at the index of this insert with a zero.
  // For example:
  // inselt (shuf (inselt undef, X, 0), _, <0,undef,0,undef>), X, 1
  //   --> shuf (inselt undef, X, 0), poison, <0,0,0,undef>
  unsigned NumMaskElts =
      cast<FixedVectorType>(Shuf->getType())->getNumElements();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (unsigned i = 0; i != NumMaskElts; ++i)
    NewMask[i] = i == IdxC ? 0 : Shuf->getMaskValue(i);

  return new ShuffleVectorInst(Op0, NewMask);
}
/// Try to fold an extract+insert element into an existing identity shuffle by
/// changing the shuffle's mask to include the index of this insert element.
static Instruction *foldInsEltIntoIdentityShuffle(InsertElementInst &InsElt) {
  // Check if the vector operand of this insert is an identity shuffle.
  auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0));
  if (!Shuf || !match(Shuf->getOperand(1), m_Undef()) ||
      !(Shuf->isIdentityWithExtract() || Shuf->isIdentityWithPadding()))
    return nullptr;

  // Bail out early if the shuffle has a scalable type. The number of elements
  // in the shuffle mask is unknown at compile-time.
  if (isa<ScalableVectorType>(Shuf->getType()))
    return nullptr;

  // Check for a constant insertion index.
  uint64_t IdxC;
  if (!match(InsElt.getOperand(2), m_ConstantInt(IdxC)))
    return nullptr;

  // Check if this insert's scalar op is extracted from the identity shuffle's
  // input vector.
  Value *Scalar = InsElt.getOperand(1);
  Value *X = Shuf->getOperand(0);
  if (!match(Scalar, m_ExtractElt(m_Specific(X), m_SpecificInt(IdxC))))
    return nullptr;

  // Replace the shuffle mask element at the index of this extract+insert with
  // that same index value.
  // For example:
  // inselt (shuf X, IdMask), (extelt X, IdxC), IdxC --> shuf X, IdMask'
  unsigned NumMaskElts =
      cast<FixedVectorType>(Shuf->getType())->getNumElements();
  SmallVector<int, 16> NewMask(NumMaskElts);
  ArrayRef<int> OldMask = Shuf->getShuffleMask();
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    if (i != IdxC) {
      // All mask elements besides the inserted element remain the same.
      NewMask[i] = OldMask[i];
    } else if (OldMask[i] == (int)IdxC) {
      // If the mask element was already set, there's nothing to do
      // (demanded elements analysis may unset it later).
      return nullptr;
    } else {
      assert(OldMask[i] == UndefMaskElem &&
             "Unexpected shuffle mask element for identity shuffle");
      NewMask[i] = IdxC;
    }
  }

  return new ShuffleVectorInst(X, Shuf->getOperand(1), NewMask);
}
/// If we have an insertelement instruction feeding into another insertelement
/// and the 2nd is inserting a constant into the vector, canonicalize that
/// constant insertion before the insertion of a variable:
///
/// insertelement (insertelement X, Y, IdxC1), ScalarC, IdxC2 -->
/// insertelement (insertelement X, ScalarC, IdxC2), Y, IdxC1
///
/// This has the potential of eliminating the 2nd insertelement instruction
/// via constant folding of the scalar constant into a vector constant.
static Instruction *hoistInsEltConst(InsertElementInst &InsElt2,
                                     InstCombiner::BuilderTy &Builder) {
  auto *InsElt1 = dyn_cast<InsertElementInst>(InsElt2.getOperand(0));
  if (!InsElt1 || !InsElt1->hasOneUse())
    return nullptr;

  Value *X, *Y;
  Constant *ScalarC;
  ConstantInt *IdxC1, *IdxC2;
  if (match(InsElt1->getOperand(0), m_Value(X)) &&
      match(InsElt1->getOperand(1), m_Value(Y)) && !isa<Constant>(Y) &&
      match(InsElt1->getOperand(2), m_ConstantInt(IdxC1)) &&
      match(InsElt2.getOperand(1), m_Constant(ScalarC)) &&
      match(InsElt2.getOperand(2), m_ConstantInt(IdxC2)) && IdxC1 != IdxC2) {
    Value *NewInsElt1 = Builder.CreateInsertElement(X, ScalarC, IdxC2);
    return InsertElementInst::Create(NewInsElt1, Y, IdxC1);
  }

  return nullptr;
}
/// insertelt (shufflevector X, CVec, Mask|insertelt X, C1, CIndex1), C, CIndex
/// --> shufflevector X, CVec', Mask'
static Instruction *foldConstantInsEltIntoShuffle(InsertElementInst &InsElt) {
  auto *Inst = dyn_cast<Instruction>(InsElt.getOperand(0));
  // Bail out if the parent has more than one use. In that case, we'd be
  // replacing the insertelt with a shuffle, and that's not a clear win.
  if (!Inst || !Inst->hasOneUse())
    return nullptr;
  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0))) {
    // The shuffle must have a constant vector operand. The insertelt must have
    // a constant scalar being inserted at a constant position in the vector.
    Constant *ShufConstVec, *InsEltScalar;
    uint64_t InsEltIndex;
    if (!match(Shuf->getOperand(1), m_Constant(ShufConstVec)) ||
        !match(InsElt.getOperand(1), m_Constant(InsEltScalar)) ||
        !match(InsElt.getOperand(2), m_ConstantInt(InsEltIndex)))
      return nullptr;

    // Adding an element to an arbitrary shuffle could be expensive, but a
    // shuffle that selects elements from vectors without crossing lanes is
    // assumed cheap.
    // If we're just adding a constant into that shuffle, it will still be
    // cheap.
    if (!isShuffleEquivalentToSelect(*Shuf))
      return nullptr;

    // From the above 'select' check, we know that the mask has the same number
    // of elements as the vector input operands. We also know that each constant
    // input element is used in its lane and can not be used more than once by
    // the shuffle. Therefore, replace the constant in the shuffle's constant
    // vector with the insertelt constant. Replace the constant in the shuffle's
    // mask vector with the insertelt index plus the length of the vector
    // (because the constant vector operand of a shuffle is always the 2nd
    // operand).
    ArrayRef<int> Mask = Shuf->getShuffleMask();
    unsigned NumElts = Mask.size();
    SmallVector<Constant *, 16> NewShufElts(NumElts);
    SmallVector<int, 16> NewMaskElts(NumElts);
    for (unsigned I = 0; I != NumElts; ++I) {
      if (I == InsEltIndex) {
        NewShufElts[I] = InsEltScalar;
        NewMaskElts[I] = InsEltIndex + NumElts;
      } else {
        // Copy over the existing values.
        NewShufElts[I] = ShufConstVec->getAggregateElement(I);
        NewMaskElts[I] = Mask[I];
      }

      // Bail if we failed to find an element.
      if (!NewShufElts[I])
        return nullptr;
    }

    // Create new operands for a shuffle that includes the constant of the
    // original insertelt. The old shuffle will be dead now.
    return new ShuffleVectorInst(Shuf->getOperand(0),
                                 ConstantVector::get(NewShufElts), NewMaskElts);
  } else if (auto *IEI = dyn_cast<InsertElementInst>(Inst)) {
    // Transform a sequence of insertelement ops with constant data/indexes
    // into a single shuffle op.
    // Cannot handle a scalable type; the number of elements needed to create
    // the shuffle mask is not a compile-time constant.
    if (isa<ScalableVectorType>(InsElt.getType()))
      return nullptr;
    unsigned NumElts =
        cast<FixedVectorType>(InsElt.getType())->getNumElements();

    uint64_t InsertIdx[2];
    Constant *Val[2];
    if (!match(InsElt.getOperand(2), m_ConstantInt(InsertIdx[0])) ||
        !match(InsElt.getOperand(1), m_Constant(Val[0])) ||
        !match(IEI->getOperand(2), m_ConstantInt(InsertIdx[1])) ||
        !match(IEI->getOperand(1), m_Constant(Val[1])))
      return nullptr;
    SmallVector<Constant *, 16> Values(NumElts);
    SmallVector<int, 16> Mask(NumElts);
    auto ValI = std::begin(Val);
    // Generate new constant vector and mask.
    // We have 2 values/masks from the insertelement instructions. Insert them
    // into new value/mask vectors.
    for (uint64_t I : InsertIdx) {
      if (!Values[I]) {
        Values[I] = *ValI;
        Mask[I] = NumElts + I;
      }
      ++ValI;
    }
    // Remaining values are filled with 'undef' values.
    for (unsigned I = 0; I < NumElts; ++I) {
      if (!Values[I]) {
        Values[I] = UndefValue::get(InsElt.getType()->getElementType());
        Mask[I] = I;
      }
    }
    // Create new operands for a shuffle that includes the constant of the
    // original insertelt.
    return new ShuffleVectorInst(IEI->getOperand(0),
                                 ConstantVector::get(Values), Mask);
  }
  return nullptr;
}
/// If both the base vector and the inserted element are extended from the same
/// type, do the insert element in the narrow source type followed by extend.
/// TODO: This can be extended to include other cast opcodes, but particularly
///       if we create a wider insertelement, make sure codegen is not harmed.
static Instruction *narrowInsElt(InsertElementInst &InsElt,
                                 InstCombiner::BuilderTy &Builder) {
  // We are creating a vector extend. If the original vector extend has another
  // use, that would mean we end up with 2 vector extends, so avoid that.
  // TODO: We could ease the use-clause to "if at least one op has one use"
  //       (assuming that the source types match - see next TODO comment).
  Value *Vec = InsElt.getOperand(0);
  if (!Vec->hasOneUse())
    return nullptr;

  Value *Scalar = InsElt.getOperand(1);
  Value *X, *Y;
  CastInst::CastOps CastOpcode;
  if (match(Vec, m_FPExt(m_Value(X))) && match(Scalar, m_FPExt(m_Value(Y))))
    CastOpcode = Instruction::FPExt;
  else if (match(Vec, m_SExt(m_Value(X))) && match(Scalar, m_SExt(m_Value(Y))))
    CastOpcode = Instruction::SExt;
  else if (match(Vec, m_ZExt(m_Value(X))) && match(Scalar, m_ZExt(m_Value(Y))))
    CastOpcode = Instruction::ZExt;
  else
    return nullptr;

  // TODO: We can allow mismatched types by creating an intermediate cast.
  if (X->getType()->getScalarType() != Y->getType())
    return nullptr;

  // inselt (ext X), (ext Y), Index --> ext (inselt X, Y, Index)
  Value *NewInsElt = Builder.CreateInsertElement(X, Y, InsElt.getOperand(2));
  return CastInst::Create(CastOpcode, NewInsElt, InsElt.getType());
}
1487 Instruction *InstCombinerImpl::visitInsertElementInst(InsertElementInst &IE) {
1488 Value *VecOp = IE.getOperand(0);
1489 Value *ScalarOp = IE.getOperand(1);
1490 Value *IdxOp = IE.getOperand(2);
1492 if (auto *V = SimplifyInsertElementInst(
1493 VecOp, ScalarOp, IdxOp, SQ.getWithInstruction(&IE)))
1494 return replaceInstUsesWith(IE, V);
1496 // Canonicalize type of constant indices to i64 to simplify CSE
1497 if (auto *IndexC = dyn_cast<ConstantInt>(IdxOp))
1498 if (auto *NewIdx = getPreferredVectorIndex(IndexC))
1499 return replaceOperand(IE, 2, NewIdx);
1501 // If the scalar is bitcast and inserted into undef, do the insert in the
1502 // source type followed by bitcast.
1503 // TODO: Generalize for insert into any constant, not just undef?
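  // For example (illustrative only):
  //   %s = bitcast i32 %x to float
  //   %r = insertelement <4 x float> undef, float %s, i64 0
  // becomes:
  //   %i = insertelement <4 x i32> undef, i32 %x, i64 0
  //   %r = bitcast <4 x i32> %i to <4 x float>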
  Value *ScalarSrc;
  if (match(VecOp, m_Undef()) &&
      match(ScalarOp, m_OneUse(m_BitCast(m_Value(ScalarSrc)))) &&
      (ScalarSrc->getType()->isIntegerTy() ||
       ScalarSrc->getType()->isFloatingPointTy())) {
    // inselt undef, (bitcast ScalarSrc), IdxOp -->
    //   bitcast (inselt undef, ScalarSrc, IdxOp)
    Type *ScalarTy = ScalarSrc->getType();
    Type *VecTy = VectorType::get(ScalarTy, IE.getType()->getElementCount());
    UndefValue *NewUndef = UndefValue::get(VecTy);
    Value *NewInsElt = Builder.CreateInsertElement(NewUndef, ScalarSrc, IdxOp);
    return new BitCastInst(NewInsElt, IE.getType());
  }

  // If the vector and scalar are both bitcast from the same element type, do
  // the insert in that source type followed by bitcast.
  Value *VecSrc;
  if (match(VecOp, m_BitCast(m_Value(VecSrc))) &&
      match(ScalarOp, m_BitCast(m_Value(ScalarSrc))) &&
      (VecOp->hasOneUse() || ScalarOp->hasOneUse()) &&
      VecSrc->getType()->isVectorTy() && !ScalarSrc->getType()->isVectorTy() &&
      cast<VectorType>(VecSrc->getType())->getElementType() ==
          ScalarSrc->getType()) {
    // inselt (bitcast VecSrc), (bitcast ScalarSrc), IdxOp -->
    //   bitcast (inselt VecSrc, ScalarSrc, IdxOp)
    Value *NewInsElt = Builder.CreateInsertElement(VecSrc, ScalarSrc, IdxOp);
    return new BitCastInst(NewInsElt, IE.getType());
  }

  // If the inserted element was extracted from some other fixed-length vector
  // and both indexes are valid constants, try to turn this into a shuffle.
  // Cannot handle scalable vector types; the number of elements needed to
  // create the shuffle mask is not a compile-time constant.
  uint64_t InsertedIdx, ExtractedIdx;
  Value *ExtVecOp;
  if (isa<FixedVectorType>(IE.getType()) &&
      match(IdxOp, m_ConstantInt(InsertedIdx)) &&
      match(ScalarOp,
            m_ExtractElt(m_Value(ExtVecOp), m_ConstantInt(ExtractedIdx))) &&
      isa<FixedVectorType>(ExtVecOp->getType()) &&
      ExtractedIdx <
          cast<FixedVectorType>(ExtVecOp->getType())->getNumElements()) {
    // TODO: Looking at the user(s) to determine if this insert is a
    // fold-to-shuffle opportunity does not match the usual instcombine
    // constraints. We should decide if the transform is worthy based only
    // on this instruction and its operands, but that may not work currently.
    //
    // Here, we are trying to avoid creating shuffles before reaching
    // the end of a chain of extract-insert pairs. This is complicated because
    // we do not generally form arbitrary shuffle masks in instcombine
    // (because those may codegen poorly), but collectShuffleElements() does
    // exactly that.
    //
    // The rules for determining what is an acceptable target-independent
    // shuffle mask are fuzzy because they evolve based on the backend's
    // capabilities and real-world impact.
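    //
    // For example, a single extract-insert pair like the following
    // (illustrative only):
    //   %e = extractelement <4 x i32> %b, i64 0
    //   %i = insertelement <4 x i32> %a, i32 %e, i64 3
    // can become:
    //   shufflevector <4 x i32> %a, <4 x i32> %b,
    //                 <4 x i32> <i32 0, i32 1, i32 2, i32 4>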
    auto isShuffleRootCandidate = [](InsertElementInst &Insert) {
      if (!Insert.hasOneUse())
        return true;
      auto *InsertUser = dyn_cast<InsertElementInst>(Insert.user_back());
      if (!InsertUser)
        return true;
      return false;
    };

    // Try to form a shuffle from a chain of extract-insert ops.
    if (isShuffleRootCandidate(IE)) {
      SmallVector<int, 16> Mask;
      ShuffleOps LR = collectShuffleElements(&IE, Mask, nullptr, *this);

      // The proposed shuffle may be trivial, in which case we shouldn't
      // perform the combine.
      if (LR.first != &IE && LR.second != &IE) {
        // We now have a shuffle of LHS, RHS, Mask.
        if (LR.second == nullptr)
          LR.second = UndefValue::get(LR.first->getType());
        return new ShuffleVectorInst(LR.first, LR.second, Mask);
      }
    }
  }

  if (auto VecTy = dyn_cast<FixedVectorType>(VecOp->getType())) {
    unsigned VWidth = VecTy->getNumElements();
    APInt UndefElts(VWidth, 0);
    APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
    if (Value *V = SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts)) {
      if (V != &IE)
        return replaceInstUsesWith(IE, V);
      return &IE;
    }
  }

  if (Instruction *Shuf = foldConstantInsEltIntoShuffle(IE))
    return Shuf;

  if (Instruction *NewInsElt = hoistInsEltConst(IE, Builder))
    return NewInsElt;

  if (Instruction *Broadcast = foldInsSequenceIntoSplat(IE))
    return Broadcast;

  if (Instruction *Splat = foldInsEltIntoSplat(IE))
    return Splat;

  if (Instruction *IdentityShuf = foldInsEltIntoIdentityShuffle(IE))
    return IdentityShuf;

  if (Instruction *Ext = narrowInsElt(IE, Builder))
    return Ext;

  return nullptr;
}

/// Return true if we can evaluate the specified expression tree if the vector
/// elements were shuffled in a different order.
static bool canEvaluateShuffled(Value *V, ArrayRef<int> Mask,
                                unsigned Depth = 5) {
  // We can always reorder the elements of a constant.
  if (isa<Constant>(V))
    return true;

  // We won't reorder vector arguments. No IPO here.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Two users may expect different orders of the elements. Don't try it.
  if (!I->hasOneUse())
    return false;

  if (Depth == 0) return false;

  switch (I->getOpcode()) {
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      // Propagating an undefined shuffle mask element to integer div/rem is not
      // allowed because those opcodes can create immediate undefined behavior
      // from an undefined element in an operand.
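      // For example (illustrative only), reordering
      //   %r = udiv <2 x i32> %x, %y
      // with a mask that contains -1 would require an undef element in the
      // divisor lane, and a udiv by undef is immediate UB (undef may be 0),
      // not merely a poison/undef result.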
      if (llvm::is_contained(Mask, -1))
        return false;
      LLVM_FALLTHROUGH;
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::GetElementPtr: {
      // Bail out if we would create longer vector ops. We could allow creating
      // longer vector ops, but that may result in more expensive codegen.
      Type *ITy = I->getType();
      if (ITy->isVectorTy() &&
          Mask.size() > cast<FixedVectorType>(ITy)->getNumElements())
        return false;
      for (Value *Operand : I->operands()) {
        if (!canEvaluateShuffled(Operand, Mask, Depth - 1))
          return false;
      }
      return true;
    }
    case Instruction::InsertElement: {
      ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(2));
      if (!CI) return false;
      int ElementNumber = CI->getLimitedValue();

      // Verify that 'CI' does not occur twice in Mask. A single 'insertelement'
      // can't put an element into multiple indices.
      bool SeenOnce = false;
      for (int i = 0, e = Mask.size(); i != e; ++i) {
        if (Mask[i] == ElementNumber) {
          if (SeenOnce)
            return false;
          SeenOnce = true;
        }
      }
      return canEvaluateShuffled(I->getOperand(0), Mask, Depth - 1);
    }
  }
  return false;
}

/// Rebuild a new instruction just like 'I' but with the new operands given.
/// If the new operands' types differ from the original instruction's types,
/// the new operands' types take precedence.
static Value *buildNew(Instruction *I, ArrayRef<Value*> NewOps) {
  // We don't want to use the IRBuilder here because we want the replacement
  // instructions to appear next to 'I', not the builder's insertion point.
  switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      BinaryOperator *BO = cast<BinaryOperator>(I);
      assert(NewOps.size() == 2 && "binary operator with #ops != 2");
      BinaryOperator *New =
          BinaryOperator::Create(cast<BinaryOperator>(I)->getOpcode(),
                                 NewOps[0], NewOps[1], "", BO);
      if (isa<OverflowingBinaryOperator>(BO)) {
        New->setHasNoUnsignedWrap(BO->hasNoUnsignedWrap());
        New->setHasNoSignedWrap(BO->hasNoSignedWrap());
      }
      if (isa<PossiblyExactOperator>(BO)) {
        New->setIsExact(BO->isExact());
      }
      if (isa<FPMathOperator>(BO))
        New->copyFastMathFlags(I);
      return New;
    }
    case Instruction::ICmp:
      assert(NewOps.size() == 2 && "icmp with #ops != 2");
      return new ICmpInst(I, cast<ICmpInst>(I)->getPredicate(),
                          NewOps[0], NewOps[1]);
    case Instruction::FCmp:
      assert(NewOps.size() == 2 && "fcmp with #ops != 2");
      return new FCmpInst(I, cast<FCmpInst>(I)->getPredicate(),
                          NewOps[0], NewOps[1]);
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt: {
      // It's possible that the mask has a different number of elements from
      // the original cast. We recompute the destination type to match the mask.
      Type *DestTy = VectorType::get(
          I->getType()->getScalarType(),
          cast<VectorType>(NewOps[0]->getType())->getElementCount());
      assert(NewOps.size() == 1 && "cast with #ops != 1");
      return CastInst::Create(cast<CastInst>(I)->getOpcode(), NewOps[0], DestTy,
                              "", I);
    }
    case Instruction::GetElementPtr: {
      Value *Ptr = NewOps[0];
      ArrayRef<Value*> Idx = NewOps.slice(1);
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          cast<GetElementPtrInst>(I)->getSourceElementType(), Ptr, Idx, "", I);
      GEP->setIsInBounds(cast<GetElementPtrInst>(I)->isInBounds());
      return GEP;
    }
  }
  llvm_unreachable("failed to rebuild vector instructions");
}

static Value *evaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask) {
  // Mask.size() does not need to be equal to the number of vector elements.

  assert(V->getType()->isVectorTy() && "can't reorder non-vector elements");
  Type *EltTy = V->getType()->getScalarType();
  Type *I32Ty = IntegerType::getInt32Ty(V->getContext());
  if (match(V, m_Undef()))
    return UndefValue::get(FixedVectorType::get(EltTy, Mask.size()));

  if (isa<ConstantAggregateZero>(V))
    return ConstantAggregateZero::get(FixedVectorType::get(EltTy, Mask.size()));

  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getShuffleVector(C, PoisonValue::get(C->getType()),
                                          Mask);

  Instruction *I = cast<Instruction>(V);
  switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::Select:
    case Instruction::GetElementPtr: {
      SmallVector<Value*, 8> NewOps;
      bool NeedsRebuild =
          (Mask.size() !=
           cast<FixedVectorType>(I->getType())->getNumElements());
      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
        Value *V;
        // Recursively call evaluateInDifferentElementOrder on vector arguments
        // as well. E.g. GetElementPtr may have scalar operands even if the
        // return value is a vector, so we need to examine the operand type.
        if (I->getOperand(i)->getType()->isVectorTy())
          V = evaluateInDifferentElementOrder(I->getOperand(i), Mask);
        else
          V = I->getOperand(i);
        NewOps.push_back(V);
        NeedsRebuild |= (V != I->getOperand(i));
      }

      if (NeedsRebuild)
        return buildNew(I, NewOps);
      return I;
    }
    case Instruction::InsertElement: {
      int Element = cast<ConstantInt>(I->getOperand(2))->getLimitedValue();

      // The insertelement was inserting at Element. Figure out which element
      // that becomes after shuffling. The answer is guaranteed to be unique
      // by canEvaluateShuffled.
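      //
      // For example (illustrative only), evaluating
      //   %v = insertelement <4 x i32> %x, i32 %s, i32 2
      // under the mask <2, 0, 1, 3> produces an insert at index 0, because
      // mask element 0 selects source element 2:
      //   insertelement (reordered %x), i32 %s, i32 0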
      bool Found = false;
      int Index = 0;
      for (int e = Mask.size(); Index != e; ++Index) {
        if (Mask[Index] == Element) {
          Found = true;
          break;
        }
      }

      // If element is not in Mask, no need to handle the operand 1 (element to
      // be inserted). Just evaluate values in operand 0 according to Mask.
      if (!Found)
        return evaluateInDifferentElementOrder(I->getOperand(0), Mask);

      Value *V = evaluateInDifferentElementOrder(I->getOperand(0), Mask);
      return InsertElementInst::Create(V, I->getOperand(1),
                                       ConstantInt::get(I32Ty, Index), "", I);
    }
  }
  llvm_unreachable("failed to reorder elements of vector instruction!");
}

// Returns true if the shuffle is extracting a contiguous range of values from
// LHS, for example:
//                 +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
//   Input:        |AA|BB|CC|DD|EE|FF|GG|HH|II|JJ|KK|LL|MM|NN|OO|PP|
//   Shuffles to:  |EE|FF|GG|HH|
//                 +--+--+--+--+
static bool isShuffleExtractingFromLHS(ShuffleVectorInst &SVI,
                                       ArrayRef<int> Mask) {
  unsigned LHSElems =
      cast<FixedVectorType>(SVI.getOperand(0)->getType())->getNumElements();
  unsigned MaskElems = Mask.size();
  unsigned BegIdx = Mask.front();
  unsigned EndIdx = Mask.back();
  if (BegIdx > EndIdx || EndIdx >= LHSElems || EndIdx - BegIdx != MaskElems - 1)
    return false;
  for (unsigned I = 0; I != MaskElems; ++I)
    if (static_cast<unsigned>(Mask[I]) != BegIdx + I)
      return false;
  return true;
}

/// These are the ingredients in an alternate form binary operator as described
/// below.
struct BinopElts {
  BinaryOperator::BinaryOps Opcode;
  Value *Op0;
  Value *Op1;
  BinopElts(BinaryOperator::BinaryOps Opc = (BinaryOperator::BinaryOps)0,
            Value *V0 = nullptr, Value *V1 = nullptr) :
      Opcode(Opc), Op0(V0), Op1(V1) {}
  operator bool() const { return Opcode != 0; }
};

/// Binops may be transformed into binops with different opcodes and operands.
/// Reverse the usual canonicalization to enable folds with the non-canonical
/// form of the binop. If a transform is possible, return the elements of the
/// new binop. If not, return invalid elements.
static BinopElts getAlternateBinop(BinaryOperator *BO, const DataLayout &DL) {
  Value *BO0 = BO->getOperand(0), *BO1 = BO->getOperand(1);
  Type *Ty = BO->getType();
  switch (BO->getOpcode()) {
  case Instruction::Shl: {
    // shl X, C --> mul X, (1 << C)
    Constant *C;
    if (match(BO1, m_Constant(C))) {
      Constant *ShlOne = ConstantExpr::getShl(ConstantInt::get(Ty, 1), C);
      return { Instruction::Mul, BO0, ShlOne };
    }
    break;
  }
  case Instruction::Or: {
    // or X, C --> add X, C (when X and C have no common bits set)
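    // For example (illustrative only): if %x is known to be a multiple of 4,
    // then 'or %x, 3' is equivalent to 'add %x, 3' because the set bits
    // cannot overlap, so no carries can occur.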
    const APInt *C;
    if (match(BO1, m_APInt(C)) && MaskedValueIsZero(BO0, *C, DL))
      return { Instruction::Add, BO0, BO1 };
    break;
  }
  default:
    break;
  }
  return {};
}

static Instruction *foldSelectShuffleWith1Binop(ShuffleVectorInst &Shuf) {
  assert(Shuf.isSelect() && "Must have select-equivalent shuffle");

  // Are we shuffling together some value and that same value after it has been
  // modified by a binop with a constant?
  Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
  Constant *C;
  bool Op0IsBinop;
  if (match(Op0, m_BinOp(m_Specific(Op1), m_Constant(C))))
    Op0IsBinop = true;
  else if (match(Op1, m_BinOp(m_Specific(Op0), m_Constant(C))))
    Op0IsBinop = false;
  else
    return nullptr;

  // The identity constant for a binop leaves a variable operand unchanged. For
  // a vector, this is a splat of something like 0, -1, or 1.
  // If there's no identity constant for this binop, we're done.
  auto *BO = cast<BinaryOperator>(Op0IsBinop ? Op0 : Op1);
  BinaryOperator::BinaryOps BOpcode = BO->getOpcode();
  Constant *IdC = ConstantExpr::getBinOpIdentity(BOpcode, Shuf.getType(), true);
  if (!IdC)
    return nullptr;

  // Shuffle identity constants into the lanes that return the original value.
  // Example: shuf (mul X, {-1,-2,-3,-4}), X, {0,5,6,3} --> mul X, {-1,1,1,-4}
  // Example: shuf X, (add X, {-1,-2,-3,-4}), {0,1,6,7} --> add X, {0,0,-3,-4}
  // The existing binop constant vector remains in the same operand position.
  ArrayRef<int> Mask = Shuf.getShuffleMask();
  Constant *NewC = Op0IsBinop ? ConstantExpr::getShuffleVector(C, IdC, Mask) :
                                ConstantExpr::getShuffleVector(IdC, C, Mask);

  bool MightCreatePoisonOrUB =
      is_contained(Mask, UndefMaskElem) &&
      (Instruction::isIntDivRem(BOpcode) || Instruction::isShift(BOpcode));
  if (MightCreatePoisonOrUB)
    NewC = InstCombiner::getSafeVectorConstantForBinop(BOpcode, NewC, true);

  // shuf (bop X, C), X, M --> bop X, C'
  // shuf X, (bop X, C), M --> bop X, C'
  Value *X = Op0IsBinop ? Op1 : Op0;
  Instruction *NewBO = BinaryOperator::Create(BOpcode, X, NewC);
  NewBO->copyIRFlags(BO);

  // An undef shuffle mask element may propagate as an undef constant element in
  // the new binop. That would produce poison where the original code might not.
  // If we already made a safe constant, then there's no danger.
  if (is_contained(Mask, UndefMaskElem) && !MightCreatePoisonOrUB)
    NewBO->dropPoisonGeneratingFlags();
  return NewBO;
}

/// If we have an insert of a scalar to a non-zero element of an undefined
/// vector and then shuffle that value, that's the same as inserting to the zero
/// element and shuffling. Splatting from the zero element is recognized as the
/// canonical form of splat.
static Instruction *canonicalizeInsertSplat(ShuffleVectorInst &Shuf,
                                            InstCombiner::BuilderTy &Builder) {
  Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
  ArrayRef<int> Mask = Shuf.getShuffleMask();
  Value *X;
  uint64_t IndexC;

  // Match a shuffle that is a splat to a non-zero element.
  if (!match(Op0, m_OneUse(m_InsertElt(m_Undef(), m_Value(X),
                                       m_ConstantInt(IndexC)))) ||
      !match(Op1, m_Undef()) || match(Mask, m_ZeroMask()) || IndexC == 0)
    return nullptr;

  // Insert into element 0 of an undef vector.
  UndefValue *UndefVec = UndefValue::get(Shuf.getType());
  Constant *Zero = Builder.getInt32(0);
  Value *NewIns = Builder.CreateInsertElement(UndefVec, X, Zero);

  // Splat from element 0. Any mask element that is undefined remains undefined.
  // Example:
  //   shuf (inselt undef, X, 2), _, <2,2,undef>
  //   --> shuf (inselt undef, X, 0), poison, <0,0,undef>
  unsigned NumMaskElts =
      cast<FixedVectorType>(Shuf.getType())->getNumElements();
  SmallVector<int, 16> NewMask(NumMaskElts, 0);
  for (unsigned i = 0; i != NumMaskElts; ++i)
    if (Mask[i] == UndefMaskElem)
      NewMask[i] = Mask[i];

  return new ShuffleVectorInst(NewIns, NewMask);
}

/// Try to fold shuffles that are the equivalent of a vector select.
Instruction *InstCombinerImpl::foldSelectShuffle(ShuffleVectorInst &Shuf) {
  if (!Shuf.isSelect())
    return nullptr;

  // Canonicalize to choose from operand 0 first unless operand 1 is undefined.
  // Commuting undef to operand 0 conflicts with another canonicalization.
  unsigned NumElts = cast<FixedVectorType>(Shuf.getType())->getNumElements();
  if (!match(Shuf.getOperand(1), m_Undef()) &&
      Shuf.getMaskValue(0) >= (int)NumElts) {
    // TODO: Can we assert that both operands of a shuffle-select are not undef
    // (otherwise, it would have been folded by instsimplify)?
    Shuf.commute();
    return &Shuf;
  }

  if (Instruction *I = foldSelectShuffleWith1Binop(Shuf))
    return I;

  BinaryOperator *B0, *B1;
  if (!match(Shuf.getOperand(0), m_BinOp(B0)) ||
      !match(Shuf.getOperand(1), m_BinOp(B1)))
    return nullptr;

  Value *X, *Y;
  Constant *C0, *C1;
  bool ConstantsAreOp1;
  if (match(B0, m_BinOp(m_Value(X), m_Constant(C0))) &&
      match(B1, m_BinOp(m_Value(Y), m_Constant(C1))))
    ConstantsAreOp1 = true;
  else if (match(B0, m_BinOp(m_Constant(C0), m_Value(X))) &&
           match(B1, m_BinOp(m_Constant(C1), m_Value(Y))))
    ConstantsAreOp1 = false;
  else
    return nullptr;

  // We need matching binops to fold the lanes together.
  BinaryOperator::BinaryOps Opc0 = B0->getOpcode();
  BinaryOperator::BinaryOps Opc1 = B1->getOpcode();
  bool DropNSW = false;
  if (ConstantsAreOp1 && Opc0 != Opc1) {
    // TODO: We drop "nsw" if shift is converted into multiply because it may
    // not be correct when the shift amount is BitWidth - 1. We could examine
    // each vector element to determine if it is safe to keep that flag.
    if (Opc0 == Instruction::Shl || Opc1 == Instruction::Shl)
      DropNSW = true;
    if (BinopElts AltB0 = getAlternateBinop(B0, DL)) {
      assert(isa<Constant>(AltB0.Op1) && "Expecting constant with alt binop");
      Opc0 = AltB0.Opcode;
      C0 = cast<Constant>(AltB0.Op1);
    } else if (BinopElts AltB1 = getAlternateBinop(B1, DL)) {
      assert(isa<Constant>(AltB1.Op1) && "Expecting constant with alt binop");
      Opc1 = AltB1.Opcode;
      C1 = cast<Constant>(AltB1.Op1);
    }
  }

  if (Opc0 != Opc1)
    return nullptr;

  // The opcodes must be the same. Use a new name to make that clear.
  BinaryOperator::BinaryOps BOpc = Opc0;

  // Select the constant elements needed for the single binop.
  ArrayRef<int> Mask = Shuf.getShuffleMask();
  Constant *NewC = ConstantExpr::getShuffleVector(C0, C1, Mask);

  // We are moving a binop after a shuffle. When a shuffle has an undefined
  // mask element, the result is undefined, but it is not poison or undefined
  // behavior. That is not necessarily true for div/rem/shift.
  bool MightCreatePoisonOrUB =
      is_contained(Mask, UndefMaskElem) &&
      (Instruction::isIntDivRem(BOpc) || Instruction::isShift(BOpc));
  if (MightCreatePoisonOrUB)
    NewC = InstCombiner::getSafeVectorConstantForBinop(BOpc, NewC,
                                                       ConstantsAreOp1);

  Value *V;
  if (X == Y) {
    // Remove a binop and the shuffle by rearranging the constant:
    // shuffle (op V, C0), (op V, C1), M --> op V, C'
    // shuffle (op C0, V), (op C1, V), M --> op C', V
    V = X;
  } else {
    // If there are 2 different variable operands, we must create a new shuffle
    // (select) first, so check uses to ensure that we don't end up with more
    // instructions than we started with.
    if (!B0->hasOneUse() && !B1->hasOneUse())
      return nullptr;

    // If we use the original shuffle mask and op1 is *variable*, we would be
    // putting an undef into operand 1 of div/rem/shift. This is either UB or
    // poison. We do not have to guard against UB when *constants* are op1
    // because safe constants guarantee that we do not overflow sdiv/srem (and
    // there's no danger for other opcodes).
    // TODO: To allow this case, create a new shuffle mask with no undefs.
    if (MightCreatePoisonOrUB && !ConstantsAreOp1)
      return nullptr;

    // Note: In general, we do not create new shuffles in InstCombine because we
    // do not know if a target can lower an arbitrary shuffle optimally. In this
    // case, the shuffle uses the existing mask, so there is no additional risk.

    // Select the variable vectors first, then perform the binop:
    // shuffle (op X, C0), (op Y, C1), M --> op (shuffle X, Y, M), C'
    // shuffle (op C0, X), (op C1, Y), M --> op C', (shuffle X, Y, M)
    V = Builder.CreateShuffleVector(X, Y, Mask);
  }

  Value *NewBO = ConstantsAreOp1 ? Builder.CreateBinOp(BOpc, V, NewC) :
                                   Builder.CreateBinOp(BOpc, NewC, V);

  // Flags are intersected from the 2 source binops. But there are 2 exceptions:
  // 1. If we changed an opcode, poison conditions might have changed.
  // 2. If the shuffle had undef mask elements, the new binop might have undefs
  //    where the original code did not. But if we already made a safe constant,
  //    then there's no danger.
  if (auto *NewI = dyn_cast<Instruction>(NewBO)) {
    NewI->copyIRFlags(B0);
    NewI->andIRFlags(B1);
    if (DropNSW)
      NewI->setHasNoSignedWrap(false);
    if (is_contained(Mask, UndefMaskElem) && !MightCreatePoisonOrUB)
      NewI->dropPoisonGeneratingFlags();
  }
  return replaceInstUsesWith(Shuf, NewBO);
}

/// Convert a narrowing shuffle of a bitcasted vector into a vector truncate.
/// Example (little endian):
/// shuf (bitcast <4 x i16> X to <8 x i8>), <0, 2, 4, 6> --> trunc X to <4 x i8>
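/// On a big-endian target the low bits of each wide element sit at the higher
/// narrow lane index, so the equivalent example is:
/// shuf (bitcast <4 x i16> X to <8 x i8>), <1, 3, 5, 7> --> trunc X to <4 x i8>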
static Instruction *foldTruncShuffle(ShuffleVectorInst &Shuf,
                                     bool IsBigEndian) {
  // This must be a bitcasted shuffle of 1 vector integer operand.
  Type *DestType = Shuf.getType();
  Value *X;
  if (!match(Shuf.getOperand(0), m_BitCast(m_Value(X))) ||
      !match(Shuf.getOperand(1), m_Undef()) || !DestType->isIntOrIntVectorTy())
    return nullptr;

  // The source type must have the same number of elements as the shuffle,
  // and the source element type must be larger than the shuffle element type.
  Type *SrcType = X->getType();
  if (!SrcType->isVectorTy() || !SrcType->isIntOrIntVectorTy() ||
      cast<FixedVectorType>(SrcType)->getNumElements() !=
          cast<FixedVectorType>(DestType)->getNumElements() ||
      SrcType->getScalarSizeInBits() % DestType->getScalarSizeInBits() != 0)
    return nullptr;

  assert(Shuf.changesLength() && !Shuf.increasesLength() &&
         "Expected a shuffle that decreases length");

  // Last, check that the mask chooses the correct low bits for each narrow
  // element in the result.
  uint64_t TruncRatio =
      SrcType->getScalarSizeInBits() / DestType->getScalarSizeInBits();
  ArrayRef<int> Mask = Shuf.getShuffleMask();
  for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
    if (Mask[i] == UndefMaskElem)
      continue;
    uint64_t LSBIndex = IsBigEndian ? (i + 1) * TruncRatio - 1 : i * TruncRatio;
    assert(LSBIndex <= INT32_MAX && "Overflowed 32-bits");
    if (Mask[i] != (int)LSBIndex)
      return nullptr;
  }

  return new TruncInst(X, DestType);
}

/// Match a shuffle-select-shuffle pattern where the shuffles are widening and
/// narrowing (concatenating with undef and extracting back to the original
/// length). This allows replacing the wide select with a narrow select.
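/// For example (illustrative only):
///   %widecond = shufflevector <2 x i1> %cond, <2 x i1> undef,
///                             <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
///   %widesel = select <4 x i1> %widecond, <4 x i32> %x, <4 x i32> %y
///   %r = shufflevector <4 x i32> %widesel, <4 x i32> undef,
///                      <2 x i32> <i32 0, i32 1>
/// becomes:
///   %nx = shufflevector <4 x i32> %x, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
///   %ny = shufflevector <4 x i32> %y, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
///   %r = select <2 x i1> %cond, <2 x i32> %nx, <2 x i32> %ny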
static Instruction *narrowVectorSelect(ShuffleVectorInst &Shuf,
                                       InstCombiner::BuilderTy &Builder) {
  // This must be a narrowing identity shuffle. It extracts the 1st N elements
  // of the 1st vector operand of a shuffle.
  if (!match(Shuf.getOperand(1), m_Undef()) || !Shuf.isIdentityWithExtract())
    return nullptr;

  // The vector being shuffled must be a vector select that we can eliminate.
  // TODO: The one-use requirement could be eased if X and/or Y are constants.
  Value *Cond, *X, *Y;
  if (!match(Shuf.getOperand(0),
             m_OneUse(m_Select(m_Value(Cond), m_Value(X), m_Value(Y)))))
    return nullptr;

  // We need a narrow condition value. It must be extended with undef elements
  // and have the same number of elements as this shuffle.
  unsigned NarrowNumElts =
      cast<FixedVectorType>(Shuf.getType())->getNumElements();
  Value *NarrowCond;
  if (!match(Cond, m_OneUse(m_Shuffle(m_Value(NarrowCond), m_Undef()))) ||
      cast<FixedVectorType>(NarrowCond->getType())->getNumElements() !=
          NarrowNumElts ||
      !cast<ShuffleVectorInst>(Cond)->isIdentityWithPadding())
    return nullptr;

  // shuf (sel (shuf NarrowCond, undef, WideMask), X, Y), undef, NarrowMask -->
  // sel NarrowCond, (shuf X, undef, NarrowMask), (shuf Y, undef, NarrowMask)
  Value *NarrowX = Builder.CreateShuffleVector(X, Shuf.getShuffleMask());
  Value *NarrowY = Builder.CreateShuffleVector(Y, Shuf.getShuffleMask());
  return SelectInst::Create(NarrowCond, NarrowX, NarrowY);
}

/// Try to fold an extract subvector operation.
static Instruction *foldIdentityExtractShuffle(ShuffleVectorInst &Shuf) {
  Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
  if (!Shuf.isIdentityWithExtract() || !match(Op1, m_Undef()))
    return nullptr;

  // Check if we are extracting all bits of an inserted scalar:
  // extract-subvec (bitcast (inselt ?, X, 0)) --> bitcast X to subvec type
  Value *X;
  if (match(Op0, m_BitCast(m_InsertElt(m_Value(), m_Value(X), m_Zero()))) &&
      X->getType()->getPrimitiveSizeInBits() ==
          Shuf.getType()->getPrimitiveSizeInBits())
    return new BitCastInst(X, Shuf.getType());

  // Try to combine 2 shuffles into 1 shuffle by concatenating a shuffle mask.
  Value *Y;
  ArrayRef<int> Mask;
  if (!match(Op0, m_Shuffle(m_Value(X), m_Value(Y), m_Mask(Mask))))
    return nullptr;

  // Be conservative with shuffle transforms. If we can't kill the 1st shuffle,
  // then combining may result in worse codegen.
  if (!Op0->hasOneUse())
    return nullptr;

  // We are extracting a subvector from a shuffle. Remove excess elements from
  // the 1st shuffle mask to eliminate the extract.
  //
  // This transform is conservatively limited to identity extracts because we do
  // not allow arbitrary shuffle mask creation as a target-independent transform
  // (because we can't guarantee that will lower efficiently).
  //
  // If the extracting shuffle has an undef mask element, it transfers to the
  // new shuffle mask. Otherwise, copy the original mask element. Example:
  // shuf (shuf X, Y, <C0, C1, C2, undef, C4>), undef, <0, undef, 2, 3> -->
  // shuf X, Y, <C0, undef, C2, undef>
  unsigned NumElts = cast<FixedVectorType>(Shuf.getType())->getNumElements();
  SmallVector<int, 16> NewMask(NumElts);
  assert(NumElts < Mask.size() &&
         "Identity with extract must have fewer elements than its inputs");

  for (unsigned i = 0; i != NumElts; ++i) {
    int ExtractMaskElt = Shuf.getMaskValue(i);
    int MaskElt = Mask[i];
    NewMask[i] = ExtractMaskElt == UndefMaskElem ? ExtractMaskElt : MaskElt;
  }
  return new ShuffleVectorInst(X, Y, NewMask);
}

/// Try to replace a shuffle with an insertelement or try to replace a shuffle
/// operand with the operand of an insertelement.
static Instruction *foldShuffleWithInsert(ShuffleVectorInst &Shuf,
                                          InstCombinerImpl &IC) {
  Value *V0 = Shuf.getOperand(0), *V1 = Shuf.getOperand(1);
  SmallVector<int, 16> Mask;
  Shuf.getShuffleMask(Mask);

  int NumElts = Mask.size();
  int InpNumElts = cast<FixedVectorType>(V0->getType())->getNumElements();

  // This is a specialization of a fold in SimplifyDemandedVectorElts. We may
  // not be able to handle it there if the insertelement has >1 use.
  // If the shuffle has an insertelement operand but does not choose the
  // inserted scalar element from that value, then we can replace that shuffle
  // operand with the source vector of the insertelement.
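  // For example (illustrative only):
  //   shufflevector (insertelement <4 x i32> %x, i32 %s, i64 2), <4 x i32> %v,
  //                 <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  // never reads lane 2 of the insert, so it can use %x directly:
  //   shufflevector <4 x i32> %x, <4 x i32> %v,
  //                 <4 x i32> <i32 0, i32 1, i32 4, i32 5>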
  Value *X;
  uint64_t IdxC;
  if (match(V0, m_InsertElt(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) {
    // shuf (inselt X, ?, IdxC), ?, Mask --> shuf X, ?, Mask
    if (!is_contained(Mask, (int)IdxC))
      return IC.replaceOperand(Shuf, 0, X);
  }
  if (match(V1, m_InsertElt(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) {
    // Offset the index constant by the vector width because we are checking for
    // accesses to the 2nd vector input of the shuffle.
    IdxC += InpNumElts;
    // shuf ?, (inselt X, ?, IdxC), Mask --> shuf ?, X, Mask
    if (!is_contained(Mask, (int)IdxC))
      return IC.replaceOperand(Shuf, 1, X);
  }
  // For the rest of the transform, the shuffle must not change vector sizes.
  // TODO: This restriction could be removed if the insert has only one use
  //       (because the transform would require a new length-changing shuffle).
  if (NumElts != InpNumElts)
    return nullptr;

  // shuffle (insert ?, Scalar, IndexC), V1, Mask --> insert V1, Scalar, IndexC'
  auto isShufflingScalarIntoOp1 = [&](Value *&Scalar, ConstantInt *&IndexC) {
    // We need an insertelement with a constant index.
    if (!match(V0, m_InsertElt(m_Value(), m_Value(Scalar),
                               m_ConstantInt(IndexC))))
      return false;

    // Test the shuffle mask to see if it splices the inserted scalar into the
    // operand 1 vector of the shuffle.
    int NewInsIndex = -1;
    for (int i = 0; i != NumElts; ++i) {
      // Ignore undef mask elements.
      if (Mask[i] == -1)
        continue;

      // The shuffle takes elements of operand 1 without lane changes.
      if (Mask[i] == NumElts + i)
        continue;

      // The shuffle must choose the inserted scalar exactly once.
      if (NewInsIndex != -1 || Mask[i] != IndexC->getSExtValue())
        return false;

      // The shuffle is placing the inserted scalar into element i.
      NewInsIndex = i;
    }

    assert(NewInsIndex != -1 && "Did not fold shuffle with unused operand?");

    // Index is updated to the potentially translated insertion lane.
    IndexC = ConstantInt::get(IndexC->getType(), NewInsIndex);
    return true;
  };

  // If the shuffle is unnecessary, insert the scalar operand directly into
  // operand 1 of the shuffle. Example:
  // shuffle (insert ?, S, 1), V1, <1, 5, 6, 7> --> insert V1, S, 0
  Value *Scalar;
  ConstantInt *IndexC;
  if (isShufflingScalarIntoOp1(Scalar, IndexC))
    return InsertElementInst::Create(V1, Scalar, IndexC);

  // Try again after commuting shuffle. Example:
  // shuffle V0, (insert ?, S, 0), <0, 1, 2, 4> -->
  // shuffle (insert ?, S, 0), V0, <4, 5, 6, 0> --> insert V0, S, 3
  std::swap(V0, V1);
  ShuffleVectorInst::commuteShuffleMask(Mask, NumElts);
  if (isShufflingScalarIntoOp1(Scalar, IndexC))
    return InsertElementInst::Create(V1, Scalar, IndexC);

  return nullptr;
}

static Instruction *foldIdentityPaddedShuffles(ShuffleVectorInst &Shuf) {
  // Match the operands as identity with padding (also known as concatenation
  // with undef) shuffles of the same source type. The backend is expected to
  // recreate these concatenations from a shuffle of narrow operands.
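  // For example (illustrative only):
  //   %w0 = shufflevector <2 x i32> %x, <2 x i32> undef,
  //                       <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  //   %w1 = shufflevector <2 x i32> %y, <2 x i32> undef,
  //                       <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  //   %r  = shufflevector <4 x i32> %w0, <4 x i32> %w1,
  //                       <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  // becomes a single shuffle of the narrow sources:
  //   %r  = shufflevector <2 x i32> %x, <2 x i32> %y,
  //                       <4 x i32> <i32 0, i32 1, i32 2, i32 3>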
  auto *Shuffle0 = dyn_cast<ShuffleVectorInst>(Shuf.getOperand(0));
  auto *Shuffle1 = dyn_cast<ShuffleVectorInst>(Shuf.getOperand(1));
  if (!Shuffle0 || !Shuffle0->isIdentityWithPadding() ||
      !Shuffle1 || !Shuffle1->isIdentityWithPadding())
    return nullptr;

  // We limit this transform to power-of-2 types because we expect that the
  // backend can convert the simplified IR patterns to identical nodes as the
  // original IR.
  // TODO: If we can verify the same behavior for arbitrary types, the
  //       power-of-2 checks can be removed.
  Value *X = Shuffle0->getOperand(0);
  Value *Y = Shuffle1->getOperand(0);
  if (X->getType() != Y->getType() ||
      !isPowerOf2_32(cast<FixedVectorType>(Shuf.getType())->getNumElements()) ||
      !isPowerOf2_32(
          cast<FixedVectorType>(Shuffle0->getType())->getNumElements()) ||
      !isPowerOf2_32(cast<FixedVectorType>(X->getType())->getNumElements()) ||
      match(X, m_Undef()) || match(Y, m_Undef()))
    return nullptr;
  assert(match(Shuffle0->getOperand(1), m_Undef()) &&
         match(Shuffle1->getOperand(1), m_Undef()) &&
         "Unexpected operand for identity shuffle");

  // This is a shuffle of 2 widening shuffles. We can shuffle the narrow source
  // operands directly by adjusting the shuffle mask to account for the narrower
  // types:
  // shuf (widen X), (widen Y), Mask --> shuf X, Y, Mask'
  int NarrowElts = cast<FixedVectorType>(X->getType())->getNumElements();
  int WideElts = cast<FixedVectorType>(Shuffle0->getType())->getNumElements();
  assert(WideElts > NarrowElts && "Unexpected types for identity with padding");

  ArrayRef<int> Mask = Shuf.getShuffleMask();
  SmallVector<int, 16> NewMask(Mask.size(), -1);
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    if (Mask[i] == -1)
      continue;

    // If this shuffle is choosing an undef element from 1 of the sources, that
    // element is undef.
    if (Mask[i] < WideElts) {
      if (Shuffle0->getMaskValue(Mask[i]) == -1)
        continue;
    } else {
      if (Shuffle1->getMaskValue(Mask[i] - WideElts) == -1)
        continue;
    }

    // If this shuffle is choosing from the 1st narrow op, the mask element is
    // the same. If this shuffle is choosing from the 2nd narrow op, the mask
    // element is offset down to adjust for the narrow vector widths.
    if (Mask[i] < WideElts) {
      assert(Mask[i] < NarrowElts && "Unexpected shuffle mask");
      NewMask[i] = Mask[i];
    } else {
      assert(Mask[i] < (WideElts + NarrowElts) && "Unexpected shuffle mask");
      NewMask[i] = Mask[i] - (WideElts - NarrowElts);
    }
  }
  return new ShuffleVectorInst(X, Y, NewMask);
}

Instruction *InstCombinerImpl::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  Value *LHS = SVI.getOperand(0);
  Value *RHS = SVI.getOperand(1);
  SimplifyQuery ShufQuery = SQ.getWithInstruction(&SVI);
  if (auto *V = SimplifyShuffleVectorInst(LHS, RHS, SVI.getShuffleMask(),
                                          SVI.getType(), ShufQuery))
    return replaceInstUsesWith(SVI, V);

  // Bail out for scalable vectors.
  if (isa<ScalableVectorType>(LHS->getType()))
    return nullptr;

  unsigned VWidth = cast<FixedVectorType>(SVI.getType())->getNumElements();
  unsigned LHSWidth = cast<FixedVectorType>(LHS->getType())->getNumElements();

  // shuffle (bitcast X), (bitcast Y), Mask --> bitcast (shuffle X, Y, Mask)
  //
  // If X and Y are of the same (vector) type, and the element size is not
  // changed by the bitcasts, we can distribute the bitcasts through the
  // shuffle, hopefully reducing the number of instructions. We make sure that
  // at least one bitcast only has one use, so we don't *increase* the number of
  // instructions here.
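  //
  // For example (illustrative only):
  //   shufflevector (bitcast <4 x i32> %x to <4 x float>),
  //                 (bitcast <4 x i32> %y to <4 x float>),
  //                 <2 x i32> <i32 0, i32 5>
  // becomes:
  //   bitcast (shufflevector <4 x i32> %x, <4 x i32> %y,
  //                          <2 x i32> <i32 0, i32 5>) to <2 x float>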
  Value *X, *Y;
  if (match(LHS, m_BitCast(m_Value(X))) && match(RHS, m_BitCast(m_Value(Y))) &&
      X->getType()->isVectorTy() && X->getType() == Y->getType() &&
      X->getType()->getScalarSizeInBits() ==
          SVI.getType()->getScalarSizeInBits() &&
      (LHS->hasOneUse() || RHS->hasOneUse())) {
    Value *V = Builder.CreateShuffleVector(X, Y, SVI.getShuffleMask(),
                                           SVI.getName() + ".uncasted");
    return new BitCastInst(V, SVI.getType());
  }

  ArrayRef<int> Mask = SVI.getShuffleMask();
  Type *Int32Ty = Type::getInt32Ty(SVI.getContext());

  // Peek through a bitcasted shuffle operand by scaling the mask. If the
  // simulated shuffle can simplify, then this shuffle is unnecessary:
  // shuf (bitcast X), undef, Mask --> bitcast X'
  // TODO: This could be extended to allow length-changing shuffles.
  //       The transform might also be obsoleted if we allowed canonicalization
  //       of bitcasted shuffles.
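  //
  // For example (illustrative only):
  //   shufflevector (bitcast <2 x i64> %x to <4 x i32>), <4 x i32> undef,
  //                 <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  // scales the mask to <i32 0, i32 1> on the <2 x i64> source; that simulated
  // shuffle is an identity, so the whole pattern simplifies to 'bitcast %x'.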
  if (match(LHS, m_BitCast(m_Value(X))) && match(RHS, m_Undef()) &&
      X->getType()->isVectorTy() && VWidth == LHSWidth) {
    // Try to create a scaled mask constant.
    auto *XType = cast<FixedVectorType>(X->getType());
    unsigned XNumElts = XType->getNumElements();
    SmallVector<int, 16> ScaledMask;
    if (XNumElts >= VWidth) {
      assert(XNumElts % VWidth == 0 && "Unexpected vector bitcast");
      narrowShuffleMaskElts(XNumElts / VWidth, Mask, ScaledMask);
    } else {
      assert(VWidth % XNumElts == 0 && "Unexpected vector bitcast");
      if (!widenShuffleMaskElts(VWidth / XNumElts, Mask, ScaledMask))
        ScaledMask.clear();
    }
    if (!ScaledMask.empty()) {
      // If the shuffled source vector simplifies, cast that value to this
      // shuffle's type.
      if (auto *V = SimplifyShuffleVectorInst(X, UndefValue::get(XType),
                                              ScaledMask, XType, ShufQuery))
        return BitCastInst::Create(Instruction::BitCast, V, SVI.getType());
    }
  }

  // shuffle x, x, mask --> shuffle x, undef, mask'
  if (LHS == RHS) {
    assert(!match(RHS, m_Undef()) &&
           "Shuffle with 2 undef ops not simplified?");
    return new ShuffleVectorInst(LHS, createUnaryMask(Mask, LHSWidth));
  }

  // shuffle undef, x, mask --> shuffle x, undef, mask'
  if (match(LHS, m_Undef())) {
    SVI.commute();
    return &SVI;
  }

  if (Instruction *I = canonicalizeInsertSplat(SVI, Builder))
    return I;

  if (Instruction *I = foldSelectShuffle(SVI))
    return I;

  if (Instruction *I = foldTruncShuffle(SVI, DL.isBigEndian()))
    return I;

  if (Instruction *I = narrowVectorSelect(SVI, Builder))
    return I;

  APInt UndefElts(VWidth, 0);
  APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
  if (Value *V = SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
    if (V != &SVI)
      return replaceInstUsesWith(SVI, V);
    return &SVI;
  }

  if (Instruction *I = foldIdentityExtractShuffle(SVI))
    return I;

  // These transforms have the potential to lose undef knowledge, so they are
  // intentionally placed after SimplifyDemandedVectorElts().
  if (Instruction *I = foldShuffleWithInsert(SVI, *this))
    return I;
  if (Instruction *I = foldIdentityPaddedShuffles(SVI))
    return I;

  if (match(RHS, m_Undef()) && canEvaluateShuffled(LHS, Mask)) {
    Value *V = evaluateInDifferentElementOrder(LHS, Mask);
    return replaceInstUsesWith(SVI, V);
  }

  // SROA generates shuffle+bitcast when the extracted sub-vector is bitcast to
  // a non-vector type. We can instead bitcast the original vector followed by
  // an extract of the desired element:
  //
  //   %sroa = shufflevector <16 x i8> %in, <16 x i8> undef,
  //                         <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  //   %1 = bitcast <4 x i8> %sroa to i32
  // Becomes:
  //   %bc = bitcast <16 x i8> %in to <4 x i32>
  //   %ext = extractelement <4 x i32> %bc, i32 0
  //
  // If the shuffle is extracting a contiguous range of values from the input
  // vector then each use which is a bitcast of the extracted size can be
  // replaced. This will work if the vector types are compatible, and the begin
  // index is aligned to a value in the casted vector type. If the begin index
  // isn't aligned then we can shuffle the original vector (keeping the same
  // vector type) before extracting.
  //
  // This code will bail out if the target type is fundamentally incompatible
  // with vectors of the source type.
  //
  // Example of <16 x i8>, target type i32:
  // Index range [4,8):         v-----------v Will work.
  //                +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
  //     <16 x i8>: |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
  //     <4 x i32>: |           |           |           |           |
  //                +-----------+-----------+-----------+-----------+
  // Index range [6,10):              ^-----------^ Needs an extra shuffle.
  // Target type i40:           ^--------------^ Won't work, bail.
  bool MadeChange = false;
  if (isShuffleExtractingFromLHS(SVI, Mask)) {
    Value *V = LHS;
    unsigned MaskElems = Mask.size();
    auto *SrcTy = cast<FixedVectorType>(V->getType());
    unsigned VecBitWidth = SrcTy->getPrimitiveSizeInBits().getFixedSize();
    unsigned SrcElemBitWidth = DL.getTypeSizeInBits(SrcTy->getElementType());
    assert(SrcElemBitWidth && "vector elements must have a bitwidth");
    unsigned SrcNumElems = SrcTy->getNumElements();
    SmallVector<BitCastInst *, 8> BCs;
    DenseMap<Type *, Value *> NewBCs;
    for (User *U : SVI.users())
      if (BitCastInst *BC = dyn_cast<BitCastInst>(U))
        if (!BC->use_empty())
          // Only visit bitcasts that weren't previously handled.
          BCs.push_back(BC);
    for (BitCastInst *BC : BCs) {
      unsigned BegIdx = Mask.front();
      Type *TgtTy = BC->getDestTy();
      unsigned TgtElemBitWidth = DL.getTypeSizeInBits(TgtTy);
      if (!TgtElemBitWidth)
        continue;
      unsigned TgtNumElems = VecBitWidth / TgtElemBitWidth;
      bool VecBitWidthsEqual = VecBitWidth == TgtNumElems * TgtElemBitWidth;
      bool BegIsAligned = 0 == ((SrcElemBitWidth * BegIdx) % TgtElemBitWidth);
      if (!VecBitWidthsEqual)
        continue;
      if (!VectorType::isValidElementType(TgtTy))
        continue;
      auto *CastSrcTy = FixedVectorType::get(TgtTy, TgtNumElems);
      if (!BegIsAligned) {
        // Shuffle the input so [0,NumElements) contains the output, and
        // [NumElems,SrcNumElems) is undef.
        SmallVector<int, 16> ShuffleMask(SrcNumElems, -1);
        for (unsigned I = 0, E = MaskElems, Idx = BegIdx; I != E; ++Idx, ++I)
          ShuffleMask[I] = Idx;
        V = Builder.CreateShuffleVector(V, ShuffleMask,
                                        SVI.getName() + ".extract");
        BegIdx = 0;
      }
      unsigned SrcElemsPerTgtElem = TgtElemBitWidth / SrcElemBitWidth;
      assert(SrcElemsPerTgtElem);
      BegIdx /= SrcElemsPerTgtElem;
      bool BCAlreadyExists = NewBCs.find(CastSrcTy) != NewBCs.end();
      auto *NewBC =
          BCAlreadyExists
              ? NewBCs[CastSrcTy]
              : Builder.CreateBitCast(V, CastSrcTy, SVI.getName() + ".bc");
      if (!BCAlreadyExists)
        NewBCs[CastSrcTy] = NewBC;
      auto *Ext = Builder.CreateExtractElement(
          NewBC, ConstantInt::get(Int32Ty, BegIdx), SVI.getName() + ".extract");
      // The shufflevector isn't being replaced: the bitcast that used it
      // is. InstCombine will visit the newly-created instructions.
      replaceInstUsesWith(*BC, Ext);
      MadeChange = true;
    }
  }

  // If the LHS is a shufflevector itself, see if we can combine it with this
  // one without producing an unusual shuffle.
  // Cases that might be simplified:
  // 1.
  //   x1 = shuffle(v1, v2, mask1)
  //   x  = shuffle(x1, undef, mask)
  //     ==>
  //   x  = shuffle(v1, v2, newMask)
  //   newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : -1
  // 2.
  //   x1 = shuffle(v1, undef, mask1)
  //   x  = shuffle(x1, x2, mask)
  //   where v1.size() == mask1.size()
  //     ==>
  //   x  = shuffle(v1, x2, newMask)
  //   newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : mask[i]
  // 3.
  //   x2 = shuffle(v2, undef, mask2)
  //   x  = shuffle(x1, x2, mask)
  //   where v2.size() == mask2.size()
  //     ==>
  //   x  = shuffle(x1, v2, newMask)
  //   newMask[i] = (mask[i] < x1.size())
  //                ? mask[i] : mask2[mask[i]-x1.size()]+x1.size()
  // 4.
  //   x1 = shuffle(v1, undef, mask1)
  //   x2 = shuffle(v2, undef, mask2)
  //   x  = shuffle(x1, x2, mask)
  //   where v1.size() == v2.size()
  //     ==>
  //   x  = shuffle(v1, v2, newMask)
  //   newMask[i] = (mask[i] < x1.size())
  //                ? mask1[mask[i]] : mask2[mask[i]-x1.size()]+v1.size()
  //
  // Here we are really conservative:
  // we are absolutely afraid of producing a shuffle mask not in the input
  // program, because the code gen may not be smart enough to turn a merged
  // shuffle into two specific shuffles: it may produce worse code. As such,
  // we only merge two shuffles if the result is either a splat or one of the
  // input shuffle masks. In this case, merging the shuffles just removes
  // one instruction, which we know is safe. This is good for things like
  // turning: (splat(splat)) -> splat, or
  // merge(V[0..n], V[n+1..2n]) -> V[0..2n]
  ShuffleVectorInst* LHSShuffle = dyn_cast<ShuffleVectorInst>(LHS);
  ShuffleVectorInst* RHSShuffle = dyn_cast<ShuffleVectorInst>(RHS);
  if (LHSShuffle)
    if (!match(LHSShuffle->getOperand(1), m_Undef()) && !match(RHS, m_Undef()))
      LHSShuffle = nullptr;
  if (RHSShuffle)
    if (!match(RHSShuffle->getOperand(1), m_Undef()))
      RHSShuffle = nullptr;
  if (!LHSShuffle && !RHSShuffle)
    return MadeChange ? &SVI : nullptr;

  Value* LHSOp0 = nullptr;
  Value* LHSOp1 = nullptr;
  Value* RHSOp0 = nullptr;
  unsigned LHSOp0Width = 0;
  unsigned RHSOp0Width = 0;
  if (LHSShuffle) {
    LHSOp0 = LHSShuffle->getOperand(0);
    LHSOp1 = LHSShuffle->getOperand(1);
    LHSOp0Width = cast<FixedVectorType>(LHSOp0->getType())->getNumElements();
  }
  if (RHSShuffle) {
    RHSOp0 = RHSShuffle->getOperand(0);
    RHSOp0Width = cast<FixedVectorType>(RHSOp0->getType())->getNumElements();
  }
  Value* newLHS = LHS;
  Value* newRHS = RHS;
  if (match(RHS, m_Undef())) {
    // Case 1 or 4: the entire result comes through the LHS shuffle.
    newLHS = LHSOp0;
    newRHS = LHSOp1;
  } else if (LHSOp0Width == LHSWidth) {
    // Case 2: only replace the LHS operand.
    newLHS = LHSOp0;
  }

  if (RHSShuffle && RHSOp0Width == LHSWidth) {
    // Case 3 or 4: replace the RHS operand.
    newRHS = RHSOp0;
  }
  if (LHSOp0 == RHSOp0) {
    // Both sides select from the same source; remap everything to it.
    newLHS = LHSOp0;
    newRHS = nullptr;
  }

  if (newLHS == LHS && newRHS == RHS)
    return MadeChange ? &SVI : nullptr;

  ArrayRef<int> LHSMask;
  ArrayRef<int> RHSMask;
  if (newLHS != LHS)
    LHSMask = LHSShuffle->getShuffleMask();
  if (RHSShuffle && newRHS != RHS)
    RHSMask = RHSShuffle->getShuffleMask();

  unsigned newLHSWidth = (newLHS != LHS) ? LHSOp0Width : LHSWidth;
  SmallVector<int, 16> newMask;
  bool isSplat = true;
  int SplatElt = -1;
  // Create a new mask for the new ShuffleVectorInst so that the new
  // ShuffleVectorInst is equivalent to the original one.
  for (unsigned i = 0; i < VWidth; ++i) {
    int eltMask;
    if (Mask[i] < 0) {
      // This element is an undef value.
      eltMask = -1;
    } else if (Mask[i] < (int)LHSWidth) {
      // This element is from the left hand side vector operand.
      //
      // If LHS is going to be replaced (case 1, 2, or 4), calculate the
      // new mask value for the element.
      if (newLHS != LHS) {
        eltMask = LHSMask[Mask[i]];
        // If the value selected is an undef value, explicitly specify it
        // with a -1 mask value.
        if (eltMask >= (int)LHSOp0Width && isa<UndefValue>(LHSOp1))
          eltMask = -1;
      } else
        eltMask = Mask[i];
    } else {
      // This element is from the right hand side vector operand.
      //
      // If the value selected is an undef value, explicitly specify it
      // with a -1 mask value. (case 1)
      if (match(RHS, m_Undef()))
        eltMask = -1;
      // If RHS is going to be replaced (case 3 or 4), calculate the
      // new mask value for the element.
      else if (newRHS != RHS) {
        eltMask = RHSMask[Mask[i]-LHSWidth];
        // If the value selected is an undef value, explicitly specify it
        // with a -1 mask value.
        if (eltMask >= (int)RHSOp0Width) {
          assert(match(RHSShuffle->getOperand(1), m_Undef()) &&
                 "should have been checked above");
          eltMask = -1;
        }
      } else
        eltMask = Mask[i]-LHSWidth;

      // If LHS's width is changed, shift the mask value accordingly.
      // If newRHS == nullptr, i.e. LHSOp0 == RHSOp0, we want to remap any
      // references from RHSOp0 to LHSOp0, so we don't need to shift the mask.
      // If newRHS == newLHS, we want to remap any references from newRHS to
      // newLHS so that we can properly identify splats that may occur due to
      // obfuscation across the two vectors.
      if (eltMask >= 0 && newRHS != nullptr && newLHS != newRHS)
        eltMask += newLHSWidth;
    }

    // Check if this could still be a splat.
    if (eltMask >= 0) {
      if (SplatElt >= 0 && SplatElt != eltMask)
        isSplat = false;
      SplatElt = eltMask;
    }

    newMask.push_back(eltMask);
  }

  // If the result mask is equal to one of the original shuffle masks,
  // or is a splat, do the replacement.
  if (isSplat || newMask == LHSMask || newMask == RHSMask || newMask == Mask) {
    if (!newRHS)
      newRHS = UndefValue::get(newLHS->getType());
    return new ShuffleVectorInst(newLHS, newRHS, newMask);
  }

  return MadeChange ? &SVI : nullptr;
}