//===-- lib/CodeGen/GlobalISel/GICombinerHelper.cpp -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
9 #include "llvm/ADT/SetVector.h"
10 #include "llvm/ADT/SmallBitVector.h"
11 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
12 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
13 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
14 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
15 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
16 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
17 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
18 #include "llvm/CodeGen/GlobalISel/Utils.h"
19 #include "llvm/CodeGen/LowLevelType.h"
20 #include "llvm/CodeGen/MachineBasicBlock.h"
21 #include "llvm/CodeGen/MachineDominators.h"
22 #include "llvm/CodeGen/MachineInstr.h"
23 #include "llvm/CodeGen/MachineMemOperand.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/RegisterBankInfo.h"
26 #include "llvm/CodeGen/TargetInstrInfo.h"
27 #include "llvm/CodeGen/TargetLowering.h"
28 #include "llvm/CodeGen/TargetOpcodes.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/Support/Casting.h"
31 #include "llvm/Support/DivisionByConstantInfo.h"
32 #include "llvm/Support/MathExtras.h"
33 #include "llvm/Target/TargetMachine.h"
#define DEBUG_TYPE "gi-combiner"

using namespace llvm;
using namespace MIPatternMatch;
// Option to allow testing of the combiner while no targets know about indexed
// addressing.
static cl::opt<bool>
    ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
                       cl::desc("Force all indexed operations to be "
                                "legal for the GlobalISel combiner"));
48 CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
49 MachineIRBuilder &B, GISelKnownBits *KB,
50 MachineDominatorTree *MDT,
51 const LegalizerInfo *LI)
    : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB),
      MDT(MDT), LI(LI), RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
      TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
  (void)this->KB;
}

const TargetLowering &CombinerHelper::getTargetLowering() const {
  return *Builder.getMF().getSubtarget().getTargetLowering();
}
/// \returns The little endian in-memory byte position of byte \p I in a
/// \p ByteWidth bytes wide type.
///
/// E.g. Given a 4-byte type x, x[0] -> byte 0
static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) {
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return I;
}
71 /// Determines the LogBase2 value for a non-null input value using the
72 /// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
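/// E.g. (illustrative) for a 32-bit value V = 16: ctlz(16) = 27, so
/// LogBase2(16) = (32 - 1) - 27 = 4.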
73 static Register buildLogBase2(Register V, MachineIRBuilder &MIB) {
74 auto &MRI = *MIB.getMRI();
75 LLT Ty = MRI.getType(V);
76 auto Ctlz = MIB.buildCTLZ(Ty, V);
77 auto Base = MIB.buildConstant(Ty, Ty.getScalarSizeInBits() - 1);
  return MIB.buildSub(Ty, Base, Ctlz).getReg(0);
}
81 /// \returns The big endian in-memory byte position of byte \p I in a
82 /// \p ByteWidth bytes wide type.
84 /// E.g. Given a 4-byte type x, x[0] -> byte 3
85 static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) {
86 assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return ByteWidth - I - 1;
}
90 /// Given a map from byte offsets in memory to indices in a load/store,
91 /// determine if that map corresponds to a little or big endian byte pattern.
93 /// \param MemOffset2Idx maps memory offsets to address offsets.
94 /// \param LowestIdx is the lowest index in \p MemOffset2Idx.
96 /// \returns true if the map corresponds to a big endian byte pattern, false
97 /// if it corresponds to a little endian byte pattern, and None otherwise.
///
/// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns
/// are as follows:
///
/// AddrOffset   Little endian    Big endian
/// 0            0                3
/// 1            1                2
/// 2            2                1
/// 3            3                0
static Optional<bool>
isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
            int64_t LowestIdx) {
  // Need at least two byte positions to decide on endianness.
  unsigned Width = MemOffset2Idx.size();
  if (Width < 2)
    return None;
  bool BigEndian = true, LittleEndian = true;
  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
    auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
    if (MemOffsetAndIdx == MemOffset2Idx.end())
      return None;
    const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
    assert(Idx >= 0 && "Expected non-negative byte offset?");
    LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset);
    BigEndian &= Idx == bigEndianByteAt(Width, MemOffset);
    if (!BigEndian && !LittleEndian)
      return None;
  }

  assert((BigEndian != LittleEndian) &&
         "Pattern cannot be both big and little endian!");
  return BigEndian;
}
132 bool CombinerHelper::isPreLegalize() const { return !LI; }
134 bool CombinerHelper::isLegal(const LegalityQuery &Query) const {
135 assert(LI && "Must have LegalizerInfo to query isLegal!");
136 return LI->getAction(Query).Action == LegalizeActions::Legal;
139 bool CombinerHelper::isLegalOrBeforeLegalizer(
140 const LegalityQuery &Query) const {
141 return isPreLegalize() || isLegal(Query);
bool CombinerHelper::isConstantLegalOrBeforeLegalizer(const LLT Ty) const {
  if (!Ty.isVector())
    return isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {Ty}});
  // Vector constants are represented as a G_BUILD_VECTOR of scalar G_CONSTANTs.
  if (isPreLegalize())
    return true;
  LLT EltTy = Ty.getElementType();
  return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
         isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
}
155 void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
156 Register ToReg) const {
157 Observer.changingAllUsesOfReg(MRI, FromReg);
  if (MRI.constrainRegAttrs(ToReg, FromReg))
    MRI.replaceRegWith(FromReg, ToReg);
  else
    Builder.buildCopy(ToReg, FromReg);

  Observer.finishedChangingAllUsesOfReg();
}
167 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
168 MachineOperand &FromRegOp,
169 Register ToReg) const {
170 assert(FromRegOp.getParent() && "Expected an operand in an MI");
171 Observer.changingInstr(*FromRegOp.getParent());
173 FromRegOp.setReg(ToReg);
175 Observer.changedInstr(*FromRegOp.getParent());
178 void CombinerHelper::replaceOpcodeWith(MachineInstr &FromMI,
179 unsigned ToOpcode) const {
180 Observer.changingInstr(FromMI);
182 FromMI.setDesc(Builder.getTII().get(ToOpcode));
184 Observer.changedInstr(FromMI);
187 const RegisterBank *CombinerHelper::getRegBank(Register Reg) const {
188 return RBI->getRegBank(Reg, MRI, *TRI);
191 void CombinerHelper::setRegBank(Register Reg, const RegisterBank *RegBank) {
193 MRI.setRegBank(Reg, *RegBank);
bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
  if (matchCombineCopy(MI)) {
    applyCombineCopy(MI);
    return true;
  }
  return false;
}
bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
  if (MI.getOpcode() != TargetOpcode::COPY)
    return false;
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  return canReplaceReg(DstReg, SrcReg, MRI);
}
void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  MI.eraseFromParent();
  replaceRegWith(MRI, DstReg, SrcReg);
}
217 bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
218 bool IsUndef = false;
219 SmallVector<Register, 4> Ops;
220 if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
221 applyCombineConcatVectors(MI, IsUndef, Ops);
227 bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
228 SmallVectorImpl<Register> &Ops) {
229 assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
230 "Invalid instruction");
  IsUndef = true;
  MachineInstr *Undef = nullptr;
234 // Walk over all the operands of concat vectors and check if they are
235 // build_vector themselves or undef.
236 // Then collect their operands in Ops.
237 for (const MachineOperand &MO : MI.uses()) {
238 Register Reg = MO.getReg();
239 MachineInstr *Def = MRI.getVRegDef(Reg);
240 assert(Def && "Operand not defined");
    switch (Def->getOpcode()) {
    case TargetOpcode::G_BUILD_VECTOR:
      IsUndef = false;
      // Remember the operands of the build_vector to fold
      // them into the yet-to-build flattened concat vectors.
      for (const MachineOperand &BuildVecMO : Def->uses())
        Ops.push_back(BuildVecMO.getReg());
      break;
    case TargetOpcode::G_IMPLICIT_DEF: {
      LLT OpType = MRI.getType(Reg);
      // Keep one undef value for all the undef operands.
      if (!Undef) {
        Builder.setInsertPt(*MI.getParent(), MI);
        Undef = Builder.buildUndef(OpType.getScalarType());
      }
      assert(MRI.getType(Undef->getOperand(0).getReg()) ==
                 OpType.getScalarType() &&
             "All undefs should have the same type");
      // Break the undef vector in as many scalar elements as needed
      // for the flattening.
      for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
           EltIdx != EltEnd; ++EltIdx)
        Ops.push_back(Undef->getOperand(0).getReg());
      break;
    }
    default:
      return false;
    }
  }
  return true;
}
272 void CombinerHelper::applyCombineConcatVectors(
273 MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
274 // We determined that the concat_vectors can be flatten.
275 // Generate the flattened build_vector.
276 Register DstReg = MI.getOperand(0).getReg();
277 Builder.setInsertPt(*MI.getParent(), MI);
278 Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
  // Note: IsUndef is sort of redundant. We could have determined it by
  // checking that all Ops are undef. Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up. For now, given we already gather this information
  // in tryCombineConcatVectors, just save compile time and issue the
  // dedicated instruction directly.
  if (IsUndef)
    Builder.buildUndef(NewDstReg);
  else
    Builder.buildBuildVector(NewDstReg, Ops);
290 MI.eraseFromParent();
291 replaceRegWith(MRI, DstReg, NewDstReg);
294 bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
295 SmallVector<Register, 4> Ops;
296 if (matchCombineShuffleVector(MI, Ops)) {
297 applyCombineShuffleVector(MI, Ops);
303 bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
304 SmallVectorImpl<Register> &Ops) {
305 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
306 "Invalid instruction kind");
307 LLT DstType = MRI.getType(MI.getOperand(0).getReg());
308 Register Src1 = MI.getOperand(1).getReg();
309 LLT SrcType = MRI.getType(Src1);
310 // As bizarre as it may look, shuffle vector can actually produce
311 // scalar! This is because at the IR level a <1 x ty> shuffle
312 // vector is perfectly valid.
313 unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
314 unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;
316 // If the resulting vector is smaller than the size of the source
317 // vectors being concatenated, we won't be able to replace the
318 // shuffle vector into a concat_vectors.
320 // Note: We may still be able to produce a concat_vectors fed by
321 // extract_vector_elt and so on. It is less clear that would
322 // be better though, so don't bother for now.
  // If the destination is a scalar, the size of the sources doesn't
  // matter. We will lower the shuffle to a plain copy. This will
  // work only if the source and destination have the same size. But
  // that's covered by the next condition.
  //
  // TODO: If the sizes of the source and destination don't match
  // we could still emit an extract vector element in that case.
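  //
  // E.g. (illustrative): with %src1:_(<2 x s32>), %src2:_(<2 x s32>) and a
  // mask of <0, 1, 2, 3>, the shuffle is equivalent to
  //   %dst:_(<4 x s32>) = G_CONCAT_VECTORS %src1, %src2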
  if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
    return false;
334 // Check that the shuffle mask can be broken evenly between the
335 // different sources.
336 if (DstNumElts % SrcNumElts != 0)
339 // Mask length is a multiple of the source vector length.
  // Check if the shuffle is some kind of concatenation of the input
  // vectors.
342 unsigned NumConcat = DstNumElts / SrcNumElts;
343 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
344 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  for (unsigned i = 0; i != DstNumElts; ++i) {
    int Idx = Mask[i];
    // Undef value.
    if (Idx < 0)
      continue;
    // Ensure the indices in each SrcType sized piece are sequential and that
    // the same source is used for the whole piece.
    if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
        (ConcatSrcs[i / SrcNumElts] >= 0 &&
         ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
      return false;
    // Remember which source this index came from.
    ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
  }
360 // The shuffle is concatenating multiple vectors together.
361 // Collect the different operands for that.
  Register UndefReg;
  Register Src2 = MI.getOperand(2).getReg();
  for (auto Src : ConcatSrcs) {
    if (Src < 0) {
      if (!UndefReg) {
        Builder.setInsertPt(*MI.getParent(), MI);
        UndefReg = Builder.buildUndef(SrcType).getReg(0);
      }
      Ops.push_back(UndefReg);
    } else if (Src == 0)
      Ops.push_back(Src1);
    else
      Ops.push_back(Src2);
  }
  return true;
}
379 void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
380 const ArrayRef<Register> Ops) {
381 Register DstReg = MI.getOperand(0).getReg();
382 Builder.setInsertPt(*MI.getParent(), MI);
  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);

  if (Ops.size() == 1)
    Builder.buildCopy(NewDstReg, Ops[0]);
  else
    Builder.buildMerge(NewDstReg, Ops);
390 MI.eraseFromParent();
391 replaceRegWith(MRI, DstReg, NewDstReg);
namespace {

/// Select a preference between two uses. CurrentUse is the current preference
397 /// while *ForCandidate is attributes of the candidate under consideration.
398 PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
399 const LLT TyForCandidate,
400 unsigned OpcodeForCandidate,
401 MachineInstr *MIForCandidate) {
402 if (!CurrentUse.Ty.isValid()) {
403 if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
404 CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
    return CurrentUse;
  }
409 // We permit the extend to hoist through basic blocks but this is only
410 // sensible if the target has extending loads. If you end up lowering back
411 // into a load and extend during the legalizer then the end result is
412 // hoisting the extend up to the load.
414 // Prefer defined extensions to undefined extensions as these are more
415 // likely to reduce the number of instructions.
  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
      CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
    return CurrentUse;
419 else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
420 OpcodeForCandidate != TargetOpcode::G_ANYEXT)
421 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive.
  if (CurrentUse.Ty == TyForCandidate) {
    if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
        OpcodeForCandidate == TargetOpcode::G_ZEXT)
      return CurrentUse;
    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }
434 // This is potentially target specific. We've chosen the largest type
435 // because G_TRUNC is usually free. One potential catch with this is that
436 // some targets have a reduced number of larger registers than smaller
  // registers and this choice potentially increases the live-range for the
  // larger value.
  if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }
  return CurrentUse;
}
445 /// Find a suitable place to insert some instructions and insert them. This
446 /// function accounts for special cases like inserting before a PHI node.
447 /// The current strategy for inserting before PHI's is to duplicate the
448 /// instructions for each predecessor. However, while that's ok for G_TRUNC
449 /// on most targets since it generally requires no code, other targets/cases may
450 /// want to try harder to find a dominating block.
451 static void InsertInsnsWithoutSideEffectsBeforeUse(
452 MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
453 std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
                       MachineOperand &UseMO)>
        Inserter) {
  MachineInstr &UseMI = *UseMO.getParent();

  MachineBasicBlock *InsertBB = UseMI.getParent();

  // If the use is a PHI then we want the predecessor block instead.
  if (UseMI.isPHI()) {
    MachineOperand *PredBB = std::next(&UseMO);
    InsertBB = PredBB->getMBB();
  }

  // If the block is the same block as the def then we want to insert just after
  // the def instead of at the start of the block.
  if (InsertBB == DefMI.getParent()) {
    MachineBasicBlock::iterator InsertPt = &DefMI;
    Inserter(InsertBB, std::next(InsertPt), UseMO);
    return;
  }

  // Otherwise we want the start of the BB
  Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
}
477 } // end anonymous namespace
479 bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
480 PreferredTuple Preferred;
481 if (matchCombineExtendingLoads(MI, Preferred)) {
482 applyCombineExtendingLoads(MI, Preferred);
488 bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
489 PreferredTuple &Preferred) {
490 // We match the loads and follow the uses to the extend instead of matching
491 // the extends and following the def to the load. This is because the load
492 // must remain in the same position for correctness (unless we also add code
493 // to find a safe place to sink it) whereas the extend is freely movable.
  // It also prevents us from duplicating the load for the volatile case or just
  // for performance.
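  //
  // E.g. (illustrative) the overall combine turns
  //   %v:_(s16) = G_LOAD %ptr
  //   %e:_(s32) = G_SEXT %v
  // into
  //   %e:_(s32) = G_SEXTLOAD %ptr
  // with any remaining uses of %v rewritten through a G_TRUNC of %e.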
  GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI);
  if (!LoadMI)
    return false;

  Register LoadReg = LoadMI->getDstReg();
502 LLT LoadValueTy = MRI.getType(LoadReg);
503 if (!LoadValueTy.isScalar())
506 // Most architectures are going to legalize <s8 loads into at least a 1 byte
507 // load, and the MMOs can only describe memory accesses in multiples of bytes.
508 // If we try to perform extload combining on those, we can end up with
509 // %a(s8) = extload %ptr (load 1 byte from %ptr)
510 // ... which is an illegal extload instruction.
511 if (LoadValueTy.getSizeInBits() < 8)
514 // For non power-of-2 types, they will very likely be legalized into multiple
515 // loads. Don't bother trying to match them into extending loads.
516 if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
519 // Find the preferred type aside from the any-extends (unless it's the only
520 // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
522 // relative type sizes. At the same time, pick an extend to use based on the
523 // extend involved in the chosen type.
  unsigned PreferredOpcode =
      isa<GLoad>(&MI)
          ? TargetOpcode::G_ANYEXT
          : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
528 Preferred = {LLT(), PreferredOpcode, nullptr};
529 for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) {
530 if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
531 UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
532 (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
533 const auto &MMO = LoadMI->getMMO();
534 // For atomics, only form anyextending loads.
      if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
        continue;
      // Check for legality.
      if (LI) {
        LegalityQuery::MemDesc MMDesc(MMO);
        LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
        LLT SrcTy = MRI.getType(LoadMI->getPointerReg());
        if (LI->getAction({LoadMI->getOpcode(), {UseTy, SrcTy}, {MMDesc}})
                .Action != LegalizeActions::Legal)
          continue;
      }
      Preferred = ChoosePreferredUse(Preferred,
                                     MRI.getType(UseMI.getOperand(0).getReg()),
                                     UseMI.getOpcode(), &UseMI);
    }
  }

  // There were no extends
  if (!Preferred.MI)
    return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");

  LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
  return true;
}
563 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
564 PreferredTuple &Preferred) {
565 // Rewrite the load to the chosen extending load.
566 Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
568 // Inserter to insert a truncate back to the original type at a given point
569 // with some basic CSE to limit truncate duplication to one per BB.
570 DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
571 auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
572 MachineBasicBlock::iterator InsertBefore,
573 MachineOperand &UseMO) {
574 MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
575 if (PreviouslyEmitted) {
576 Observer.changingInstr(*UseMO.getParent());
577 UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
      Observer.changedInstr(*UseMO.getParent());
      return;
    }
582 Builder.setInsertPt(*InsertIntoBB, InsertBefore);
583 Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
584 MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
585 EmittedInsns[InsertIntoBB] = NewMI;
    replaceRegOpWith(MRI, UseMO, NewDstReg);
  };
589 Observer.changingInstr(MI);
  MI.setDesc(
      Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
                               ? TargetOpcode::G_SEXTLOAD
                               : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
                                     ? TargetOpcode::G_ZEXTLOAD
                                     : TargetOpcode::G_LOAD));
597 // Rewrite all the uses to fix up the types.
598 auto &LoadValue = MI.getOperand(0);
599 SmallVector<MachineOperand *, 4> Uses;
600 for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
601 Uses.push_back(&UseMO);
603 for (auto *UseMO : Uses) {
604 MachineInstr *UseMI = UseMO->getParent();
606 // If the extend is compatible with the preferred extend then we should fix
607 // up the type and extend so that it uses the preferred use.
608 if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
609 UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
610 Register UseDstReg = UseMI->getOperand(0).getReg();
611 MachineOperand &UseSrcMO = UseMI->getOperand(1);
612 const LLT UseDstTy = MRI.getType(UseDstReg);
613 if (UseDstReg != ChosenDstReg) {
614 if (Preferred.Ty == UseDstTy) {
615 // If the use has the same type as the preferred use, then merge
616 // the vregs and erase the extend. For example:
617 // %1:_(s8) = G_LOAD ...
618 // %2:_(s32) = G_SEXT %1(s8)
          // %3:_(s32) = G_ANYEXT %1(s8)
          //    =>
          // %2:_(s32) = G_SEXTLOAD ...
624 replaceRegWith(MRI, UseDstReg, ChosenDstReg);
625 Observer.erasingInstr(*UseMO->getParent());
626 UseMO->getParent()->eraseFromParent();
627 } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
628 // If the preferred size is smaller, then keep the extend but extend
629 // from the result of the extending load. For example:
630 // %1:_(s8) = G_LOAD ...
631 // %2:_(s32) = G_SEXT %1(s8)
          // %3:_(s64) = G_ANYEXT %1(s8)
          //    =>
          // %2:_(s32) = G_SEXTLOAD ...
636 // %3:_(s64) = G_ANYEXT %2:_(s32)
638 replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
640 // If the preferred size is large, then insert a truncate. For
642 // %1:_(s8) = G_LOAD ...
643 // %2:_(s64) = G_SEXT %1(s8)
            // %3:_(s32) = G_ZEXT %1(s8)
            //    =>
            // %2:_(s64) = G_SEXTLOAD ...
            // %4:_(s8) = G_TRUNC %2:_(s64)
            // %3:_(s32) = G_ZEXT %4:_(s8)
651 InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
656 // The use is (one of) the uses of the preferred use we chose earlier.
        // We're going to update the load to def this value later so just erase
        // the use instruction.
659 Observer.erasingInstr(*UseMO->getParent());
660 UseMO->getParent()->eraseFromParent();
664 // The use isn't an extend. Truncate back to the type we originally loaded.
665 // This is free on many targets.
666 InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
669 MI.getOperand(0).setReg(ChosenDstReg);
670 Observer.changedInstr(MI);
673 bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
674 BuildFnTy &MatchInfo) {
675 assert(MI.getOpcode() == TargetOpcode::G_AND);
677 // If we have the following code:
678 // %mask = G_CONSTANT 255
679 // %ld = G_LOAD %ptr, (load s16)
680 // %and = G_AND %ld, %mask
682 // Try to fold it into
683 // %ld = G_ZEXTLOAD %ptr, (load s8)
  Register Dst = MI.getOperand(0).getReg();
  if (MRI.getType(Dst).isVector())
    return false;

  auto MaybeMask =
      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
  if (!MaybeMask)
    return false;

  APInt MaskVal = MaybeMask->Value;

  if (!MaskVal.isMask())
    return false;
  Register SrcReg = MI.getOperand(1).getReg();
  // Don't use getOpcodeDef() here since intermediate instructions may have
  // multiple users.
  GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg));
  if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()))
    return false;
706 Register LoadReg = LoadMI->getDstReg();
707 LLT RegTy = MRI.getType(LoadReg);
708 Register PtrReg = LoadMI->getPointerReg();
709 unsigned RegSize = RegTy.getSizeInBits();
710 uint64_t LoadSizeBits = LoadMI->getMemSizeInBits();
711 unsigned MaskSizeBits = MaskVal.countTrailingOnes();
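  // E.g. (illustrative): a mask of 0xff passes isMask() and has 8 trailing
  // ones, so MaskSizeBits is 8 and the candidate memory type is s8.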
  // The mask may not be larger than the in-memory type, as it might cover sign
  // extended bits
  if (MaskSizeBits > LoadSizeBits)
    return false;

  // If the mask covers the whole destination register, there's nothing to
  // fold
  if (MaskSizeBits >= RegSize)
    return false;

  // Most targets cannot deal with loads of size < 8 and need to re-legalize to
  // at least byte loads. Avoid creating such loads here
  if (MaskSizeBits < 8 || !isPowerOf2_32(MaskSizeBits))
    return false;
728 const MachineMemOperand &MMO = LoadMI->getMMO();
729 LegalityQuery::MemDesc MemDesc(MMO);
731 // Don't modify the memory access size if this is atomic/volatile, but we can
732 // still adjust the opcode to indicate the high bit behavior.
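  // E.g. (illustrative): an atomic (load s8) into an s32 register masked with
  // 0xff keeps its s8 memory access; only the opcode changes to G_ZEXTLOAD.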
733 if (LoadMI->isSimple())
734 MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
  else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)
    return false;
738 // TODO: Could check if it's legal with the reduced or original memory size.
739 if (!isLegalOrBeforeLegalizer(
          {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}}))
    return false;
743 MatchInfo = [=](MachineIRBuilder &B) {
744 B.setInstrAndDebugLoc(*LoadMI);
745 auto &MF = B.getMF();
746 auto PtrInfo = MMO.getPointerInfo();
747 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy);
748 B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
749 LoadMI->eraseFromParent();
754 bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
755 const MachineInstr &UseMI) {
756 assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
757 "shouldn't consider debug uses");
758 assert(DefMI.getParent() == UseMI.getParent());
759 if (&DefMI == &UseMI)
761 const MachineBasicBlock &MBB = *DefMI.getParent();
762 auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) {
763 return &MI == &DefMI || &MI == &UseMI;
765 if (DefOrUse == MBB.end())
766 llvm_unreachable("Block must contain both DefMI and UseMI!");
767 return &*DefOrUse == &DefMI;
770 bool CombinerHelper::dominates(const MachineInstr &DefMI,
771 const MachineInstr &UseMI) {
772 assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
773 "shouldn't consider debug uses");
  if (MDT)
    return MDT->dominates(&DefMI, &UseMI);
  else if (DefMI.getParent() != UseMI.getParent())
    return false;

  return isPredecessor(DefMI, UseMI);
}
782 bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
783 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
784 Register SrcReg = MI.getOperand(1).getReg();
785 Register LoadUser = SrcReg;
  if (MRI.getType(SrcReg).isVector())
    return false;

  Register TruncSrc;
  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
    LoadUser = TruncSrc;

  uint64_t SizeInBits = MI.getOperand(2).getImm();
  // If the source is a G_SEXTLOAD from the same bit width, then we don't
  // need any extend at all, just a truncate.
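  //
  // E.g. (illustrative):
  //   %ld:_(s32) = G_SEXTLOAD %ptr (load 1)
  //   %x:_(s32) = G_SEXT_INREG %ld, 8
  // The low 8 bits of %ld are already sign-extended, so %x can simply become
  // a copy of %ld.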
  if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
    // If truncating more than the original extended value, abort.
    auto LoadSizeBits = LoadMI->getMemSizeInBits();
    if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
      return false;
    if (LoadSizeBits == SizeInBits)
      return true;
  }
  return false;
}
808 void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
809 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
810 Builder.setInstrAndDebugLoc(MI);
811 Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
812 MI.eraseFromParent();
815 bool CombinerHelper::matchSextInRegOfLoad(
816 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
817 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
819 Register DstReg = MI.getOperand(0).getReg();
820 LLT RegTy = MRI.getType(DstReg);
822 // Only supports scalars for now.
823 if (RegTy.isVector())
826 Register SrcReg = MI.getOperand(1).getReg();
827 auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);
828 if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg))
831 uint64_t MemBits = LoadDef->getMemSizeInBits();
833 // If the sign extend extends from a narrower width than the load's width,
834 // then we can narrow the load width when we combine to a G_SEXTLOAD.
835 // Avoid widening the load at all.
836 unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits);
  // Don't generate G_SEXTLOADs with a < 1 byte width.
  if (NewSizeBits < 8)
    return false;
  // Don't bother creating a non-power-2 sextload, it will likely be broken up
  // anyway for most targets.
  if (!isPowerOf2_32(NewSizeBits))
    return false;
846 const MachineMemOperand &MMO = LoadDef->getMMO();
847 LegalityQuery::MemDesc MMDesc(MMO);
849 // Don't modify the memory access size if this is atomic/volatile, but we can
850 // still adjust the opcode to indicate the high bit behavior.
851 if (LoadDef->isSimple())
852 MMDesc.MemoryTy = LLT::scalar(NewSizeBits);
853 else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits())
856 // TODO: Could check if it's legal with the reduced or original memory size.
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SEXTLOAD,
                                 {MRI.getType(LoadDef->getDstReg()),
                                  MRI.getType(LoadDef->getPointerReg())},
                                 {MMDesc}}))
    return false;

  MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
  return true;
}
867 void CombinerHelper::applySextInRegOfLoad(
868 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
869 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Register LoadReg;
  unsigned ScalarSizeBits;
872 std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
873 GLoad *LoadDef = cast<GLoad>(MRI.getVRegDef(LoadReg));
875 // If we have the following:
876 // %ld = G_LOAD %ptr, (load 2)
877 // %ext = G_SEXT_INREG %ld, 8
879 // %ld = G_SEXTLOAD %ptr (load 1)
881 auto &MMO = LoadDef->getMMO();
882 Builder.setInstrAndDebugLoc(*LoadDef);
883 auto &MF = Builder.getMF();
884 auto PtrInfo = MMO.getPointerInfo();
885 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
886 Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
887 LoadDef->getPointerReg(), *NewMMO);
888 MI.eraseFromParent();
891 bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
892 Register &Base, Register &Offset) {
893 auto &MF = *MI.getParent()->getParent();
894 const auto &TLI = *MF.getSubtarget().getTargetLowering();
897 unsigned Opcode = MI.getOpcode();
898 assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
899 Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
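  // A post-indexed candidate has the shape (illustrative):
  //   %val:_(s32) = G_LOAD %base(p0)
  //   %writeback:_(p0) = G_PTR_ADD %base, %offset
  // i.e. the same base pointer is used by the memory op and incremented
  // afterwards.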
902 Base = MI.getOperand(1).getReg();
903 MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
904 if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
907 LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
  // FIXME: The following use traversal needs a bail out for pathological cases.
909 for (auto &Use : MRI.use_nodbg_instructions(Base)) {
910 if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
913 Offset = Use.getOperand(2).getReg();
914 if (!ForceLegalIndexing &&
915 !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
916 LLVM_DEBUG(dbgs() << " Ignoring candidate with illegal addrmode: "
921 // Make sure the offset calculation is before the potentially indexed op.
    // FIXME: we really care about dependency here. The offset calculation might
    // be movable.
924 MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
925 if (!OffsetDef || !dominates(*OffsetDef, MI)) {
926 LLVM_DEBUG(dbgs() << " Ignoring candidate with offset after mem-op: "
931 // FIXME: check whether all uses of Base are load/store with foldable
932 // addressing modes. If so, using the normal addr-modes is better than
933 // forming an indexed one.
935 bool MemOpDominatesAddrUses = true;
936 for (auto &PtrAddUse :
937 MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
938 if (!dominates(MI, PtrAddUse)) {
939 MemOpDominatesAddrUses = false;
944 if (!MemOpDominatesAddrUses) {
946 dbgs() << " Ignoring candidate as memop does not dominate uses: "
951 LLVM_DEBUG(dbgs() << " Found match: " << Use);
952 Addr = Use.getOperand(0).getReg();
959 bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
960 Register &Base, Register &Offset) {
961 auto &MF = *MI.getParent()->getParent();
962 const auto &TLI = *MF.getSubtarget().getTargetLowering();
965 unsigned Opcode = MI.getOpcode();
966 assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
967 Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
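  // A pre-indexed candidate has the shape (illustrative):
  //   %addr:_(p0) = G_PTR_ADD %base, %offset
  //   %val:_(s32) = G_LOAD %addr(p0)
  // i.e. the memory op itself uses the already-incremented address.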
970 Addr = MI.getOperand(1).getReg();
971 MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
972 if (!AddrDef || MRI.hasOneNonDBGUse(Addr))
975 Base = AddrDef->getOperand(1).getReg();
976 Offset = AddrDef->getOperand(2).getReg();
978 LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
980 if (!ForceLegalIndexing &&
981 !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
982 LLVM_DEBUG(dbgs() << " Skipping, not legal for target");
986 MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
987 if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
988 LLVM_DEBUG(dbgs() << " Skipping, frame index would need copy anyway.");
992 if (MI.getOpcode() == TargetOpcode::G_STORE) {
993 // Would require a copy.
994 if (Base == MI.getOperand(0).getReg()) {
995 LLVM_DEBUG(dbgs() << " Skipping, storing base so need copy anyway.");
999 // We're expecting one use of Addr in MI, but it could also be the
1000 // value stored, which isn't actually dominated by the instruction.
1001 if (MI.getOperand(0).getReg() == Addr) {
1002 LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses");
1007 // FIXME: check whether all uses of the base pointer are constant PtrAdds.
1008 // That might allow us to end base's liveness here by adjusting the constant.
1010 for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) {
1011 if (!dominates(MI, UseMI)) {
1012 LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses.");
1020 bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
1021 IndexedLoadStoreMatchInfo MatchInfo;
1022 if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
1023 applyCombineIndexedLoadStore(MI, MatchInfo);
1029 bool CombinerHelper::matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
1030 unsigned Opcode = MI.getOpcode();
1031 if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
1032 Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
1035 // For now, no targets actually support these opcodes so don't waste time
1036 // running these unless we're forced to for testing.
  if (!ForceLegalIndexing)
    return false;

  MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
                                          MatchInfo.Offset);
  if (!MatchInfo.IsPre &&
      !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
                              MatchInfo.Offset))
    return false;

  return true;
}
1050 void CombinerHelper::applyCombineIndexedLoadStore(
1051 MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
1052 MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
1053 MachineIRBuilder MIRBuilder(MI);
  unsigned Opcode = MI.getOpcode();
  bool IsStore = Opcode == TargetOpcode::G_STORE;
  unsigned NewOpcode;
  switch (Opcode) {
  case TargetOpcode::G_LOAD:
    NewOpcode = TargetOpcode::G_INDEXED_LOAD;
    break;
  case TargetOpcode::G_SEXTLOAD:
    NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
    break;
  case TargetOpcode::G_ZEXTLOAD:
    NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
    break;
  case TargetOpcode::G_STORE:
    NewOpcode = TargetOpcode::G_INDEXED_STORE;
    break;
  default:
    llvm_unreachable("Unknown load/store opcode");
  }

  auto MIB = MIRBuilder.buildInstr(NewOpcode);
  if (IsStore) {
    MIB.addDef(MatchInfo.Addr);
    MIB.addUse(MI.getOperand(0).getReg());
  } else {
    MIB.addDef(MI.getOperand(0).getReg());
    MIB.addDef(MatchInfo.Addr);
  }

  MIB.addUse(MatchInfo.Base);
  MIB.addUse(MatchInfo.Offset);
  MIB.addImm(MatchInfo.IsPre);
  MI.eraseFromParent();
  AddrDef.eraseFromParent();

  LLVM_DEBUG(dbgs() << "    Combined to indexed operation");
}
1092 bool CombinerHelper::matchCombineDivRem(MachineInstr &MI,
1093 MachineInstr *&OtherMI) {
1094 unsigned Opcode = MI.getOpcode();
  bool IsDiv, IsSigned;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV: {
    IsDiv = true;
    IsSigned = Opcode == TargetOpcode::G_SDIV;
    break;
  }
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM: {
    IsDiv = false;
    IsSigned = Opcode == TargetOpcode::G_SREM;
    break;
  }
  }
1114 Register Src1 = MI.getOperand(1).getReg();
  unsigned DivOpcode, RemOpcode, DivremOpcode;
  if (IsSigned) {
    DivOpcode = TargetOpcode::G_SDIV;
    RemOpcode = TargetOpcode::G_SREM;
    DivremOpcode = TargetOpcode::G_SDIVREM;
  } else {
    DivOpcode = TargetOpcode::G_UDIV;
    RemOpcode = TargetOpcode::G_UREM;
    DivremOpcode = TargetOpcode::G_UDIVREM;
  }

  if (!isLegalOrBeforeLegalizer({DivremOpcode, {MRI.getType(Src1)}}))
    return false;
  // Combine:
  // %div:_ = G_[SU]DIV %src1:_, %src2:_
  // %rem:_ = G_[SU]REM %src1:_, %src2:_
  // into:
  // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
  //
  // Combine:
  // %rem:_ = G_[SU]REM %src1:_, %src2:_
  // %div:_ = G_[SU]DIV %src1:_, %src2:_
  // into:
  // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1141 for (auto &UseMI : MRI.use_nodbg_instructions(Src1)) {
1142 if (MI.getParent() == UseMI.getParent() &&
1143 ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
1144 (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
        matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2))) {
      OtherMI = &UseMI;
      return true;
    }
  }

  return false;
}
1154 void CombinerHelper::applyCombineDivRem(MachineInstr &MI,
1155 MachineInstr *&OtherMI) {
1156 unsigned Opcode = MI.getOpcode();
1157 assert(OtherMI && "OtherMI shouldn't be empty.");
1159 Register DestDivReg, DestRemReg;
1160 if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
1161 DestDivReg = MI.getOperand(0).getReg();
1162 DestRemReg = OtherMI->getOperand(0).getReg();
1164 DestDivReg = OtherMI->getOperand(0).getReg();
    DestRemReg = MI.getOperand(0).getReg();
  }

  bool IsSigned =
      Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;
1171 // Check which instruction is first in the block so we don't break def-use
1172 // deps by "moving" the instruction incorrectly.
1173 if (dominates(MI, *OtherMI))
1174 Builder.setInstrAndDebugLoc(MI);
1176 Builder.setInstrAndDebugLoc(*OtherMI);
1178 Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM
1179 : TargetOpcode::G_UDIVREM,
1180 {DestDivReg, DestRemReg},
1181 {MI.getOperand(1).getReg(), MI.getOperand(2).getReg()});
1182 MI.eraseFromParent();
1183 OtherMI->eraseFromParent();
1186 bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI,
1187 MachineInstr *&BrCond) {
1188 assert(MI.getOpcode() == TargetOpcode::G_BR);
  // Try to match the following:
  // bb1:
  //   G_BRCOND %c1, %bb2
  //   G_BR %bb3
  // bb2:
  // ...
  // bb3:

  // The above pattern does not have a fall through to the successor bb2, always
  // resulting in a branch no matter which path is taken. Here we try to find
  // and replace that pattern with conditional branch to bb3 and otherwise
  // fallthrough to bb2. This is generally better for branch predictors.
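  //
  // E.g. (illustrative) the rewrite produces:
  // bb1:
  //   %c1.inv:_(s1) = G_XOR %c1, 1
  //   G_BRCOND %c1.inv, %bb3
  //   G_BR %bb2   ; bb2 is the layout successor, so this is a fallthrough
  // bb2: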
1203 MachineBasicBlock *MBB = MI.getParent();
1204 MachineBasicBlock::iterator BrIt(MI);
1205 if (BrIt == MBB->begin())
1207 assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
1209 BrCond = &*std::prev(BrIt);
1210 if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
1213 // Check that the next block is the conditional branch target. Also make sure
1214 // that it isn't the same as the G_BR's target (otherwise, this will loop.)
1215 MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB();
1216 return BrCondTarget != MI.getOperand(0).getMBB() &&
1217 MBB->isLayoutSuccessor(BrCondTarget);
1220 void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI,
1221 MachineInstr *&BrCond) {
1222 MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
1223 Builder.setInstrAndDebugLoc(*BrCond);
1224 LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
1225 // FIXME: Does int/fp matter for this? If so, we might need to restrict
1226 // this to i1 only since we might not know for sure what kind of
1227 // compare generated the condition value.
1228 auto True = Builder.buildConstant(
1229 Ty, getICmpTrueVal(getTargetLowering(), false, false));
1230 auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True);
1232 auto *FallthroughBB = BrCond->getOperand(1).getMBB();
1233 Observer.changingInstr(MI);
1234 MI.getOperand(0).setMBB(FallthroughBB);
1235 Observer.changedInstr(MI);
1237 // Change the conditional branch to use the inverted condition and
1238 // new target block.
1239 Observer.changingInstr(*BrCond);
1240 BrCond->getOperand(0).setReg(Xor.getReg(0));
1241 BrCond->getOperand(1).setMBB(BrTarget);
1242 Observer.changedInstr(*BrCond);
static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
  if (Ty.isVector())
    return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
                                Ty.getNumElements());
  return IntegerType::get(C, Ty.getSizeInBits());
}
1252 bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) {
1253 MachineIRBuilder HelperBuilder(MI);
1254 GISelObserverWrapper DummyObserver;
1255 LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1256 return Helper.lowerMemcpyInline(MI) ==
1257 LegalizerHelper::LegalizeResult::Legalized;
1260 bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
1261 MachineIRBuilder HelperBuilder(MI);
1262 GISelObserverWrapper DummyObserver;
1263 LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1264 return Helper.lowerMemCpyFamily(MI, MaxLen) ==
1265 LegalizerHelper::LegalizeResult::Legalized;
static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy,
                                             const Register Op,
                                             const MachineRegisterInfo &MRI) {
  const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI);
  if (!MaybeCst)
    return None;

  APFloat V = MaybeCst->getValueAPF();
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case TargetOpcode::G_FNEG: {
    V.changeSign();
    return V;
  }
  case TargetOpcode::G_FABS: {
    V.clearSign();
    return V;
  }
  case TargetOpcode::G_FPTRUNC:
    break;
  case TargetOpcode::G_FSQRT: {
    bool Unused;
    V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
    V = APFloat(sqrt(V.convertToDouble()));
    break;
  }
  case TargetOpcode::G_FLOG2: {
    bool Unused;
    V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
    V = APFloat(log2(V.convertToDouble()));
    break;
  }
  }
  // Convert `APFloat` to appropriate IEEE type depending on `DstTy`. Otherwise,
  // `buildFConstant` will assert on size mismatch. Only `G_FPTRUNC`, `G_FSQRT`,
  // and `G_FLOG2` reach here.
  bool Unused;
  V.convert(getFltSemanticForLLT(DstTy), APFloat::rmNearestTiesToEven, &Unused);
  return V;
}
1310 bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI,
1311 Optional<APFloat> &Cst) {
1312 Register DstReg = MI.getOperand(0).getReg();
1313 Register SrcReg = MI.getOperand(1).getReg();
1314 LLT DstTy = MRI.getType(DstReg);
1315 Cst = constantFoldFpUnary(MI.getOpcode(), DstTy, SrcReg, MRI);
1316 return Cst.has_value();
1319 void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
1320 Optional<APFloat> &Cst) {
1321 assert(Cst && "Optional is unexpectedly empty!");
1322 Builder.setInstrAndDebugLoc(MI);
1323 MachineFunction &MF = Builder.getMF();
1324 auto *FPVal = ConstantFP::get(MF.getFunction().getContext(), *Cst);
1325 Register DstReg = MI.getOperand(0).getReg();
1326 Builder.buildFConstant(DstReg, *FPVal);
1327 MI.eraseFromParent();
1330 bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
1331 PtrAddChain &MatchInfo) {
1332 // We're trying to match the following pattern:
1333 // %t1 = G_PTR_ADD %base, G_CONSTANT imm1
  // %root = G_PTR_ADD %t1, G_CONSTANT imm2
  // -->
  // %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)
  if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
    return false;
1341 Register Add2 = MI.getOperand(1).getReg();
1342 Register Imm1 = MI.getOperand(2).getReg();
1343 auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
1347 MachineInstr *Add2Def = MRI.getVRegDef(Add2);
1348 if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
1351 Register Base = Add2Def->getOperand(1).getReg();
1352 Register Imm2 = Add2Def->getOperand(2).getReg();
1353 auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
1357 // Check if the new combined immediate forms an illegal addressing mode.
1358 // Do not combine if it was legal before but would get illegal.
1359 // To do so, we need to find a load/store user of the pointer to get
1361 Type *AccessTy = nullptr;
1362 auto &MF = *MI.getMF();
1363 for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {
1364 if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
1365 AccessTy = getTypeForLLT(MRI.getType(LdSt->getReg(0)),
1366 MF.getFunction().getContext());
1370 TargetLoweringBase::AddrMode AMNew;
1371 APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
1372 AMNew.BaseOffs = CombinedImm.getSExtValue();
1374 AMNew.HasBaseReg = true;
1375 TargetLoweringBase::AddrMode AMOld;
1376 AMOld.BaseOffs = MaybeImm2Val->Value.getSExtValue();
1377 AMOld.HasBaseReg = true;
1378 unsigned AS = MRI.getType(Add2).getAddressSpace();
1379 const auto &TLI = *MF.getSubtarget().getTargetLowering();
1380 if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
1381 !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
1385 // Pass the combined immediate to the apply function.
1386 MatchInfo.Imm = AMNew.BaseOffs;
1387 MatchInfo.Base = Base;
1388 MatchInfo.Bank = getRegBank(Imm2);
1392 void CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
1393 PtrAddChain &MatchInfo) {
1394 assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
1395 MachineIRBuilder MIB(MI);
1396 LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
1397 auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
1398 setRegBank(NewOffset.getReg(0), MatchInfo.Bank);
1399 Observer.changingInstr(MI);
1400 MI.getOperand(1).setReg(MatchInfo.Base);
1401 MI.getOperand(2).setReg(NewOffset.getReg(0));
1402 Observer.changedInstr(MI);
1405 bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI,
1406 RegisterImmPair &MatchInfo) {
1407 // We're trying to match the following pattern with any of
1408 // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions:
1409 // %t1 = SHIFT %base, G_CONSTANT imm1
  // %root = SHIFT %t1, G_CONSTANT imm2
  // -->
  // %root = SHIFT %base, G_CONSTANT (imm1 + imm2)
1414 unsigned Opcode = MI.getOpcode();
1415 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1416 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1417 Opcode == TargetOpcode::G_USHLSAT) &&
1418 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1420 Register Shl2 = MI.getOperand(1).getReg();
1421 Register Imm1 = MI.getOperand(2).getReg();
1422 auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
1426 MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Shl2);
1427 if (Shl2Def->getOpcode() != Opcode)
1430 Register Base = Shl2Def->getOperand(1).getReg();
1431 Register Imm2 = Shl2Def->getOperand(2).getReg();
1432 auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
  // Pass the combined immediate to the apply function.
  MatchInfo.Imm =
      (MaybeImmVal->Value.getSExtValue() + MaybeImm2Val->Value).getSExtValue();
  MatchInfo.Reg = Base;

  // There is no simple replacement for a saturating unsigned left shift that
  // exceeds the scalar size.
  if (Opcode == TargetOpcode::G_USHLSAT &&
      MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits())
    return false;

  return true;
}
1450 void CombinerHelper::applyShiftImmedChain(MachineInstr &MI,
1451 RegisterImmPair &MatchInfo) {
1452 unsigned Opcode = MI.getOpcode();
1453 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1454 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1455 Opcode == TargetOpcode::G_USHLSAT) &&
1456 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1458 Builder.setInstrAndDebugLoc(MI);
1459 LLT Ty = MRI.getType(MI.getOperand(1).getReg());
1460 unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
1461 auto Imm = MatchInfo.Imm;
1463 if (Imm >= ScalarSizeInBits) {
1464 // Any logical shift that exceeds scalar size will produce zero.
1465 if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
1466 Builder.buildConstant(MI.getOperand(0), 0);
1467 MI.eraseFromParent();
    // Arithmetic shift and saturating signed left shift have no effect beyond
    // the scalar size.
    Imm = ScalarSizeInBits - 1;
  }
1475 LLT ImmTy = MRI.getType(MI.getOperand(2).getReg());
1476 Register NewImm = Builder.buildConstant(ImmTy, Imm).getReg(0);
1477 Observer.changingInstr(MI);
1478 MI.getOperand(1).setReg(MatchInfo.Reg);
1479 MI.getOperand(2).setReg(NewImm);
1480 Observer.changedInstr(MI);
1483 bool CombinerHelper::matchShiftOfShiftedLogic(MachineInstr &MI,
1484 ShiftOfShiftedLogic &MatchInfo) {
1485 // We're trying to match the following pattern with any of
1486 // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination
1487 // with any of G_AND/G_OR/G_XOR logic instructions.
1488 // %t1 = SHIFT %X, G_CONSTANT C0
1489 // %t2 = LOGIC %t1, %Y
  // %root = SHIFT %t2, G_CONSTANT C1
  // -->
  // %t3 = SHIFT %X, G_CONSTANT (C0+C1)
1493 // %t4 = SHIFT %Y, G_CONSTANT C1
1494 // %root = LOGIC %t3, %t4
1495 unsigned ShiftOpcode = MI.getOpcode();
1496 assert((ShiftOpcode == TargetOpcode::G_SHL ||
1497 ShiftOpcode == TargetOpcode::G_ASHR ||
1498 ShiftOpcode == TargetOpcode::G_LSHR ||
1499 ShiftOpcode == TargetOpcode::G_USHLSAT ||
1500 ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
1501 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1503 // Match a one-use bitwise logic op.
1504 Register LogicDest = MI.getOperand(1).getReg();
1505 if (!MRI.hasOneNonDBGUse(LogicDest))
1508 MachineInstr *LogicMI = MRI.getUniqueVRegDef(LogicDest);
1509 unsigned LogicOpcode = LogicMI->getOpcode();
1510 if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
1511 LogicOpcode != TargetOpcode::G_XOR)
1514 // Find a matching one-use shift by constant.
1515 const Register C1 = MI.getOperand(2).getReg();
1516 auto MaybeImmVal = getIConstantVRegValWithLookThrough(C1, MRI);
1520 const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();
1522 auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {
1523 // Shift should match previous one and should be a one-use.
    if (MI->getOpcode() != ShiftOpcode ||
        !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
      return false;

    // Must be a constant.
    auto MaybeImmVal =
        getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
    if (!MaybeImmVal)
      return false;

    ShiftVal = MaybeImmVal->Value.getSExtValue();
    return true;
  };
1538 // Logic ops are commutative, so check each operand for a match.
1539 Register LogicMIReg1 = LogicMI->getOperand(1).getReg();
1540 MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(LogicMIReg1);
1541 Register LogicMIReg2 = LogicMI->getOperand(2).getReg();
1542 MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(LogicMIReg2);
  uint64_t C0Val;

  if (matchFirstShift(LogicMIOp1, C0Val)) {
1546 MatchInfo.LogicNonShiftReg = LogicMIReg2;
1547 MatchInfo.Shift2 = LogicMIOp1;
1548 } else if (matchFirstShift(LogicMIOp2, C0Val)) {
1549 MatchInfo.LogicNonShiftReg = LogicMIReg1;
    MatchInfo.Shift2 = LogicMIOp2;
  } else
    return false;
1554 MatchInfo.ValSum = C0Val + C1Val;
1556 // The fold is not valid if the sum of the shift values exceeds bitwidth.
1557 if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits())
1560 MatchInfo.Logic = LogicMI;
1564 void CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI,
1565 ShiftOfShiftedLogic &MatchInfo) {
1566 unsigned Opcode = MI.getOpcode();
1567 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1568 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
1569 Opcode == TargetOpcode::G_SSHLSAT) &&
1570 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1572 LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
1573 LLT DestType = MRI.getType(MI.getOperand(0).getReg());
1574 Builder.setInstrAndDebugLoc(MI);
1576 Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0);
  Register Shift1Base = MatchInfo.Shift2->getOperand(1).getReg();
  Register Shift1 =
      Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0);

  Register Shift2Const = MI.getOperand(2).getReg();
  Register Shift2 = Builder
                        .buildInstr(Opcode, {DestType},
                                    {MatchInfo.LogicNonShiftReg, Shift2Const})
                        .getReg(0);
1588 Register Dest = MI.getOperand(0).getReg();
1589 Builder.buildInstr(MatchInfo.Logic->getOpcode(), {Dest}, {Shift1, Shift2});
1591 // These were one use so it's safe to remove them.
1592 MatchInfo.Shift2->eraseFromParent();
1593 MatchInfo.Logic->eraseFromParent();
1595 MI.eraseFromParent();
1598 bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
1599 unsigned &ShiftVal) {
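  // E.g. (illustrative): %d:_(s32) = G_MUL %x, 8 becomes %d = G_SHL %x, 3.
  // A multiply by a non-power-of-two constant such as 6 is rejected because
  // exactLogBase2() returns -1 for it.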
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
  auto MaybeImmVal =
      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
  if (!MaybeImmVal)
    return false;

  ShiftVal = MaybeImmVal->Value.exactLogBase2();
  return (static_cast<int32_t>(ShiftVal) != -1);
}
1610 void CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
1611 unsigned &ShiftVal) {
1612 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1613 MachineIRBuilder MIB(MI);
1614 LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
1615 auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
1616 Observer.changingInstr(MI);
1617 MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
1618 MI.getOperand(2).setReg(ShiftCst.getReg(0));
1619 Observer.changedInstr(MI);
1622 // shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
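// E.g. (illustrative): if KnownBits shows %x:_(s8) has at least 3 leading
// zero bits, then
//   %e:_(s32) = G_ZEXT %x
//   %s:_(s32) = G_SHL %e, 3
// can become
//   %n:_(s8) = G_SHL %x, 3
//   %s:_(s32) = G_ZEXT %n
// because the shift cannot overflow the narrow source.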
1623 bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
1624 RegisterImmPair &MatchData) {
1625 assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
  Register LHS = MI.getOperand(1).getReg();

  Register ExtSrc;
  if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
      !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
      !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
    return false;
1635 // TODO: Should handle vector splat.
1636 Register RHS = MI.getOperand(2).getReg();
  auto MaybeShiftAmtVal = getIConstantVRegValWithLookThrough(RHS, MRI);
  if (!MaybeShiftAmtVal)
    return false;

  LLT SrcTy = MRI.getType(ExtSrc);
1644 // We only really care about the legality with the shifted value. We can
1645 // pick any type the constant shift amount, so ask the target what to
1646 // use. Otherwise we would have to guess and hope it is reported as legal.
1647 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
1648 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
1652 int64_t ShiftAmt = MaybeShiftAmtVal->Value.getSExtValue();
1653 MatchData.Reg = ExtSrc;
1654 MatchData.Imm = ShiftAmt;
1656 unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countLeadingOnes();
1657 return MinLeadingZeros >= ShiftAmt;
1660 void CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
1661 const RegisterImmPair &MatchData) {
1662 Register ExtSrcReg = MatchData.Reg;
1663 int64_t ShiftAmtVal = MatchData.Imm;
1665 LLT ExtSrcTy = MRI.getType(ExtSrcReg);
1666 Builder.setInstrAndDebugLoc(MI);
  auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
  auto NarrowShift =
      Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
1670 Builder.buildZExt(MI.getOperand(0), NarrowShift);
1671 MI.eraseFromParent();
1674 bool CombinerHelper::matchCombineMergeUnmerge(MachineInstr &MI,
1675 Register &MatchInfo) {
1676 GMerge &Merge = cast<GMerge>(MI);
1677 SmallVector<Register, 16> MergedValues;
1678 for (unsigned I = 0; I < Merge.getNumSources(); ++I)
1679 MergedValues.emplace_back(Merge.getSourceReg(I));
1681 auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
1682 if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
1685 for (unsigned I = 0; I < MergedValues.size(); ++I)
1686 if (MergedValues[I] != Unmerge->getReg(I))
1689 MatchInfo = Unmerge->getSourceReg();
1693 static Register peekThroughBitcast(Register Reg,
1694 const MachineRegisterInfo &MRI) {
  while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
    ;

  return Reg;
}
1701 bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
1702 MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1703 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1704 "Expected an unmerge");
1705 auto &Unmerge = cast<GUnmerge>(MI);
1706 Register SrcReg = peekThroughBitcast(Unmerge.getSourceReg(), MRI);
1708 auto *SrcInstr = getOpcodeDef<GMergeLikeOp>(SrcReg, MRI);
1712 // Check the source type of the merge.
1713 LLT SrcMergeTy = MRI.getType(SrcInstr->getSourceReg(0));
1714 LLT Dst0Ty = MRI.getType(Unmerge.getReg(0));
1715 bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
1716 if (SrcMergeTy != Dst0Ty && !SameSize)
1718 // They are the same now (modulo a bitcast).
1719 // We can collect all the src registers.
1720 for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
1721 Operands.push_back(SrcInstr->getSourceReg(Idx));
1725 void CombinerHelper::applyCombineUnmergeMergeToPlainValues(
1726 MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1727 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1728 "Expected an unmerge");
1729 assert((MI.getNumOperands() - 1 == Operands.size()) &&
1730 "Not enough operands to replace all defs");
1731 unsigned NumElems = MI.getNumOperands() - 1;
1733 LLT SrcTy = MRI.getType(Operands[0]);
1734 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
1735 bool CanReuseInputDirectly = DstTy == SrcTy;
1736 Builder.setInstrAndDebugLoc(MI);
1737 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1738 Register DstReg = MI.getOperand(Idx).getReg();
1739 Register SrcReg = Operands[Idx];
1740 if (CanReuseInputDirectly)
      replaceRegWith(MRI, DstReg, SrcReg);
    else
      Builder.buildCast(DstReg, SrcReg);
  }
1745 MI.eraseFromParent();
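// Split a G_UNMERGE_VALUES of a G_CONSTANT/G_FCONSTANT into one narrow
// constant per definition. The first definition receives the low bits, e.g.
// unmerging a 32-bit 0x00010002 into two s16 pieces yields 0x0002 and 0x0001.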
1748 bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
1749 SmallVectorImpl<APInt> &Csts) {
1750 unsigned SrcIdx = MI.getNumOperands() - 1;
1751 Register SrcReg = MI.getOperand(SrcIdx).getReg();
1752 MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
1753 if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
1754 SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
1756 // Break down the big constant in smaller ones.
1757 const MachineOperand &CstVal = SrcInstr->getOperand(1);
1758 APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
1759 ? CstVal.getCImm()->getValue()
1760 : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
1762 LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
1763 unsigned ShiftAmt = Dst0Ty.getSizeInBits();
1764 // Unmerge a constant.
1765 for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
1766 Csts.emplace_back(Val.trunc(ShiftAmt));
1767 Val = Val.lshr(ShiftAmt);
1773 void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
1774 SmallVectorImpl<APInt> &Csts) {
1775 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1776 "Expected an unmerge");
1777 assert((MI.getNumOperands() - 1 == Csts.size()) &&
1778 "Not enough operands to replace all defs");
1779 unsigned NumElems = MI.getNumOperands() - 1;
1780 Builder.setInstrAndDebugLoc(MI);
1781 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1782 Register DstReg = MI.getOperand(Idx).getReg();
1783 Builder.buildConstant(DstReg, Csts[Idx]);
1786 MI.eraseFromParent();
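// A G_UNMERGE_VALUES of a G_IMPLICIT_DEF can simply define every result as
// undef.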
1789 bool CombinerHelper::matchCombineUnmergeUndef(
1790 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
1791 unsigned SrcIdx = MI.getNumOperands() - 1;
1792 Register SrcReg = MI.getOperand(SrcIdx).getReg();
1793 MatchInfo = [&MI](MachineIRBuilder &B) {
1794 unsigned NumElems = MI.getNumOperands() - 1;
1795 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1796 Register DstReg = MI.getOperand(Idx).getReg();
1797 B.buildUndef(DstReg);
1800 return isa<GImplicitDef>(MRI.getVRegDef(SrcReg));
1803 bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1804 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1805 "Expected an unmerge");
1806 // Check that all the lanes are dead except the first one.
1807 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
    if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))
      return false;
  }
  return true;
1814 void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1815 Builder.setInstrAndDebugLoc(MI);
1816 Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1817 // Truncating a vector is going to truncate every single lane,
1818 // whereas we want the full lowbits.
1819 // Do the operation on a scalar instead.
1820 LLT SrcTy = MRI.getType(SrcReg);
1821 if (SrcTy.isVector())
    SrcReg =
        Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0);
1825 Register Dst0Reg = MI.getOperand(0).getReg();
1826 LLT Dst0Ty = MRI.getType(Dst0Reg);
1827 if (Dst0Ty.isVector()) {
1828 auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg);
    Builder.buildCast(Dst0Reg, MIB);
  } else
    Builder.buildTrunc(Dst0Reg, SrcReg);
1832 MI.eraseFromParent();
1835 bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) {
1836 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1837 "Expected an unmerge");
1838 Register Dst0Reg = MI.getOperand(0).getReg();
1839 LLT Dst0Ty = MRI.getType(Dst0Reg);
1840 // G_ZEXT on vector applies to each lane, so it will
1841 // affect all destinations. Therefore we won't be able
1842 // to simplify the unmerge to just the first definition.
1843 if (Dst0Ty.isVector())
1845 Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1846 LLT SrcTy = MRI.getType(SrcReg);
1847 if (SrcTy.isVector())
1850 Register ZExtSrcReg;
1851 if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))
1854 // Finally we can replace the first definition with
1855 // a zext of the source if the definition is big enough to hold
1856 // all of ZExtSrc bits.
1857 LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1858 return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
1861 void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
1862 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1863 "Expected an unmerge");
1865 Register Dst0Reg = MI.getOperand(0).getReg();
1867 MachineInstr *ZExtInstr =
1868 MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());
1869 assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
1870 "Expecting a G_ZEXT");
1872 Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg();
1873 LLT Dst0Ty = MRI.getType(Dst0Reg);
1874 LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1876 Builder.setInstrAndDebugLoc(MI);
1878 if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
    Builder.buildZExt(Dst0Reg, ZExtSrcReg);
  } else {
1881 assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
1882 "ZExt src doesn't fit in destination");
    replaceRegWith(MRI, Dst0Reg, ZExtSrcReg);
  }

  Register ZeroReg;
  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
    if (!ZeroReg.isValid())
      ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);
1890 replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg);
1892 MI.eraseFromParent();
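// Match a constant shift of a scalar by at least half of its width. Such a
// shift only moves bits between the two halves, so the result can be rebuilt
// from a G_UNMERGE_VALUES of the source and a narrower shift.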
1895 bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
1896 unsigned TargetShiftSize,
1897 unsigned &ShiftVal) {
1898 assert((MI.getOpcode() == TargetOpcode::G_SHL ||
1899 MI.getOpcode() == TargetOpcode::G_LSHR ||
1900 MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
1902 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
1903 if (Ty.isVector()) // TODO:
1906 // Don't narrow further than the requested size.
1907 unsigned Size = Ty.getSizeInBits();
  if (Size <= TargetShiftSize)
    return false;

  auto MaybeImmVal =
      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
  if (!MaybeImmVal)
    return false;

  ShiftVal = MaybeImmVal->Value.getSExtValue();
1917 return ShiftVal >= Size / 2 && ShiftVal < Size;
1920 void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
1921 const unsigned &ShiftVal) {
1922 Register DstReg = MI.getOperand(0).getReg();
1923 Register SrcReg = MI.getOperand(1).getReg();
1924 LLT Ty = MRI.getType(SrcReg);
1925 unsigned Size = Ty.getSizeInBits();
1926 unsigned HalfSize = Size / 2;
1927 assert(ShiftVal >= HalfSize);
1929 LLT HalfTy = LLT::scalar(HalfSize);
1931 Builder.setInstr(MI);
1932 auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
1933 unsigned NarrowShiftAmt = ShiftVal - HalfSize;
1935 if (MI.getOpcode() == TargetOpcode::G_LSHR) {
1936 Register Narrowed = Unmerge.getReg(1);
1938 // dst = G_LSHR s64:x, C for C >= 32
1940 // lo, hi = G_UNMERGE_VALUES x
1941 // dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
1943 if (NarrowShiftAmt != 0) {
1944 Narrowed = Builder.buildLShr(HalfTy, Narrowed,
1945 Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
1948 auto Zero = Builder.buildConstant(HalfTy, 0);
1949 Builder.buildMerge(DstReg, { Narrowed, Zero });
1950 } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
1951 Register Narrowed = Unmerge.getReg(0);
1952 // dst = G_SHL s64:x, C for C >= 32
1954 // lo, hi = G_UNMERGE_VALUES x
1955 // dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
1956 if (NarrowShiftAmt != 0) {
1957 Narrowed = Builder.buildShl(HalfTy, Narrowed,
1958 Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
1961 auto Zero = Builder.buildConstant(HalfTy, 0);
    Builder.buildMerge(DstReg, { Zero, Narrowed });
  } else {
    assert(MI.getOpcode() == TargetOpcode::G_ASHR);
1965 auto Hi = Builder.buildAShr(
1966 HalfTy, Unmerge.getReg(1),
1967 Builder.buildConstant(HalfTy, HalfSize - 1));
1969 if (ShiftVal == HalfSize) {
1970 // (G_ASHR i64:x, 32) ->
1971 // G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
1972 Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi });
1973 } else if (ShiftVal == Size - 1) {
1974 // Don't need a second shift.
1975 // (G_ASHR i64:x, 63) ->
1976 // %narrowed = (G_ASHR hi_32(x), 31)
1977 // G_MERGE_VALUES %narrowed, %narrowed
      Builder.buildMerge(DstReg, { Hi, Hi });
    } else {
      auto Lo = Builder.buildAShr(
1981 HalfTy, Unmerge.getReg(1),
1982 Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
1984 // (G_ASHR i64:x, C) ->, for C >= 32
1985 // G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
1986 Builder.buildMerge(DstReg, { Lo, Hi });
1990 MI.eraseFromParent();
1993 bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
1994 unsigned TargetShiftAmount) {
  unsigned ShiftAmt;
  if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
    applyCombineShiftToUnmerge(MI, ShiftAmt);
    return true;
  }
  return false;
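// G_INTTOPTR (G_PTRTOINT x) --> x when x already has the destination pointer
// type.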
2004 bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
2005 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2006 Register DstReg = MI.getOperand(0).getReg();
2007 LLT DstTy = MRI.getType(DstReg);
2008 Register SrcReg = MI.getOperand(1).getReg();
2009 return mi_match(SrcReg, MRI,
2010 m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
2013 void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
2014 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2015 Register DstReg = MI.getOperand(0).getReg();
2016 Builder.setInstr(MI);
2017 Builder.buildCopy(DstReg, Reg);
2018 MI.eraseFromParent();
2021 bool CombinerHelper::matchCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
2022 assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
2023 Register SrcReg = MI.getOperand(1).getReg();
2024 return mi_match(SrcReg, MRI, m_GIntToPtr(m_Reg(Reg)));
2027 void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
2028 assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
2029 Register DstReg = MI.getOperand(0).getReg();
2030 Builder.setInstr(MI);
2031 Builder.buildZExtOrTrunc(DstReg, Reg);
2032 MI.eraseFromParent();
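// G_ADD (G_PTRTOINT x), y --> G_PTRTOINT (G_PTR_ADD x, y). The bool in the
// match data records whether the operands had to be commuted to put the
// pointer on the left.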
2035 bool CombinerHelper::matchCombineAddP2IToPtrAdd(
2036 MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2037 assert(MI.getOpcode() == TargetOpcode::G_ADD);
2038 Register LHS = MI.getOperand(1).getReg();
2039 Register RHS = MI.getOperand(2).getReg();
2040 LLT IntTy = MRI.getType(LHS);
// G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
// add's operands to put the pointer first.
2044 PtrReg.second = false;
2045 for (Register SrcReg : {LHS, RHS}) {
2046 if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
      // Don't handle cases where the integer is implicitly converted to the
      // pointer width.
      LLT PtrTy = MRI.getType(PtrReg.first);
      if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
        return true;
    }
2054 PtrReg.second = true;
2060 void CombinerHelper::applyCombineAddP2IToPtrAdd(
2061 MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2062 Register Dst = MI.getOperand(0).getReg();
2063 Register LHS = MI.getOperand(1).getReg();
2064 Register RHS = MI.getOperand(2).getReg();
2066 const bool DoCommute = PtrReg.second;
if (DoCommute)
  std::swap(LHS, RHS);
2071 LLT PtrTy = MRI.getType(LHS);
2073 Builder.setInstrAndDebugLoc(MI);
2074 auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
2075 Builder.buildPtrToInt(Dst, PtrAdd);
2076 MI.eraseFromParent();
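// Fold G_PTR_ADD (G_INTTOPTR C1), C2 into a single G_CONSTANT.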
bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
                                                  APInt &NewCst) {
  auto &PtrAdd = cast<GPtrAdd>(MI);
2082 Register LHS = PtrAdd.getBaseReg();
2083 Register RHS = PtrAdd.getOffsetReg();
2084 MachineRegisterInfo &MRI = Builder.getMF().getRegInfo();
2086 if (auto RHSCst = getIConstantVRegVal(RHS, MRI)) {
    APInt Cst;
    if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
2089 auto DstTy = MRI.getType(PtrAdd.getReg(0));
2090 // G_INTTOPTR uses zero-extension
2091 NewCst = Cst.zextOrTrunc(DstTy.getSizeInBits());
2092 NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());
void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
                                                  APInt &NewCst) {
  auto &PtrAdd = cast<GPtrAdd>(MI);
2103 Register Dst = PtrAdd.getReg(0);
2105 Builder.setInstrAndDebugLoc(MI);
2106 Builder.buildConstant(Dst, NewCst);
2107 PtrAdd.eraseFromParent();
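// G_ANYEXT (G_TRUNC x) --> x when x already has the destination type.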
2110 bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
2111 assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
2112 Register DstReg = MI.getOperand(0).getReg();
2113 Register SrcReg = MI.getOperand(1).getReg();
2114 LLT DstTy = MRI.getType(DstReg);
2115 return mi_match(SrcReg, MRI,
2116 m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
2119 bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI, Register &Reg) {
2120 assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
2121 Register DstReg = MI.getOperand(0).getReg();
2122 Register SrcReg = MI.getOperand(1).getReg();
2123 LLT DstTy = MRI.getType(DstReg);
2124 if (mi_match(SrcReg, MRI,
2125 m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) {
2126 unsigned DstSize = DstTy.getScalarSizeInBits();
2127 unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
2128 return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
2133 bool CombinerHelper::matchCombineExtOfExt(
2134 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2135 assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2136 MI.getOpcode() == TargetOpcode::G_SEXT ||
2137 MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2138 "Expected a G_[ASZ]EXT");
2139 Register SrcReg = MI.getOperand(1).getReg();
2140 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2141 // Match exts with the same opcode, anyext([sz]ext) and sext(zext).
2142 unsigned Opc = MI.getOpcode();
2143 unsigned SrcOpc = SrcMI->getOpcode();
2144 if (Opc == SrcOpc ||
2145 (Opc == TargetOpcode::G_ANYEXT &&
2146 (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
2147 (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
2148 MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc);
2154 void CombinerHelper::applyCombineExtOfExt(
2155 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2156 assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2157 MI.getOpcode() == TargetOpcode::G_SEXT ||
2158 MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2159 "Expected a G_[ASZ]EXT");
2161 Register Reg = std::get<0>(MatchInfo);
2162 unsigned SrcExtOp = std::get<1>(MatchInfo);
2164 // Combine exts with the same opcode.
2165 if (MI.getOpcode() == SrcExtOp) {
2166 Observer.changingInstr(MI);
2167 MI.getOperand(1).setReg(Reg);
2168 Observer.changedInstr(MI);
2173 // - anyext([sz]ext x) to [sz]ext x
2174 // - sext(zext x) to zext x
2175 if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2176 (MI.getOpcode() == TargetOpcode::G_SEXT &&
2177 SrcExtOp == TargetOpcode::G_ZEXT)) {
2178 Register DstReg = MI.getOperand(0).getReg();
2179 Builder.setInstrAndDebugLoc(MI);
2180 Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
2181 MI.eraseFromParent();
2185 void CombinerHelper::applyCombineMulByNegativeOne(MachineInstr &MI) {
2186 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
2187 Register DstReg = MI.getOperand(0).getReg();
2188 Register SrcReg = MI.getOperand(1).getReg();
2189 LLT DstTy = MRI.getType(DstReg);
2191 Builder.setInstrAndDebugLoc(MI);
Builder.buildSub(DstReg, Builder.buildConstant(DstTy, 0), SrcReg,
                 MI.getFlags());
2194 MI.eraseFromParent();
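// G_FNEG (G_FNEG x) --> x.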
2197 bool CombinerHelper::matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg) {
2198 assert(MI.getOpcode() == TargetOpcode::G_FNEG && "Expected a G_FNEG");
2199 Register SrcReg = MI.getOperand(1).getReg();
2200 return mi_match(SrcReg, MRI, m_GFNeg(m_Reg(Reg)));
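// G_FABS (G_FABS x) --> G_FABS x.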
2203 bool CombinerHelper::matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src) {
2204 assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
Src = MI.getOperand(1).getReg();
Register AbsSrc;
return mi_match(Src, MRI, m_GFabs(m_Reg(AbsSrc)));
2210 bool CombinerHelper::matchCombineFAbsOfFNeg(MachineInstr &MI,
2211 BuildFnTy &MatchInfo) {
2212 assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2213 Register Src = MI.getOperand(1).getReg();
Register NegSrc;
if (!mi_match(Src, MRI, m_GFNeg(m_Reg(NegSrc))))
  return false;
2219 MatchInfo = [=, &MI](MachineIRBuilder &B) {
2220 Observer.changingInstr(MI);
2221 MI.getOperand(1).setReg(NegSrc);
2222 Observer.changedInstr(MI);
2227 bool CombinerHelper::matchCombineTruncOfExt(
2228 MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2229 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2230 Register SrcReg = MI.getOperand(1).getReg();
2231 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2232 unsigned SrcOpc = SrcMI->getOpcode();
2233 if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
2234 SrcOpc == TargetOpcode::G_ZEXT) {
2235 MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc);
2241 void CombinerHelper::applyCombineTruncOfExt(
2242 MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2243 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2244 Register SrcReg = MatchInfo.first;
2245 unsigned SrcExtOp = MatchInfo.second;
2246 Register DstReg = MI.getOperand(0).getReg();
2247 LLT SrcTy = MRI.getType(SrcReg);
2248 LLT DstTy = MRI.getType(DstReg);
2249 if (SrcTy == DstTy) {
2250 MI.eraseFromParent();
    replaceRegWith(MRI, DstReg, SrcReg);
    return;
  }
2254 Builder.setInstrAndDebugLoc(MI);
2255 if (SrcTy.getSizeInBits() < DstTy.getSizeInBits())
  Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg});
else
  Builder.buildTrunc(DstReg, SrcReg);
2259 MI.eraseFromParent();
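// G_TRUNC (G_SHL x, K) --> G_SHL (G_TRUNC x), K when the shift amount is
// known to be small enough for the narrower type.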
2262 bool CombinerHelper::matchCombineTruncOfShl(
2263 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2264 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2265 Register DstReg = MI.getOperand(0).getReg();
2266 Register SrcReg = MI.getOperand(1).getReg();
2267 LLT DstTy = MRI.getType(DstReg);
Register ShiftSrc;
Register ShiftAmt;
if (MRI.hasOneNonDBGUse(SrcReg) &&
2272 mi_match(SrcReg, MRI, m_GShl(m_Reg(ShiftSrc), m_Reg(ShiftAmt))) &&
2273 isLegalOrBeforeLegalizer(
2274 {TargetOpcode::G_SHL,
2275 {DstTy, getTargetLowering().getPreferredShiftAmountTy(DstTy)}})) {
2276 KnownBits Known = KB->getKnownBits(ShiftAmt);
2277 unsigned Size = DstTy.getSizeInBits();
2278 if (Known.countMaxActiveBits() <= Log2_32(Size)) {
2279 MatchInfo = std::make_pair(ShiftSrc, ShiftAmt);
2286 void CombinerHelper::applyCombineTruncOfShl(
2287 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2288 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2289 Register DstReg = MI.getOperand(0).getReg();
2290 Register SrcReg = MI.getOperand(1).getReg();
2291 LLT DstTy = MRI.getType(DstReg);
2292 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2294 Register ShiftSrc = MatchInfo.first;
2295 Register ShiftAmt = MatchInfo.second;
2296 Builder.setInstrAndDebugLoc(MI);
2297 auto TruncShiftSrc = Builder.buildTrunc(DstTy, ShiftSrc);
2298 Builder.buildShl(DstReg, TruncShiftSrc, ShiftAmt, SrcMI->getFlags());
2299 MI.eraseFromParent();
2302 bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
2303 return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2304 return MO.isReg() &&
2305 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2309 bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
2310 return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2311 return !MO.isReg() ||
2312 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2316 bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
2317 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
2318 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
2319 return all_of(Mask, [](int Elt) { return Elt < 0; });
2322 bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
2323 assert(MI.getOpcode() == TargetOpcode::G_STORE);
return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
                    MRI);
2328 bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) {
2329 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
                    MRI);
2334 bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
2335 GSelect &SelMI = cast<GSelect>(MI);
auto Cst =
    isConstantOrConstantSplatVector(*MRI.getVRegDef(SelMI.getCondReg()), MRI);
if (!Cst)
  return false;
OpIdx = Cst->isZero() ? 3 : 2;
return true;
2344 bool CombinerHelper::eraseInst(MachineInstr &MI) {
2345 MI.eraseFromParent();
2349 bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
2350 const MachineOperand &MOP2) {
2351 if (!MOP1.isReg() || !MOP2.isReg())
auto InstAndDef1 = getDefSrcRegIgnoringCopies(MOP1.getReg(), MRI);
if (!InstAndDef1)
  return false;
auto InstAndDef2 = getDefSrcRegIgnoringCopies(MOP2.getReg(), MRI);
if (!InstAndDef2)
  return false;
2359 MachineInstr *I1 = InstAndDef1->MI;
2360 MachineInstr *I2 = InstAndDef2->MI;
2362 // Handle a case like this:
2364 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
// Even though %0 and %1 are produced by the same instruction they are not
// the same values.
if (I1 == I2)
  return MOP1.getReg() == MOP2.getReg();
// If we have an instruction which loads or stores, we can't guarantee that
// it will return the same value every time it is executed.
//
// For example, we may have
//
// %x1 = G_LOAD %addr (load N from @somewhere)
// ...
// call @foo
// ...
// %x2 = G_LOAD %addr (load N from @somewhere)
// ...
// %or = G_OR %x1, %x2
//
// It's possible that @foo will modify whatever lives at the address we're
// loading from. To be safe, let's just assume that all loads and stores
// are different (unless we have something which is guaranteed to not
// change.)
if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())
  return false;
2391 // If both instructions are loads or stores, they are equal only if both
2392 // are dereferenceable invariant loads with the same number of bits.
2393 if (I1->mayLoadOrStore() && I2->mayLoadOrStore()) {
2394 GLoadStore *LS1 = dyn_cast<GLoadStore>(I1);
2395 GLoadStore *LS2 = dyn_cast<GLoadStore>(I2);
2399 if (!I2->isDereferenceableInvariantLoad() ||
2400 (LS1->getMemSizeInBits() != LS2->getMemSizeInBits()))
2404 // Check for physical registers on the instructions first to avoid cases
2407 // %a = COPY $physreg
2409 // SOMETHING implicit-def $physreg
2411 // %b = COPY $physreg
2413 // These copies are not equivalent.
2414 if (any_of(I1->uses(), [](const MachineOperand &MO) {
        return MO.isReg() && MO.getReg().isPhysical();
      })) {
2417 // Check if we have a case like this:
2419 // %a = COPY $physreg
2422 // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
2423 // From that, we know that they must have the same value, since they must
2424 // have come from the same COPY.
2425 return I1->isIdenticalTo(*I2);
// We don't have any physical registers, so we don't necessarily need the
// same vreg defs.
//
2431 // On the off-chance that there's some target instruction feeding into the
2432 // instruction, let's use produceSameValue instead of isIdenticalTo.
2433 if (Builder.getTII().produceSameValue(*I1, *I2, &MRI)) {
2434 // Handle instructions with multiple defs that produce same values. Values
2435 // are same for operands with same index.
2436 // %0:_(s8), %1:_(s8), %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2437 // %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2438 // I1 and I2 are different instructions but produce same values,
2439 // %1 and %6 are same, %1 and %7 are not the same value.
2440 return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==
2441 I2->findRegisterDefOperandIdx(InstAndDef2->Reg);
2446 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
2449 auto *MI = MRI.getVRegDef(MOP.getReg());
2450 auto MaybeCst = isConstantOrConstantSplatVector(*MI, MRI);
2451 return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
2452 MaybeCst->getSExtValue() == C;
bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
                                                     unsigned OpIdx) {
  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2458 Register OldReg = MI.getOperand(0).getReg();
2459 Register Replacement = MI.getOperand(OpIdx).getReg();
2460 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2461 MI.eraseFromParent();
2462 replaceRegWith(MRI, OldReg, Replacement);
2466 bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
2467 Register Replacement) {
2468 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2469 Register OldReg = MI.getOperand(0).getReg();
2470 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2471 MI.eraseFromParent();
2472 replaceRegWith(MRI, OldReg, Replacement);
2476 bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
2477 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2478 // Match (cond ? x : x)
2479 return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
2480 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
2484 bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
2485 return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
2486 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
2490 bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
2491 return matchConstantOp(MI.getOperand(OpIdx), 0) &&
2492 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
2496 bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) {
2497 MachineOperand &MO = MI.getOperand(OpIdx);
2498 return MO.isReg() &&
2499 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
                                                        unsigned OpIdx) {
  MachineOperand &MO = MI.getOperand(OpIdx);
2505 return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB);
2508 bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
2509 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2510 Builder.setInstr(MI);
2511 Builder.buildFConstant(MI.getOperand(0), C);
2512 MI.eraseFromParent();
2516 bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
2517 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2518 Builder.setInstr(MI);
2519 Builder.buildConstant(MI.getOperand(0), C);
2520 MI.eraseFromParent();
2524 bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) {
2525 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2526 Builder.setInstr(MI);
2527 Builder.buildConstant(MI.getOperand(0), C);
2528 MI.eraseFromParent();
2532 bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
2533 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2534 Builder.setInstr(MI);
2535 Builder.buildUndef(MI.getOperand(0));
2536 MI.eraseFromParent();
2540 bool CombinerHelper::matchSimplifyAddToSub(
2541 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2542 Register LHS = MI.getOperand(1).getReg();
2543 Register RHS = MI.getOperand(2).getReg();
2544 Register &NewLHS = std::get<0>(MatchInfo);
2545 Register &NewRHS = std::get<1>(MatchInfo);
2547 // Helper lambda to check for opportunities for
2548 // ((0-A) + B) -> B - A
2549 // (A + (0-B)) -> A - B
2550 auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
    if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS))))
      return false;
    NewLHS = MaybeNewLHS;
    return true;
  };
2557 return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
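// Collapse a chain of G_INSERT_VECTOR_ELTs with constant indices, rooted in a
// G_BUILD_VECTOR or G_IMPLICIT_DEF, into a single G_BUILD_VECTOR.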
2560 bool CombinerHelper::matchCombineInsertVecElts(
2561 MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
       "Expected a G_INSERT_VECTOR_ELT");
2564 Register DstReg = MI.getOperand(0).getReg();
2565 LLT DstTy = MRI.getType(DstReg);
2566 assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?");
2567 unsigned NumElts = DstTy.getNumElements();
2568 // If this MI is part of a sequence of insert_vec_elts, then
2569 // don't do the combine in the middle of the sequence.
2570 if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() ==
        TargetOpcode::G_INSERT_VECTOR_ELT)
  return false;
2573 MachineInstr *CurrInst = &MI;
MachineInstr *TmpInst;
int64_t IntImm;
Register TmpReg;
MatchInfo.resize(NumElts);
while (mi_match(
    CurrInst->getOperand(0).getReg(), MRI,
    m_GInsertVecElt(m_MInstr(TmpInst), m_Reg(TmpReg), m_ICst(IntImm)))) {
  if (IntImm >= NumElts)
    return false;
  if (!MatchInfo[IntImm])
    MatchInfo[IntImm] = TmpReg;
  CurrInst = TmpInst;
}
if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
  return false;
2590 if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
2591 for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) {
2592 if (!MatchInfo[I - 1].isValid())
      MatchInfo[I - 1] = TmpInst->getOperand(I).getReg();
  }
  return true;
}
2597 // If we didn't end in a G_IMPLICIT_DEF, bail out.
2598 return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
2601 void CombinerHelper::applyCombineInsertVecElts(
2602 MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2603 Builder.setInstr(MI);
Register UndefReg;
auto GetUndef = [&]() {
  if (UndefReg)
    return UndefReg;
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  UndefReg = Builder.buildUndef(DstTy.getScalarType()).getReg(0);
  return UndefReg;
};
for (unsigned I = 0; I < MatchInfo.size(); ++I) {
  if (!MatchInfo[I])
    MatchInfo[I] = GetUndef();
}
2616 Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo);
2617 MI.eraseFromParent();
2620 void CombinerHelper::applySimplifyAddToSub(
2621 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2622 Builder.setInstr(MI);
2623 Register SubLHS, SubRHS;
2624 std::tie(SubLHS, SubRHS) = MatchInfo;
2625 Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
2626 MI.eraseFromParent();
2629 bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
2630 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2631 // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
2633 // Creates the new hand + logic instruction (but does not insert them.)
2635 // On success, MatchInfo is populated with the new instructions. These are
2636 // inserted in applyHoistLogicOpWithSameOpcodeHands.
2637 unsigned LogicOpcode = MI.getOpcode();
2638 assert(LogicOpcode == TargetOpcode::G_AND ||
2639 LogicOpcode == TargetOpcode::G_OR ||
2640 LogicOpcode == TargetOpcode::G_XOR);
2641 MachineIRBuilder MIB(MI);
2642 Register Dst = MI.getOperand(0).getReg();
2643 Register LHSReg = MI.getOperand(1).getReg();
2644 Register RHSReg = MI.getOperand(2).getReg();
2646 // Don't recompute anything.
2647 if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
2650 // Make sure we have (hand x, ...), (hand y, ...)
2651 MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
2652 MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
2653 if (!LeftHandInst || !RightHandInst)
2655 unsigned HandOpcode = LeftHandInst->getOpcode();
2656 if (HandOpcode != RightHandInst->getOpcode())
2658 if (!LeftHandInst->getOperand(1).isReg() ||
2659 !RightHandInst->getOperand(1).isReg())
2662 // Make sure the types match up, and if we're doing this post-legalization,
2663 // we end up with legal types.
2664 Register X = LeftHandInst->getOperand(1).getReg();
2665 Register Y = RightHandInst->getOperand(1).getReg();
2666 LLT XTy = MRI.getType(X);
LLT YTy = MRI.getType(Y);
if (XTy != YTy)
  return false;
if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
  return false;
2673 // Optional extra source register.
2674 Register ExtraHandOpSrcReg;
switch (HandOpcode) {
default:
  return false;
2678 case TargetOpcode::G_ANYEXT:
2679 case TargetOpcode::G_SEXT:
2680 case TargetOpcode::G_ZEXT: {
  // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
  break;
}
2684 case TargetOpcode::G_AND:
2685 case TargetOpcode::G_ASHR:
2686 case TargetOpcode::G_LSHR:
2687 case TargetOpcode::G_SHL: {
2688 // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
2689 MachineOperand &ZOp = LeftHandInst->getOperand(2);
  if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
    return false;
  ExtraHandOpSrcReg = ZOp.getReg();
  break;
}
}
2697 // Record the steps to build the new instructions.
2699 // Steps to build (logic x, y)
2700 auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
2701 OperandBuildSteps LogicBuildSteps = {
2702 [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
2703 [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
2704 [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
2705 InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
2707 // Steps to build hand (logic x, y), ...z
2708 OperandBuildSteps HandBuildSteps = {
2709 [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
2710 [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
2711 if (ExtraHandOpSrcReg.isValid())
2712 HandBuildSteps.push_back(
2713 [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
2714 InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
2716 MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
2720 void CombinerHelper::applyBuildInstructionSteps(
2721 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2722 assert(MatchInfo.InstrsToBuild.size() &&
2723 "Expected at least one instr to build?");
2724 Builder.setInstr(MI);
2725 for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
2726 assert(InstrToBuild.Opcode && "Expected a valid opcode?");
2727 assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
2728 MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
    for (auto &OperandFn : InstrToBuild.OperandFns)
      OperandFn(Instr);
  }
  MI.eraseFromParent();
2735 bool CombinerHelper::matchAshrShlToSextInreg(
2736 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2737 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
int64_t ShlCst, AshrCst;
Register Src;
2740 // FIXME: detect splat constant vectors.
if (!mi_match(MI.getOperand(0).getReg(), MRI,
              m_GAShr(m_GShl(m_Reg(Src), m_ICst(ShlCst)), m_ICst(AshrCst))))
  return false;
if (ShlCst != AshrCst)
  return false;
if (!isLegalOrBeforeLegalizer(
        {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
  return false;
MatchInfo = std::make_tuple(Src, ShlCst);
return true;
2753 void CombinerHelper::applyAshShlToSextInreg(
2754 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2755 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
Register Src;
int64_t ShiftAmt;
std::tie(Src, ShiftAmt) = MatchInfo;
2759 unsigned Size = MRI.getType(Src).getScalarSizeInBits();
2760 Builder.setInstrAndDebugLoc(MI);
2761 Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
2762 MI.eraseFromParent();
2765 /// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
2766 bool CombinerHelper::matchOverlappingAnd(
2767 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
2768 assert(MI.getOpcode() == TargetOpcode::G_AND);
2770 Register Dst = MI.getOperand(0).getReg();
2771 LLT Ty = MRI.getType(Dst);
  Register R;
  int64_t C1;
  int64_t C2;
  if (!mi_match(Dst, MRI,
                m_GAnd(m_GAnd(m_Reg(R), m_ICst(C1)), m_ICst(C2))))
    return false;

  MatchInfo = [=](MachineIRBuilder &B) {
    if (C1 & C2) {
      B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));
      return;
    }
    auto Zero = B.buildConstant(Ty, 0);
    replaceRegWith(MRI, Dst, Zero->getOperand(0).getReg());
  };
  return true;
2792 bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
2793 Register &Replacement) {
2796 // %y:_(sN) = G_SOMETHING
2797 // %x:_(sN) = G_SOMETHING
2798 // %res:_(sN) = G_AND %x, %y
2800 // Eliminate the G_AND when it is known that x & y == x or x & y == y.
2802 // Patterns like this can appear as a result of legalization. E.g.
2804 // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
2805 // %one:_(s32) = G_CONSTANT i32 1
2806 // %and:_(s32) = G_AND %cmp, %one
2808 // In this case, G_ICMP only produces a single bit, so x & 1 == x.
2809 assert(MI.getOpcode() == TargetOpcode::G_AND);
2813 Register AndDst = MI.getOperand(0).getReg();
2814 LLT DstTy = MRI.getType(AndDst);
2816 // FIXME: This should be removed once GISelKnownBits supports vectors.
2817 if (DstTy.isVector())
2820 Register LHS = MI.getOperand(1).getReg();
2821 Register RHS = MI.getOperand(2).getReg();
2822 KnownBits LHSBits = KB->getKnownBits(LHS);
2823 KnownBits RHSBits = KB->getKnownBits(RHS);
2825 // Check that x & Mask == x.
2826 // x & 1 == x, always
2827 // x & 0 == x, only if x is also 0
2828 // Meaning Mask has no effect if every bit is either one in Mask or zero in x.
2830 // Check if we can replace AndDst with the LHS of the G_AND
2831 if (canReplaceReg(AndDst, LHS, MRI) &&
    (LHSBits.Zero | RHSBits.One).isAllOnes()) {
  Replacement = LHS;
  return true;
}
2837 // Check if we can replace AndDst with the RHS of the G_AND
2838 if (canReplaceReg(AndDst, RHS, MRI) &&
    (LHSBits.One | RHSBits.Zero).isAllOnes()) {
  Replacement = RHS;
  return true;
}

return false;
2847 bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) {
2850 // %y:_(sN) = G_SOMETHING
2851 // %x:_(sN) = G_SOMETHING
2852 // %res:_(sN) = G_OR %x, %y
2854 // Eliminate the G_OR when it is known that x | y == x or x | y == y.
2855 assert(MI.getOpcode() == TargetOpcode::G_OR);
2859 Register OrDst = MI.getOperand(0).getReg();
2860 LLT DstTy = MRI.getType(OrDst);
2862 // FIXME: This should be removed once GISelKnownBits supports vectors.
2863 if (DstTy.isVector())
2866 Register LHS = MI.getOperand(1).getReg();
2867 Register RHS = MI.getOperand(2).getReg();
2868 KnownBits LHSBits = KB->getKnownBits(LHS);
2869 KnownBits RHSBits = KB->getKnownBits(RHS);
2871 // Check that x | Mask == x.
2872 // x | 0 == x, always
2873 // x | 1 == x, only if x is also 1
2874 // Meaning Mask has no effect if every bit is either zero in Mask or one in x.
2876 // Check if we can replace OrDst with the LHS of the G_OR
2877 if (canReplaceReg(OrDst, LHS, MRI) &&
    (LHSBits.One | RHSBits.Zero).isAllOnes()) {
  Replacement = LHS;
  return true;
}
2883 // Check if we can replace OrDst with the RHS of the G_OR
2884 if (canReplaceReg(OrDst, RHS, MRI) &&
    (LHSBits.Zero | RHSBits.One).isAllOnes()) {
  Replacement = RHS;
  return true;
}

return false;
2893 bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
2894 // If the input is already sign extended, just drop the extension.
2895 Register Src = MI.getOperand(1).getReg();
2896 unsigned ExtBits = MI.getOperand(2).getImm();
2897 unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
2898 return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
2901 static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
2902 int64_t Cst, bool IsVector, bool IsFP) {
2903 // For i1, Cst will always be -1 regardless of boolean contents.
2904 return (ScalarSizeBits == 1 && Cst == -1) ||
2905 isConstTrueVal(TLI, Cst, IsVector, IsFP);
2908 bool CombinerHelper::matchNotCmp(MachineInstr &MI,
2909 SmallVectorImpl<Register> &RegsToNegate) {
2910 assert(MI.getOpcode() == TargetOpcode::G_XOR);
2911 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
2912 const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
Register XorSrc;
Register CstReg;
// We match xor(src, true) here.
if (!mi_match(MI.getOperand(0).getReg(), MRI,
              m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
  return false;
2920 if (!MRI.hasOneNonDBGUse(XorSrc))
2923 // Check that XorSrc is the root of a tree of comparisons combined with ANDs
// and ORs. The suffix of RegsToNegate starting from index I is used as a work
2925 // list of tree nodes to visit.
2926 RegsToNegate.push_back(XorSrc);
// Remember whether the comparisons are all integer or all floating point.
bool IsInt = false;
bool IsFP = false;
2930 for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
2931 Register Reg = RegsToNegate[I];
2932 if (!MRI.hasOneNonDBGUse(Reg))
2934 MachineInstr *Def = MRI.getVRegDef(Reg);
2935 switch (Def->getOpcode()) {
    default:
      // Don't match if the tree contains anything other than ANDs, ORs and
      // comparisons.
      return false;
    case TargetOpcode::G_ICMP:
      if (IsFP)
        return false;
      IsInt = true;
      // When we apply the combine we will invert the predicate.
      break;
    case TargetOpcode::G_FCMP:
      if (IsInt)
        return false;
      IsFP = true;
      // When we apply the combine we will invert the predicate.
      break;
2952 case TargetOpcode::G_AND:
2953 case TargetOpcode::G_OR:
2954 // Implement De Morgan's laws:
2955 // ~(x & y) -> ~x | ~y
2956 // ~(x | y) -> ~x & ~y
2957 // When we apply the combine we will change the opcode and recursively
2958 // negate the operands.
2959 RegsToNegate.push_back(Def->getOperand(1).getReg());
2960 RegsToNegate.push_back(Def->getOperand(2).getReg());
2965 // Now we know whether the comparisons are integer or floating point, check
2966 // the constant in the xor.
int64_t Cst;
if (Ty.isVector()) {
  MachineInstr *CstDef = MRI.getVRegDef(CstReg);
  auto MaybeCst = getIConstantSplatSExtVal(*CstDef, MRI);
  if (!MaybeCst)
    return false;
  if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))
    return false;
} else {
  if (!mi_match(CstReg, MRI, m_ICst(Cst)))
    return false;
  if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP))
    return false;
}
return true;
2985 void CombinerHelper::applyNotCmp(MachineInstr &MI,
2986 SmallVectorImpl<Register> &RegsToNegate) {
2987 for (Register Reg : RegsToNegate) {
2988 MachineInstr *Def = MRI.getVRegDef(Reg);
2989 Observer.changingInstr(*Def);
2990 // For each comparison, invert the opcode. For each AND and OR, change the
2992 switch (Def->getOpcode()) {
2994 llvm_unreachable("Unexpected opcode");
2995 case TargetOpcode::G_ICMP:
2996 case TargetOpcode::G_FCMP: {
2997 MachineOperand &PredOp = Def->getOperand(1);
2998 CmpInst::Predicate NewP = CmpInst::getInversePredicate(
2999 (CmpInst::Predicate)PredOp.getPredicate());
3000 PredOp.setPredicate(NewP);
3003 case TargetOpcode::G_AND:
3004 Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));
3006 case TargetOpcode::G_OR:
3007 Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3010 Observer.changedInstr(*Def);
3013 replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
3014 MI.eraseFromParent();
3017 bool CombinerHelper::matchXorOfAndWithSameReg(
3018 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3019 // Match (xor (and x, y), y) (or any of its commuted cases)
3020 assert(MI.getOpcode() == TargetOpcode::G_XOR);
3021 Register &X = MatchInfo.first;
3022 Register &Y = MatchInfo.second;
3023 Register AndReg = MI.getOperand(1).getReg();
3024 Register SharedReg = MI.getOperand(2).getReg();
3026 // Find a G_AND on either side of the G_XOR.
3029 // (xor (and x, y), SharedReg)
3030 // (xor SharedReg, (and x, y))
3031 if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) {
3032 std::swap(AndReg, SharedReg);
3033 if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y))))
3037 // Only do this if we'll eliminate the G_AND.
3038 if (!MRI.hasOneNonDBGUse(AndReg))
// We can combine if SharedReg is the same as either the LHS or RHS of the
// G_AND.
if (Y != SharedReg)
  std::swap(X, Y);
return Y == SharedReg;
3048 void CombinerHelper::applyXorOfAndWithSameReg(
3049 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3050 // Fold (xor (and x, y), y) -> (and (not x), y)
3051 Builder.setInstrAndDebugLoc(MI);
Register X, Y;
std::tie(X, Y) = MatchInfo;
3054 auto Not = Builder.buildNot(MRI.getType(X), X);
3055 Observer.changingInstr(MI);
3056 MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3057 MI.getOperand(1).setReg(Not->getOperand(0).getReg());
3058 MI.getOperand(2).setReg(Y);
3059 Observer.changedInstr(MI);
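// A G_PTR_ADD whose base pointer is zero is just an integer-to-pointer cast
// of the offset.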
3062 bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) {
3063 auto &PtrAdd = cast<GPtrAdd>(MI);
3064 Register DstReg = PtrAdd.getReg(0);
3065 LLT Ty = MRI.getType(DstReg);
3066 const DataLayout &DL = Builder.getMF().getDataLayout();
3068 if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace()))
3071 if (Ty.isPointer()) {
3072 auto ConstVal = getIConstantVRegVal(PtrAdd.getBaseReg(), MRI);
3073 return ConstVal && *ConstVal == 0;
3076 assert(Ty.isVector() && "Expecting a vector type");
3077 const MachineInstr *VecMI = MRI.getVRegDef(PtrAdd.getBaseReg());
3078 return isBuildVectorAllZeros(*VecMI, MRI);
3081 void CombinerHelper::applyPtrAddZero(MachineInstr &MI) {
3082 auto &PtrAdd = cast<GPtrAdd>(MI);
3083 Builder.setInstrAndDebugLoc(PtrAdd);
3084 Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg());
3085 PtrAdd.eraseFromParent();
3088 /// The second source operand is known to be a power of 2.
3089 void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) {
3090 Register DstReg = MI.getOperand(0).getReg();
3091 Register Src0 = MI.getOperand(1).getReg();
3092 Register Pow2Src1 = MI.getOperand(2).getReg();
3093 LLT Ty = MRI.getType(DstReg);
3094 Builder.setInstrAndDebugLoc(MI);
3096 // Fold (urem x, pow2) -> (and x, pow2-1)
3097 auto NegOne = Builder.buildConstant(Ty, -1);
3098 auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne);
3099 Builder.buildAnd(DstReg, Src0, Add);
3100 MI.eraseFromParent();
3103 bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI,
3104 unsigned &SelectOpNo) {
3105 Register LHS = MI.getOperand(1).getReg();
3106 Register RHS = MI.getOperand(2).getReg();
Register OtherOperandReg = RHS;
SelectOpNo = 1;
3110 MachineInstr *Select = MRI.getVRegDef(LHS);
3112 // Don't do this unless the old select is going away. We want to eliminate the
3113 // binary operator, not replace a binop with a select.
3114 if (Select->getOpcode() != TargetOpcode::G_SELECT ||
3115 !MRI.hasOneNonDBGUse(LHS)) {
    OtherOperandReg = LHS;
    SelectOpNo = 2;
3118 Select = MRI.getVRegDef(RHS);
3119 if (Select->getOpcode() != TargetOpcode::G_SELECT ||
3120 !MRI.hasOneNonDBGUse(RHS))
3124 MachineInstr *SelectLHS = MRI.getVRegDef(Select->getOperand(2).getReg());
3125 MachineInstr *SelectRHS = MRI.getVRegDef(Select->getOperand(3).getReg());
3127 if (!isConstantOrConstantVector(*SelectLHS, MRI,
3129 /*AllowOpaqueConstants*/ false))
3131 if (!isConstantOrConstantVector(*SelectRHS, MRI,
3133 /*AllowOpaqueConstants*/ false))
3136 unsigned BinOpcode = MI.getOpcode();
// We now know one of the operands is a select of constants. Now verify that
// the other binary operator operand is either a constant, or we can handle a
// variable.
3141 bool CanFoldNonConst =
3142 (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&
3143 (isNullOrNullSplat(*SelectLHS, MRI) ||
3144 isAllOnesOrAllOnesSplat(*SelectLHS, MRI)) &&
3145 (isNullOrNullSplat(*SelectRHS, MRI) ||
3146 isAllOnesOrAllOnesSplat(*SelectRHS, MRI));
3147 if (CanFoldNonConst)
3150 return isConstantOrConstantVector(*MRI.getVRegDef(OtherOperandReg), MRI,
3152 /*AllowOpaqueConstants*/ false);
/// \p SelectOperand is the operand in binary operator \p MI that is the select
/// to fold.
3157 bool CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI,
3158 const unsigned &SelectOperand) {
3159 Builder.setInstrAndDebugLoc(MI);
3161 Register Dst = MI.getOperand(0).getReg();
3162 Register LHS = MI.getOperand(1).getReg();
3163 Register RHS = MI.getOperand(2).getReg();
3164 MachineInstr *Select = MRI.getVRegDef(MI.getOperand(SelectOperand).getReg());
3166 Register SelectCond = Select->getOperand(1).getReg();
3167 Register SelectTrue = Select->getOperand(2).getReg();
3168 Register SelectFalse = Select->getOperand(3).getReg();
3170 LLT Ty = MRI.getType(Dst);
3171 unsigned BinOpcode = MI.getOpcode();
3173 Register FoldTrue, FoldFalse;
3175 // We have a select-of-constants followed by a binary operator with a
3176 // constant. Eliminate the binop by pulling the constant math into the select.
3177 // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO
3178 if (SelectOperand == 1) {
3179 // TODO: SelectionDAG verifies this actually constant folds before
3180 // committing to the combine.
3182 FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {SelectTrue, RHS}).getReg(0);
    FoldFalse =
        Builder.buildInstr(BinOpcode, {Ty}, {SelectFalse, RHS}).getReg(0);
  } else {
    FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectTrue}).getReg(0);
    FoldFalse =
        Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectFalse}).getReg(0);
  }
3191 Builder.buildSelect(Dst, SelectCond, FoldTrue, FoldFalse, MI.getFlags());
3192 Observer.erasingInstr(*Select);
3193 Select->eraseFromParent();
3194 MI.eraseFromParent();
3199 Optional<SmallVector<Register, 8>>
3200 CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
3201 assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
3202 // We want to detect if Root is part of a tree which represents a bunch
3203 // of loads being merged into a larger load. We'll try to recognize patterns
// like an OR-tree that combines individually loaded and shifted bytes.
3223 // Each "Reg" may have been produced by a load + some arithmetic. This
3224 // function will save each of them.
3225 SmallVector<Register, 8> RegsToVisit;
3226 SmallVector<const MachineInstr *, 7> Ors = {Root};
3228 // In the "worst" case, we're dealing with a load for each byte. So, there
3229 // are at most #bytes - 1 ORs.
3230 const unsigned MaxIter =
3231 MRI.getType(Root->getOperand(0).getReg()).getSizeInBytes() - 1;
3232 for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {
3235 const MachineInstr *Curr = Ors.pop_back_val();
3236 Register OrLHS = Curr->getOperand(1).getReg();
3237 Register OrRHS = Curr->getOperand(2).getReg();
// In the combine, we want to eliminate the entire tree.
3240 if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS))
3243 // If it's a G_OR, save it and continue to walk. If it's not, then it's
3244 // something that may be a load + arithmetic.
    if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrLHS, MRI))
      Ors.push_back(Or);
    else
      RegsToVisit.push_back(OrLHS);
    if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrRHS, MRI))
      Ors.push_back(Or);
    else
      RegsToVisit.push_back(OrRHS);
  }
3255 // We're going to try and merge each register into a wider power-of-2 type,
3256 // so we ought to have an even number of registers.
3257 if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
3262 /// Helper function for findLoadOffsetsForLoadOrCombine.
3264 /// Check if \p Reg is the result of loading a \p MemSizeInBits wide value,
3265 /// and then moving that value into a specific byte offset.
3269 /// \returns The load instruction and the byte offset it is moved into.
3270 static Optional<std::pair<GZExtLoad *, int64_t>>
3271 matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
3272 const MachineRegisterInfo &MRI) {
3273 assert(MRI.hasOneNonDBGUse(Reg) &&
3274 "Expected Reg to only have one non-debug use?");
Register MaybeLoad;
int64_t Shift;
if (!mi_match(Reg, MRI,
              m_OneNonDBGUse(m_GShl(m_Reg(MaybeLoad), m_ICst(Shift))))) {
  Shift = 0;
  MaybeLoad = Reg;
}

if (Shift % MemSizeInBits != 0)
  return None;
3286 // TODO: Handle other types of loads.
auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);
if (!Load)
  return None;

if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
  return None;
3294 return std::make_pair(Load, Shift / MemSizeInBits);
3297 Optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
3298 CombinerHelper::findLoadOffsetsForLoadOrCombine(
3299 SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
3300 const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {
3302 // Each load found for the pattern. There should be one for each RegsToVisit.
3303 SmallSetVector<const MachineInstr *, 8> Loads;
3305 // The lowest index used in any load. (The lowest "i" for each x[i].)
3306 int64_t LowestIdx = INT64_MAX;
3308 // The load which uses the lowest index.
3309 GZExtLoad *LowestIdxLoad = nullptr;
3311 // Keeps track of the load indices we see. We shouldn't see any indices twice.
3312 SmallSet<int64_t, 8> SeenIdx;
3314 // Ensure each load is in the same MBB.
3315 // TODO: Support multiple MachineBasicBlocks.
3316 MachineBasicBlock *MBB = nullptr;
3317 const MachineMemOperand *MMO = nullptr;
3319 // Earliest instruction-order load in the pattern.
3320 GZExtLoad *EarliestLoad = nullptr;
3322 // Latest instruction-order load in the pattern.
3323 GZExtLoad *LatestLoad = nullptr;
// Base pointer which every load should share.
Register BasePtr;
3328 // We want to find a load for each register. Each load should have some
3329 // appropriate bit twiddling arithmetic. During this loop, we will also keep
3330 // track of the load which uses the lowest index. Later, we will check if we
3331 // can use its pointer in the final, combined load.
3332 for (auto Reg : RegsToVisit) {
3333 // Find the load, and find the position that it will end up in (e.g. a
3335 auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI);
    if (!LoadAndPos)
      return None;
    GZExtLoad *Load;
    int64_t DstPos;
    std::tie(Load, DstPos) = *LoadAndPos;
3342 // TODO: Handle multiple MachineBasicBlocks. Currently not handled because
3343 // it is difficult to check for stores/calls/etc between loads.
3344 MachineBasicBlock *LoadMBB = Load->getParent();
3350 // Make sure that the MachineMemOperands of every seen load are compatible.
3351 auto &LoadMMO = Load->getMMO();
3354 if (MMO->getAddrSpace() != LoadMMO.getAddrSpace())
3357 // Find out what the base pointer and index for the load is.
    Register LoadPtr;
    int64_t Idx;
    if (!mi_match(Load->getOperand(1).getReg(), MRI,
3361 m_GPtrAdd(m_Reg(LoadPtr), m_ICst(Idx)))) {
      LoadPtr = Load->getOperand(1).getReg();
      Idx = 0;
    }
3366 // Don't combine things like a[i], a[i] -> a bigger load.
3367 if (!SeenIdx.insert(Idx).second)
3370 // Every load must share the same base pointer; don't combine things like:
3372 // a[i], b[i + 1] -> a bigger load.
    if (!BasePtr.isValid())
      BasePtr = LoadPtr;
    if (BasePtr != LoadPtr)
      return None;
3378 if (Idx < LowestIdx) {
      LowestIdx = Idx;
      LowestIdxLoad = Load;
    }
3383 // Keep track of the byte offset that this load ends up at. If we have seen
3384 // the byte offset, then stop here. We do not want to combine:
3386 // a[i] << 16, a[i + k] << 16 -> a bigger load.
3387 if (!MemOffset2Idx.try_emplace(DstPos, Idx).second)
3391 // Keep track of the position of the earliest/latest loads in the pattern.
3392 // We will check that there are no load fold barriers between them later
3395 // FIXME: Is there a better way to check for load fold barriers?
3396 if (!EarliestLoad || dominates(*Load, *EarliestLoad))
3397 EarliestLoad = Load;
3398 if (!LatestLoad || dominates(*LatestLoad, *Load))
3402 // We found a load for each register. Let's check if each load satisfies the
3404 assert(Loads.size() == RegsToVisit.size() &&
3405 "Expected to find a load for each register?");
3406 assert(EarliestLoad != LatestLoad && EarliestLoad &&
3407 LatestLoad && "Expected at least two loads?");
3409 // Check if there are any stores, calls, etc. between any of the loads. If
3410 // there are, then we can't safely perform the combine.
3412 // MaxIter is chosen based off the (worst case) number of iterations it
3413 // typically takes to succeed in the LLVM test suite plus some padding.
3415 // FIXME: Is there a better way to check for load fold barriers?
const unsigned MaxIter = 20;
unsigned Iter = 0;
3418 for (const auto &MI : instructionsWithoutDebug(EarliestLoad->getIterator(),
3419 LatestLoad->getIterator())) {
3420 if (Loads.count(&MI))
3422 if (MI.isLoadFoldBarrier())
3424 if (Iter++ == MaxIter)
3428 return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
3431 bool CombinerHelper::matchLoadOrCombine(
3432 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3433 assert(MI.getOpcode() == TargetOpcode::G_OR);
3434 MachineFunction &MF = *MI.getMF();
3435 // Assuming a little-endian target, transform:
3437 // s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
3439 // s32 val = *((i32)a)
3442 // s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
3444 // s32 val = BSWAP(*((s32)a))
3445 Register Dst = MI.getOperand(0).getReg();
3446 LLT Ty = MRI.getType(Dst);
3450 // We need to combine at least two loads into this type. Since the smallest
3451 // possible load is into a byte, we need at least a 16-bit wide type.
3452 const unsigned WideMemSizeInBits = Ty.getSizeInBits();
3453 if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)
3456 // Match a collection of non-OR instructions in the pattern.
3457 auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);
3461 // We have a collection of non-OR instructions. Figure out how wide each of
3462 // the small loads should be based off of the number of potential loads we
3464 const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
3465 if (NarrowMemSizeInBits % 8 != 0)
3468 // Check if each register feeding into each OR is a load from the same
3469 // base pointer + some arithmetic.
3471 // e.g. a[0], a[1] << 8, a[2] << 16, etc.
3473 // Also verify that each of these ends up putting a[i] into the same memory
3474 // offset as a load into a wide type would.
3475 SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx;
GZExtLoad *LowestIdxLoad, *LatestLoad;
int64_t LowestIdx;
3478 auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
3479 MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);
3482 std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;
3484 // We have a bunch of loads being OR'd together. Using the addresses + offsets
3485 // we found before, check if this corresponds to a big or little endian byte
3486 // pattern. If it does, then we can represent it using a load + possibly a
3488 bool IsBigEndianTarget = MF.getDataLayout().isBigEndian();
3489 Optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
3492 bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
3493 if (NeedsBSwap && !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {Ty}}))
3496 // Make sure that the load from the lowest index produces offset 0 in the
3499 // This ensures that we won't combine something like this:
3501 // load x[i] -> byte 2
3502 // load x[i+1] -> byte 0 ---> wide_load x[i]
3503 // load x[i+2] -> byte 1
3504 const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
3505 const unsigned ZeroByteOffset =
3507 ? bigEndianByteAt(NumLoadsInTy, 0)
3508 : littleEndianByteAt(NumLoadsInTy, 0);
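  // Illustrative check: with 4 narrow loads, littleEndianByteAt(4, 0) == 0 and
  // bigEndianByteAt(4, 0) == 3, so the entry at byte offset 0 (little-endian
  // pattern) or 3 (big-endian pattern) must belong to the lowest-index load.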
3509 auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
3510 if (ZeroOffsetIdx == MemOffset2Idx.end() ||
3511 ZeroOffsetIdx->second != LowestIdx)
3514 // We will reuse the pointer from the load which ends up at byte offset 0. It
3515 // may not use index 0.
3516 Register Ptr = LowestIdxLoad->getPointerReg();
3517 const MachineMemOperand &MMO = LowestIdxLoad->getMMO();
3518 LegalityQuery::MemDesc MMDesc(MMO);
3519 MMDesc.MemoryTy = Ty;
3520 if (!isLegalOrBeforeLegalizer(
3521 {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))
3523 auto PtrInfo = MMO.getPointerInfo();
3524 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, WideMemSizeInBits / 8);
3526 // Load must be allowed and fast on the target.
3527 LLVMContext &C = MF.getFunction().getContext();
3528 auto &DL = MF.getDataLayout();
3530 if (!getTargetLowering().allowsMemoryAccess(C, DL, Ty, *NewMMO, &Fast) ||
3534 MatchInfo = [=](MachineIRBuilder &MIB) {
3535 MIB.setInstrAndDebugLoc(*LatestLoad);
3536 Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst;
3537 MIB.buildLoad(LoadDst, Ptr, *NewMMO);
3539 MIB.buildBSwap(Dst, LoadDst);
3544 /// Check if the store \p Store is a truncstore that can be merged. That is,
3545 /// it's a store of a shifted value of \p SrcVal. If \p SrcVal is an empty
3546 /// Register then it does not need to match and SrcVal is set to the source
3548 /// On match, returns the start byte offset of the \p SrcVal that is being
3550 static Optional<int64_t> getTruncStoreByteOffset(GStore &Store, Register &SrcVal,
3551 MachineRegisterInfo &MRI) {
3553 if (!mi_match(Store.getValueReg(), MRI, m_GTrunc(m_Reg(TruncVal))))
3556 // The shift amount must be a constant multiple of the narrow type.
3557 // It determines the byte offset into the wide source value "y".
3559 // x = G_LSHR y, ShiftAmtC
3562 Register FoundSrcVal;
3564 if (!mi_match(TruncVal, MRI,
3565 m_any_of(m_GLShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt)),
3566 m_GAShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt))))) {
3567 if (!SrcVal.isValid() || TruncVal == SrcVal) {
3568 if (!SrcVal.isValid())
3570 return 0; // If it's the lowest index store.
3575 unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits();
3576 if (ShiftAmt % NarrowBits != 0)
3578 const unsigned Offset = ShiftAmt / NarrowBits;
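  // Illustrative example: an 8-bit truncating store of (lshr y, 16) has
  // NarrowBits == 8 and ShiftAmt == 16, so it covers byte offset 2 of "y".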
3580 if (SrcVal.isValid() && FoundSrcVal != SrcVal)
3583 if (!SrcVal.isValid())
3584 SrcVal = FoundSrcVal;
3585 else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal))
3590 /// Match a pattern where a wide type scalar value is stored by several narrow
3591 /// stores. Fold it into a single store or a BSWAP and a store if the target
/// supports it.
3594 /// Assuming little endian target:
3597 /// p[0] = (val >> 0) & 0xFF;
3598 /// p[1] = (val >> 8) & 0xFF;
3599 /// p[2] = (val >> 16) & 0xFF;
3600 /// p[3] = (val >> 24) & 0xFF;
3602 /// *((i32)p) = val;
3606 /// p[0] = (val >> 24) & 0xFF;
3607 /// p[1] = (val >> 16) & 0xFF;
3608 /// p[2] = (val >> 8) & 0xFF;
3609 /// p[3] = (val >> 0) & 0xFF;
3611 /// *((i32)p) = BSWAP(val);
3612 bool CombinerHelper::matchTruncStoreMerge(MachineInstr &MI,
3613 MergeTruncStoresInfo &MatchInfo) {
3614 auto &StoreMI = cast<GStore>(MI);
3615 LLT MemTy = StoreMI.getMMO().getMemoryType();
3617 // We only handle merging simple stores of 1-4 bytes.
3618 if (!MemTy.isScalar())
3620 switch (MemTy.getSizeInBits()) {
3628 if (!StoreMI.isSimple())
3631 // We do a simple search for mergeable stores prior to this one.
3632 // Any potential alias hazard along the way terminates the search.
3633 SmallVector<GStore *> FoundStores;
3635 // We're looking for:
3636 // 1) a (store(trunc(...)))
3637 // 2) of an LSHR/ASHR of a single wide value, by the appropriate shift to get
3638 // the partial value stored.
3639 // 3) where the offsets form either a little or big-endian sequence.
3641 auto &LastStore = StoreMI;
3643 // The single base pointer that all stores must use.
3646 if (!mi_match(LastStore.getPointerReg(), MRI,
3647 m_GPtrAdd(m_Reg(BaseReg), m_ICst(LastOffset)))) {
3648 BaseReg = LastStore.getPointerReg();
3652 GStore *LowestIdxStore = &LastStore;
3653 int64_t LowestIdxOffset = LastOffset;
3655 Register WideSrcVal;
3656 auto LowestShiftAmt = getTruncStoreByteOffset(LastStore, WideSrcVal, MRI);
3657 if (!LowestShiftAmt)
3658 return false; // Didn't match a trunc.
3659 assert(WideSrcVal.isValid());
3661 LLT WideStoreTy = MRI.getType(WideSrcVal);
3662 // The wide type might not be a multiple of the memory type, e.g. s48 and s32.
3663 if (WideStoreTy.getSizeInBits() % MemTy.getSizeInBits() != 0)
3665 const unsigned NumStoresRequired =
3666 WideStoreTy.getSizeInBits() / MemTy.getSizeInBits();
3668 SmallVector<int64_t, 8> OffsetMap(NumStoresRequired, INT64_MAX);
3669 OffsetMap[*LowestShiftAmt] = LastOffset;
3670 FoundStores.emplace_back(&LastStore);
3672 // Search upwards through the block for more stores.
3673 // We use a search threshold of 10 instructions here because the combiner
3674 // works top-down within a block, and we don't want to search an unbounded
3675 // number of predecessor instructions trying to find matching stores.
3676 // If we moved this optimization into a separate pass then we could probably
3677 // use a more efficient search without having a hard-coded threshold.
3678 const int MaxInstsToCheck = 10;
3679 int NumInstsChecked = 0;
3680 for (auto II = ++LastStore.getReverseIterator();
3681 II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck;
3685 if ((NewStore = dyn_cast<GStore>(&*II))) {
3686 if (NewStore->getMMO().getMemoryType() != MemTy || !NewStore->isSimple())
3688 } else if (II->isLoadFoldBarrier() || II->mayLoad()) {
3691 continue; // This is a safe instruction we can look past.
3694 Register NewBaseReg;
3696 // Check we're storing to the same base + some offset.
3697 if (!mi_match(NewStore->getPointerReg(), MRI,
3698 m_GPtrAdd(m_Reg(NewBaseReg), m_ICst(MemOffset)))) {
3699 NewBaseReg = NewStore->getPointerReg();
3702 if (BaseReg != NewBaseReg)
3705 auto ShiftByteOffset = getTruncStoreByteOffset(*NewStore, WideSrcVal, MRI);
3706 if (!ShiftByteOffset)
3708 if (MemOffset < LowestIdxOffset) {
3709 LowestIdxOffset = MemOffset;
3710 LowestIdxStore = NewStore;
3713 // Map the offset in the store and the offset in the combined value, and
3714 // early return if it has been set before.
3715 if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired ||
3716 OffsetMap[*ShiftByteOffset] != INT64_MAX)
3718 OffsetMap[*ShiftByteOffset] = MemOffset;
3720 FoundStores.emplace_back(NewStore);
3721 // Reset counter since we've found a matching inst.
3722 NumInstsChecked = 0;
3723 if (FoundStores.size() == NumStoresRequired)
3727 if (FoundStores.size() != NumStoresRequired) {
3731 const auto &DL = LastStore.getMF()->getDataLayout();
3732 auto &C = LastStore.getMF()->getFunction().getContext();
3733 // Check that a store of the wide type is both allowed and fast on the target
3735 bool Allowed = getTargetLowering().allowsMemoryAccess(
3736 C, DL, WideStoreTy, LowestIdxStore->getMMO(), &Fast);
3737 if (!Allowed || !Fast)
3740 // Check if the pieces of the value are going to the expected places in memory
3741 // to merge the stores.
3742 unsigned NarrowBits = MemTy.getScalarSizeInBits();
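  // Illustrative example: for four 1-byte stores of a 32-bit value with
  // LowestIdxOffset == 0, a little-endian match requires the byte taken from
  // bit offset 8*i of the wide value to be stored at memory offset i.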
3743 auto checkOffsets = [&](bool MatchLittleEndian) {
3744 if (MatchLittleEndian) {
3745 for (unsigned i = 0; i != NumStoresRequired; ++i)
3746 if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset)
3748 } else { // MatchBigEndian by reversing loop counter.
3749 for (unsigned i = 0, j = NumStoresRequired - 1; i != NumStoresRequired;
3751 if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset)
3757 // Check if the offsets line up for the native data layout of this target.
3758 bool NeedBswap = false;
3759 bool NeedRotate = false;
3760 if (!checkOffsets(DL.isLittleEndian())) {
3761 // Special-case: check if byte offsets line up for the opposite endian.
3762 if (NarrowBits == 8 && checkOffsets(DL.isBigEndian()))
3764 else if (NumStoresRequired == 2 && checkOffsets(DL.isBigEndian()))
3771 !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {WideStoreTy}}))
3774 !isLegalOrBeforeLegalizer({TargetOpcode::G_ROTR, {WideStoreTy}}))
3777 MatchInfo.NeedBSwap = NeedBswap;
3778 MatchInfo.NeedRotate = NeedRotate;
3779 MatchInfo.LowestIdxStore = LowestIdxStore;
3780 MatchInfo.WideSrcVal = WideSrcVal;
3781 MatchInfo.FoundStores = std::move(FoundStores);
3785 void CombinerHelper::applyTruncStoreMerge(MachineInstr &MI,
3786 MergeTruncStoresInfo &MatchInfo) {
3788 Builder.setInstrAndDebugLoc(MI);
3789 Register WideSrcVal = MatchInfo.WideSrcVal;
3790 LLT WideStoreTy = MRI.getType(WideSrcVal);
3792 if (MatchInfo.NeedBSwap) {
3793 WideSrcVal = Builder.buildBSwap(WideStoreTy, WideSrcVal).getReg(0);
3794 } else if (MatchInfo.NeedRotate) {
3795 assert(WideStoreTy.getSizeInBits() % 2 == 0 &&
3796 "Unexpected type for rotate");
3798 Builder.buildConstant(WideStoreTy, WideStoreTy.getSizeInBits() / 2);
3800 Builder.buildRotateRight(WideStoreTy, WideSrcVal, RotAmt).getReg(0);
3803 Builder.buildStore(WideSrcVal, MatchInfo.LowestIdxStore->getPointerReg(),
3804 MatchInfo.LowestIdxStore->getMMO().getPointerInfo(),
3805 MatchInfo.LowestIdxStore->getMMO().getAlign());
3807 // Erase the old stores.
3808 for (auto *ST : MatchInfo.FoundStores)
3809 ST->eraseFromParent();
3812 bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI,
3813 MachineInstr *&ExtMI) {
3814 assert(MI.getOpcode() == TargetOpcode::G_PHI);
3816 Register DstReg = MI.getOperand(0).getReg();
3818 // TODO: Extending a vector may be expensive, don't do this until heuristics
3820 if (MRI.getType(DstReg).isVector())
3823 // Try to match a phi, whose only use is an extend.
3824 if (!MRI.hasOneNonDBGUse(DstReg))
3826 ExtMI = &*MRI.use_instr_nodbg_begin(DstReg);
3827 switch (ExtMI->getOpcode()) {
3828 case TargetOpcode::G_ANYEXT:
3829 return true; // G_ANYEXT is usually free.
3830 case TargetOpcode::G_ZEXT:
3831 case TargetOpcode::G_SEXT:
3837 // If the target is likely to fold this extend away, don't propagate.
3838 if (Builder.getTII().isExtendLikelyToBeFolded(*ExtMI, MRI))
3841 // We don't want to propagate the extends unless there's a good chance that
3842 // they'll be optimized in some way.
3843 // Collect the unique incoming values.
3844 SmallPtrSet<MachineInstr *, 4> InSrcs;
3845 for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
3846 auto *DefMI = getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI);
3847 switch (DefMI->getOpcode()) {
3848 case TargetOpcode::G_LOAD:
3849 case TargetOpcode::G_TRUNC:
3850 case TargetOpcode::G_SEXT:
3851 case TargetOpcode::G_ZEXT:
3852 case TargetOpcode::G_ANYEXT:
3853 case TargetOpcode::G_CONSTANT:
3854 InSrcs.insert(getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI));
3855 // Don't try to propagate if there are too many places to create new
3856 // extends; chances are it'll increase code size.
3857 if (InSrcs.size() > 2)
3867 void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI,
3868 MachineInstr *&ExtMI) {
3869 assert(MI.getOpcode() == TargetOpcode::G_PHI);
3870 Register DstReg = ExtMI->getOperand(0).getReg();
3871 LLT ExtTy = MRI.getType(DstReg);
3873 // Propagate the extension into each incoming register's defining block.
3874 // Use a SetVector here because PHIs can have duplicate edges, and we want
3875 // deterministic iteration order.
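  // Conceptually, for
  //   bb2: %p:s16 = G_PHI %a(bb0), %b(bb1)
  //        %e:s32 = G_SEXT %p
  // we build %ea:s32 = G_SEXT %a in bb0 and %eb:s32 = G_SEXT %b in bb1, then
  // rewrite the extend as %e:s32 = G_PHI %ea(bb0), %eb(bb1).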
3876 SmallSetVector<MachineInstr *, 8> SrcMIs;
3877 SmallDenseMap<MachineInstr *, MachineInstr *, 8> OldToNewSrcMap;
3878 for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) {
3879 auto *SrcMI = MRI.getVRegDef(MI.getOperand(SrcIdx).getReg());
3880 if (!SrcMIs.insert(SrcMI))
3883 // Build an extend after each src inst.
3884 auto *MBB = SrcMI->getParent();
3885 MachineBasicBlock::iterator InsertPt = ++SrcMI->getIterator();
3886 if (InsertPt != MBB->end() && InsertPt->isPHI())
3887 InsertPt = MBB->getFirstNonPHI();
3889 Builder.setInsertPt(*SrcMI->getParent(), InsertPt);
3890 Builder.setDebugLoc(MI.getDebugLoc());
3891 auto NewExt = Builder.buildExtOrTrunc(ExtMI->getOpcode(), ExtTy,
3892 SrcMI->getOperand(0).getReg());
3893 OldToNewSrcMap[SrcMI] = NewExt;
3896 // Create a new phi with the extended inputs.
3897 Builder.setInstrAndDebugLoc(MI);
3898 auto NewPhi = Builder.buildInstrNoInsert(TargetOpcode::G_PHI);
3899 NewPhi.addDef(DstReg);
3900 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
3902 NewPhi.addMBB(MO.getMBB());
3905 auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(MO.getReg())];
3906 NewPhi.addUse(NewSrc->getOperand(0).getReg());
3908 Builder.insertInstr(NewPhi);
3909 ExtMI->eraseFromParent();
3912 bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI,
3914 assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
3915 // If we have a constant index, look for a G_BUILD_VECTOR source
3916 // and find the source register that the index maps to.
3917 Register SrcVec = MI.getOperand(1).getReg();
3918 LLT SrcTy = MRI.getType(SrcVec);
3919 if (!isLegalOrBeforeLegalizer(
3920 {TargetOpcode::G_BUILD_VECTOR, {SrcTy, SrcTy.getElementType()}}))
3923 auto Cst = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
3924 if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements())
3927 unsigned VecIdx = Cst->Value.getZExtValue();
3928 MachineInstr *BuildVecMI =
3929 getOpcodeDef(TargetOpcode::G_BUILD_VECTOR, SrcVec, MRI);
3931 BuildVecMI = getOpcodeDef(TargetOpcode::G_BUILD_VECTOR_TRUNC, SrcVec, MRI);
3934 LLT ScalarTy = MRI.getType(BuildVecMI->getOperand(1).getReg());
3935 if (!isLegalOrBeforeLegalizer(
3936 {TargetOpcode::G_BUILD_VECTOR_TRUNC, {SrcTy, ScalarTy}}))
3940 EVT Ty(getMVTForLLT(SrcTy));
3941 if (!MRI.hasOneNonDBGUse(SrcVec) &&
3942 !getTargetLowering().aggressivelyPreferBuildVectorSources(Ty))
3945 Reg = BuildVecMI->getOperand(VecIdx + 1).getReg();
3949 void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
3951 // Check the type of the register, since it may have come from a
3952 // G_BUILD_VECTOR_TRUNC.
3953 LLT ScalarTy = MRI.getType(Reg);
3954 Register DstReg = MI.getOperand(0).getReg();
3955 LLT DstTy = MRI.getType(DstReg);
3957 Builder.setInstrAndDebugLoc(MI);
3958 if (ScalarTy != DstTy) {
3959 assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits());
3960 Builder.buildTrunc(DstReg, Reg);
3961 MI.eraseFromParent();
3964 replaceSingleDefInstWithReg(MI, Reg);
3967 bool CombinerHelper::matchExtractAllEltsFromBuildVector(
3969 SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
3970 assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3971 // This combine tries to find build_vector's which have every source element
3972 // extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like
3973 // masked load scalarization are run late in the pipeline. There's already
3974 // a combine for a similar pattern starting from the extract, but that
3975 // doesn't attempt to do it if there are multiple uses of the build_vector,
3976 // which in this case is true. Starting the combine from the build_vector
3977 // feels more natural than trying to find sibling nodes of extracts.
3979 // %vec(<4 x s32>) = G_BUILD_VECTOR %s1(s32), %s2, %s3, %s4
3980 // %ext1 = G_EXTRACT_VECTOR_ELT %vec, 0
3981 // %ext2 = G_EXTRACT_VECTOR_ELT %vec, 1
3982 // %ext3 = G_EXTRACT_VECTOR_ELT %vec, 2
3983 // %ext4 = G_EXTRACT_VECTOR_ELT %vec, 3
3985 // replace ext{1,2,3,4} with %s{1,2,3,4}
3987 Register DstReg = MI.getOperand(0).getReg();
3988 LLT DstTy = MRI.getType(DstReg);
3989 unsigned NumElts = DstTy.getNumElements();
3991 SmallBitVector ExtractedElts(NumElts);
3992 for (MachineInstr &II : MRI.use_nodbg_instructions(DstReg)) {
3993 if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
3995 auto Cst = getIConstantVRegVal(II.getOperand(2).getReg(), MRI);
3998 unsigned Idx = Cst->getZExtValue();
4000 return false; // Out of range.
4001 ExtractedElts.set(Idx);
4002 SrcDstPairs.emplace_back(
4003 std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));
4005 // Match if every element was extracted.
4006 return ExtractedElts.all();
4009 void CombinerHelper::applyExtractAllEltsFromBuildVector(
4011 SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
4012 assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
4013 for (auto &Pair : SrcDstPairs) {
4014 auto *ExtMI = Pair.second;
4015 replaceRegWith(MRI, ExtMI->getOperand(0).getReg(), Pair.first);
4016 ExtMI->eraseFromParent();
4018 MI.eraseFromParent();
4021 void CombinerHelper::applyBuildFn(
4022 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4023 Builder.setInstrAndDebugLoc(MI);
4025 MI.eraseFromParent();
4028 void CombinerHelper::applyBuildFnNoErase(
4029 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4030 Builder.setInstrAndDebugLoc(MI);
4034 bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI,
4035 BuildFnTy &MatchInfo) {
4036 assert(MI.getOpcode() == TargetOpcode::G_OR);
4038 Register Dst = MI.getOperand(0).getReg();
4039 LLT Ty = MRI.getType(Dst);
4040 unsigned BitWidth = Ty.getScalarSizeInBits();
4042 Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
4043 unsigned FshOpc = 0;
4045 // Match (or (shl ...), (lshr ...)).
4046 if (!mi_match(Dst, MRI,
4047 // m_GOr() handles the commuted version as well.
4048 m_GOr(m_GShl(m_Reg(ShlSrc), m_Reg(ShlAmt)),
4049 m_GLShr(m_Reg(LShrSrc), m_Reg(LShrAmt)))))
4052 // Given constants C0 and C1 such that C0 + C1 is bit-width:
4053 // (or (shl x, C0), (lshr y, C1)) -> (fshl x, y, C0) or (fshr x, y, C1)
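  // Illustrative s32 example: (or (shl x, 8), (lshr y, 24)) is fshr(x, y, 24),
  // since the shift amounts sum to the bit-width.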
4054 int64_t CstShlAmt, CstLShrAmt;
4055 if (mi_match(ShlAmt, MRI, m_ICstOrSplat(CstShlAmt)) &&
4056 mi_match(LShrAmt, MRI, m_ICstOrSplat(CstLShrAmt)) &&
4057 CstShlAmt + CstLShrAmt == BitWidth) {
4058 FshOpc = TargetOpcode::G_FSHR;
4061 } else if (mi_match(LShrAmt, MRI,
4062 m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
4064 // (or (shl x, amt), (lshr y, (sub bw, amt))) -> (fshl x, y, amt)
4065 FshOpc = TargetOpcode::G_FSHL;
4067 } else if (mi_match(ShlAmt, MRI,
4068 m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
4070 // (or (shl x, (sub bw, amt)), (lshr y, amt)) -> (fshr x, y, amt)
4071 FshOpc = TargetOpcode::G_FSHR;
4077 LLT AmtTy = MRI.getType(Amt);
4078 if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}}))
4081 MatchInfo = [=](MachineIRBuilder &B) {
4082 B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});
4087 /// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
4088 bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) {
4089 unsigned Opc = MI.getOpcode();
4090 assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
4091 Register X = MI.getOperand(1).getReg();
4092 Register Y = MI.getOperand(2).getReg();
4095 unsigned RotateOpc =
4096 Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;
4097 return isLegalOrBeforeLegalizer({RotateOpc, {MRI.getType(X), MRI.getType(Y)}});
4100 void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) {
4101 unsigned Opc = MI.getOpcode();
4102 assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
4103 bool IsFSHL = Opc == TargetOpcode::G_FSHL;
4104 Observer.changingInstr(MI);
4105 MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL
4106 : TargetOpcode::G_ROTR));
4107 MI.removeOperand(2);
4108 Observer.changedInstr(MI);
4111 // Fold (rot x, c) -> (rot x, c % BitSize)
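// E.g. (G_ROTL x:s32, 37) is equivalent to (G_ROTL x:s32, 5); the apply step
// below rewrites the amount with a G_UREM by the bit size.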
4112 bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) {
4113 assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
4114 MI.getOpcode() == TargetOpcode::G_ROTR);
4116 MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
4117 Register AmtReg = MI.getOperand(2).getReg();
4118 bool OutOfRange = false;
4119 auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
4120 if (auto *CI = dyn_cast<ConstantInt>(C))
4121 OutOfRange |= CI->getValue().uge(Bitsize);
4124 return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange;
4127 void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) {
4128 assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
4129 MI.getOpcode() == TargetOpcode::G_ROTR);
4131 MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
4132 Builder.setInstrAndDebugLoc(MI);
4133 Register Amt = MI.getOperand(2).getReg();
4134 LLT AmtTy = MRI.getType(Amt);
4135 auto Bits = Builder.buildConstant(AmtTy, Bitsize);
4136 Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0);
4137 Observer.changingInstr(MI);
4138 MI.getOperand(2).setReg(Amt);
4139 Observer.changedInstr(MI);
4142 bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
4143 int64_t &MatchInfo) {
4144 assert(MI.getOpcode() == TargetOpcode::G_ICMP);
4145 auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
4146 auto KnownLHS = KB->getKnownBits(MI.getOperand(2).getReg());
4147 auto KnownRHS = KB->getKnownBits(MI.getOperand(3).getReg());
4148 Optional<bool> KnownVal;
4151 llvm_unreachable("Unexpected G_ICMP predicate?");
4152 case CmpInst::ICMP_EQ:
4153 KnownVal = KnownBits::eq(KnownLHS, KnownRHS);
4155 case CmpInst::ICMP_NE:
4156 KnownVal = KnownBits::ne(KnownLHS, KnownRHS);
4158 case CmpInst::ICMP_SGE:
4159 KnownVal = KnownBits::sge(KnownLHS, KnownRHS);
4161 case CmpInst::ICMP_SGT:
4162 KnownVal = KnownBits::sgt(KnownLHS, KnownRHS);
4164 case CmpInst::ICMP_SLE:
4165 KnownVal = KnownBits::sle(KnownLHS, KnownRHS);
4167 case CmpInst::ICMP_SLT:
4168 KnownVal = KnownBits::slt(KnownLHS, KnownRHS);
4170 case CmpInst::ICMP_UGE:
4171 KnownVal = KnownBits::uge(KnownLHS, KnownRHS);
4173 case CmpInst::ICMP_UGT:
4174 KnownVal = KnownBits::ugt(KnownLHS, KnownRHS);
4176 case CmpInst::ICMP_ULE:
4177 KnownVal = KnownBits::ule(KnownLHS, KnownRHS);
4179 case CmpInst::ICMP_ULT:
4180 KnownVal = KnownBits::ult(KnownLHS, KnownRHS);
4187 ? getICmpTrueVal(getTargetLowering(),
4189 MRI.getType(MI.getOperand(0).getReg()).isVector(),
4195 bool CombinerHelper::matchICmpToLHSKnownBits(
4196 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4197 assert(MI.getOpcode() == TargetOpcode::G_ICMP);
4200 // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
4201 // %cmp = G_ICMP ne %x, 0
4205 // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
4206 // %cmp = G_ICMP eq %x, 1
4208 // We can replace %cmp with %x assuming true is 1 on the target.
4209 auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
4210 if (!CmpInst::isEquality(Pred))
4212 Register Dst = MI.getOperand(0).getReg();
4213 LLT DstTy = MRI.getType(Dst);
4214 if (getICmpTrueVal(getTargetLowering(), DstTy.isVector(),
4215 /* IsFP = */ false) != 1)
4217 int64_t OneOrZero = Pred == CmpInst::ICMP_EQ;
4218 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICst(OneOrZero)))
4220 Register LHS = MI.getOperand(2).getReg();
4221 auto KnownLHS = KB->getKnownBits(LHS);
4222 if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)
4224 // Make sure replacing Dst with the LHS is a legal operation.
4225 LLT LHSTy = MRI.getType(LHS);
4226 unsigned LHSSize = LHSTy.getSizeInBits();
4227 unsigned DstSize = DstTy.getSizeInBits();
4228 unsigned Op = TargetOpcode::COPY;
4229 if (DstSize != LHSSize)
4230 Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;
4231 if (!isLegalOrBeforeLegalizer({Op, {DstTy, LHSTy}}))
4233 MatchInfo = [=](MachineIRBuilder &B) { B.buildInstr(Op, {Dst}, {LHS}); };
4237 // Replace (and (or x, c1), c2) with (and x, c2) iff c1 & c2 == 0
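// E.g. (and (or x, 0xFF00), 0x00FF) -> (and x, 0x00FF): the OR cannot set any
// bit that survives the AND mask.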
4238 bool CombinerHelper::matchAndOrDisjointMask(
4239 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4240 assert(MI.getOpcode() == TargetOpcode::G_AND);
4242 // Ignore vector types to simplify matching the two constants.
4243 // TODO: do this for vectors and scalars via a demanded bits analysis.
4244 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
4249 Register AndMaskReg;
4250 int64_t AndMaskBits;
4252 if (!mi_match(MI, MRI,
4253 m_GAnd(m_GOr(m_Reg(Src), m_ICst(OrMaskBits)),
4254 m_all_of(m_ICst(AndMaskBits), m_Reg(AndMaskReg)))))
4257 // Check if OrMask could turn on any bits in Src.
4258 if (AndMaskBits & OrMaskBits)
4261 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4262 Observer.changingInstr(MI);
4263 // Canonicalize the result to have the constant on the RHS.
4264 if (MI.getOperand(1).getReg() == AndMaskReg)
4265 MI.getOperand(2).setReg(AndMaskReg);
4266 MI.getOperand(1).setReg(Src);
4267 Observer.changedInstr(MI);
4272 /// Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
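/// E.g. (G_SEXT_INREG (G_LSHR x, 4), 8) on s32 sign-extends the 8-bit field
/// starting at bit 4 of x, i.e. G_SBFX x, 4, 8.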
4273 bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
4274 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4275 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
4276 Register Dst = MI.getOperand(0).getReg();
4277 Register Src = MI.getOperand(1).getReg();
4278 LLT Ty = MRI.getType(Src);
4279 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4280 if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}}))
4282 int64_t Width = MI.getOperand(2).getImm();
4287 m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)),
4288 m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm))))))
4290 if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())
4293 MatchInfo = [=](MachineIRBuilder &B) {
4294 auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
4295 auto Cst2 = B.buildConstant(ExtractTy, Width);
4296 B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);
4301 /// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants.
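/// E.g. (G_AND (G_LSHR x, 4), 0xFF) becomes G_UBFX x, 4, 8: a mask of 8
/// trailing ones selects an 8-bit field starting at bit 4.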
4302 bool CombinerHelper::matchBitfieldExtractFromAnd(
4303 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4304 assert(MI.getOpcode() == TargetOpcode::G_AND);
4305 Register Dst = MI.getOperand(0).getReg();
4306 LLT Ty = MRI.getType(Dst);
4307 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4308 if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
4309 TargetOpcode::G_UBFX, Ty, ExtractTy))
4312 int64_t AndImm, LSBImm;
4314 const unsigned Size = Ty.getScalarSizeInBits();
4315 if (!mi_match(MI.getOperand(0).getReg(), MRI,
4316 m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))),
4320 // The mask is a mask of the low bits iff imm & (imm+1) == 0.
4321 auto MaybeMask = static_cast<uint64_t>(AndImm);
4322 if (MaybeMask & (MaybeMask + 1))
4325 // LSB must fit within the register.
4326 if (static_cast<uint64_t>(LSBImm) >= Size)
4329 uint64_t Width = APInt(Size, AndImm).countTrailingOnes();
4330 MatchInfo = [=](MachineIRBuilder &B) {
4331 auto WidthCst = B.buildConstant(ExtractTy, Width);
4332 auto LSBCst = B.buildConstant(ExtractTy, LSBImm);
4333 B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});
4338 bool CombinerHelper::matchBitfieldExtractFromShr(
4339 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4340 const unsigned Opcode = MI.getOpcode();
4341 assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);
4343 const Register Dst = MI.getOperand(0).getReg();
4345 const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR
4346 ? TargetOpcode::G_SBFX
4347 : TargetOpcode::G_UBFX;
4349 // Check if the type we would use for the extract is legal
4350 LLT Ty = MRI.getType(Dst);
4351 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4352 if (!LI || !LI->isLegalOrCustom({ExtrOpcode, {Ty, ExtractTy}}))
4358 const unsigned Size = Ty.getScalarSizeInBits();
4360 // Try to match shr (shl x, c1), c2
4361 if (!mi_match(Dst, MRI,
4363 m_OneNonDBGUse(m_GShl(m_Reg(ShlSrc), m_ICst(ShlAmt))),
4367 // Make sure that the shift sizes can fit a bitfield extract
4368 if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size)
4371 // Skip this combine if the G_SEXT_INREG combine could handle it
4372 if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt)
4375 // Calculate start position and width of the extract
4376 const int64_t Pos = ShrAmt - ShlAmt;
4377 const int64_t Width = Size - ShrAmt;
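  // Illustrative s32 example: (lshr (shl x, 8), 16) extracts bits [8, 24) of
  // x, i.e. G_UBFX x, Pos = 8, Width = 16.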
4379 MatchInfo = [=](MachineIRBuilder &B) {
4380 auto WidthCst = B.buildConstant(ExtractTy, Width);
4381 auto PosCst = B.buildConstant(ExtractTy, Pos);
4382 B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst});
4387 bool CombinerHelper::matchBitfieldExtractFromShrAnd(
4388 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4389 const unsigned Opcode = MI.getOpcode();
4390 assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR);
4392 const Register Dst = MI.getOperand(0).getReg();
4393 LLT Ty = MRI.getType(Dst);
4394 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4395 if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
4396 TargetOpcode::G_UBFX, Ty, ExtractTy))
4399 // Try to match shr (and x, c1), c2
4403 if (!mi_match(Dst, MRI,
4405 m_OneNonDBGUse(m_GAnd(m_Reg(AndSrc), m_ICst(SMask))),
4409 const unsigned Size = Ty.getScalarSizeInBits();
4410 if (ShrAmt < 0 || ShrAmt >= Size)
4413 // If the shift subsumes the mask, emit the 0 directly.
4414 if (0 == (SMask >> ShrAmt)) {
4415 MatchInfo = [=](MachineIRBuilder &B) {
4416 B.buildConstant(Dst, 0);
4421 // Check that ubfx can do the extraction, with no holes in the mask.
4422 uint64_t UMask = SMask;
4423 UMask |= maskTrailingOnes<uint64_t>(ShrAmt);
4424 UMask &= maskTrailingOnes<uint64_t>(Size);
4425 if (!isMask_64(UMask))
4428 // Calculate start position and width of the extract.
4429 const int64_t Pos = ShrAmt;
4430 const int64_t Width = countTrailingOnes(UMask) - ShrAmt;
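  // Illustrative example: (lshr (and x, 0x0FF0), 4) gives UMask == 0x0FFF,
  // so Pos == 4 and Width == 8, i.e. G_UBFX x, 4, 8.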
4432 // It's preferable to keep the shift, rather than form G_SBFX.
4433 // TODO: remove the G_AND via demanded bits analysis.
4434 if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt == Size)
4437 MatchInfo = [=](MachineIRBuilder &B) {
4438 auto WidthCst = B.buildConstant(ExtractTy, Width);
4439 auto PosCst = B.buildConstant(ExtractTy, Pos);
4440 B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst});
4445 bool CombinerHelper::reassociationCanBreakAddressingModePattern(
4446 MachineInstr &PtrAdd) {
4447 assert(PtrAdd.getOpcode() == TargetOpcode::G_PTR_ADD);
4449 Register Src1Reg = PtrAdd.getOperand(1).getReg();
4450 MachineInstr *Src1Def = getOpcodeDef(TargetOpcode::G_PTR_ADD, Src1Reg, MRI);
4454 Register Src2Reg = PtrAdd.getOperand(2).getReg();
4456 if (MRI.hasOneNonDBGUse(Src1Reg))
4459 auto C1 = getIConstantVRegVal(Src1Def->getOperand(2).getReg(), MRI);
4462 auto C2 = getIConstantVRegVal(Src2Reg, MRI);
4466 const APInt &C1APIntVal = *C1;
4467 const APInt &C2APIntVal = *C2;
4468 const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue();
4470 for (auto &UseMI : MRI.use_nodbg_instructions(Src1Reg)) {
4471 // This combine may end up running before ptrtoint/inttoptr combines
4472 // manage to eliminate redundant conversions, so try to look through them.
4473 MachineInstr *ConvUseMI = &UseMI;
4474 unsigned ConvUseOpc = ConvUseMI->getOpcode();
4475 while (ConvUseOpc == TargetOpcode::G_INTTOPTR ||
4476 ConvUseOpc == TargetOpcode::G_PTRTOINT) {
4477 Register DefReg = ConvUseMI->getOperand(0).getReg();
4478 if (!MRI.hasOneNonDBGUse(DefReg))
4480 ConvUseMI = &*MRI.use_instr_nodbg_begin(DefReg);
4481 ConvUseOpc = ConvUseMI->getOpcode();
4483 auto LoadStore = ConvUseOpc == TargetOpcode::G_LOAD ||
4484 ConvUseOpc == TargetOpcode::G_STORE;
4487 // Is x[offset2] already not a legal addressing mode? If so then
4488 // reassociating the constants breaks nothing (we test offset2 because
4489 // that's the one we hope to fold into the load or store).
4490 TargetLoweringBase::AddrMode AM;
4491 AM.HasBaseReg = true;
4492 AM.BaseOffs = C2APIntVal.getSExtValue();
4494 MRI.getType(ConvUseMI->getOperand(1).getReg()).getAddressSpace();
4496 getTypeForLLT(MRI.getType(ConvUseMI->getOperand(0).getReg()),
4497 PtrAdd.getMF()->getFunction().getContext());
4498 const auto &TLI = *PtrAdd.getMF()->getSubtarget().getTargetLowering();
4499 if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4503 // Would x[offset1+offset2] still be a legal addressing mode?
4504 AM.BaseOffs = CombinedValue;
4505 if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4513 bool CombinerHelper::matchReassocConstantInnerRHS(GPtrAdd &MI,
4515 BuildFnTy &MatchInfo) {
4516 // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4517 Register Src1Reg = MI.getOperand(1).getReg();
4518 if (RHS->getOpcode() != TargetOpcode::G_ADD)
4520 auto C2 = getIConstantVRegVal(RHS->getOperand(2).getReg(), MRI);
4524 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4525 LLT PtrTy = MRI.getType(MI.getOperand(0).getReg());
4528 Builder.buildPtrAdd(PtrTy, Src1Reg, RHS->getOperand(1).getReg());
4529 Observer.changingInstr(MI);
4530 MI.getOperand(1).setReg(NewBase.getReg(0));
4531 MI.getOperand(2).setReg(RHS->getOperand(2).getReg());
4532 Observer.changedInstr(MI);
4534 return !reassociationCanBreakAddressingModePattern(MI);
4537 bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI,
4540 BuildFnTy &MatchInfo) {
4541 // (G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD X, Y), C)
4542 // if and only if (G_PTR_ADD X, C) has one use.
4544 Optional<ValueAndVReg> LHSCstOff;
4545 if (!mi_match(MI.getBaseReg(), MRI,
4546 m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff)))))
4549 auto *LHSPtrAdd = cast<GPtrAdd>(LHS);
4550 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4551 // When we change LHSPtrAdd's offset register we might cause it to use a reg
4552 // before its def. Sink it to just before the outer PTR_ADD to ensure this
// doesn't happen.
4554 LHSPtrAdd->moveBefore(&MI);
4555 Register RHSReg = MI.getOffsetReg();
4556 Observer.changingInstr(MI);
4557 MI.getOperand(2).setReg(LHSCstOff->VReg);
4558 Observer.changedInstr(MI);
4559 Observer.changingInstr(*LHSPtrAdd);
4560 LHSPtrAdd->getOperand(2).setReg(RHSReg);
4561 Observer.changedInstr(*LHSPtrAdd);
4563 return !reassociationCanBreakAddressingModePattern(MI);
4566 bool CombinerHelper::matchReassocFoldConstantsInSubTree(GPtrAdd &MI,
4569 BuildFnTy &MatchInfo) {
4570 // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4571 auto *LHSPtrAdd = dyn_cast<GPtrAdd>(LHS);
4575 Register Src2Reg = MI.getOperand(2).getReg();
4576 Register LHSSrc1 = LHSPtrAdd->getBaseReg();
4577 Register LHSSrc2 = LHSPtrAdd->getOffsetReg();
4578 auto C1 = getIConstantVRegVal(LHSSrc2, MRI);
4581 auto C2 = getIConstantVRegVal(Src2Reg, MRI);
4585 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4586 auto NewCst = B.buildConstant(MRI.getType(Src2Reg), *C1 + *C2);
4587 Observer.changingInstr(MI);
4588 MI.getOperand(1).setReg(LHSSrc1);
4589 MI.getOperand(2).setReg(NewCst.getReg(0));
4590 Observer.changedInstr(MI);
4592 return !reassociationCanBreakAddressingModePattern(MI);
4595 bool CombinerHelper::matchReassocPtrAdd(MachineInstr &MI,
4596 BuildFnTy &MatchInfo) {
4597 auto &PtrAdd = cast<GPtrAdd>(MI);
4598 // We're trying to match a few pointer computation patterns here for
4599 // re-association opportunities.
4600 // 1) Isolating a constant operand to be on the RHS, e.g.:
4601 // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4603 // 2) Folding two constants in each sub-tree as long as such folding
4604 // doesn't break a legal addressing mode.
4605 // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4607 // 3) Move a constant from the LHS of an inner op to the RHS of the outer.
4608 // (G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD X, Y), C)
4609 // iff (G_PTR_ADD X, C) has one use.
4610 MachineInstr *LHS = MRI.getVRegDef(PtrAdd.getBaseReg());
4611 MachineInstr *RHS = MRI.getVRegDef(PtrAdd.getOffsetReg());
4613 // Try to match example 2.
4614 if (matchReassocFoldConstantsInSubTree(PtrAdd, LHS, RHS, MatchInfo))
4617 // Try to match example 3.
4618 if (matchReassocConstantInnerLHS(PtrAdd, LHS, RHS, MatchInfo))
4621 // Try to match example 1.
4622 if (matchReassocConstantInnerRHS(PtrAdd, RHS, MatchInfo))
4628 bool CombinerHelper::matchConstantFold(MachineInstr &MI, APInt &MatchInfo) {
4629 Register Op1 = MI.getOperand(1).getReg();
4630 Register Op2 = MI.getOperand(2).getReg();
4631 auto MaybeCst = ConstantFoldBinOp(MI.getOpcode(), Op1, Op2, MRI);
4634 MatchInfo = *MaybeCst;
4638 bool CombinerHelper::matchNarrowBinopFeedingAnd(
4639 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4640 // Look for a binop feeding into an AND with a mask:
4642 // %add = G_ADD %lhs, %rhs
4643 // %and = G_AND %add, 000...11111111
4645 // Check if it's possible to perform the binop at a narrower width and zext
4646 // back to the original width like so:
4648 // %narrow_lhs = G_TRUNC %lhs
4649 // %narrow_rhs = G_TRUNC %rhs
4650 // %narrow_add = G_ADD %narrow_lhs, %narrow_rhs
4651 // %new_add = G_ZEXT %narrow_add
4652 // %and = G_AND %new_add, 000...11111111
4654 // This can allow later combines to eliminate the G_AND if it turns out
4655 // that the mask is irrelevant.
4656 assert(MI.getOpcode() == TargetOpcode::G_AND);
4657 Register Dst = MI.getOperand(0).getReg();
4658 Register AndLHS = MI.getOperand(1).getReg();
4659 Register AndRHS = MI.getOperand(2).getReg();
4660 LLT WideTy = MRI.getType(Dst);
4662 // If the potential binop has more than one use, then it's possible that one
4663 // of those uses will need its full width.
4664 if (!WideTy.isScalar() || !MRI.hasOneNonDBGUse(AndLHS))
4667 // Check if the LHS feeding the AND is impacted by the high bits that we're
4670 // e.g. for 64-bit x, y:
4672 // add_64(x, y) & 65535 == zext(add_16(trunc(x), trunc(y))) & 65535
4673 MachineInstr *LHSInst = getDefIgnoringCopies(AndLHS, MRI);
4676 unsigned LHSOpc = LHSInst->getOpcode();
4680 case TargetOpcode::G_ADD:
4681 case TargetOpcode::G_SUB:
4682 case TargetOpcode::G_MUL:
4683 case TargetOpcode::G_AND:
4684 case TargetOpcode::G_OR:
4685 case TargetOpcode::G_XOR:
4689 // Find the mask on the RHS.
4690 auto Cst = getIConstantVRegValWithLookThrough(AndRHS, MRI);
4693 auto Mask = Cst->Value;
4697 // No point in combining if there's nothing to truncate.
4698 unsigned NarrowWidth = Mask.countTrailingOnes();
4699 if (NarrowWidth == WideTy.getSizeInBits())
4701 LLT NarrowTy = LLT::scalar(NarrowWidth);
4703 // Check if adding the zext + truncates could be harmful.
4704 auto &MF = *MI.getMF();
4705 const auto &TLI = getTargetLowering();
4706 LLVMContext &Ctx = MF.getFunction().getContext();
4707 auto &DL = MF.getDataLayout();
4708 if (!TLI.isTruncateFree(WideTy, NarrowTy, DL, Ctx) ||
4709 !TLI.isZExtFree(NarrowTy, WideTy, DL, Ctx))
4711 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_TRUNC, {NarrowTy, WideTy}}) ||
4712 !isLegalOrBeforeLegalizer({TargetOpcode::G_ZEXT, {WideTy, NarrowTy}}))
4714 Register BinOpLHS = LHSInst->getOperand(1).getReg();
4715 Register BinOpRHS = LHSInst->getOperand(2).getReg();
4716 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4717 auto NarrowLHS = Builder.buildTrunc(NarrowTy, BinOpLHS);
4718 auto NarrowRHS = Builder.buildTrunc(NarrowTy, BinOpRHS);
4720 Builder.buildInstr(LHSOpc, {NarrowTy}, {NarrowLHS, NarrowRHS});
4721 auto Ext = Builder.buildZExt(WideTy, NarrowBinOp);
4722 Observer.changingInstr(MI);
4723 MI.getOperand(1).setReg(Ext.getReg(0));
4724 Observer.changedInstr(MI);
4729 bool CombinerHelper::matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) {
4730 unsigned Opc = MI.getOpcode();
4731 assert(Opc == TargetOpcode::G_UMULO || Opc == TargetOpcode::G_SMULO);
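  // (G_*MULO x, 2) -> (G_*ADDO x, x): multiplying by two overflows exactly
  // when adding the value to itself does.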
4733 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(2)))
4736 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4737 Observer.changingInstr(MI);
4738 unsigned NewOpc = Opc == TargetOpcode::G_UMULO ? TargetOpcode::G_UADDO
4739 : TargetOpcode::G_SADDO;
4740 MI.setDesc(Builder.getTII().get(NewOpc));
4741 MI.getOperand(3).setReg(MI.getOperand(2).getReg());
4742 Observer.changedInstr(MI);
4747 bool CombinerHelper::matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) {
4748 // (G_*MULO x, 0) -> 0 + no carry out
4749 assert(MI.getOpcode() == TargetOpcode::G_UMULO ||
4750 MI.getOpcode() == TargetOpcode::G_SMULO);
4751 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0)))
4753 Register Dst = MI.getOperand(0).getReg();
4754 Register Carry = MI.getOperand(1).getReg();
4755 if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Dst)) ||
4756 !isConstantLegalOrBeforeLegalizer(MRI.getType(Carry)))
4758 MatchInfo = [=](MachineIRBuilder &B) {
4759 B.buildConstant(Dst, 0);
4760 B.buildConstant(Carry, 0);
4765 bool CombinerHelper::matchAddOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) {
4766 // (G_*ADDO x, 0) -> x + no carry out
4767 assert(MI.getOpcode() == TargetOpcode::G_UADDO ||
4768 MI.getOpcode() == TargetOpcode::G_SADDO);
4769 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0)))
4771 Register Carry = MI.getOperand(1).getReg();
4772 if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Carry)))
4774 Register Dst = MI.getOperand(0).getReg();
4775 Register LHS = MI.getOperand(2).getReg();
4776 MatchInfo = [=](MachineIRBuilder &B) {
4777 B.buildCopy(Dst, LHS);
4778 B.buildConstant(Carry, 0);
4783 MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) {
4784 assert(MI.getOpcode() == TargetOpcode::G_UDIV);
4785 auto &UDiv = cast<GenericMachineInstr>(MI);
4786 Register Dst = UDiv.getReg(0);
4787 Register LHS = UDiv.getReg(1);
4788 Register RHS = UDiv.getReg(2);
4789 LLT Ty = MRI.getType(Dst);
4790 LLT ScalarTy = Ty.getScalarType();
4791 const unsigned EltBits = ScalarTy.getScalarSizeInBits();
4792 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4793 LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType();
4794 auto &MIB = Builder;
4795 MIB.setInstrAndDebugLoc(MI);
4797 bool UseNPQ = false;
4798 SmallVector<Register, 16> PreShifts, PostShifts, MagicFactors, NPQFactors;
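  // Illustrative example (the well-known magic number for dividing by 3): a
  // 32-bit unsigned divide by 3 becomes umulh(x, 0xAAAAAAAB) >> 1, with no
  // pre-shift and no NPQ fixup.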
4800 auto BuildUDIVPattern = [&](const Constant *C) {
4801 auto *CI = cast<ConstantInt>(C);
4802 const APInt &Divisor = CI->getValue();
4803 UnsignedDivisionByConstantInfo magics =
4804 UnsignedDivisionByConstantInfo::get(Divisor);
4805 unsigned PreShift = 0, PostShift = 0;
4807 // If the divisor is even, we can avoid using the expensive fixup by
4808 // shifting the divided value upfront.
4809 if (magics.IsAdd && !Divisor[0]) {
4810 PreShift = Divisor.countTrailingZeros();
4811 // Get magic number for the shifted divisor.
4813 UnsignedDivisionByConstantInfo::get(Divisor.lshr(PreShift), PreShift);
4814 assert(!magics.IsAdd && "Should use cheap fixup now");
4818 if (!magics.IsAdd || Divisor.isOneValue()) {
4819 assert(magics.ShiftAmount < Divisor.getBitWidth() &&
4820 "We shouldn't generate an undefined shift!");
4821 PostShift = magics.ShiftAmount;
4824 PostShift = magics.ShiftAmount - 1;
4828 PreShifts.push_back(
4829 MIB.buildConstant(ScalarShiftAmtTy, PreShift).getReg(0));
4830 MagicFactors.push_back(MIB.buildConstant(ScalarTy, magics.Magic).getReg(0));
4831 NPQFactors.push_back(
4832 MIB.buildConstant(ScalarTy,
4833 SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1)
4834 : APInt::getZero(EltBits))
4836 PostShifts.push_back(
4837 MIB.buildConstant(ScalarShiftAmtTy, PostShift).getReg(0));
4842 // Collect the shifts/magic values from each element.
4843 bool Matched = matchUnaryPredicate(MRI, RHS, BuildUDIVPattern);
4845 assert(Matched && "Expected unary predicate match to succeed");
4847 Register PreShift, PostShift, MagicFactor, NPQFactor;
4848 auto *RHSDef = getOpcodeDef<GBuildVector>(RHS, MRI);
4850 PreShift = MIB.buildBuildVector(ShiftAmtTy, PreShifts).getReg(0);
4851 MagicFactor = MIB.buildBuildVector(Ty, MagicFactors).getReg(0);
4852 NPQFactor = MIB.buildBuildVector(Ty, NPQFactors).getReg(0);
4853 PostShift = MIB.buildBuildVector(ShiftAmtTy, PostShifts).getReg(0);
4855 assert(MRI.getType(RHS).isScalar() &&
4856 "Non-build_vector operation should have been a scalar");
4857 PreShift = PreShifts[0];
4858 MagicFactor = MagicFactors[0];
4859 PostShift = PostShifts[0];
4863 Q = MIB.buildLShr(Ty, Q, PreShift).getReg(0);
4865 // Multiply the numerator (operand 0) by the magic value.
4866 Q = MIB.buildUMulH(Ty, Q, MagicFactor).getReg(0);
4869 Register NPQ = MIB.buildSub(Ty, LHS, Q).getReg(0);
4871 // For vectors we might have a mix of non-NPQ/NPQ paths, so use
4872 // G_UMULH to act as a SRL-by-1 for NPQ, else multiply by zero.
4874 NPQ = MIB.buildUMulH(Ty, NPQ, NPQFactor).getReg(0);
4876 NPQ = MIB.buildLShr(Ty, NPQ, MIB.buildConstant(ShiftAmtTy, 1)).getReg(0);
4878 Q = MIB.buildAdd(Ty, NPQ, Q).getReg(0);
4881 Q = MIB.buildLShr(Ty, Q, PostShift).getReg(0);
4882 auto One = MIB.buildConstant(Ty, 1);
4883 auto IsOne = MIB.buildICmp(
4884 CmpInst::Predicate::ICMP_EQ,
4885 Ty.isScalar() ? LLT::scalar(1) : Ty.changeElementSize(1), RHS, One);
4886 return MIB.buildSelect(Ty, IsOne, LHS, Q);
4889 bool CombinerHelper::matchUDivByConst(MachineInstr &MI) {
4890 assert(MI.getOpcode() == TargetOpcode::G_UDIV);
4891 Register Dst = MI.getOperand(0).getReg();
4892 Register RHS = MI.getOperand(2).getReg();
4893 LLT DstTy = MRI.getType(Dst);
4894 auto *RHSDef = MRI.getVRegDef(RHS);
4895 if (!isConstantOrConstantVector(*RHSDef, MRI))
4898 auto &MF = *MI.getMF();
4899 AttributeList Attr = MF.getFunction().getAttributes();
4900 const auto &TLI = getTargetLowering();
4901 LLVMContext &Ctx = MF.getFunction().getContext();
4902 auto &DL = MF.getDataLayout();
4903 if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, DL, Ctx), Attr))
4906 // Don't do this for minsize because the instruction sequence is usually
4908 if (MF.getFunction().hasMinSize())
4911 // Don't do this if the types are not going to be legal.
4913 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_MUL, {DstTy, DstTy}}))
4915 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_UMULH, {DstTy}}))
4917 if (!isLegalOrBeforeLegalizer(
4918 {TargetOpcode::G_ICMP,
4919 {DstTy.isVector() ? DstTy.changeElementSize(1) : LLT::scalar(1),
4924 auto CheckEltValue = [&](const Constant *C) {
4925 if (auto *CI = dyn_cast_or_null<ConstantInt>(C))
4926 return !CI->isZero();
4929 return matchUnaryPredicate(MRI, RHS, CheckEltValue);
4932 void CombinerHelper::applyUDivByConst(MachineInstr &MI) {
4933 auto *NewMI = buildUDivUsingMul(MI);
4934 replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
4937 bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) {
4938 assert(MI.getOpcode() == TargetOpcode::G_UMULH);
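  // A G_UMULH by a power of two yields the high half of the product, which is
  // just a right shift: e.g. for s64, umulh(x, 16) == lshr(x, 60).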
4939 Register RHS = MI.getOperand(2).getReg();
4940 Register Dst = MI.getOperand(0).getReg();
4941 LLT Ty = MRI.getType(Dst);
4942 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4943 auto MatchPow2ExceptOne = [&](const Constant *C) {
4944 if (auto *CI = dyn_cast<ConstantInt>(C))
4945 return CI->getValue().isPowerOf2() && !CI->getValue().isOne();
4948 if (!matchUnaryPredicate(MRI, RHS, MatchPow2ExceptOne, false))
4950 return isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR, {Ty, ShiftAmtTy}});
4953 void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) {
4954 Register LHS = MI.getOperand(1).getReg();
4955 Register RHS = MI.getOperand(2).getReg();
4956 Register Dst = MI.getOperand(0).getReg();
4957 LLT Ty = MRI.getType(Dst);
4958 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4959 unsigned NumEltBits = Ty.getScalarSizeInBits();
4961 Builder.setInstrAndDebugLoc(MI);
4962 auto LogBase2 = buildLogBase2(RHS, Builder);
4964 Builder.buildSub(Ty, Builder.buildConstant(Ty, NumEltBits), LogBase2);
4965 auto Trunc = Builder.buildZExtOrTrunc(ShiftAmtTy, ShiftAmt);
4966 Builder.buildLShr(Dst, LHS, Trunc);
4967 MI.eraseFromParent();
4970 bool CombinerHelper::matchRedundantNegOperands(MachineInstr &MI,
4971 BuildFnTy &MatchInfo) {
4972 unsigned Opc = MI.getOpcode();
4973 assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB ||
4974 Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
4975 Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA);
4977 Register Dst = MI.getOperand(0).getReg();
4978 Register X = MI.getOperand(1).getReg();
4979 Register Y = MI.getOperand(2).getReg();
4980 LLT Type = MRI.getType(Dst);
4982 // fold (fadd x, fneg(y)) -> (fsub x, y)
4983 // fold (fadd fneg(y), x) -> (fsub x, y)
4984 // G_FADD is commutative so both cases are checked by m_GFAdd
4985 if (mi_match(Dst, MRI, m_GFAdd(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
4986 isLegalOrBeforeLegalizer({TargetOpcode::G_FSUB, {Type}})) {
4987 Opc = TargetOpcode::G_FSUB;
4989 // fold (fsub x, fneg(y)) -> (fadd x, y)
4990 else if (mi_match(Dst, MRI, m_GFSub(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
4991 isLegalOrBeforeLegalizer({TargetOpcode::G_FADD, {Type}})) {
4992 Opc = TargetOpcode::G_FADD;
4994 // fold (fmul fneg(x), fneg(y)) -> (fmul x, y)
4995 // fold (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
4996 // fold (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
4997 // fold (fma fneg(x), fneg(y), z) -> (fma x, y, z)
4998 else if ((Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
4999 Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA) &&
5000 mi_match(X, MRI, m_GFNeg(m_Reg(X))) &&
5001 mi_match(Y, MRI, m_GFNeg(m_Reg(Y)))) {
5006 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5007 Observer.changingInstr(MI);
5008 MI.setDesc(B.getTII().get(Opc));
5009 MI.getOperand(1).setReg(X);
5010 MI.getOperand(2).setReg(Y);
5011 Observer.changedInstr(MI);
5016 /// Checks if \p MI is TargetOpcode::G_FMUL and contractable either
5017 /// due to global flags or MachineInstr flags.
5018 static bool isContractableFMul(MachineInstr &MI, bool AllowFusionGlobally) {
5019 if (MI.getOpcode() != TargetOpcode::G_FMUL)
5021 return AllowFusionGlobally || MI.getFlag(MachineInstr::MIFlag::FmContract);
5024 static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1,
5025 const MachineRegisterInfo &MRI) {
5026 return std::distance(MRI.use_instr_nodbg_begin(MI0.getOperand(0).getReg()),
5027 MRI.use_instr_nodbg_end()) >
5028 std::distance(MRI.use_instr_nodbg_begin(MI1.getOperand(0).getReg()),
5029 MRI.use_instr_nodbg_end());
5032 bool CombinerHelper::canCombineFMadOrFMA(MachineInstr &MI,
5033 bool &AllowFusionGlobally,
5034 bool &HasFMAD, bool &Aggressive,
5035 bool CanReassociate) {
5037 auto *MF = MI.getMF();
5038 const auto &TLI = *MF->getSubtarget().getTargetLowering();
5039 const TargetOptions &Options = MF->getTarget().Options;
5040 LLT DstType = MRI.getType(MI.getOperand(0).getReg());
5042 if (CanReassociate &&
5043 !(Options.UnsafeFPMath || MI.getFlag(MachineInstr::MIFlag::FmReassoc)))
5046 // Floating-point multiply-add with intermediate rounding.
5047 HasFMAD = (LI && TLI.isFMADLegal(MI, DstType));
5048 // Floating-point multiply-add without intermediate rounding.
5049 bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) &&
5050 isLegalOrBeforeLegalizer({TargetOpcode::G_FMA, {DstType}});
5051 // No valid opcode, do not combine.
5052 if (!HasFMAD && !HasFMA)
5055 AllowFusionGlobally = Options.AllowFPOpFusion == FPOpFusion::Fast ||
5056 Options.UnsafeFPMath || HasFMAD;
5057 // If the addition is not contractable, do not combine.
5058 if (!AllowFusionGlobally && !MI.getFlag(MachineInstr::MIFlag::FmContract))
5061 Aggressive = TLI.enableAggressiveFMAFusion(DstType);
5065 bool CombinerHelper::matchCombineFAddFMulToFMadOrFMA(
5066 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5067 assert(MI.getOpcode() == TargetOpcode::G_FADD);
5069 bool AllowFusionGlobally, HasFMAD, Aggressive;
5070 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5073 Register Op1 = MI.getOperand(1).getReg();
5074 Register Op2 = MI.getOperand(2).getReg();
5075 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5076 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5077 unsigned PreferredFusedOpcode =
5078 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5080 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5081 // prefer to fold the multiply with fewer uses.
5082 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5083 isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5084 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5085 std::swap(LHS, RHS);
5088 // fold (fadd (fmul x, y), z) -> (fma x, y, z)
5089 if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5090 (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg))) {
5091 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5092 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5093 {LHS.MI->getOperand(1).getReg(),
5094 LHS.MI->getOperand(2).getReg(), RHS.Reg});
5099 // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
5100 if (isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
5101 (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg))) {
5102 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5103 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5104 {RHS.MI->getOperand(1).getReg(),
5105 RHS.MI->getOperand(2).getReg(), LHS.Reg});
5113 bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA(
5114 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
5115 assert(MI.getOpcode() == TargetOpcode::G_FADD);
5117 bool AllowFusionGlobally, HasFMAD, Aggressive;
5118 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
5121 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
5122 Register Op1 = MI.getOperand(1).getReg();
5123 Register Op2 = MI.getOperand(2).getReg();
5124 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
5125 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
5126 LLT DstType = MRI.getType(MI.getOperand(0).getReg());
5128 unsigned PreferredFusedOpcode =
5129 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
5131 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
5132 // prefer to fold the multiply with fewer uses.
5133 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
5134 isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
5135 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
5136 std::swap(LHS, RHS);
5139 // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
5140 MachineInstr *FpExtSrc;
5141 if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
5142 isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
5143 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5144 MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
5145 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5146 auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
5147 auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
5148 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5149 {FpExtX.getReg(0), FpExtY.getReg(0), RHS.Reg});
5154 // fold (fadd z, (fpext (fmul x, y))) -> (fma (fpext x), (fpext y), z)
5155 // Note: Commutes FADD operands.
5156 if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
5157 isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
5158 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
5159 MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
5160 MatchInfo = [=, &MI](MachineIRBuilder &B) {
5161 auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
5162 auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
5163 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
5164 {FpExtX.getReg(0), FpExtY.getReg(0), LHS.Reg});
bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FADD);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive, true))
    return false;

  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
    if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
      std::swap(LHS, RHS);
  }

  MachineInstr *FMA = nullptr;
  Register Z;
  // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
  if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
      (MRI.getVRegDef(LHS.MI->getOperand(3).getReg())->getOpcode() ==
       TargetOpcode::G_FMUL) &&
      MRI.hasOneNonDBGUse(LHS.MI->getOperand(0).getReg()) &&
      MRI.hasOneNonDBGUse(LHS.MI->getOperand(3).getReg())) {
    FMA = LHS.MI;
    Z = RHS.Reg;
  }
  // fold (fadd z, (fma x, y, (fmul u, v))) -> (fma x, y, (fma u, v, z))
  else if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
           (MRI.getVRegDef(RHS.MI->getOperand(3).getReg())->getOpcode() ==
            TargetOpcode::G_FMUL) &&
           MRI.hasOneNonDBGUse(RHS.MI->getOperand(0).getReg()) &&
           MRI.hasOneNonDBGUse(RHS.MI->getOperand(3).getReg())) {
    FMA = RHS.MI;
    Z = LHS.Reg;
  }

  if (FMA) {
    MachineInstr *FMulMI = MRI.getVRegDef(FMA->getOperand(3).getReg());
    Register X = FMA->getOperand(1).getReg();
    Register Y = FMA->getOperand(2).getReg();
    Register U = FMulMI->getOperand(1).getReg();
    Register V = FMulMI->getOperand(2).getReg();

    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register InnerFMA = MRI.createGenericVirtualRegister(DstTy);
      B.buildInstr(PreferredFusedOpcode, {InnerFMA}, {U, V, Z});
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {X, Y, InnerFMA});
    };
    return true;
  }

  return false;
}
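
// Aggressive variant of the previous combine: also look through an fpext
// between the fma and the inner fmul, e.g.
//   (fadd (fma x, y, (fpext (fmul u, v))), z)
//     -> (fma x, y, (fma (fpext u), (fpext v), z))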
bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FADD);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  if (!Aggressive)
    return false;

  const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
    if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
      std::swap(LHS, RHS);
  }

  // Builds: (fma x, y, (fma (fpext u), (fpext v), z))
  auto buildMatchInfo = [=, &MI](Register U, Register V, Register Z, Register X,
                                 Register Y, MachineIRBuilder &B) {
    Register FpExtU = B.buildFPExt(DstType, U).getReg(0);
    Register FpExtV = B.buildFPExt(DstType, V).getReg(0);
    Register InnerFMA =
        B.buildInstr(PreferredFusedOpcode, {DstType}, {FpExtU, FpExtV, Z})
            .getReg(0);
    B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                 {X, Y, InnerFMA});
  };

  MachineInstr *FMulMI, *FMAMI;
  // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
  //   -> (fma x, y, (fma (fpext u), (fpext v), z))
  if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
      mi_match(LHS.MI->getOperand(3).getReg(), MRI,
               m_GFPExt(m_MInstr(FMulMI))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
    MatchInfo = [=](MachineIRBuilder &B) {
      buildMatchInfo(FMulMI->getOperand(1).getReg(),
                     FMulMI->getOperand(2).getReg(), RHS.Reg,
                     LHS.MI->getOperand(1).getReg(),
                     LHS.MI->getOperand(2).getReg(), B);
    };
    return true;
  }

  // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
  //   -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
  // FIXME: This turns two single-precision and one double-precision
  // operation into two double-precision operations, which might not be
  // interesting for all targets, especially GPUs.
  if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
      FMAMI->getOpcode() == PreferredFusedOpcode) {
    MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
    if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
        TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                            MRI.getType(FMAMI->getOperand(0).getReg()))) {
      MatchInfo = [=](MachineIRBuilder &B) {
        Register X = FMAMI->getOperand(1).getReg();
        Register Y = FMAMI->getOperand(2).getReg();
        X = B.buildFPExt(DstType, X).getReg(0);
        Y = B.buildFPExt(DstType, Y).getReg(0);
        buildMatchInfo(FMulMI->getOperand(1).getReg(),
                       FMulMI->getOperand(2).getReg(), RHS.Reg, X, Y, B);
      };
      return true;
    }
  }

  // fold (fadd z, (fma x, y, (fpext (fmul u, v))))
  //   -> (fma x, y, (fma (fpext u), (fpext v), z))
  if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
      mi_match(RHS.MI->getOperand(3).getReg(), MRI,
               m_GFPExt(m_MInstr(FMulMI))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
    MatchInfo = [=](MachineIRBuilder &B) {
      buildMatchInfo(FMulMI->getOperand(1).getReg(),
                     FMulMI->getOperand(2).getReg(), LHS.Reg,
                     RHS.MI->getOperand(1).getReg(),
                     RHS.MI->getOperand(2).getReg(), B);
    };
    return true;
  }

  // fold (fadd z, (fpext (fma x, y, (fmul u, v))))
  //   -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
  // FIXME: This turns two single-precision and one double-precision
  // operation into two double-precision operations, which might not be
  // interesting for all targets, especially GPUs.
  if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
      FMAMI->getOpcode() == PreferredFusedOpcode) {
    MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
    if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
        TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                            MRI.getType(FMAMI->getOperand(0).getReg()))) {
      MatchInfo = [=](MachineIRBuilder &B) {
        Register X = FMAMI->getOperand(1).getReg();
        Register Y = FMAMI->getOperand(2).getReg();
        X = B.buildFPExt(DstType, X).getReg(0);
        Y = B.buildFPExt(DstType, Y).getReg(0);
        buildMatchInfo(FMulMI->getOperand(1).getReg(),
                       FMulMI->getOperand(2).getReg(), LHS.Reg, X, Y, B);
      };
      return true;
    }
  }

  return false;
}
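
// Fold an fsub of a contractable fmul into a fused multiply-add by negating
// the remaining operand:
//   (fsub (fmul x, y), z) -> (fma x, y, -z)
//   (fsub x, (fmul y, z)) -> (fma -y, z, x)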
bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  // If we have two choices trying to fold (fsub (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  bool FirstMulHasFewerUses = true;
  if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
      hasMoreUses(*LHS.MI, *RHS.MI, MRI))
    FirstMulHasFewerUses = false;

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // fold (fsub (fmul x, y), z) -> (fma x, y, -z)
  if (FirstMulHasFewerUses &&
      (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
       (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg)))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register NegZ = B.buildFNeg(DstTy, RHS.Reg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {LHS.MI->getOperand(1).getReg(),
                    LHS.MI->getOperand(2).getReg(), NegZ});
    };
    return true;
  }
  // fold (fsub x, (fmul y, z)) -> (fma -y, z, x)
  else if ((isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
            (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg)))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register NegY =
          B.buildFNeg(DstTy, RHS.MI->getOperand(1).getReg()).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegY, RHS.MI->getOperand(2).getReg(), LHS.Reg});
    };
    return true;
  }

  return false;
}
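
// Fold an fsub where one operand is an fneg of a contractable fmul:
//   (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
//   (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)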
bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  MachineInstr *FMulMI;
  // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
  if (mi_match(LHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
      (Aggressive || (MRI.hasOneNonDBGUse(LHSReg) &&
                      MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally)) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register NegX =
          B.buildFNeg(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
      Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegX, FMulMI->getOperand(2).getReg(), NegZ});
    };
    return true;
  }

  // fold (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
  if (mi_match(RHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
      (Aggressive || (MRI.hasOneNonDBGUse(RHSReg) &&
                      MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally)) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FMulMI->getOperand(1).getReg(),
                    FMulMI->getOperand(2).getReg(), LHSReg});
    };
    return true;
  }

  return false;
}
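
// Fold an fsub where one operand is an fpext of a contractable fmul:
//   (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z))
//   (fsub x, (fpext (fmul y, z))) -> (fma (fneg (fpext y)), (fpext z), x)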
bool CombinerHelper::matchCombineFSubFpExtFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  MachineInstr *FMulMI;
  // fold (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z))
  if (mi_match(LHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      (Aggressive || MRI.hasOneNonDBGUse(LHSReg))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register FpExtX =
          B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
      Register FpExtY =
          B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0);
      Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FpExtX, FpExtY, NegZ});
    };
    return true;
  }

  // fold (fsub x, (fpext (fmul y, z))) -> (fma (fneg (fpext y)), (fpext z), x)
  if (mi_match(RHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      (Aggressive || MRI.hasOneNonDBGUse(RHSReg))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register FpExtY =
          B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
      Register NegY = B.buildFNeg(DstTy, FpExtY).getReg(0);
      Register FpExtZ =
          B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegY, FpExtZ, LHSReg});
    };
    return true;
  }

  return false;
}
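
// Fold an fsub where one operand is an fneg and fpext (in either order) of a
// contractable fmul, provided the target reports the fpext as foldable into
// the fused operation.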
bool CombinerHelper::matchCombineFSubFpExtFNegFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  auto buildMatchInfo = [=](Register Dst, Register X, Register Y, Register Z,
                            MachineIRBuilder &B) {
    Register FpExtX = B.buildFPExt(DstTy, X).getReg(0);
    Register FpExtY = B.buildFPExt(DstTy, Y).getReg(0);
    B.buildInstr(PreferredFusedOpcode, {Dst}, {FpExtX, FpExtY, Z});
  };

  MachineInstr *FMulMI;
  // fold (fsub (fpext (fneg (fmul x, y))), z) ->
  //      (fneg (fma (fpext x), (fpext y), z))
  // fold (fsub (fneg (fpext (fmul x, y))), z) ->
  //      (fneg (fma (fpext x), (fpext y), z))
  if ((mi_match(LHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) ||
       mi_match(LHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register FMAReg = MRI.createGenericVirtualRegister(DstTy);
      buildMatchInfo(FMAReg, FMulMI->getOperand(1).getReg(),
                     FMulMI->getOperand(2).getReg(), RHSReg, B);
      B.buildFNeg(MI.getOperand(0).getReg(), FMAReg);
    };
    return true;
  }

  // fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
  // fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
  if ((mi_match(RHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) ||
       mi_match(RHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      buildMatchInfo(MI.getOperand(0).getReg(), FMulMI->getOperand(1).getReg(),
                     FMulMI->getOperand(2).getReg(), LHSReg, B);
    };
    return true;
  }

  return false;
}
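
// Turn a select of boolean (i1 or vector-of-i1) values into the equivalent
// and/or/not logic when the condition is repeated as an operand or one of the
// operands is a constant 0/1.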
bool CombinerHelper::matchSelectToLogical(MachineInstr &MI,
                                          BuildFnTy &MatchInfo) {
  GSelect &Sel = cast<GSelect>(MI);
  Register DstReg = Sel.getReg(0);
  Register Cond = Sel.getCondReg();
  Register TrueReg = Sel.getTrueReg();
  Register FalseReg = Sel.getFalseReg();

  auto *TrueDef = getDefIgnoringCopies(TrueReg, MRI);
  auto *FalseDef = getDefIgnoringCopies(FalseReg, MRI);

  const LLT CondTy = MRI.getType(Cond);
  const LLT OpTy = MRI.getType(TrueReg);
  if (CondTy != OpTy || OpTy.getScalarSizeInBits() != 1)
    return false;

  // We have a boolean select.

  // select Cond, Cond, F --> or Cond, F
  // select Cond, 1, F --> or Cond, F
  auto MaybeCstTrue = isConstantOrConstantSplatVector(*TrueDef, MRI);
  if (Cond == TrueReg || (MaybeCstTrue && MaybeCstTrue->isOne())) {
    MatchInfo = [=](MachineIRBuilder &MIB) {
      MIB.buildOr(DstReg, Cond, FalseReg);
    };
    return true;
  }

  // select Cond, T, Cond --> and Cond, T
  // select Cond, T, 0 --> and Cond, T
  auto MaybeCstFalse = isConstantOrConstantSplatVector(*FalseDef, MRI);
  if (Cond == FalseReg || (MaybeCstFalse && MaybeCstFalse->isZero())) {
    MatchInfo = [=](MachineIRBuilder &MIB) {
      MIB.buildAnd(DstReg, Cond, TrueReg);
    };
    return true;
  }

  // select Cond, T, 1 --> or (not Cond), T
  if (MaybeCstFalse && MaybeCstFalse->isOne()) {
    MatchInfo = [=](MachineIRBuilder &MIB) {
      MIB.buildOr(DstReg, MIB.buildNot(OpTy, Cond), TrueReg);
    };
    return true;
  }

  // select Cond, 0, F --> and (not Cond), F
  if (MaybeCstTrue && MaybeCstTrue->isZero()) {
    MatchInfo = [=](MachineIRBuilder &MIB) {
      MIB.buildAnd(DstReg, MIB.buildNot(OpTy, Cond), FalseReg);
    };
    return true;
  }

  return false;
}
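
// Given a min/max with one constant NaN operand, record which operand the
// instruction should be replaced with: G_FMINNUM/G_FMAXNUM keep the non-NaN
// operand, while G_FMINIMUM/G_FMAXIMUM propagate the NaN.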
bool CombinerHelper::matchCombineFMinMaxNaN(MachineInstr &MI,
                                            unsigned &IdxToPropagate) {
  bool PropagateNaN;
  switch (MI.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM:
    PropagateNaN = false;
    break;
  case TargetOpcode::G_FMINIMUM:
  case TargetOpcode::G_FMAXIMUM:
    PropagateNaN = true;
    break;
  }

  auto MatchNaN = [&](unsigned Idx) {
    Register MaybeNaNReg = MI.getOperand(Idx).getReg();
    const ConstantFP *MaybeCst = getConstantFPVRegVal(MaybeNaNReg, MRI);
    if (!MaybeCst || !MaybeCst->getValueAPF().isNaN())
      return false;
    IdxToPropagate = PropagateNaN ? Idx : (Idx == 1 ? 2 : 1);
    return true;
  };

  return MatchNaN(1) || MatchNaN(2);
}
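
// Match (A + (B - A)) or ((B - A) + A) and record B, so the add can simply be
// replaced by B.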
bool CombinerHelper::matchAddSubSameReg(MachineInstr &MI, Register &Src) {
  assert(MI.getOpcode() == TargetOpcode::G_ADD && "Expected a G_ADD");
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

  // Helper lambda to check for opportunities for
  // A + (B - A) -> B
  // (B - A) + A -> B
  auto CheckFold = [&](Register MaybeSub, Register MaybeSameReg) {
    Register Reg;
    return mi_match(MaybeSub, MRI, m_GSub(m_Reg(Src), m_Reg(Reg))) &&
           Reg == MaybeSameReg;
  };
  return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
}
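
// Generic entry point: try a sequence of stand-alone combines on MI and
// return true as soon as one of them succeeds.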
bool CombinerHelper::tryCombine(MachineInstr &MI) {
  if (tryCombineCopy(MI))
    return true;
  if (tryCombineExtendingLoads(MI))
    return true;
  if (tryCombineIndexedLoadStore(MI))