//===---- MachineCombiner.cpp - Instcombining on SSA form machine code ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The machine combiner pass uses machine trace metrics to ensure the combined
// instructions do not lengthen the critical path or the resource depth.
//===----------------------------------------------------------------------===//
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/LazyMachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "machine-combiner"
STATISTIC(NumInstCombined, "Number of machine instructions combined");
static cl::opt<unsigned>
inc_threshold("machine-combiner-inc-threshold", cl::Hidden,
              cl::desc("Incremental depth computation will be used for basic "
                       "blocks with more instructions than this threshold."),
              cl::init(500));

static cl::opt<bool> dump_intrs("machine-combiner-dump-subst-intrs", cl::Hidden,
                                cl::desc("Dump all substituted intrs"),
                                cl::init(false));
#ifdef EXPENSIVE_CHECKS
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(true));
#else
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(false));
#endif
namespace {
class MachineCombiner : public MachineFunctionPass {
  const TargetSubtargetInfo *STI;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MCSchedModel SchedModel;
  MachineRegisterInfo *MRI;
  MachineLoopInfo *MLI; // Current MachineLoopInfo
  MachineTraceMetrics *Traces;
  MachineTraceMetrics::Ensemble *MinInstr;
  MachineBlockFrequencyInfo *MBFI;
  ProfileSummaryInfo *PSI;
  RegisterClassInfo RegClassInfo;

  TargetSchedModel TSchedModel;

  /// True if optimizing for code size.
  bool OptSize;

public:
  static char ID;
  MachineCombiner() : MachineFunctionPass(ID) {
    initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;
  StringRef getPassName() const override { return "Machine InstCombiner"; }

private:
  bool doSubstitute(unsigned NewSize, unsigned OldSize, bool OptForSize);
  bool combineInstructions(MachineBasicBlock *);
  MachineInstr *getOperandDef(const MachineOperand &MO);
  bool isTransientMI(const MachineInstr *MI);
  unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                    MachineTraceMetrics::Trace BlockTrace);
  unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                      MachineTraceMetrics::Trace BlockTrace);
  bool
  improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
                          MachineTraceMetrics::Trace BlockTrace,
                          SmallVectorImpl<MachineInstr *> &InsInstrs,
                          SmallVectorImpl<MachineInstr *> &DelInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineCombinerPattern Pattern, bool SlackIsAccurate);
  bool reduceRegisterPressure(MachineInstr &Root, MachineBasicBlock *MBB,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              SmallVectorImpl<MachineInstr *> &DelInstrs,
                              MachineCombinerPattern Pattern);
  bool preservesResourceLen(MachineBasicBlock *MBB,
                            MachineTraceMetrics::Trace BlockTrace,
                            SmallVectorImpl<MachineInstr *> &InsInstrs,
                            SmallVectorImpl<MachineInstr *> &DelInstrs);
  void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
                     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
  std::pair<unsigned, unsigned>
  getLatenciesForInstrSequences(MachineInstr &MI,
                                SmallVectorImpl<MachineInstr *> &InsInstrs,
                                SmallVectorImpl<MachineInstr *> &DelInstrs,
                                MachineTraceMetrics::Trace BlockTrace);

  void verifyPatternOrder(MachineBasicBlock *MBB, MachineInstr &Root,
                          SmallVector<MachineCombinerPattern, 16> &Patterns);
};
} // end anonymous namespace
char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;

INITIALIZE_PASS_BEGIN(MachineCombiner, DEBUG_TYPE,
                      "Machine InstCombiner", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(MachineCombiner, DEBUG_TYPE, "Machine InstCombiner",
                    false, false)
void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineTraceMetrics>();
  AU.addPreserved<MachineTraceMetrics>();
  AU.addRequired<LazyMachineBlockFrequencyInfoPass>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
  MachineInstr *DefInstr = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && Register::isVirtualRegister(MO.getReg()))
    DefInstr = MRI->getUniqueVRegDef(MO.getReg());
  // PHIs have no depth etc.
  if (DefInstr && DefInstr->isPHI())
    DefInstr = nullptr;
  return DefInstr;
}
/// Return true if MI is unlikely to generate an actual target instruction.
bool MachineCombiner::isTransientMI(const MachineInstr *MI) {
  if (!MI->isCopy())
    return MI->isTransient();

  // If MI is a COPY, check if its src and dst registers can be coalesced.
  Register Dst = MI->getOperand(0).getReg();
  Register Src = MI->getOperand(1).getReg();

  if (!MI->isFullCopy()) {
    // If src RC contains super registers of dst RC, it can also be coalesced.
    if (MI->getOperand(0).getSubReg() || Src.isPhysical() || Dst.isPhysical())
      return false;

    auto SrcSub = MI->getOperand(1).getSubReg();
    auto SrcRC = MRI->getRegClass(Src);
    auto DstRC = MRI->getRegClass(Dst);
    return TRI->getMatchingSuperRegClass(SrcRC, DstRC, SrcSub) != nullptr;
  }

  if (Src.isPhysical() && Dst.isPhysical())
    return Src == Dst;

  if (Src.isVirtual() && Dst.isVirtual()) {
    auto SrcRC = MRI->getRegClass(Src);
    auto DstRC = MRI->getRegClass(Dst);
    return SrcRC->hasSuperClassEq(DstRC) || SrcRC->hasSubClassEq(DstRC);
  }

  if (Src.isVirtual())
    std::swap(Src, Dst);

  // Now Src is a physical register, Dst is a virtual register.
  auto DstRC = MRI->getRegClass(Dst);
  return DstRC->contains(Src);
}
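
// For example, a COPY between two virtual registers of the same register
// class is expected to be coalesced away, so it is treated as transient and
// contributes no latency when computing instruction depths below.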
/// Computes depth of instructions in vector \p InsInstrs.
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map of virtual register to index
/// of defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of last instruction in \p InsInstrs ("NewRoot")
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineTraceMetrics::Trace BlockTrace) {
  SmallVector<unsigned, 16> InstrDepth;
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // For each instruction in the new sequence compute the depth based on the
  // operands. Use the trace information when possible. For new operands which
  // are tracked in the InstrIdxForVirtReg map, depth is looked up in
  // InstrDepth.
  for (auto *InstrPtr : InsInstrs) { // for each Use
    unsigned IDepth = 0;
    for (const MachineOperand &MO : InstrPtr->operands()) {
      // Check for virtual register operand.
      if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
        continue;
      if (!MO.isUse())
        continue;
      unsigned DepthOp = 0;
      unsigned LatencyOp = 0;
      DenseMap<unsigned, unsigned>::iterator II =
          InstrIdxForVirtReg.find(MO.getReg());
      if (II != InstrIdxForVirtReg.end()) {
        // Operand is a new virtual register not in the trace.
        assert(II->second < InstrDepth.size() && "Bad Index");
        MachineInstr *DefInstr = InsInstrs[II->second];
        assert(DefInstr &&
               "There must be a definition for a new virtual register");
        DepthOp = InstrDepth[II->second];
        int DefIdx = DefInstr->findRegisterDefOperandIdx(MO.getReg());
        int UseIdx = InstrPtr->findRegisterUseOperandIdx(MO.getReg());
        LatencyOp = TSchedModel.computeOperandLatency(DefInstr, DefIdx,
                                                      InstrPtr, UseIdx);
      } else {
        MachineInstr *DefInstr = getOperandDef(MO);
        if (DefInstr) {
          DepthOp = BlockTrace.getInstrCycles(*DefInstr).Depth;
          if (!isTransientMI(DefInstr))
            LatencyOp = TSchedModel.computeOperandLatency(
                DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
                InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
        }
      }
      IDepth = std::max(IDepth, DepthOp + LatencyOp);
    }
    InstrDepth.push_back(IDepth);
  }
  unsigned NewRootIdx = InsInstrs.size() - 1;
  return InstrDepth[NewRootIdx];
}
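
// Worked example (illustrative numbers): if an inserted instruction reads two
// registers whose definitions sit at depths 2 and 5, with operand latencies 3
// and 1 respectively, its depth becomes max(2 + 3, 5 + 1) = 6.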
/// Computes instruction latency as max of latency of defined operands.
///
/// \param Root is a machine instruction that could be replaced by NewRoot.
/// It is used to compute a more accurate latency information for NewRoot in
/// case there is a dependent instruction in the same trace (\p BlockTrace)
/// \param NewRoot is the instruction for which the latency is computed
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Latency of \p NewRoot
unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                                     MachineTraceMetrics::Trace BlockTrace) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // Check each definition in NewRoot and compute the latency.
  unsigned NewRootLatency = 0;

  for (const MachineOperand &MO : NewRoot->operands()) {
    // Check for virtual register operand.
    if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
      continue;
    if (!MO.isDef())
      continue;
    // Get the first instruction that uses MO.
    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
    RI++;
    if (RI == MRI->reg_end())
      continue;
    MachineInstr *UseMO = RI->getParent();
    unsigned LatencyOp = 0;
    if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {
      LatencyOp = TSchedModel.computeOperandLatency(
          NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
          UseMO->findRegisterUseOperandIdx(MO.getReg()));
    } else {
      LatencyOp = TSchedModel.computeInstrLatency(NewRoot);
    }
    NewRootLatency = std::max(NewRootLatency, LatencyOp);
  }
  return NewRootLatency;
}
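
// Note that when the user of a definition cannot be tied to the trace, the
// whole-instruction latency from computeInstrLatency(NewRoot) is used as a
// conservative fallback instead of an operand-to-operand latency.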
/// The combiner's goal may differ based on which pattern it is attempting
/// to optimize.
enum class CombinerObjective {
  MustReduceDepth,            // The data dependency chain must be improved.
  MustReduceRegisterPressure, // The register pressure must be reduced.
  Default                     // The critical path must not be lengthened.
};

static CombinerObjective getCombinerObjective(MachineCombinerPattern P) {
  // TODO: If C++ ever gets a real enum class, make this part of the
  // MachineCombinerPattern class.
  switch (P) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_BY:
  case MachineCombinerPattern::REASSOC_XA_YB:
  case MachineCombinerPattern::REASSOC_XY_AMM_BMM:
  case MachineCombinerPattern::REASSOC_XMM_AMM_BMM:
  case MachineCombinerPattern::SUBADD_OP1:
  case MachineCombinerPattern::SUBADD_OP2:
    return CombinerObjective::MustReduceDepth;
  case MachineCombinerPattern::REASSOC_XY_BCA:
  case MachineCombinerPattern::REASSOC_XY_BAC:
    return CombinerObjective::MustReduceRegisterPressure;
  default:
    return CombinerObjective::Default;
  }
}
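
// Illustrative example for the MustReduceDepth objective (not tied to any
// particular pattern): rewriting the serial chain ((a + b) + c) + d as
// (a + b) + (c + d) keeps the instruction count at three adds but reduces the
// dependency depth of the result from 3 to 2.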
/// Estimate the latency of the new and original instruction sequence by summing
/// up the latencies of the inserted and deleted instructions. This assumes
/// that the inserted and deleted instructions are dependent instruction chains,
/// which might not hold in all cases.
std::pair<unsigned, unsigned> MachineCombiner::getLatenciesForInstrSequences(
    MachineInstr &MI, SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineTraceMetrics::Trace BlockTrace) {
  assert(!InsInstrs.empty() && "Only support sequences that insert instrs.");
  unsigned NewRootLatency = 0;
  // NewRoot is the last instruction in the \p InsInstrs vector.
  MachineInstr *NewRoot = InsInstrs.back();
  for (unsigned i = 0; i < InsInstrs.size() - 1; i++)
    NewRootLatency += TSchedModel.computeInstrLatency(InsInstrs[i]);
  NewRootLatency += getLatency(&MI, NewRoot, BlockTrace);

  unsigned RootLatency = 0;
  for (auto *I : DelInstrs)
    RootLatency += TSchedModel.computeInstrLatency(I);

  return {NewRootLatency, RootLatency};
}
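
// E.g., for InsInstrs = {I0, I1, NewRoot} the new-sequence estimate is
// latency(I0) + latency(I1) + getLatency(Root, NewRoot, Trace); the deleted
// sequence is likewise summed, per the chain assumption documented above.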
bool MachineCombiner::reduceRegisterPressure(
    MachineInstr &Root, MachineBasicBlock *MBB,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineCombinerPattern Pattern) {
  // FIXME: for now, we don't do any check for the register pressure patterns.
  // We treat them as always profitable. But we can do better if we make the
  // RegPressureTracker class aware of the TIE attribute. Then we can get an
  // accurate comparison of register pressure with DelInstrs or InsInstrs.
  return true;
}
/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
/// The new code sequence ends in MI NewRoot. A necessary condition for the new
/// sequence to replace the old sequence is that it cannot lengthen the critical
/// path. The definition of "improve" may be restricted by specifying that the
/// new path improves the data dependency chain (MustReduceDepth).
bool MachineCombiner::improvesCriticalPathLen(
    MachineBasicBlock *MBB, MachineInstr *Root,
    MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
    MachineCombinerPattern Pattern,
    bool SlackIsAccurate) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");
  // Get depth and latency of NewRoot and Root.
  unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
  unsigned RootDepth = BlockTrace.getInstrCycles(*Root).Depth;

  LLVM_DEBUG(dbgs() << "  Dependence data for " << *Root << "\tNewRootDepth: "
                    << NewRootDepth << "\tRootDepth: " << RootDepth);

  // For a transform such as reassociation, the cost equation is
  // conservatively calculated so that we must improve the depth (data
  // dependency cycles) in the critical path to proceed with the transform.
  // Being conservative also protects against inaccuracies in the underlying
  // machine trace metrics and CPU models.
  if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth) {
    LLVM_DEBUG(dbgs() << "\tIt MustReduceDepth ");
    LLVM_DEBUG(NewRootDepth < RootDepth
                   ? dbgs() << "\t  and it does it\n"
                   : dbgs() << "\t  but it does NOT do it\n");
    return NewRootDepth < RootDepth;
  }

  // A more flexible cost calculation for the critical path includes the slack
  // of the original code sequence. This may allow the transform to proceed
  // even if the instruction depths (data dependency cycles) become worse.
  //
  // Account for the latency of the inserted and deleted instructions.
  unsigned NewRootLatency, RootLatency;
  std::tie(NewRootLatency, RootLatency) =
      getLatenciesForInstrSequences(*Root, InsInstrs, DelInstrs, BlockTrace);

  unsigned RootSlack = BlockTrace.getInstrSlack(*Root);
  unsigned NewCycleCount = NewRootDepth + NewRootLatency;
  unsigned OldCycleCount =
      RootDepth + RootLatency + (SlackIsAccurate ? RootSlack : 0);
  LLVM_DEBUG(dbgs() << "\n\tNewRootLatency: " << NewRootLatency
                    << "\tRootLatency: " << RootLatency << "\n\tRootSlack: "
                    << RootSlack << " SlackIsAccurate=" << SlackIsAccurate
                    << "\n\tNewRootDepth + NewRootLatency = " << NewCycleCount
                    << "\n\tRootDepth + RootLatency + RootSlack = "
                    << OldCycleCount);
  LLVM_DEBUG(NewCycleCount <= OldCycleCount
                 ? dbgs() << "\n\t  It IMPROVES PathLen because"
                 : dbgs() << "\n\t  It DOES NOT improve PathLen because");
  LLVM_DEBUG(dbgs() << "\n\t\tNewCycleCount = " << NewCycleCount
                    << ", OldCycleCount = " << OldCycleCount << "\n");

  return NewCycleCount <= OldCycleCount;
}
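
// In short, for the default objective the transform is accepted when
//   NewRootDepth + NewRootLatency <= RootDepth + RootLatency + RootSlack,
// where RootSlack is only counted when it is known to be accurate.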
/// Helper routine to convert instructions into their scheduling classes (SC).
void MachineCombiner::instr2instrSC(
    SmallVectorImpl<MachineInstr *> &Instrs,
    SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
  for (auto *InstrPtr : Instrs) {
    unsigned Opc = InstrPtr->getOpcode();
    unsigned Idx = TII->get(Opc).getSchedClass();
    const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
    InstrsSC.push_back(SC);
  }
}
/// True when the new instructions do not increase resource length
bool MachineCombiner::preservesResourceLen(
    MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs) {
  if (!TSchedModel.hasInstrSchedModel())
    return true;

  // Compute current resource length.
  SmallVector<const MachineBasicBlock *, 1> MBBarr;
  MBBarr.push_back(MBB);
  unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);

  // Deal with SC rather than Instructions.
  SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
  SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;

  instr2instrSC(InsInstrs, InsInstrsSC);
  instr2instrSC(DelInstrs, DelInstrsSC);

  ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
  ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);

  // Compute new resource length.
  unsigned ResLenAfterCombine =
      BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);

  LLVM_DEBUG(dbgs() << "\t\tResource length before replacement: "
                    << ResLenBeforeCombine
                    << " and after: " << ResLenAfterCombine << "\n";);
  LLVM_DEBUG(
      ResLenAfterCombine <=
              ResLenBeforeCombine + TII->getExtendResourceLenLimit()
          ? dbgs() << "\t\t  As result it IMPROVES/PRESERVES Resource Length\n"
          : dbgs() << "\t\t  As result it DOES NOT improve/preserve Resource "
                      "Length\n");

  return ResLenAfterCombine <=
         ResLenBeforeCombine + TII->getExtendResourceLenLimit();
}
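
// For instance, replacing {MUL, ADD} with a single {MADD} typically shortens
// or preserves the resource length, since one fused instruction usually
// occupies fewer issue resources than the two instructions it replaces.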
/// \returns true when the new instruction sequence should be generated
/// independent of whether it lengthens the critical path or not.
bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize,
                                   bool OptForSize) {
  if (OptForSize && (NewSize < OldSize))
    return true;
  if (!TSchedModel.hasInstrSchedModelOrItineraries())
    return true;
  return false;
}
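
// E.g., when optimizing for size, a pattern that rewrites three instructions
// into two is always taken; likewise, without a scheduling model or
// itineraries there is no latency data to evaluate, so the substitution is
// taken unconditionally.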
/// Inserts InsInstrs and deletes DelInstrs. Incrementally updates instruction
/// depths if requested.
///
/// \param MBB basic block to insert instructions in
/// \param MI current machine instruction
/// \param InsInstrs new instructions to insert in \p MBB
/// \param DelInstrs instructions to delete from \p MBB
/// \param MinInstr is a pointer to the machine trace information
/// \param RegUnits set of live registers, needed to compute instruction depths
/// \param TII is target instruction info, used to call target hook
/// \param Pattern is used to call target hook finalizeInsInstrs
/// \param IncrementalUpdate if true, compute instruction depths incrementally,
///        otherwise invalidate the trace
static void insertDeleteInstructions(MachineBasicBlock *MBB, MachineInstr &MI,
                                     SmallVector<MachineInstr *, 16> InsInstrs,
                                     SmallVector<MachineInstr *, 16> DelInstrs,
                                     MachineTraceMetrics::Ensemble *MinInstr,
                                     SparseSet<LiveRegUnit> &RegUnits,
                                     const TargetInstrInfo *TII,
                                     MachineCombinerPattern Pattern,
                                     bool IncrementalUpdate) {
  // If we want to fix up some placeholder for some target, do it now.
  // We need this because in genAlternativeCodeSequence, we have not decided
  // whether InsInstrs or DelInstrs is the better pattern, so we don't want to
  // generate side effects for the function yet. For example, we need to delay
  // constant pool entry creation until after InsInstrs is selected as the
  // better pattern; otherwise the constant pool entry created for InsInstrs
  // will not be deleted even if InsInstrs is not the better pattern.
  TII->finalizeInsInstrs(MI, Pattern, InsInstrs);

  for (auto *InstrPtr : InsInstrs)
    MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);

  for (auto *InstrPtr : DelInstrs) {
    InstrPtr->eraseFromParent();
    // Erase all LiveRegs defined by the removed instruction.
    for (auto *I = RegUnits.begin(); I != RegUnits.end();) {
      if (I->MI == InstrPtr)
        I = RegUnits.erase(I);
      else
        ++I;
    }
  }

  if (IncrementalUpdate)
    for (auto *InstrPtr : InsInstrs)
      MinInstr->updateDepth(MBB, *InstrPtr, RegUnits);
  else
    MinInstr->invalidate(MBB);

  NumInstCombined++;
}
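
// Note: the incremental path above only refreshes the depths of the newly
// inserted instructions; heights and resource data in the trace stay stale
// until the trace is eventually invalidated (see combineInstructions).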
// Check that the difference between original and new latency is decreasing for
// later patterns. This helps to discover sub-optimal pattern orderings.
void MachineCombiner::verifyPatternOrder(
    MachineBasicBlock *MBB, MachineInstr &Root,
    SmallVector<MachineCombinerPattern, 16> &Patterns) {
  long PrevLatencyDiff = std::numeric_limits<long>::max();
  (void)PrevLatencyDiff; // Variable is used in assert only.
  for (auto P : Patterns) {
    SmallVector<MachineInstr *, 16> InsInstrs;
    SmallVector<MachineInstr *, 16> DelInstrs;
    DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
    TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg);
    // Found pattern, but did not generate alternative sequence.
    // This can happen e.g. when an immediate could not be materialized
    // in a single instruction.
    if (InsInstrs.empty() || !TSchedModel.hasInstrSchedModelOrItineraries())
      continue;

    unsigned NewRootLatency, RootLatency;
    std::tie(NewRootLatency, RootLatency) = getLatenciesForInstrSequences(
        Root, InsInstrs, DelInstrs, MinInstr->getTrace(MBB));
    long CurrentLatencyDiff = ((long)RootLatency) - ((long)NewRootLatency);
    assert(CurrentLatencyDiff <= PrevLatencyDiff &&
           "Current pattern is better than previous pattern.");
    PrevLatencyDiff = CurrentLatencyDiff;
  }
}
/// Substitute a slow code sequence with a faster one by
/// evaluating instruction combining patterns.
/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
/// combining based on machine trace metrics. Only combine a sequence of
/// instructions when this neither lengthens the critical path nor increases
/// resource pressure. When optimizing for codesize always combine when the new
/// sequence is shorter.
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");

  bool IncrementalUpdate = false;
  auto BlockIter = MBB->begin();
  decltype(BlockIter) LastUpdate;
  // Check if the block is in a loop.
  const MachineLoop *ML = MLI->getLoopFor(MBB);
  if (!MinInstr)
    MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);

  SparseSet<LiveRegUnit> RegUnits;
  RegUnits.setUniverse(TRI->getNumRegUnits());

  bool OptForSize = OptSize || llvm::shouldOptimizeForSize(MBB, PSI, MBFI);

  bool DoRegPressureReduce =
      TII->shouldReduceRegisterPressure(MBB, &RegClassInfo);

  while (BlockIter != MBB->end()) {
    auto &MI = *BlockIter++;
    SmallVector<MachineCombinerPattern, 16> Patterns;
    // The motivating example is:
    //
    //     MUL  Other        MUL_op1 MUL_op2  Other
    //      \    /               \      |    /
    //      ADD/SUB      =>        MADD/MSUB
    //      (=Root)                (=NewRoot)
    //
    // The DAGCombine code always replaced MUL + ADD/SUB by MADD. While this is
    // usually beneficial for code size it unfortunately can hurt performance
    // when the ADD is on the critical path, but the MUL is not. With the
    // substitution the MUL becomes part of the critical path (in form of the
    // MADD) and can lengthen it on architectures where the MADD latency is
    // longer than the ADD latency.
    //
    // For each instruction we check if it can be the root of a combiner
    // pattern. Then for each pattern the new code sequence in form of MI is
    // generated and evaluated. When the efficiency criteria (don't lengthen
    // critical path, don't use more resources) is met the new sequence gets
    // hooked up into the basic block before the old sequence is removed.
    //
    // The algorithm does not try to evaluate all patterns and pick the best.
    // This is only an artificial restriction though. In practice there is
    // mostly one pattern, and getMachineCombinerPatterns() can order patterns
    // based on an internal cost heuristic. If
    // machine-combiner-verify-pattern-order is enabled, all patterns are
    // checked to ensure later patterns do not provide better latency savings.

    if (!TII->getMachineCombinerPatterns(MI, Patterns, DoRegPressureReduce))
      continue;

    if (VerifyPatternOrder)
      verifyPatternOrder(MBB, MI, Patterns);
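
    // Each candidate pattern is evaluated independently below; the first
    // pattern that passes the cost checks is applied and the remaining
    // patterns for this root are skipped.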
    for (auto P : Patterns) {
      SmallVector<MachineInstr *, 16> InsInstrs;
      SmallVector<MachineInstr *, 16> DelInstrs;
      DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
      TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
                                      InstrIdxForVirtReg);
      unsigned NewInstCount = InsInstrs.size();
      unsigned OldInstCount = DelInstrs.size();
      // Found pattern, but did not generate alternative sequence.
      // This can happen e.g. when an immediate could not be materialized
      // in a single instruction.
      if (InsInstrs.empty())
        continue;

      LLVM_DEBUG(if (dump_intrs) {
        dbgs() << "\tFor the Pattern (" << (int)P
               << ") these instructions could be removed\n";
        for (auto const *InstrPtr : DelInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/false, /*SkipOpers*/false,
                          /*SkipDebugLoc*/false, /*AddNewLine*/true, TII);
        dbgs() << "\tThese instructions could replace the removed ones\n";
        for (auto const *InstrPtr : InsInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/false, /*SkipOpers*/false,
                          /*SkipDebugLoc*/false, /*AddNewLine*/true, TII);
      });

      bool SubstituteAlways = false;
      if (ML && TII->isThroughputPattern(P))
        SubstituteAlways = true;

      if (IncrementalUpdate && LastUpdate != BlockIter) {
        // Update depths since the last incremental update.
        MinInstr->updateDepths(LastUpdate, BlockIter, RegUnits);
        LastUpdate = BlockIter;
      }

      if (DoRegPressureReduce &&
          getCombinerObjective(P) ==
              CombinerObjective::MustReduceRegisterPressure) {
        if (MBB->size() > inc_threshold) {
          // Use incremental depth updates for basic blocks above threshold.
          IncrementalUpdate = true;
          LastUpdate = BlockIter;
        }
        if (reduceRegisterPressure(MI, MBB, InsInstrs, DelInstrs, P)) {
          // Replace DelInstrs with InsInstrs.
          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                   RegUnits, TII, P, IncrementalUpdate);
          Changed |= true;

          // Go back to previous instruction as it may have ILP reassociation
          // opportunity.
          BlockIter--;
          break;
        }
        continue;
      }
      // Substitute when we optimize for codesize and the new sequence has
      // fewer instructions OR
      // the new sequence neither lengthens the critical path nor increases
      // resource pressure.
      if (SubstituteAlways ||
          doSubstitute(NewInstCount, OldInstCount, OptForSize)) {
        insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                 RegUnits, TII, P, IncrementalUpdate);
        // Eagerly stop after the first pattern fires.
        Changed = true;
        break;
      } else {
        // For big basic blocks, we only compute the full trace the first time
        // we hit this. We do not invalidate the trace, but instead update the
        // instruction depths incrementally.
        // NOTE: Only the instruction depths up to MI are accurate. All other
        // trace information is not updated.
        MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
        Traces->verifyAnalysis();
        if (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg, P,
                                    !IncrementalUpdate) &&
            preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs)) {
          if (MBB->size() > inc_threshold) {
            // Use incremental depth updates for basic blocks above threshold.
            IncrementalUpdate = true;
            LastUpdate = BlockIter;
          }

          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                   RegUnits, TII, P, IncrementalUpdate);

          // Eagerly stop after the first pattern fires.
          Changed = true;
          break;
        }
        // Cleanup instructions of the alternative code sequence. There is no
        // use for them.
        MachineFunction *MF = MBB->getParent();
        for (auto *InstrPtr : InsInstrs)
          MF->deleteMachineInstr(InstrPtr);
      }
      InstrIdxForVirtReg.clear();
    }
  }

  if (Changed && IncrementalUpdate)
    Traces->invalidate(MBB);
  return Changed;
}
bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  SchedModel = STI->getSchedModel();
  TSchedModel.init(STI);
  MRI = &MF.getRegInfo();
  MLI = &getAnalysis<MachineLoopInfo>();
  Traces = &getAnalysis<MachineTraceMetrics>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  MBFI = (PSI && PSI->hasProfileSummary()) ?
         &getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI() :
         nullptr;
  MinInstr = nullptr;
  OptSize = MF.getFunction().hasOptSize();
  RegClassInfo.runOnMachineFunction(MF);

  LLVM_DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
  if (!TII->useMachineCombiner()) {
    LLVM_DEBUG(
        dbgs()
        << "  Skipping pass: Target does not support machine combiner\n");
    return false;
  }

  bool Changed = false;

  // Try to combine instructions.
  for (auto &MBB : MF)
    Changed |= combineInstructions(&MBB);

  return Changed;
}