//===- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//
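//
// For example, the pass performs rewrites such as the following (a sketch of
// the main transformations, not an exhaustive list):
//   * two adjacent loads or stores are merged into a load/store pair
//     (e.g. two LDRWui into one LDPWi),
//   * a load reading from an address that was just stored to is fed directly
//     from the stored register instead of reloading from memory,
//   * an add/sub of the base register is folded into a pre-/post-indexed
//     form of the memory access.
//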
14 #include "AArch64InstrInfo.h"
15 #include "AArch64Subtarget.h"
16 #include "MCTargetDesc/AArch64AddressingModes.h"
17 #include "llvm/ADT/BitVector.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/ADT/StringRef.h"
21 #include "llvm/ADT/iterator_range.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/MachineBasicBlock.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineFunctionPass.h"
26 #include "llvm/CodeGen/MachineInstr.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/TargetRegisterInfo.h"
31 #include "llvm/IR/DebugLoc.h"
32 #include "llvm/MC/MCRegisterInfo.h"
33 #include "llvm/Pass.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/DebugCounter.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
47 #define DEBUG_TYPE "aarch64-ldst-opt"
STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pairs generated from unscaled instructions");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

DEBUG_COUNTER(RegRenamingCounter, DEBUG_TYPE "-reg-renaming",
              "Controls which pairs are considered for renaming");
// The LdStLimit limits how far we search for load/store pairs.
static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

// The UpdateLimit limits how far we search for update instructions when we form
// pre-/post-index instructions.
static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
                                     cl::Hidden);

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {
using LdStPairFlags = struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward = false;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx = -1;

  // If not none, RenameReg can be used to rename the result register of the
  // first store in a pair. Currently this only works when merging stores
  // backward.
  Optional<MCPhysReg> RenameReg = None;

  LdStPairFlags() = default;

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }

  void setRenameReg(MCPhysReg R) { RenameReg = R; }
  void clearRenameReg() { RenameReg = None; }
  Optional<MCPhysReg> getRenameReg() const { return RenameReg; }
};
struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;

  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  AliasAnalysis *AA;
  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which register units have been modified and used.
  LiveRegUnits ModifiedRegUnits, UsedRegUnits;
  LiveRegUnits DefinedInBB;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit,
                                               bool FindNarrowMerge);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two instructions indicated into a single wider store instruction.
  MachineBasicBlock::iterator
  mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                        MachineBasicBlock::iterator MergeMI,
                        const LdStPairFlags &Flags);

  // Merge the two instructions indicated into a single pair-wise instruction.
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote the load that reads directly from the address stored to.
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset, unsigned Limit);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge zero store instructions.
  bool tryToMergeZeroStInst(MachineBasicBlock::iterator &MBBI);

  // Find and pair ldr/str instructions.
  bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  // Find and merge base register updates before or after a ld/st instruction.
  bool tryToMergeLdStUpdate(MachineBasicBlock::iterator &MBBI);

  bool optimizeBlock(MachineBasicBlock &MBB, bool EnableNarrowZeroStOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

  StringRef getPassName() const override { return AARCH64_LOAD_STORE_OPT_NAME; }
};

char AArch64LoadStoreOpt::ID = 0;

} // end anonymous namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)
static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}
// These instructions set the memory tag and either keep the memory contents
// unchanged or set them to zero, ignoring the address part of the source
// register.
static bool isTagStore(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::STGOffset:
  case AArch64::STZGOffset:
  case AArch64::ST2GOffset:
  case AArch64::STZ2GOffset:
    return true;
  }
}
static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return std::numeric_limits<unsigned>::max();
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  }
}
static unsigned getMatchingWideOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no wide equivalent!");
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::STURWi:
    return AArch64::STURXi;
  case AArch64::STRWui:
    return AArch64::STRXui;
  }
}
static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  }
}
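// For example, two adjacent scaled loads
//   ldr w0, [x2]        ; LDRWui
//   ldr w1, [x2, #4]    ; LDRWui
// can be combined into the single pair instruction
//   ldp w0, w1, [x2]    ; LDPWi
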
static bool isMatchingStore(MachineInstr &LoadInst,
                            MachineInstr &StoreInst) {
  unsigned LdOpc = LoadInst.getOpcode();
  unsigned StOpc = StoreInst.getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}
static unsigned getPreIndexedOpcode(unsigned Opc) {
  // FIXME: We don't currently support creating pre-indexed loads/stores when
  // the load or store is the unscaled version. If we decide to perform such an
  // optimization in the future the cases for the unscaled loads/stores will
  // need to be added here.
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  case AArch64::STGOffset:
    return AArch64::STGPreIndex;
  case AArch64::STZGOffset:
    return AArch64::STZGPreIndex;
  case AArch64::ST2GOffset:
    return AArch64::ST2GPreIndex;
  case AArch64::STZ2GOffset:
    return AArch64::STZ2GPreIndex;
  case AArch64::STGPi:
    return AArch64::STGPpre;
  }
}
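// A pre-indexed access updates its base register together with the access,
// so e.g. the sequence
//   str x0, [x1, #16]
//   add x1, x1, #16
// can be folded into the single writeback store
//   str x0, [x1, #16]!   ; STRXpre
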
static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STRSpost;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STRDpost;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STRWpost;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  case AArch64::STGOffset:
    return AArch64::STGPostIndex;
  case AArch64::STZGOffset:
    return AArch64::STZGPostIndex;
  case AArch64::ST2GOffset:
    return AArch64::ST2GPostIndex;
  case AArch64::STZ2GOffset:
    return AArch64::STZ2GPostIndex;
  case AArch64::STGPi:
    return AArch64::STGPpost;
  }
}
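// A post-indexed access updates its base register after the access, so e.g.
//   ldr x0, [x1]
//   add x1, x1, #8
// can be folded into
//   ldr x0, [x1], #8     ; LDRXpost
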
static bool isPairedLdSt(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
  case AArch64::STGPi:
    return true;
  }
}
// Returns the scale and offset range of pre/post indexed variants of MI.
static void getPrePostIndexedMemOpInfo(const MachineInstr &MI, int &Scale,
                                       int &MinOffset, int &MaxOffset) {
  bool IsPaired = isPairedLdSt(MI);
  bool IsTagStore = isTagStore(MI);
  // ST*G and all paired ldst have the same scale in pre/post-indexed variants
  // as in the "unsigned offset" variant.
  // All other pre/post indexed ldst instructions are unscaled.
  Scale = (IsTagStore || IsPaired) ? AArch64InstrInfo::getMemScale(MI) : 1;

  // Paired pre/post-indexed forms take a scaled 7-bit signed immediate; all
  // other pre/post-indexed forms take a 9-bit signed byte offset.
  if (IsPaired) {
    MinOffset = -64;
    MaxOffset = 63;
  } else {
    MinOffset = -256;
    MaxOffset = 255;
  }
}
static MachineOperand &getLdStRegOp(MachineInstr &MI,
                                    unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI.getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr &MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI.getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI.getOperand(Idx);
}
static bool isLdOffsetInRangeOfSt(MachineInstr &LoadInst,
                                  MachineInstr &StoreInst,
                                  const AArch64InstrInfo *TII) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = TII->getMemScale(LoadInst);
  int StoreSize = TII->getMemScale(StoreInst);
  int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
                             ? getLdStOffsetOp(StoreInst).getImm()
                             : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}
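// For example, a 4-byte store covering bytes [8, 12) contains a 2-byte load
// of bytes [10, 12) (UnscaledLdOffset 10, LoadSize 2), but not a 2-byte load
// of bytes [11, 13).
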
// Returns true if MI is a candidate zero store, i.e. a 32-bit or narrower
// store of WZR.
static bool isPromotableZeroStoreInst(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AArch64::STRWui || Opc == AArch64::STURWi ||
          isNarrowStore(Opc)) &&
         getLdStRegOp(MI).getReg() == AArch64::WZR;
}
static bool isPromotableLoadFromStore(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  // Scaled instructions.
  case AArch64::LDRBBui:
  case AArch64::LDRHHui:
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  // Unscaled instructions.
  case AArch64::LDURBBi:
  case AArch64::LDURHHi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
    return true;
  }
}
static bool isMergeableLdStUpdate(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
  default:
    return false;
  // Scaled instructions.
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::STRHHui:
  case AArch64::STRBBui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui:
  case AArch64::LDRHHui:
  case AArch64::LDRBBui:
  case AArch64::STGOffset:
  case AArch64::STZGOffset:
  case AArch64::ST2GOffset:
  case AArch64::STZ2GOffset:
  case AArch64::STGPi:
  // Unscaled instructions.
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  // Paired instructions.
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    // Make sure this is a reg+imm (as opposed to an address reloc).
    if (!getLdStOffsetOp(MI).isImm())
      return false;

    return true;
  }
}
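// Example of the transformation performed by mergeNarrowZeroStores below: the
// two adjacent halfword zero stores
//   strh wzr, [x0]
//   strh wzr, [x0, #2]
// are replaced by the single wider store
//   str wzr, [x0]
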
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                                           MachineBasicBlock::iterator MergeMI,
                                           const LdStPairFlags &Flags) {
  assert(isPromotableZeroStoreInst(*I) && isPromotableZeroStoreInst(*MergeMI) &&
         "Expected promotable zero stores.");

  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == MergeMI)
    ++NextI;

  unsigned Opc = I->getOpcode();
  bool IsScaled = !TII->isUnscaledLdSt(Opc);
  int OffsetStride = IsScaled ? 1 : TII->getMemScale(*I);

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? MergeMI : I;
  // Also based on MergeForward is from where we copy the base register operand
  // so we get the flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(*MergeMI) : getLdStBaseOp(*I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI;
  if (getLdStOffsetOp(*I).getImm() ==
      getLdStOffsetOp(*MergeMI).getImm() + OffsetStride)
    RtMI = &*MergeMI;
  else
    RtMI = &*I;

  int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
  // Change the scaled offset from small to large type.
  if (IsScaled) {
    assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
    OffsetImm /= 2;
  }

  // Construct the new instruction.
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MachineInstrBuilder MIB;
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
            .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
            .add(BaseRegOp)
            .addImm(OffsetImm)
            .cloneMergedMemRefs({&*I, &*MergeMI})
            .setMIFlags(I->mergeFlagsWith(*MergeMI));
  (void)MIB;

  LLVM_DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n    ");
  LLVM_DEBUG(I->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(MergeMI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
  LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  LLVM_DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  I->eraseFromParent();
  MergeMI->eraseFromParent();
  return NextI;
}
// Apply Fn to all instructions between MI and the beginning of the block, until
// a def for DefReg is reached. Returns true iff Fn returns true for all
// visited instructions. Stop after visiting Limit iterations.
static bool forAllMIsUntilDef(MachineInstr &MI, MCPhysReg DefReg,
                              const TargetRegisterInfo *TRI, unsigned Limit,
                              std::function<bool(MachineInstr &, bool)> &Fn) {
  auto MBB = MI.getParent();
  for (MachineBasicBlock::reverse_iterator I = MI.getReverseIterator(),
                                           E = MBB->rend();
       I != E; I++) {
    if (!Limit)
      return false;
    --Limit;

    bool isDef = any_of(I->operands(), [DefReg, TRI](MachineOperand &MOP) {
      return MOP.isReg() && MOP.isDef() && !MOP.isDebug() && MOP.getReg() &&
             TRI->regsOverlap(MOP.getReg(), DefReg);
    });
    if (!Fn(*I, isDef))
      return false;
    if (isDef)
      break;
  }
  return true;
}
static void updateDefinedRegisters(MachineInstr &MI, LiveRegUnits &Units,
                                   const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MOP : phys_regs_and_masks(MI))
    if (MOP.isReg() && MOP.isKill())
      Units.removeReg(MOP.getReg());

  for (const MachineOperand &MOP : phys_regs_and_masks(MI))
    if (MOP.isReg() && !MOP.isKill())
      Units.addReg(MOP.getReg());
}
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = TII->isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? TII->getMemScale(*I) : 1;

  bool MergeForward = Flags.getMergeForward();

  Optional<MCPhysReg> RenameReg = Flags.getRenameReg();
  if (MergeForward && RenameReg) {
    MCRegister RegToRename = getLdStRegOp(*I).getReg();
    DefinedInBB.addReg(*RenameReg);
    // Return the sub/super register for RenameReg, matching the size of
    // OriginalReg.
    auto GetMatchingSubReg = [this,
                              RenameReg](MCPhysReg OriginalReg) -> MCPhysReg {
      for (MCPhysReg SubOrSuper : TRI->sub_and_superregs_inclusive(*RenameReg))
        if (TRI->getMinimalPhysRegClass(OriginalReg) ==
            TRI->getMinimalPhysRegClass(SubOrSuper))
          return SubOrSuper;
      llvm_unreachable("Should have found matching sub or super register!");
    };
    std::function<bool(MachineInstr &, bool)> UpdateMIs =
        [this, RegToRename, GetMatchingSubReg](MachineInstr &MI, bool IsDef) {
          if (IsDef) {
            bool SeenDef = false;
            for (auto &MOP : MI.operands()) {
              // Rename the first explicit definition and all implicit
              // definitions matching RegToRename.
              if (MOP.isReg() && !MOP.isDebug() && MOP.getReg() &&
                  (!SeenDef || (MOP.isDef() && MOP.isImplicit())) &&
                  TRI->regsOverlap(MOP.getReg(), RegToRename)) {
                assert((MOP.isImplicit() ||
                        (MOP.isRenamable() && !MOP.isEarlyClobber())) &&
                       "Need renamable operands");
                MOP.setReg(GetMatchingSubReg(MOP.getReg()));
                SeenDef = true;
              }
            }
          } else {
            for (auto &MOP : MI.operands()) {
              if (MOP.isReg() && !MOP.isDebug() && MOP.getReg() &&
                  TRI->regsOverlap(MOP.getReg(), RegToRename)) {
                assert((MOP.isImplicit() ||
                        (MOP.isRenamable() && !MOP.isEarlyClobber())) &&
                       "Need renamable operands");
                MOP.setReg(GetMatchingSubReg(MOP.getReg()));
              }
            }
          }
          LLVM_DEBUG(dbgs() << "Renamed " << MI << "\n");
          return true;
        };
    forAllMIsUntilDef(*I, RegToRename, TRI, LdStLimit, UpdateMIs);

#if !defined(NDEBUG)
    // Make sure the register used for renaming is not used between the paired
    // instructions. That would trash the content before the new paired
    // instruction.
    for (auto &MI :
         iterator_range<MachineInstrBundleIterator<llvm::MachineInstr>>(
             std::next(I), std::next(Paired)))
      assert(all_of(MI.operands(),
                    [this, &RenameReg](const MachineOperand &MOP) {
                      return !MOP.isReg() || MOP.isDebug() || !MOP.getReg() ||
                             !TRI->regsOverlap(MOP.getReg(), *RenameReg);
                    }) &&
             "Rename register used between paired instruction, trashing the "
             "content");
#endif
  }
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // Also based on MergeForward is from where we copy the base register operand
  // so we get the flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(*Paired) : getLdStBaseOp(*I);

  int Offset = getLdStOffsetOp(*I).getImm();
  int PairedOffset = getLdStOffsetOp(*Paired).getImm();
  bool PairedIsUnscaled = TII->isUnscaledLdSt(Paired->getOpcode());
  if (IsUnscaled != PairedIsUnscaled) {
    // We're trying to pair instructions that differ in how they are scaled. If
    // I is scaled then scale the offset of Paired accordingly. Otherwise, do
    // the opposite (i.e., make Paired's offset unscaled).
    int MemSize = TII->getMemScale(*Paired);
    if (PairedIsUnscaled) {
      // If the unscaled offset isn't a multiple of the MemSize, we can't
      // pair the operations together.
      assert(!(PairedOffset % TII->getMemScale(*Paired)) &&
             "Offset should be a multiple of the stride!");
      PairedOffset /= MemSize;
    } else {
      PairedOffset *= MemSize;
    }
  }

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (Offset == PairedOffset + OffsetStride) {
    RtMI = &*Paired;
    Rt2MI = &*I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = &*I;
    Rt2MI = &*Paired;
  }
  int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
  // Scale the immediate offset, if necessary.
  if (TII->isUnscaledLdSt(RtMI->getOpcode())) {
    assert(!(OffsetImm % TII->getMemScale(*RtMI)) &&
           "Unscaled offset cannot be scaled.");
    OffsetImm /= TII->getMemScale(*RtMI);
  }
  // Construct the new instruction.
  MachineInstrBuilder MIB;
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MachineOperand RegOp0 = getLdStRegOp(*RtMI);
  MachineOperand RegOp1 = getLdStRegOp(*Rt2MI);
  // Kill flags may become invalid when moving stores for pairing.
  if (RegOp0.isUse()) {
    if (!MergeForward) {
      // Clear kill flags on store if moving upwards. Example:
      //   STRWui %w0, ...
      //   USE %w1
      //   STRWui kill %w1  ; need to clear kill flag when moving STRWui upwards
      RegOp0.setIsKill(false);
      RegOp1.setIsKill(false);
    } else {
      // Clear kill flags of the first stores register. Example:
      //   STRWui %w1, ...
      //   USE kill %w1   ; need to clear kill flag when moving STRWui downwards
      //   STRWui %w0
      Register Reg = getLdStRegOp(*I).getReg();
      for (MachineInstr &MI : make_range(std::next(I), Paired))
        MI.clearRegisterKills(Reg, TRI);
    }
  }
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingPairOpcode(Opc)))
            .add(RegOp0)
            .add(RegOp1)
            .add(BaseRegOp)
            .addImm(OffsetImm)
            .cloneMergedMemRefs({&*I, &*Paired})
            .setMIFlags(I->mergeFlagsWith(*Paired));
949 dbgs() << "Creating pair load/store. Replacing instructions:\n ");
950 LLVM_DEBUG(I->print(dbgs()));
951 LLVM_DEBUG(dbgs() << " ");
952 LLVM_DEBUG(Paired->print(dbgs()));
953 LLVM_DEBUG(dbgs() << " with instruction:\n ");
955 // Generate the sign extension for the proper result of the ldp.
956 // I.e., with X1, that would be:
957 // %w1 = KILL %w1, implicit-def %x1
958 // %x1 = SBFMXri killed %x1, 0, 31
959 MachineOperand &DstMO = MIB->getOperand(SExtIdx);
960 // Right now, DstMO has the extended register, since it comes from an
962 Register DstRegX = DstMO.getReg();
963 // Get the W variant of that register.
964 Register DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
965 // Update the result of LDP to use the W instead of the X variant.
966 DstMO.setReg(DstRegW);
967 LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
968 LLVM_DEBUG(dbgs() << "\n");
969 // Make the machine verifier happy by providing a definition for
971 // Insert this definition right after the generated LDP, i.e., before
973 MachineInstrBuilder MIBKill =
974 BuildMI(*MBB, InsertionPoint, DL, TII->get(TargetOpcode::KILL), DstRegW)
976 .addReg(DstRegX, RegState::Define);
977 MIBKill->getOperand(2).setImplicit();
978 // Create the sign extension.
979 MachineInstrBuilder MIBSXTW =
980 BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::SBFMXri), DstRegX)
985 LLVM_DEBUG(dbgs() << " Extend operand:\n ");
986 LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
988 LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
990 LLVM_DEBUG(dbgs() << "\n");
993 for (const MachineOperand &MOP : phys_regs_and_masks(*I))
994 if (MOP.isReg() && MOP.isKill())
995 DefinedInBB.addReg(MOP.getReg());
997 // Erase the old instructions.
998 I->eraseFromParent();
999 Paired->eraseFromParent();
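// When a sign-extending load is paired with a plain load (SExtIdx != -1), the
// result is roughly:
//   ldrsw x0, [x2]
//   ldr   w1, [x2, #4]
// =>
//   ldp   w0, w1, [x2]
//   sbfm  x0, x0, #0, #31   ; i.e. sxtw x0, w0, re-creating the extension
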
MachineBasicBlock::iterator
AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                                          MachineBasicBlock::iterator StoreI) {
  MachineBasicBlock::iterator NextI = LoadI;
  ++NextI;

  int LoadSize = TII->getMemScale(*LoadI);
  int StoreSize = TII->getMemScale(*StoreI);
  Register LdRt = getLdStRegOp(*LoadI).getReg();
  const MachineOperand &StMO = getLdStRegOp(*StoreI);
  Register StRt = getLdStRegOp(*StoreI).getReg();
  bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);

  assert((IsStoreXReg ||
          TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
         "Unexpected RegClass");

  MachineInstr *BitExtMI;
  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load, if the destination register of the load is the same
    // as the register holding the stored value.
    if (StRt == LdRt && LoadSize == 8) {
      for (MachineInstr &MI : make_range(StoreI->getIterator(),
                                         LoadI->getIterator())) {
        if (MI.killsRegister(StRt, TRI)) {
          MI.clearRegisterKills(StRt, TRI);
          break;
        }
      }
      LLVM_DEBUG(dbgs() << "Remove load instruction:\n    ");
      LLVM_DEBUG(LoadI->print(dbgs()));
      LLVM_DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store are of the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .add(StMO)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .setMIFlags(LoadI->getFlags());
  } else {
    // FIXME: Currently we disable this transformation in big-endian targets as
    // performance and correctness are verified only in little-endian.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = TII->isUnscaledLdSt(*LoadI);
    assert(IsUnscaled == TII->isUnscaledLdSt(*StoreI) &&
           "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset = IsUnscaled
                               ? getLdStOffsetOp(*LoadI).getImm()
                               : getLdStOffsetOp(*LoadI).getImm() * LoadSize;
    int UnscaledStOffset = IsUnscaled
                               ? getLdStOffsetOp(*StoreI).getImm()
                               : getLdStOffsetOp(*StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    Register DestReg =
        IsStoreXReg ? Register(TRI->getMatchingSuperReg(
                          LdRt, AArch64::sub_32, &AArch64::GPR64RegClass))
                    : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    if (UnscaledLdOffset == UnscaledStOffset) {
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
                                | ((Immr) << 6)               // immr
                                | ((Imms) << 0);              // imms

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .add(StMO)
              .addImm(AndMaskEncoded)
              .setMIFlags(LoadI->getFlags());
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .add(StMO)
              .addImm(Immr)
              .addImm(Imms)
              .setMIFlags(LoadI->getFlags());
    }
  }
  // Clear kill flags between store and load.
  for (MachineInstr &MI : make_range(StoreI->getIterator(),
                                     BitExtMI->getIterator()))
    if (MI.killsRegister(StRt, TRI)) {
      MI.clearRegisterKills(StRt, TRI);
      break;
    }

  LLVM_DEBUG(dbgs() << "Promoting load by replacing :\n    ");
  LLVM_DEBUG(StoreI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(LoadI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instructions:\n    ");
  LLVM_DEBUG(StoreI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG((BitExtMI)->print(dbgs()));
  LLVM_DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  LoadI->eraseFromParent();
  return NextI;
}
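// For example, with the store and load
//   str  w1, [x0, #4]
//   ldrh w2, [x0, #4]
// the load is rewritten to read from the stored register (here Immr = 0 and
// Imms = 15, i.e. an AND with 0xffff):
//   str  w1, [x0, #4]
//   and  w2, w1, #0xffff
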
static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled) {
    // If the byte-offset isn't a multiple of the stride, there's no point
    // trying to match it.
    if (Offset % OffsetStride)
      return false;
    Offset /= OffsetStride;
  }
  return Offset <= 63 && Offset >= -64;
}

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}
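// E.g. alignTo(5, 4) == 8 and alignTo(8, 4) == 8: Num is rounded up to the
// next multiple of PowOf2.
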
static bool mayAlias(MachineInstr &MIa, MachineInstr &MIb,
                     AliasAnalysis *AA) {
  // One of the instructions must modify memory.
  if (!MIa.mayStore() && !MIb.mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa.mayLoadOrStore() && !MIb.mayLoadOrStore())
    return false;

  return MIa.mayAlias(AA, MIb, /*UseTBAA*/ false);
}

static bool mayAlias(MachineInstr &MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     AliasAnalysis *AA) {
  for (MachineInstr *MIb : MemInsns)
    if (mayAlias(MIa, *MIb, AA))
      return true;

  return false;
}
bool AArch64LoadStoreOpt::findMatchingStore(
    MachineBasicBlock::iterator I, unsigned Limit,
    MachineBasicBlock::iterator &StoreI) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr &LoadMI = *I;
  Register BaseReg = getLdStBaseOp(LoadMI).getReg();

  // If the load is the first instruction in the block, there's obviously
  // not any matching store.
  if (MBBI == B)
    return false;

  // Track which register units have been modified and used between the first
  // insn and the second insn.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();

  unsigned Count = 0;
  do {
    --MBBI;
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If the load instruction reads directly from the address to which the
    // store instruction writes and the stored value is not modified, we can
    // promote the load. Since we do not handle stores with pre-/post-index,
    // it's unnecessary to check if BaseReg is modified by the store itself.
    if (MI.mayStore() && isMatchingStore(LoadMI, MI) &&
        BaseReg == getLdStBaseOp(MI).getReg() &&
        isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
        ModifiedRegUnits.available(getLdStRegOp(MI).getReg())) {
      StoreI = MBBI;
      return true;
    }

    if (MI.isCall())
      return false;

    // Update modified / uses register units.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (!ModifiedRegUnits.available(BaseReg))
      return false;

    // If we encounter a store aliased with the load, return early.
    if (MI.mayStore() && mayAlias(LoadMI, MI, AA))
      return false;
  } while (MBBI != B && Count < Limit);
  return false;
}
// Returns true if FirstMI and MI are candidates for merging or pairing.
// Otherwise, returns false.
static bool areCandidatesToMergeOrPair(MachineInstr &FirstMI, MachineInstr &MI,
                                       LdStPairFlags &Flags,
                                       const AArch64InstrInfo *TII) {
  // If this is volatile or if pairing is suppressed, not a candidate.
  if (MI.hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
    return false;

  // We should have already checked FirstMI for pair suppression and volatility.
  assert(!FirstMI.hasOrderedMemoryRef() &&
         !TII->isLdStPairSuppressed(FirstMI) &&
         "FirstMI shouldn't get here if either of these checks are true.");

  unsigned OpcA = FirstMI.getOpcode();
  unsigned OpcB = MI.getOpcode();

  // Opcodes match: nothing more to check.
  if (OpcA == OpcB)
    return true;

  // Try to match a sign-extended load/store with a zero-extended load/store.
  bool IsValidLdStrOpc, PairIsValidLdStrOpc;
  unsigned NonSExtOpc = getMatchingNonSExtOpcode(OpcA, &IsValidLdStrOpc);
  assert(IsValidLdStrOpc &&
         "Given Opc should be a Load or Store with an immediate");
  // OpcA will be the first instruction in the pair.
  if (NonSExtOpc == getMatchingNonSExtOpcode(OpcB, &PairIsValidLdStrOpc)) {
    Flags.setSExtIdx(NonSExtOpc == (unsigned)OpcA ? 1 : 0);
    return true;
  }

  // If the second instruction isn't even a mergable/pairable load/store, bail
  // out.
  if (!PairIsValidLdStrOpc)
    return false;

  // FIXME: We don't support merging narrow stores with mixed scaled/unscaled
  // offsets.
  if (isNarrowStore(OpcA) || isNarrowStore(OpcB))
    return false;

  // Try to match an unscaled load/store with a scaled load/store.
  return TII->isUnscaledLdSt(OpcA) != TII->isUnscaledLdSt(OpcB) &&
         getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);

  // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
}
static bool
canRenameUpToDef(MachineInstr &FirstMI, LiveRegUnits &UsedInBetween,
                 SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
                 const TargetRegisterInfo *TRI) {
  if (!FirstMI.mayStore())
    return false;

  // Check if we can find an unused register which we can use to rename
  // the register used by the first load/store.
  auto *RegClass = TRI->getMinimalPhysRegClass(getLdStRegOp(FirstMI).getReg());
  MachineFunction &MF = *FirstMI.getParent()->getParent();
  if (!RegClass || !MF.getRegInfo().tracksLiveness())
    return false;

  auto RegToRename = getLdStRegOp(FirstMI).getReg();
  // For now, we only rename if the store operand gets killed at the store.
  if (!getLdStRegOp(FirstMI).isKill() &&
      !any_of(FirstMI.operands(),
              [TRI, RegToRename](const MachineOperand &MOP) {
                return MOP.isReg() && !MOP.isDebug() && MOP.getReg() &&
                       MOP.isImplicit() && MOP.isKill() &&
                       TRI->regsOverlap(RegToRename, MOP.getReg());
              })) {
    LLVM_DEBUG(dbgs() << "  Operand not killed at " << FirstMI << "\n");
    return false;
  }
  auto canRenameMOP = [](const MachineOperand &MOP) {
    return MOP.isImplicit() ||
           (MOP.isRenamable() && !MOP.isEarlyClobber() && !MOP.isTied());
  };

  bool FoundDef = false;

  // For each instruction between FirstMI and the previous def for RegToRename,
  // we
  // * check if we can rename RegToRename in this instruction
  // * collect the registers used and required register classes for RegToRename.
  std::function<bool(MachineInstr &, bool)> CheckMIs = [&](MachineInstr &MI,
                                                           bool IsDef) {
    LLVM_DEBUG(dbgs() << "Checking " << MI << "\n");
    // Currently we do not try to rename across frame-setup instructions.
    if (MI.getFlag(MachineInstr::FrameSetup)) {
      LLVM_DEBUG(dbgs() << "  Cannot rename framesetup instructions currently ("
                        << MI << ")\n");
      return false;
    }

    UsedInBetween.accumulate(MI);

    // For a definition, check that we can rename the definition and exit the
    // loop.
    FoundDef = IsDef;

    // For defs, check if we can rename the first def of RegToRename.
    if (FoundDef) {
      // For some pseudo instructions, we might not generate code in the end
      // (e.g. KILL) and we would end up without a correct def for the rename
      // register.
      // TODO: This might be overly conservative and we could handle those cases
      // in multiple ways:
      //       1. Insert an extra copy, to materialize the def.
      //       2. Skip pseudo-defs until we find a non-pseudo def.
      if (MI.isPseudo()) {
        LLVM_DEBUG(dbgs() << "  Cannot rename pseudo instruction " << MI
                          << "\n");
        return false;
      }

      for (auto &MOP : MI.operands()) {
        if (!MOP.isReg() || !MOP.isDef() || MOP.isDebug() || !MOP.getReg() ||
            !TRI->regsOverlap(MOP.getReg(), RegToRename))
          continue;
        if (!canRenameMOP(MOP)) {
          LLVM_DEBUG(dbgs()
                     << "  Cannot rename " << MOP << " in " << MI << "\n");
          return false;
        }
        RequiredClasses.insert(TRI->getMinimalPhysRegClass(MOP.getReg()));
      }
    } else {
      for (auto &MOP : MI.operands()) {
        if (!MOP.isReg() || MOP.isDebug() || !MOP.getReg() ||
            !TRI->regsOverlap(MOP.getReg(), RegToRename))
          continue;

        if (!canRenameMOP(MOP)) {
          LLVM_DEBUG(dbgs()
                     << "  Cannot rename " << MOP << " in " << MI << "\n");
          return false;
        }
        RequiredClasses.insert(TRI->getMinimalPhysRegClass(MOP.getReg()));
      }
    }
    return true;
  };

  if (!forAllMIsUntilDef(FirstMI, RegToRename, TRI, LdStLimit, CheckMIs))
    return false;

  if (!FoundDef) {
    LLVM_DEBUG(dbgs() << "  Did not find definition for register in BB\n");
    return false;
  }
  return true;
}
// Check if we can find a physical register for renaming. This register must:
// * not be defined up to FirstMI (checking DefinedInBB)
// * not be used between the MI and the defining instruction of the register to
//   rename (checked using UsedInBetween).
// * be available in all used register classes (checked using RequiredClasses).
static Optional<MCPhysReg> tryToFindRegisterToRename(
    MachineInstr &FirstMI, MachineInstr &MI, LiveRegUnits &DefinedInBB,
    LiveRegUnits &UsedInBetween,
    SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
    const TargetRegisterInfo *TRI) {
  auto &MF = *FirstMI.getParent()->getParent();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Checks if any sub- or super-register of PR is callee saved.
  auto AnySubOrSuperRegCalleePreserved = [&MF, TRI](MCPhysReg PR) {
    return any_of(TRI->sub_and_superregs_inclusive(PR),
                  [&MF, TRI](MCPhysReg SubOrSuper) {
                    return TRI->isCalleeSavedPhysReg(SubOrSuper, MF);
                  });
  };

  // Check if PR or one of its sub- or super-registers can be used for all
  // required register classes.
  auto CanBeUsedForAllClasses = [&RequiredClasses, TRI](MCPhysReg PR) {
    return all_of(RequiredClasses, [PR, TRI](const TargetRegisterClass *C) {
      return any_of(TRI->sub_and_superregs_inclusive(PR),
                    [C, TRI](MCPhysReg SubOrSuper) {
                      return C == TRI->getMinimalPhysRegClass(SubOrSuper);
                    });
    });
  };

  auto *RegClass = TRI->getMinimalPhysRegClass(getLdStRegOp(FirstMI).getReg());
  for (const MCPhysReg &PR : *RegClass) {
    if (DefinedInBB.available(PR) && UsedInBetween.available(PR) &&
        !RegInfo.isReserved(PR) && !AnySubOrSuperRegCalleePreserved(PR) &&
        CanBeUsedForAllClasses(PR)) {
      DefinedInBB.addReg(PR);
      LLVM_DEBUG(dbgs() << "Found rename register " << printReg(PR, TRI)
                        << "\n");
      return {PR};
    }
  }
  LLVM_DEBUG(dbgs() << "No rename register found from "
                    << TRI->getRegClassName(RegClass) << "\n");
  return None;
}
/// Scan the instructions looking for a load/store that can be combined with the
/// current instruction into a wider equivalent or a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                      LdStPairFlags &Flags, unsigned Limit,
                                      bool FindNarrowMerge) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  MachineBasicBlock::iterator MBBIWithRenameReg;
  MachineInstr &FirstMI = *I;
  ++MBBI;

  bool MayLoad = FirstMI.mayLoad();
  bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
  Register Reg = getLdStRegOp(FirstMI).getReg();
  Register BaseReg = getLdStBaseOp(FirstMI).getReg();
  int Offset = getLdStOffsetOp(FirstMI).getImm();
  int OffsetStride = IsUnscaled ? TII->getMemScale(FirstMI) : 1;
  bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI);

  Optional<bool> MaybeCanRename = None;
  SmallPtrSet<const TargetRegisterClass *, 5> RequiredClasses;
  LiveRegUnits UsedInBetween;
  UsedInBetween.init(*TRI);
  Flags.clearRenameReg();

  // Track which register units have been modified and used between the first
  // insn (inclusive) and the second insn.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();

  // Remember any instructions that read/write memory between FirstMI and MI.
  SmallVector<MachineInstr *, 4> MemInsns;

  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr &MI = *MBBI;

    UsedInBetween.accumulate(MI);

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    Flags.setSExtIdx(-1);
    if (areCandidatesToMergeOrPair(FirstMI, MI, Flags, TII) &&
        getLdStOffsetOp(MI).isImm()) {
      assert(MI.mayLoadOrStore() && "Expected memory operation.");
      // If we've found another instruction with the same opcode, check to see
      // if the base and offset are compatible with our starting instruction.
      // These instructions all have scaled immediate operands, so we just
      // check for +1/-1. Make sure to check the new instruction offset is
      // actually an immediate and not a symbolic reference destined for
      // a relocation.
      Register MIBaseReg = getLdStBaseOp(MI).getReg();
      int MIOffset = getLdStOffsetOp(MI).getImm();
      bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
      if (IsUnscaled != MIIsUnscaled) {
        // We're trying to pair instructions that differ in how they are scaled.
        // If FirstMI is scaled then scale the offset of MI accordingly.
        // Otherwise, do the opposite (i.e., make MI's offset unscaled).
        int MemSize = TII->getMemScale(MI);
        if (MIIsUnscaled) {
          // If the unscaled offset isn't a multiple of the MemSize, we can't
          // pair the operations together: bail and keep looking.
          if (MIOffset % MemSize) {
            LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
                                              UsedRegUnits, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
          MIOffset /= MemSize;
        } else {
          MIOffset *= MemSize;
        }
      }
      if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
                                   (Offset + OffsetStride == MIOffset))) {
        int MinOffset = Offset < MIOffset ? Offset : MIOffset;
        if (FindNarrowMerge) {
          // If the alignment requirements of the scaled wide load/store
          // instruction can't express the offset of the scaled narrow input,
          // bail and keep looking. For promotable zero stores, allow only when
          // the stored value is the same (i.e., WZR).
          if ((!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) ||
              (IsPromotableZeroStore && Reg != getLdStRegOp(MI).getReg())) {
            LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
                                              UsedRegUnits, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
        } else {
          // Pairwise instructions have a 7-bit signed offset field. Single
          // insns have a 12-bit unsigned offset field. If the resultant
          // immediate offset of merging these instructions is out of range for
          // a pairwise instruction, bail and keep looking.
          if (!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
            LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
                                              UsedRegUnits, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
          // If the alignment requirements of the paired (scaled) instruction
          // can't express the offset of the unscaled input, bail and keep
          // looking.
          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
            LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
                                              UsedRegUnits, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
        }
        // If the destination register of the loads is the same register, bail
        // and keep looking. A load-pair instruction with both destination
        // registers the same is UNPREDICTABLE and will result in an exception.
        if (MayLoad && Reg == getLdStRegOp(MI).getReg()) {
          LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
                                            TRI);
          MemInsns.push_back(&MI);
          continue;
        }

        // If the Rt of the second instruction was not modified or used between
        // the two instructions and none of the instructions between the second
        // and first alias with the second, we can combine the second into the
        // first.
        if (ModifiedRegUnits.available(getLdStRegOp(MI).getReg()) &&
            !(MI.mayLoad() &&
              !UsedRegUnits.available(getLdStRegOp(MI).getReg())) &&
            !mayAlias(MI, MemInsns, AA)) {

          Flags.setMergeForward(false);
          Flags.clearRenameReg();
          return MBBI;
        }

        // Likewise, if the Rt of the first instruction is not modified or used
        // between the two instructions and none of the instructions between the
        // first and the second alias with the first, we can combine the first
        // into the second.
        if (!(MayLoad &&
              !UsedRegUnits.available(getLdStRegOp(FirstMI).getReg())) &&
            !mayAlias(FirstMI, MemInsns, AA)) {

          if (ModifiedRegUnits.available(getLdStRegOp(FirstMI).getReg())) {
            Flags.setMergeForward(true);
            Flags.clearRenameReg();
            return MBBI;
          }

          if (DebugCounter::shouldExecute(RegRenamingCounter)) {
            if (!MaybeCanRename)
              MaybeCanRename = {canRenameUpToDef(FirstMI, UsedInBetween,
                                                 RequiredClasses, TRI)};

            if (*MaybeCanRename) {
              Optional<MCPhysReg> MaybeRenameReg = tryToFindRegisterToRename(
                  FirstMI, MI, DefinedInBB, UsedInBetween, RequiredClasses,
                  TRI);
              if (MaybeRenameReg) {
                Flags.setRenameReg(*MaybeRenameReg);
                Flags.setMergeForward(true);
                MBBIWithRenameReg = MBBI;
              }
            }
          }
        }
        // Unable to combine these instructions due to interference in between.
        // Keep looking.
      }
    }

    if (Flags.getRenameReg())
      return MBBIWithRenameReg;

    // The instruction wasn't a matching load or store. Stop searching if we
    // encounter a call instruction that might modify memory.
    if (MI.isCall())
      return E;

    // Update modified / uses register units.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (!ModifiedRegUnits.available(BaseReg))
      return E;

    // Update list of instructions that read/write memory.
    if (MI.mayLoadOrStore())
      MemInsns.push_back(&MI);
  }
  return E;
}
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator Update,
                                     bool IsPreIdx) {
  assert((Update->getOpcode() == AArch64::ADDXri ||
          Update->getOpcode() == AArch64::SUBXri) &&
         "Unexpected base register update instruction to merge!");
  MachineBasicBlock::iterator NextI = I;
  // Return the instruction following the merged instruction, which is
  // the instruction following our unmerged load. Unless that's the add/sub
  // instruction we're merging, in which case it's the one after that.
  if (++NextI == Update)
    ++NextI;

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;

  unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                             : getPostIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB;
  int Scale, MinOffset, MaxOffset;
  getPrePostIndexedMemOpInfo(*I, Scale, MinOffset, MaxOffset);
  if (!isPairedLdSt(*I)) {
    // Non-paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .add(getLdStRegOp(*Update))
              .add(getLdStRegOp(*I))
              .add(getLdStBaseOp(*I))
              .addImm(Value / Scale)
              .setMemRefs(I->memoperands())
              .setMIFlags(I->mergeFlagsWith(*Update));
  } else {
    // Paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .add(getLdStRegOp(*Update))
              .add(getLdStRegOp(*I, 0))
              .add(getLdStRegOp(*I, 1))
              .add(getLdStBaseOp(*I))
              .addImm(Value / Scale)
              .setMemRefs(I->memoperands())
              .setMIFlags(I->mergeFlagsWith(*Update));
  }
  (void)MIB;

  if (IsPreIdx) {
    ++NumPreFolded;
    LLVM_DEBUG(dbgs() << "Creating pre-indexed load/store.");
  } else {
    ++NumPostFolded;
    LLVM_DEBUG(dbgs() << "Creating post-indexed load/store.");
  }
  LLVM_DEBUG(dbgs() << "    Replacing instructions:\n    ");
  LLVM_DEBUG(I->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(Update->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
  LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  LLVM_DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}
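// For example, folding a post-increment into a store pair:
//   stp x0, x1, [x2]
//   add x2, x2, #16
// becomes
//   stp x0, x1, [x2], #16
// where the writeback immediate is encoded as Value / Scale = 16 / 8 = 2.
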
bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
                                               MachineInstr &MI,
                                               unsigned BaseReg, int Offset) {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::SUBXri:
  case AArch64::ADDXri:
    // Make sure it's a vanilla immediate operand, not a relocation or
    // anything else we can't handle.
    if (!MI.getOperand(2).isImm())
      break;
    // Watch out for 1 << 12 shifted value.
    if (AArch64_AM::getShiftValue(MI.getOperand(3).getImm()))
      break;

    // The update instruction source and destination register must be the
    // same as the load/store base register.
    if (MI.getOperand(0).getReg() != BaseReg ||
        MI.getOperand(1).getReg() != BaseReg)
      break;

    {
      int UpdateOffset = MI.getOperand(2).getImm();
      if (MI.getOpcode() == AArch64::SUBXri)
        UpdateOffset = -UpdateOffset;

      // The immediate must be a multiple of the scaling factor of the pre/post
      // indexed instruction.
      int Scale, MinOffset, MaxOffset;
      getPrePostIndexedMemOpInfo(MemMI, Scale, MinOffset, MaxOffset);
      if (UpdateOffset % Scale != 0)
        break;

      // Scaled offset must fit in the instruction immediate.
      int ScaledOffset = UpdateOffset / Scale;
      if (ScaledOffset > MaxOffset || ScaledOffset < MinOffset)
        break;

      // If we have a non-zero Offset, we check that it matches the amount
      // we're adding to the register.
      if (!Offset || Offset == UpdateOffset)
        return true;
    }
    break;
  }
  return false;
}
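// For example, for an X-register store pair (Scale = 8, ScaledOffset in
// [-64, 63]), "add x1, x1, #24" is a matching update (24 / 8 = 3), while
// "add x1, x1, #20" is not, since 20 is not a multiple of 8.
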
1741 MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
1742 MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
1743 MachineBasicBlock::iterator E = I->getParent()->end();
1744 MachineInstr &MemMI = *I;
1745 MachineBasicBlock::iterator MBBI = I;
1747 Register BaseReg = getLdStBaseOp(MemMI).getReg();
1748 int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * TII->getMemScale(MemMI);
1750 // Scan forward looking for post-index opportunities. Updating instructions
1751 // can't be formed if the memory instruction doesn't have the offset we're
1753 if (MIUnscaledOffset != UnscaledOffset)
1756 // If the base register overlaps a source/destination register, we can't
1757 // merge the update. This does not apply to tag store instructions which
1758 // ignore the address part of the source register.
1759 // This does not apply to STGPi as well, which does not have unpredictable
1760 // behavior in this case unlike normal stores, and always performs writeback
1761 // after reading the source register value.
1762 if (!isTagStore(MemMI) && MemMI.getOpcode() != AArch64::STGPi) {
1763 bool IsPairedInsn = isPairedLdSt(MemMI);
1764 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1765 Register DestReg = getLdStRegOp(MemMI, i).getReg();
1766 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
  // Track which register units have been modified and used between the first
  // insn (inclusive) and the second insn.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();
  ++MBBI;
  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(*I, MI, BaseReg, UnscaledOffset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (!ModifiedRegUnits.available(BaseReg) ||
        !UsedRegUnits.available(BaseReg))
      return E;
  }
  return E;
}
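// A hedged walk-through (instructions assumed): starting from "ldr x0, [x2]",
// the scan may step over "mov w3, w4", which neither reads nor writes x2, and
// then return the iterator for "add x2, x2, #8"; the caller folds the pair
// into "ldr x0, [x2], #8".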
MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
    MachineBasicBlock::iterator I, unsigned Limit) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr &MemMI = *I;
  MachineBasicBlock::iterator MBBI = I;

  Register BaseReg = getLdStBaseOp(MemMI).getReg();
  int Offset = getLdStOffsetOp(MemMI).getImm();

  // If the load/store is the first instruction in the block, there's obviously
  // not any matching update. Ditto if the memory offset isn't zero.
  if (MBBI == B || Offset != 0)
    return E;
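  // The zero-offset requirement matters because the pre-index merge rewrites
  // the memory offset to the update amount: e.g. (a sketch),
  // "add x0, x0, #8" followed by "ldr x1, [x0]" becomes "ldr x1, [x0, #8]!";
  // a nonzero original offset would have nowhere to go.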
  // If the base register overlaps a destination register, we can't
  // merge the update.
  if (!isTagStore(MemMI)) {
    bool IsPairedInsn = isPairedLdSt(MemMI);
    for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
      Register DestReg = getLdStRegOp(MemMI, i).getReg();
      if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
        return E;
    }
  }
  // Track which register units have been modified and used between the first
  // insn (inclusive) and the second insn.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();
  unsigned Count = 0;
  do {
    --MBBI;
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(*I, MI, BaseReg, Offset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (!ModifiedRegUnits.available(BaseReg) ||
        !UsedRegUnits.available(BaseReg))
      return E;
  } while (MBBI != B && Count < Limit);
  return E;
}
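// As with the forward scan, intervening instructions must leave the base
// register untouched: in a sketch like "add x0, x0, #8; mov w2, #0;
// ldr x1, [x0]", the scan steps back over the mov (which doesn't touch x0)
// and matches the add, enabling "ldr x1, [x0, #8]!".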
bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  // If this is a volatile load, don't mess with it.
  if (MI.hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm.
  // FIXME: It is possible to extend it to handle reg+reg cases.
  if (!getLdStOffsetOp(MI).isImm())
    return false;

  // Look backward up to LdStLimit instructions.
  MachineBasicBlock::iterator StoreI;
  if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
    ++NumLoadsFromStoresPromoted;
    // Promote the load. Keeping the iterator straight is a pain, so we let
    // the merge routine tell us what the next instruction is after it's done
    // mucking about.
    MBBI = promoteLoadFromStore(MBBI, StoreI);
    return true;
  }
  return false;
}
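// For instance (a hedged sketch of the same-size case): "str w1, [x0]"
// followed by "ldr w2, [x0]" lets the load be promoted to a register copy
// "mov w2, w1"; the wider-store case, which needs a shift or bitfield
// extract instead, is sketched in the comments in optimizeBlock below.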
// Merge adjacent zero stores into a wider store.
bool AArch64LoadStoreOpt::tryToMergeZeroStInst(
    MachineBasicBlock::iterator &MBBI) {
  assert(isPromotableZeroStoreInst(*MBBI) && "Expected narrow store.");
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();

  if (!TII->isCandidateToMergeOrPair(MI))
    return false;

  // Look ahead up to LdStLimit instructions for a mergeable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator MergeMI =
      findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ true);
  if (MergeMI != E) {
    ++NumZeroStoresPromoted;

    // Keeping the iterator straight is a pain, so we let the merge routine
    // tell us what the next instruction is after it's done mucking about.
    MBBI = mergeNarrowZeroStores(MBBI, MergeMI, Flags);
    return true;
  }
  return false;
}
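// Zero stores are special because the combined value is free: e.g. (a
// sketch), "str wzr, [x0]" plus "str wzr, [x0, #4]" can become a single
// "str xzr, [x0]" without needing a scratch register to hold the merged
// data.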
// Find loads and stores that can be merged into a single load or store pair
// instruction.
bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();

  if (!TII->isCandidateToMergeOrPair(MI))
    return false;

  // Early exit if the offset is not possible to match. (6 bits of positive
  // range, plus allow an extra one in case we find a later insn that matches
  // with Offset-1)
  bool IsUnscaled = TII->isUnscaledLdSt(MI);
  int Offset = getLdStOffsetOp(MI).getImm();
  int OffsetStride = IsUnscaled ? TII->getMemScale(MI) : 1;
  // Allow one more for offset.
  if (Offset > 0)
    Offset -= OffsetStride;
  if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
    return false;
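  // Numeric sketch (values assumed): a scaled "ldr x0, [x2, #512]" has
  // Offset = 64, one past the +63 limit of the pair's 7-bit immediate, but a
  // later "ldr x1, [x2, #504]" would let the pair be based at offset 63, so
  // we test Offset - 1 rather than rejecting outright.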
  // Look ahead up to LdStLimit instructions for a pairable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator Paired =
      findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false);
  if (Paired != E) {
    ++NumPairCreated;
    if (TII->isUnscaledLdSt(MI))
      ++NumUnscaledPairCreated;
    // Keeping the iterator straight is a pain, so we let the merge routine
    // tell us what the next instruction is after it's done mucking about.
    auto Prev = std::prev(MBBI);
    MBBI = mergePairedInsns(MBBI, Paired, Flags);
    // Collect liveness info for instructions between Prev and the new
    // position MBBI.
    for (auto I = std::next(Prev); I != MBBI; I++)
      updateDefinedRegisters(*I, DefinedInBB, TRI);

    return true;
  }
  return false;
}
bool AArch64LoadStoreOpt::tryToMergeLdStUpdate(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();
  MachineBasicBlock::iterator Update;

  // Look forward to try to form a post-index instruction. For example,
  //   ldr x0, [x20]
  //   add x20, x20, #32
  // merged into:
  //   ldr x0, [x20], #32
  Update = findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
  if (Update != E) {
    // Merge the update into the ld/st.
    MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
    return true;
  }

  // Don't know how to handle unscaled pre/post-index versions below, so bail.
  if (TII->isUnscaledLdSt(MI.getOpcode()))
    return false;

  // Look back to try to find a pre-index instruction. For example,
  //   add x0, x0, #8
  //   ldr x1, [x0]
  // merged into:
  //   ldr x1, [x0, #8]!
  Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
  if (Update != E) {
    // Merge the update into the ld/st.
    MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
    return true;
  }

  // The immediate in the load/store is scaled by the size of the memory
  // operation. The immediate in the add we're looking for, however, is not,
  // so adjust here.
  int UnscaledOffset = getLdStOffsetOp(MI).getImm() * TII->getMemScale(MI);

  // Look forward to try to find a pre-index instruction. For example,
  //   ldr x1, [x0, #64]
  //   add x0, x0, #64
  // merged into:
  //   ldr x1, [x0, #64]!
  Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
  if (Update != E) {
    // Merge the update into the ld/st.
    MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
    return true;
  }

  return false;
}
bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                        bool EnableNarrowZeroStOpt) {
  bool Modified = false;
  // Four transformations to do here:
  // 1) Find loads that directly read from stores and promote them by
  //    replacing with mov instructions. If the store is wider than the load,
  //    the load will be replaced with a bitfield extract.
  //      e.g.,
  //        str w1, [x0, #4]
  //        ldrh w2, [x0, #6]
  //        ; becomes
  //        str w1, [x0, #4]
  //        lsr w2, w1, #16
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    if (isPromotableLoadFromStore(*MBBI) && tryToPromoteLoadFromStore(MBBI))
      Modified = true;
    else
      ++MBBI;
  }
  // 2) Merge adjacent zero stores into a wider store.
  //      e.g.,
  //        strh wzr, [x0]
  //        strh wzr, [x0, #2]
  //        ; becomes
  //        str wzr, [x0]
  //      e.g.,
  //        str wzr, [x0]
  //        str wzr, [x0, #4]
  //        ; becomes
  //        str xzr, [x0]
  if (EnableNarrowZeroStOpt)
    for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
         MBBI != E;) {
      if (isPromotableZeroStoreInst(*MBBI) && tryToMergeZeroStInst(MBBI))
        Modified = true;
      else
        ++MBBI;
    }
  // 3) Find loads and stores that can be merged into a single load or store
  //    pair instruction.
  //      e.g.,
  //        ldr x0, [x2]
  //        ldr x1, [x2, #8]
  //        ; becomes
  //        ldp x0, x1, [x2]
  if (MBB.getParent()->getRegInfo().tracksLiveness()) {
    DefinedInBB.clear();
    DefinedInBB.addLiveIns(MBB);
  }

  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    // Track currently live registers up to this point, to help with
    // searching for a rename register on demand.
    updateDefinedRegisters(*MBBI, DefinedInBB, TRI);
    if (TII->isPairableLdStInst(*MBBI) && tryToPairLdStInst(MBBI))
      Modified = true;
    else
      ++MBBI;
  }
  // 4) Find base register updates that can be merged into the load or store
  //    as a base-reg writeback.
  //      e.g.,
  //        ldr x0, [x2]
  //        add x2, x2, #4
  //        ; becomes
  //        ldr x0, [x2], #4
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    if (isMergeableLdStUpdate(*MBBI) && tryToMergeLdStUpdate(MBBI))
      Modified = true;
    else
      ++MBBI;
  }

  return Modified;
}
bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(Fn.getFunction()))
    return false;

  Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
  TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
  TRI = Subtarget->getRegisterInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  // Resize the modified and used register unit trackers. We do this once
  // per function and then clear the register units each time we optimize a
  // load or store.
  ModifiedRegUnits.init(*TRI);
  UsedRegUnits.init(*TRI);
  DefinedInBB.init(*TRI);

  bool Modified = false;
  bool enableNarrowZeroStOpt = !Subtarget->requiresStrictAlign();
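  // Narrow zero-store merging can turn two naturally aligned stores into one
  // wider store with only the narrower alignment guarantee (e.g., in a
  // sketch, two "strh wzr" stores at offsets #0 and #2 become a "str wzr"
  // that may be only 2-byte aligned), so it is disabled when the target
  // requires strict alignment.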
  for (auto &MBB : Fn) {
    auto M = optimizeBlock(MBB, enableNarrowZeroStOpt);
    Modified |= M;
  }

  return Modified;
}
// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep loads
// and stores near one another? Note: The pre-RA instruction scheduler already
// has hooks to try and schedule pairable loads/stores together to improve
// pairing opportunities. Thus, a pre-RA pairing pass may not be worth the
// effort.

// FIXME: When pairing store instructions it's very possible for this pass to
// hoist a store with a KILL marker above another use (without a KILL marker).
// The resulting IR is invalid, but nothing uses the KILL markers after this
// pass, so it's never caused a problem in practice.
/// createAArch64LoadStoreOptimizationPass - returns an instance of the
/// load / store optimization pass.
FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
  return new AArch64LoadStoreOpt();
}