//===- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pairs generated from unscaled instructions");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

// The LdStLimit limits how far we search for load/store pairs.
static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

// The UpdateLimit limits how far we search for update instructions when we form
// pre-/post-index instructions.
static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
                                     cl::Hidden);

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

using LdStPairFlags = struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward = false;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx = -1;

  LdStPairFlags() = default;

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }
};

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;

  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  AliasAnalysis *AA;
  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which register units have been modified and used.
  LiveRegUnits ModifiedRegUnits, UsedRegUnits;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit,
                                               bool FindNarrowMerge);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two narrow zero stores indicated into a single wider store
  // instruction.
  MachineBasicBlock::iterator
  mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                        MachineBasicBlock::iterator MergeMI,
                        const LdStPairFlags &Flags);

  // Merge the two instructions indicated into a single pair-wise instruction.
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote the load that reads directly from the address stored to.
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset, unsigned Limit);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge zero store instructions.
  bool tryToMergeZeroStInst(MachineBasicBlock::iterator &MBBI);

  // Find and pair ldr/str instructions.
  bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  // Find and merge base register updates before or after a ld/st instruction.
  bool tryToMergeLdStUpdate(MachineBasicBlock::iterator &MBBI);

  bool optimizeBlock(MachineBasicBlock &MBB, bool EnableNarrowZeroStOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

  StringRef getPassName() const override { return AARCH64_LOAD_STORE_OPT_NAME; }
};

char AArch64LoadStoreOpt::ID = 0;

} // end anonymous namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}

// Scaling factor for unscaled load or store.
static int getMemScale(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown scale!");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
    return 1;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return 2;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::STPSi:
  case AArch64::STPWi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDPDi:
  case AArch64::LDPXi:
  case AArch64::STPDi:
  case AArch64::STPXi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
    return 16;
  }
}

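// For example (illustrative): for the scaled LDRWui form the immediate counts
// 4-byte words, so
//   int ByteOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);
// turns an immediate of 3 into byte offset 12, while the unscaled LDURWi form
// already encodes the byte offset directly.
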
static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return std::numeric_limits<unsigned>::max();
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  }
}

static unsigned getMatchingWideOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no wide equivalent!");
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::STURWi:
    return AArch64::STURXi;
  case AArch64::STRWui:
    return AArch64::STRXui;
  }
}

static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  }
}

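// For example (illustrative): two adjacent scaled X-register loads
//   ldr x0, [sp, #16]
//   ldr x1, [sp, #24]
// both map to LDPXi here and can therefore be rewritten as
//   ldp x0, x1, [sp, #16]
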
static bool isMatchingStore(MachineInstr &LoadInst,
                            MachineInstr &StoreInst) {
  unsigned LdOpc = LoadInst.getOpcode();
  unsigned StOpc = StoreInst.getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}

static unsigned getPreIndexedOpcode(unsigned Opc) {
  // FIXME: We don't currently support creating pre-indexed loads/stores when
  // the load or store is the unscaled version. If we decide to perform such an
  // optimization in the future the cases for the unscaled loads/stores will
  // need to be added here.
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  }
}

static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STRSpost;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STRDpost;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STRWpost;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  }
}

static bool isPairedLdSt(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    return true;
  }
}

static const MachineOperand &getLdStRegOp(const MachineInstr &MI,
                                          unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI.getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr &MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI.getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI.getOperand(Idx);
}

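// These accessors assume the operand layout of the instructions handled by
// this pass: a single load/store is (Rt, base, offset) while a paired one is
// (Rt, Rt2, base, offset). For example (illustrative):
//   LDRXui %x0, %sp, 2        ; reg idx 0, base idx 1, offset idx 2
//   LDPXi  %x0, %x1, %sp, 2   ; reg idx 0/1, base idx 2, offset idx 3
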
static bool isLdOffsetInRangeOfSt(MachineInstr &LoadInst,
                                  MachineInstr &StoreInst,
                                  const AArch64InstrInfo *TII) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = getMemScale(LoadInst);
  int StoreSize = getMemScale(StoreInst);
  int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
                             ? getLdStOffsetOp(StoreInst).getImm()
                             : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}

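// For example (illustrative): the 8-byte store str x1, [x0] covers bytes
// [0, 8), so the 2-byte load ldrh w2, [x0, #2] (unscaled byte offset 2)
// passes the check: 0 <= 2 and 2 + 2 <= 0 + 8.
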
static bool isPromotableZeroStoreInst(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AArch64::STRWui || Opc == AArch64::STURWi ||
          isNarrowStore(Opc)) &&
         getLdStRegOp(MI).getReg() == AArch64::WZR;
}

static bool isPromotableLoadFromStore(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  // Scaled instructions.
  case AArch64::LDRBBui:
  case AArch64::LDRHHui:
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  // Unscaled instructions.
  case AArch64::LDURBBi:
  case AArch64::LDURHHi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
    return true;
  }
}

static bool isMergeableLdStUpdate(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
  default:
    return false;
  // Scaled instructions.
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::STRHHui:
  case AArch64::STRBBui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui:
  case AArch64::LDRHHui:
  case AArch64::LDRBBui:
  // Unscaled instructions.
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  // Paired instructions.
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    // Make sure this is a reg+imm (as opposed to an address reloc).
    if (!getLdStOffsetOp(MI).isImm())
      return false;

    return true;
  }
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                                           MachineBasicBlock::iterator MergeMI,
                                           const LdStPairFlags &Flags) {
  assert(isPromotableZeroStoreInst(*I) && isPromotableZeroStoreInst(*MergeMI) &&
         "Expected promotable zero stores.");

  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == MergeMI)
    ++NextI;

  unsigned Opc = I->getOpcode();
  bool IsScaled = !TII->isUnscaledLdSt(Opc);
  int OffsetStride = IsScaled ? 1 : getMemScale(*I);

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? MergeMI : I;
  // MergeForward also determines which instruction we copy the base register
  // operand from, so that the resulting flags are compatible with the input
  // code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(*MergeMI) : getLdStBaseOp(*I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI;
  if (getLdStOffsetOp(*I).getImm() ==
      getLdStOffsetOp(*MergeMI).getImm() + OffsetStride)
    RtMI = &*MergeMI;
  else
    RtMI = &*I;

  int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
  // Change the scaled offset from small to large type.
  if (IsScaled) {
    assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
    OffsetImm /= 2;
  }

  // Construct the new instruction.
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MachineInstrBuilder MIB;
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
            .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
            .add(BaseRegOp)
            .addImm(OffsetImm)
            .cloneMergedMemRefs({&*I, &*MergeMI})
            .setMIFlags(I->mergeFlagsWith(*MergeMI));
  (void)MIB;

  LLVM_DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n    ");
  LLVM_DEBUG(I->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(MergeMI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
  LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  LLVM_DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  I->eraseFromParent();
  MergeMI->eraseFromParent();
  return NextI;
}

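// For example (illustrative): two scaled halfword zero stores
//   strh wzr, [x0, #4]   ; STRHHui, scaled immediate 2
//   strh wzr, [x0, #6]   ; STRHHui, scaled immediate 3
// merge into a word store whose scaled immediate is halved:
//   str wzr, [x0, #4]    ; STRWui, scaled immediate 1
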
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = TII->isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? getMemScale(*I) : 1;

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // MergeForward also determines which instruction we copy the base register
  // operand from, so that the resulting flags are compatible with the input
  // code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(*Paired) : getLdStBaseOp(*I);

  int Offset = getLdStOffsetOp(*I).getImm();
  int PairedOffset = getLdStOffsetOp(*Paired).getImm();
  bool PairedIsUnscaled = TII->isUnscaledLdSt(Paired->getOpcode());
  if (IsUnscaled != PairedIsUnscaled) {
    // We're trying to pair instructions that differ in how they are scaled. If
    // I is scaled then scale the offset of Paired accordingly. Otherwise, do
    // the opposite (i.e., make Paired's offset unscaled).
    int MemSize = getMemScale(*Paired);
    if (PairedIsUnscaled) {
      // If the unscaled offset isn't a multiple of the MemSize, we can't
      // pair the operations together.
      assert(!(PairedOffset % getMemScale(*Paired)) &&
             "Offset should be a multiple of the stride!");
      PairedOffset /= MemSize;
    } else {
      PairedOffset *= MemSize;
    }
  }

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (Offset == PairedOffset + OffsetStride) {
    RtMI = &*Paired;
    Rt2MI = &*I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = &*I;
    Rt2MI = &*Paired;
  }
  int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
  // Scale the immediate offset, if necessary.
  if (TII->isUnscaledLdSt(RtMI->getOpcode())) {
    assert(!(OffsetImm % getMemScale(*RtMI)) &&
           "Unscaled offset cannot be scaled.");
    OffsetImm /= getMemScale(*RtMI);
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MachineOperand RegOp0 = getLdStRegOp(*RtMI);
  MachineOperand RegOp1 = getLdStRegOp(*Rt2MI);
  // Kill flags may become invalid when moving stores for pairing.
  if (RegOp0.isUse()) {
    if (!MergeForward) {
      // Clear kill flags on store if moving upwards. Example:
      //   STRWui %w0, ...
      //   USE %w1
      //   STRWui kill %w1  ; need to clear kill flag when moving STRWui upwards
      RegOp0.setIsKill(false);
      RegOp1.setIsKill(false);
    } else {
      // Clear kill flags of the first stores register. Example:
      //   STRWui %w1, ...
      //   USE kill %w1     ; need to clear kill flag when moving STRWui downwards
      //   STRW %w0
      unsigned Reg = getLdStRegOp(*I).getReg();
      for (MachineInstr &MI : make_range(std::next(I), Paired))
        MI.clearRegisterKills(Reg, TRI);
    }
  }
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingPairOpcode(Opc)))
            .add(RegOp0)
            .add(RegOp1)
            .add(BaseRegOp)
            .addImm(OffsetImm)
            .cloneMergedMemRefs({&*I, &*Paired})
            .setMIFlags(I->mergeFlagsWith(*Paired));
  (void)MIB;

  LLVM_DEBUG(
      dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  LLVM_DEBUG(I->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(Paired->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %w1 = KILL %w1, implicit-def %x1
    // %x1 = SBFMXri killed %x1, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // SEXT load.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    LLVM_DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    LLVM_DEBUG(dbgs() << "  Extend operand:\n    ");
    LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
  } else {
    LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  }
  LLVM_DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}

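// For example (illustrative): pairing a sign-extending load with a plain
// load,
//   ldr   w1, [sp]
//   ldrsw x0, [sp, #4]
// first forms a plain W-register pair and then recreates the extension:
//   ldp w1, w0, [sp]
//   %w0 = KILL %w0, implicit-def %x0
//   %x0 = SBFMXri killed %x0, 0, 31
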
MachineBasicBlock::iterator
AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                                          MachineBasicBlock::iterator StoreI) {
  MachineBasicBlock::iterator NextI = LoadI;
  ++NextI;

  int LoadSize = getMemScale(*LoadI);
  int StoreSize = getMemScale(*StoreI);
  unsigned LdRt = getLdStRegOp(*LoadI).getReg();
  const MachineOperand &StMO = getLdStRegOp(*StoreI);
  unsigned StRt = getLdStRegOp(*StoreI).getReg();
  bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);

  assert((IsStoreXReg ||
          TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
         "Unexpected RegClass");

  MachineInstr *BitExtMI;
  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load, if the destination register of the load is the same
    // register as the stored value.
    if (StRt == LdRt && LoadSize == 8) {
      for (MachineInstr &MI : make_range(StoreI->getIterator(),
                                         LoadI->getIterator())) {
        if (MI.killsRegister(StRt, TRI)) {
          MI.clearRegisterKills(StRt, TRI);
          break;
        }
      }
      LLVM_DEBUG(dbgs() << "Remove load instruction:\n    ");
      LLVM_DEBUG(LoadI->print(dbgs()));
      LLVM_DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store are the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .add(StMO)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .setMIFlags(LoadI->getFlags());
  } else {
    // FIXME: Currently we disable this transformation in big-endian targets as
    // performance and correctness are verified only in little-endian.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = TII->isUnscaledLdSt(*LoadI);
    assert(IsUnscaled == TII->isUnscaledLdSt(*StoreI) &&
           "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset = IsUnscaled
                               ? getLdStOffsetOp(*LoadI).getImm()
                               : getLdStOffsetOp(*LoadI).getImm() * LoadSize;
    int UnscaledStOffset = IsUnscaled
                               ? getLdStOffsetOp(*StoreI).getImm()
                               : getLdStOffsetOp(*StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    unsigned DestReg = IsStoreXReg
                           ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
                                                      &AArch64::GPR64RegClass)
                           : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    Imms = Immr + Width - 1;
    if (UnscaledLdOffset == UnscaledStOffset) {
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
                                | ((Immr) << 6)               // immr
                                | ((Imms) << 0);              // imms

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .add(StMO)
              .addImm(AndMaskEncoded)
              .setMIFlags(LoadI->getFlags());
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .add(StMO)
              .addImm(Immr)
              .addImm(Imms)
              .setMIFlags(LoadI->getFlags());
    }
  }

  // Clear kill flags between store and load.
  for (MachineInstr &MI : make_range(StoreI->getIterator(),
                                     BitExtMI->getIterator()))
    if (MI.killsRegister(StRt, TRI)) {
      MI.clearRegisterKills(StRt, TRI);
      break;
    }

  LLVM_DEBUG(dbgs() << "Promoting load by replacing :\n    ");
  LLVM_DEBUG(StoreI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(LoadI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instructions:\n    ");
  LLVM_DEBUG(StoreI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG((BitExtMI)->print(dbgs()));
  LLVM_DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  LoadI->eraseFromParent();
  return NextI;
}

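// For example (illustrative): with str w1, [x0, #4] and ldrh w2, [x0, #6],
// UnscaledLdOffset - UnscaledStOffset == 2 and Width == 16, giving Immr == 16
// and Imms == 31, so the UBFMWri above is equivalent to lsr w2, w1, #16.
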
static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled) {
    // If the byte-offset isn't a multiple of the stride, there's no point
    // trying to match it.
    if (Offset % OffsetStride)
      return false;
    Offset /= OffsetStride;
  }
  return Offset <= 63 && Offset >= -64;
}

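// For example (illustrative): an X-register pair has an 8-byte stride, so the
// signed 7-bit element offset spans byte offsets -512 to 504; an unscaled
// byte offset of 12 is rejected outright since it isn't a stride multiple.
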
// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}

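// For example (illustrative): alignTo(5, 4) == 8 and alignTo(-5, 4) == -4,
// i.e. values are rounded up to the next multiple of PowOf2.
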
static bool mayAlias(MachineInstr &MIa, MachineInstr &MIb,
                     AliasAnalysis *AA) {
  // One of the instructions must modify memory.
  if (!MIa.mayStore() && !MIb.mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa.mayLoadOrStore() && !MIb.mayLoadOrStore())
    return false;

  return MIa.mayAlias(AA, MIb, /*UseTBAA*/ false);
}

static bool mayAlias(MachineInstr &MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     AliasAnalysis *AA) {
  for (MachineInstr *MIb : MemInsns)
    if (mayAlias(MIa, *MIb, AA))
      return true;

  return false;
}

bool AArch64LoadStoreOpt::findMatchingStore(
    MachineBasicBlock::iterator I, unsigned Limit,
    MachineBasicBlock::iterator &StoreI) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr &LoadMI = *I;
  unsigned BaseReg = getLdStBaseOp(LoadMI).getReg();

  // If the load is the first instruction in the block, there's obviously
  // not any matching store.
  if (MBBI == B)
    return false;

  // Track which register units have been modified and used between the first
  // insn and the second insn.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();

  unsigned Count = 0;
  do {
    --MBBI;
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If the load instruction reads directly from the address to which the
    // store instruction writes and the stored value is not modified, we can
    // promote the load. Since we do not handle stores with pre-/post-index,
    // it's unnecessary to check if BaseReg is modified by the store itself.
    if (MI.mayStore() && isMatchingStore(LoadMI, MI) &&
        BaseReg == getLdStBaseOp(MI).getReg() &&
        isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
        ModifiedRegUnits.available(getLdStRegOp(MI).getReg())) {
      StoreI = MBBI;
      return true;
    }

    if (MI.isCall())
      return false;

    // Update modified / uses register units.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (!ModifiedRegUnits.available(BaseReg))
      return false;

    // If we encounter a store aliased with the load, return early.
    if (MI.mayStore() && mayAlias(LoadMI, MI, AA))
      return false;
  } while (MBBI != B && Count < Limit);
  return false;
}

// Returns true if FirstMI and MI are candidates for merging or pairing.
// Otherwise, returns false.
static bool areCandidatesToMergeOrPair(MachineInstr &FirstMI, MachineInstr &MI,
                                       LdStPairFlags &Flags,
                                       const AArch64InstrInfo *TII) {
  // If this is volatile or if pairing is suppressed, not a candidate.
  if (MI.hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
    return false;

  // We should have already checked FirstMI for pair suppression and volatility.
  assert(!FirstMI.hasOrderedMemoryRef() &&
         !TII->isLdStPairSuppressed(FirstMI) &&
         "FirstMI shouldn't get here if either of these checks are true.");

  unsigned OpcA = FirstMI.getOpcode();
  unsigned OpcB = MI.getOpcode();

  // Opcodes match: nothing more to check.
  if (OpcA == OpcB)
    return true;

  // Try to match a sign-extended load/store with a zero-extended load/store.
  bool IsValidLdStrOpc, PairIsValidLdStrOpc;
  unsigned NonSExtOpc = getMatchingNonSExtOpcode(OpcA, &IsValidLdStrOpc);
  assert(IsValidLdStrOpc &&
         "Given Opc should be a Load or Store with an immediate");
  // OpcA will be the first instruction in the pair.
  if (NonSExtOpc == getMatchingNonSExtOpcode(OpcB, &PairIsValidLdStrOpc)) {
    Flags.setSExtIdx(NonSExtOpc == (unsigned)OpcA ? 1 : 0);
    return true;
  }

  // If the second instruction isn't even a mergable/pairable load/store, bail
  // out.
  if (!PairIsValidLdStrOpc)
    return false;

  // FIXME: We don't support merging narrow stores with mixed scaled/unscaled
  // offsets.
  if (isNarrowStore(OpcA) || isNarrowStore(OpcB))
    return false;

  // Try to match an unscaled load/store with a scaled load/store.
  return TII->isUnscaledLdSt(OpcA) != TII->isUnscaledLdSt(OpcB) &&
         getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);

  // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
}

/// Scan the instructions looking for a load/store that can be combined with the
/// current instruction into a wider equivalent or a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                      LdStPairFlags &Flags, unsigned Limit,
                                      bool FindNarrowMerge) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr &FirstMI = *I;
  ++MBBI;

  bool MayLoad = FirstMI.mayLoad();
  bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
  unsigned Reg = getLdStRegOp(FirstMI).getReg();
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
  int Offset = getLdStOffsetOp(FirstMI).getImm();
  int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
  bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI);

  // Track which register units have been modified and used between the first
  // insn (inclusive) and the second insn.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();

  // Remember any instructions that read/write memory between FirstMI and MI.
  SmallVector<MachineInstr *, 4> MemInsns;

  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    Flags.setSExtIdx(-1);
    if (areCandidatesToMergeOrPair(FirstMI, MI, Flags, TII) &&
        getLdStOffsetOp(MI).isImm()) {
      assert(MI.mayLoadOrStore() && "Expected memory operation.");
      // If we've found another instruction with the same opcode, check to see
      // if the base and offset are compatible with our starting instruction.
      // These instructions all have scaled immediate operands, so we just
      // check for +1/-1. Make sure to check the new instruction offset is
      // actually an immediate and not a symbolic reference destined for
      // a relocation.
      unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
      int MIOffset = getLdStOffsetOp(MI).getImm();
      bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
      if (IsUnscaled != MIIsUnscaled) {
        // We're trying to pair instructions that differ in how they are scaled.
        // If FirstMI is scaled then scale the offset of MI accordingly.
        // Otherwise, do the opposite (i.e., make MI's offset unscaled).
        int MemSize = getMemScale(MI);
        if (MIIsUnscaled) {
          // If the unscaled offset isn't a multiple of the MemSize, we can't
          // pair the operations together: bail and keep looking.
          if (MIOffset % MemSize) {
            LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
                                              UsedRegUnits, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
          MIOffset /= MemSize;
        } else {
          MIOffset *= MemSize;
        }
      }

      if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
                                   (Offset + OffsetStride == MIOffset))) {
        int MinOffset = Offset < MIOffset ? Offset : MIOffset;
        if (FindNarrowMerge) {
          // If the alignment requirements of the scaled wide load/store
          // instruction can't express the offset of the scaled narrow input,
          // bail and keep looking. For promotable zero stores, allow only when
          // the stored value is the same (i.e., WZR).
          if ((!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) ||
              (IsPromotableZeroStore && Reg != getLdStRegOp(MI).getReg())) {
            LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
                                              UsedRegUnits, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
        } else {
          // Pairwise instructions have a 7-bit signed offset field. Single
          // insns have a 12-bit unsigned offset field. If the resultant
          // immediate offset of merging these instructions is out of range for
          // a pairwise instruction, bail and keep looking.
          if (!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
            LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
                                              UsedRegUnits, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
          // If the alignment requirements of the paired (scaled) instruction
          // can't express the offset of the unscaled input, bail and keep
          // looking.
          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
            LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
                                              UsedRegUnits, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
        }
        // If the destination register of the loads is the same register, bail
        // and keep looking. A load-pair instruction with both destination
        // registers the same is UNPREDICTABLE and will result in an exception.
        if (MayLoad && Reg == getLdStRegOp(MI).getReg()) {
          LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
                                            TRI);
          MemInsns.push_back(&MI);
          continue;
        }

        // If the Rt of the second instruction was not modified or used between
        // the two instructions and none of the instructions between the second
        // and first alias with the second, we can combine the second into the
        // first.
        if (ModifiedRegUnits.available(getLdStRegOp(MI).getReg()) &&
            !(MI.mayLoad() &&
              !UsedRegUnits.available(getLdStRegOp(MI).getReg())) &&
            !mayAlias(MI, MemInsns, AA)) {
          Flags.setMergeForward(false);
          return MBBI;
        }

        // Likewise, if the Rt of the first instruction is not modified or used
        // between the two instructions and none of the instructions between the
        // first and the second alias with the first, we can combine the first
        // into the second.
        if (ModifiedRegUnits.available(getLdStRegOp(FirstMI).getReg()) &&
            !(MayLoad &&
              !UsedRegUnits.available(getLdStRegOp(FirstMI).getReg())) &&
            !mayAlias(FirstMI, MemInsns, AA)) {
          Flags.setMergeForward(true);
          return MBBI;
        }
        // Unable to combine these instructions due to interference in between.
        // Keep looking.
      }
    }

    // The instruction wasn't a matching load or store. Stop searching if we
    // encounter a call instruction that might modify memory.
    if (MI.isCall())
      return E;

    // Update modified / uses register units.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (!ModifiedRegUnits.available(BaseReg))
      return E;

    // Update list of instructions that read/write memory.
    if (MI.mayLoadOrStore())
      MemInsns.push_back(&MI);
  }
  return E;
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator Update,
                                     bool IsPreIdx) {
  assert((Update->getOpcode() == AArch64::ADDXri ||
          Update->getOpcode() == AArch64::SUBXri) &&
         "Unexpected base register update instruction to merge!");
  MachineBasicBlock::iterator NextI = I;
  // Return the instruction following the merged instruction, which is
  // the instruction following our unmerged load. Unless that's the add/sub
  // instruction we're merging, in which case it's the one after that.
  if (++NextI == Update)
    ++NextI;

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;

  unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                             : getPostIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB;
  if (!isPairedLdSt(*I)) {
    // Non-paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .add(getLdStRegOp(*Update))
              .add(getLdStRegOp(*I))
              .add(getLdStBaseOp(*I))
              .addImm(Value)
              .setMemRefs(I->memoperands())
              .setMIFlags(I->mergeFlagsWith(*Update));
  } else {
    // Paired instruction.
    int Scale = getMemScale(*I);
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .add(getLdStRegOp(*Update))
              .add(getLdStRegOp(*I, 0))
              .add(getLdStRegOp(*I, 1))
              .add(getLdStBaseOp(*I))
              .addImm(Value / Scale)
              .setMemRefs(I->memoperands())
              .setMIFlags(I->mergeFlagsWith(*Update));
  }
  (void)MIB;

  if (IsPreIdx) {
    ++NumPreFolded;
    LLVM_DEBUG(dbgs() << "Creating pre-indexed load/store.");
  } else {
    ++NumPostFolded;
    LLVM_DEBUG(dbgs() << "Creating post-indexed load/store.");
  }
  LLVM_DEBUG(dbgs() << "    Replacing instructions:\n    ");
  LLVM_DEBUG(I->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(Update->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
  LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  LLVM_DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}

bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
                                               MachineInstr &MI,
                                               unsigned BaseReg, int Offset) {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::SUBXri:
  case AArch64::ADDXri:
    // Make sure it's a vanilla immediate operand, not a relocation or
    // anything else we can't handle.
    if (!MI.getOperand(2).isImm())
      break;
    // Watch out for 1 << 12 shifted value.
    if (AArch64_AM::getShiftValue(MI.getOperand(3).getImm()))
      break;

    // The update instruction source and destination register must be the
    // same as the load/store base register.
    if (MI.getOperand(0).getReg() != BaseReg ||
        MI.getOperand(1).getReg() != BaseReg)
      break;

    bool IsPairedInsn = isPairedLdSt(MemMI);
    int UpdateOffset = MI.getOperand(2).getImm();
    if (MI.getOpcode() == AArch64::SUBXri)
      UpdateOffset = -UpdateOffset;

    // For non-paired load/store instructions, the immediate must fit in a
    // signed 9-bit integer.
    if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
      break;

    // For paired load/store instructions, the immediate must be a multiple of
    // the scaling factor. The scaled offset must also fit into a signed 7-bit
    // integer.
    if (IsPairedInsn) {
      int Scale = getMemScale(MemMI);
      if (UpdateOffset % Scale != 0)
        break;

      int ScaledOffset = UpdateOffset / Scale;
      if (ScaledOffset > 63 || ScaledOffset < -64)
        break;
    }

    // If we have a non-zero Offset, we check that it matches the amount
    // we're adding to the register.
    if (!Offset || Offset == UpdateOffset)
      return true;
    break;
  }
  return false;
}

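// For example (illustrative): for ldp x1, x2, [x0] (scale 8), the update
// add x0, x0, #16 matches: 16 is a multiple of 8 and the scaled offset 2 fits
// in a signed 7-bit field, so the pair can become ldp x1, x2, [x0], #16.
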
MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
    MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr &MemMI = *I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);

  // Scan forward looking for post-index opportunities. Updating instructions
  // can't be formed if the memory instruction doesn't have the offset we're
  // looking for.
  if (MIUnscaledOffset != UnscaledOffset)
    return E;

  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which register units have been modified and used between the first
  // insn (inclusive) and the second insn.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();
  ++MBBI;
  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(*I, MI, BaseReg, UnscaledOffset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (!ModifiedRegUnits.available(BaseReg) ||
        !UsedRegUnits.available(BaseReg))
      return E;
  }
  return E;
}

MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
    MachineBasicBlock::iterator I, unsigned Limit) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr &MemMI = *I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int Offset = getLdStOffsetOp(MemMI).getImm();

  // If the load/store is the first instruction in the block, there's obviously
  // not any matching update. Ditto if the memory offset isn't zero.
  if (MBBI == B || Offset != 0)
    return E;
  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which register units have been modified and used between the first
  // insn (inclusive) and the second insn.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();
  unsigned Count = 0;
  do {
    --MBBI;
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(*I, MI, BaseReg, Offset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (!ModifiedRegUnits.available(BaseReg) ||
        !UsedRegUnits.available(BaseReg))
      return E;
  } while (MBBI != B && Count < Limit);
  return E;
}

bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  // If this is a volatile load, don't mess with it.
  if (MI.hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm.
  // FIXME: It is possible to extend it to handle reg+reg cases.
  if (!getLdStOffsetOp(MI).isImm())
    return false;

  // Look backward up to LdStLimit instructions.
  MachineBasicBlock::iterator StoreI;
  if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
    ++NumLoadsFromStoresPromoted;
    // Promote the load. Keeping the iterator straight is a
    // pain, so we let the merge routine tell us what the next instruction
    // is after it's done mucking about.
    MBBI = promoteLoadFromStore(MBBI, StoreI);
    return true;
  }
  return false;
}

// Merge adjacent zero stores into a wider store.
bool AArch64LoadStoreOpt::tryToMergeZeroStInst(
    MachineBasicBlock::iterator &MBBI) {
  assert(isPromotableZeroStoreInst(*MBBI) && "Expected narrow store.");
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();

  if (!TII->isCandidateToMergeOrPair(MI))
    return false;

  // Look ahead up to LdStLimit instructions for a mergable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator MergeMI =
      findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ true);
  if (MergeMI != E) {
    ++NumZeroStoresPromoted;

    // Keeping the iterator straight is a pain, so we let the merge routine tell
    // us what the next instruction is after it's done mucking about.
    MBBI = mergeNarrowZeroStores(MBBI, MergeMI, Flags);
    return true;
  }
  return false;
}

// Find loads and stores that can be merged into a single load or store pair
// instruction.
bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();

  if (!TII->isCandidateToMergeOrPair(MI))
    return false;

  // Early exit if the offset is not possible to match. (6 bits of positive
  // range, plus allow an extra one in case we find a later insn that matches
  // with Offset-1)
  bool IsUnscaled = TII->isUnscaledLdSt(MI);
  int Offset = getLdStOffsetOp(MI).getImm();
  int OffsetStride = IsUnscaled ? getMemScale(MI) : 1;
  // Allow one more for offset.
  if (Offset > 0)
    Offset -= OffsetStride;
  if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
    return false;

  // Look ahead up to LdStLimit instructions for a pairable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator Paired =
      findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false);
  if (Paired != E) {
    ++NumPairCreated;
    if (TII->isUnscaledLdSt(MI))
      ++NumUnscaledPairCreated;
    // Keeping the iterator straight is a pain, so we let the merge routine tell
    // us what the next instruction is after it's done mucking about.
    MBBI = mergePairedInsns(MBBI, Paired, Flags);
    return true;
  }
  return false;
}

bool AArch64LoadStoreOpt::tryToMergeLdStUpdate(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();
  MachineBasicBlock::iterator Update;

  // Look forward to try to form a post-index instruction. For example,
  // ldr x0, [x20]
  // add x20, x20, #32
  //   merged into:
  // ldr x0, [x20], #32
  Update = findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
  if (Update != E) {
    // Merge the update into the ld/st.
    MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
    return true;
  }

  // Don't know how to handle unscaled pre/post-index versions below, so bail.
  if (TII->isUnscaledLdSt(MI.getOpcode()))
    return false;

  // Look back to try to find a pre-index instruction. For example,
  // add x0, x0, #8
  // ldr x1, [x0]
  //   merged into:
  // ldr x1, [x0, #8]!
  Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
  if (Update != E) {
    // Merge the update into the ld/st.
    MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
    return true;
  }

  // The immediate in the load/store is scaled by the size of the memory
  // operation. The immediate in the add we're looking for,
  // however, is not, so adjust here.
  int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);

  // Look forward to try to find a pre-index instruction. For example,
  // ldr x1, [x0, #64]
  // add x0, x0, #64
  //   merged into:
  // ldr x1, [x0, #64]!
  Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
  if (Update != E) {
    // Merge the update into the ld/st.
    MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
    return true;
  }

  return false;
}

bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                        bool EnableNarrowZeroStOpt) {
  bool Modified = false;
  // Four transformations to do here:
  // 1) Find loads that directly read from stores and promote them by
  //    replacing with mov instructions. If the store is wider than the load,
  //    the load will be replaced with a bitfield extract.
  //      e.g.,
  //        str w1, [x0, #4]
  //        ldrh w2, [x0, #6]
  //        ; becomes
  //        str w1, [x0, #4]
  //        lsr w2, w1, #16
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    if (isPromotableLoadFromStore(*MBBI) && tryToPromoteLoadFromStore(MBBI))
      Modified = true;
    else
      ++MBBI;
  }
  // 2) Merge adjacent zero stores into a wider store.
  //      e.g.,
  //        strh wzr, [x0]
  //        strh wzr, [x0, #2]
  //        ; becomes
  //        str wzr, [x0]
  //      e.g.,
  //        str wzr, [x0]
  //        str wzr, [x0, #4]
  //        ; becomes
  //        str xzr, [x0]
  if (EnableNarrowZeroStOpt)
    for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
         MBBI != E;) {
      if (isPromotableZeroStoreInst(*MBBI) && tryToMergeZeroStInst(MBBI))
        Modified = true;
      else
        ++MBBI;
    }
  // 3) Find loads and stores that can be merged into a single load or store
  //    pair instruction.
  //      e.g.,
  //        ldr x0, [x2]
  //        ldr x1, [x2, #8]
  //        ; becomes
  //        ldp x0, x1, [x2]
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    if (TII->isPairableLdStInst(*MBBI) && tryToPairLdStInst(MBBI))
      Modified = true;
    else
      ++MBBI;
  }
  // 4) Find base register updates that can be merged into the load or store
  //    as a base-reg writeback.
  //      e.g.,
  //        ldr x0, [x2]
  //        add x2, x2, #4
  //        ; becomes
  //        ldr x0, [x2], #4
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    if (isMergeableLdStUpdate(*MBBI) && tryToMergeLdStUpdate(MBBI))
      Modified = true;
    else
      ++MBBI;
  }

  return Modified;
}

bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(Fn.getFunction()))
    return false;

  Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
  TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
  TRI = Subtarget->getRegisterInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  // Resize the modified and used register unit trackers. We do this once
  // per function and then clear the register units each time we optimize a load
  // or store.
  ModifiedRegUnits.init(*TRI);
  UsedRegUnits.init(*TRI);

  bool Modified = false;
  bool enableNarrowZeroStOpt = !Subtarget->requiresStrictAlign();
  for (auto &MBB : Fn)
    Modified |= optimizeBlock(MBB, enableNarrowZeroStOpt);

  return Modified;
}

// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep loads and
// stores near one another? Note: The pre-RA instruction scheduler already has
// hooks to try and schedule pairable loads/stores together to improve pairing
// opportunities. Thus, a pre-RA pairing pass may not be worth the effort.

// FIXME: When pairing store instructions it's very possible for this pass to
// hoist a store with a KILL marker above another use (without a KILL marker).
// The resulting IR is invalid, but nothing uses the KILL markers after this
// pass, so it's never caused a problem in practice.

/// createAArch64LoadStoreOptimizationPass - returns an instance of the
/// load / store optimization pass.
FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
  return new AArch64LoadStoreOpt();
}