//=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//
#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>

using namespace llvm;
#define DEBUG_TYPE "aarch64-ldst-opt"

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store from unscaled generated");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

// The LdStLimit limits how far we search for load/store pairs.
static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

// The UpdateLimit limits how far we search for update instructions when we form
// pre-/post-index instructions.
static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
                                     cl::Hidden);

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

typedef struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward = false;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
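  // For example, if I is an LDRSWui and the matching instruction is an
  // LDRWui, SExtIdx is 0: once the two loads are combined into an ldp, the
  // first result still has to be sign-extended (see mergePairedInsns).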
  int SExtIdx = -1;

  LdStPairFlags() = default;

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }
} LdStPairFlags;

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;

  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  AliasAnalysis *AA;
  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which registers have been modified and used.
  BitVector ModifiedRegs, UsedRegs;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit,
                                               bool FindNarrowMerge);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two narrow zero stores indicated into a single wider store
  // instruction.
  MachineBasicBlock::iterator
  mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                        MachineBasicBlock::iterator MergeMI,
                        const LdStPairFlags &Flags);

  // Merge the two instructions indicated into a single pair-wise instruction.
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote the load that reads directly from the address stored to.
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset, unsigned Limit);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge zero store instructions.
  bool tryToMergeZeroStInst(MachineBasicBlock::iterator &MBBI);

  // Find and pair ldr/str instructions.
  bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  bool optimizeBlock(MachineBasicBlock &MBB, bool EnableNarrowZeroStOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

  StringRef getPassName() const override { return AARCH64_LOAD_STORE_OPT_NAME; }
};

char AArch64LoadStoreOpt::ID = 0;

} // end anonymous namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}

// Scaling factor for unscaled load or store.
static int getMemScale(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Opcode has unknown scale!");
  case AArch64::LDRBBui:
  case AArch64::LDURBBi:
  case AArch64::LDRSBWui:
  case AArch64::LDURSBWi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
    return 1;
  case AArch64::LDRHHui:
  case AArch64::LDURHHi:
  case AArch64::LDRSHWui:
  case AArch64::LDURSHWi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return 2;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::STPSi:
  case AArch64::STPWi:
    return 4;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDPDi:
  case AArch64::LDPXi:
  case AArch64::STPDi:
  case AArch64::STPXi:
    return 8;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::LDPQi:
  case AArch64::STPQi:
    return 16;
  }
}

static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return std::numeric_limits<unsigned>::max();
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  }
}
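
// Map a narrow store to the store of twice the width, e.g. STRHHui to
// STRWui; used when merging two adjacent zero stores into one wider store.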
static unsigned getMatchingWideOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no wide equivalent!");
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::STURWi:
    return AArch64::STURXi;
  case AArch64::STRWui:
    return AArch64::STRXui;
  }
}
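
// For example, two adjacent STRXui stores rewrite to a single STPXi; the
// scaled (STRXui) and unscaled (STURXi) forms map to the same paired opcode.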
static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  }
}

static bool isMatchingStore(MachineInstr &LoadInst,
                            MachineInstr &StoreInst) {
  unsigned LdOpc = LoadInst.getOpcode();
  unsigned StOpc = StoreInst.getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}
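
// For example, folding "add x0, x0, #8" backward into "ldr x1, [x0]" turns
// LDRXui into the pre-indexed LDRXpre: "ldr x1, [x0, #8]!".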
static unsigned getPreIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  }
}
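
// For example, folding "add x20, x20, #32" forward into "ldr x0, [x20]"
// turns LDRXui into the post-indexed LDRXpost: "ldr x0, [x20], #32".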
static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpost;
  case AArch64::STRDui:
    return AArch64::STRDpost;
  case AArch64::STRQui:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
    return AArch64::STRWpost;
  case AArch64::STRXui:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  }
}

static bool isPairedLdSt(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    return true;
  }
}

static const MachineOperand &getLdStRegOp(const MachineInstr &MI,
                                          unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI.getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr &MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI.getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI.getOperand(Idx);
}
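
// Returns whether the load's bytes lie entirely within the bytes written by
// the store, e.g. "str w1, [x0, #4]" (bytes 4..7) covers
// "ldrh w2, [x0, #6]" (bytes 6..7): 4 <= 6 and 6 + 2 <= 4 + 4.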
static bool isLdOffsetInRangeOfSt(MachineInstr &LoadInst,
                                  MachineInstr &StoreInst,
                                  const AArch64InstrInfo *TII) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = getMemScale(LoadInst);
  int StoreSize = getMemScale(StoreInst);
  int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
                             ? getLdStOffsetOp(StoreInst).getImm()
                             : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}

static bool isPromotableZeroStoreInst(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AArch64::STRWui || Opc == AArch64::STURWi ||
          isNarrowStore(Opc)) &&
         getLdStRegOp(MI).getReg() == AArch64::WZR;
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                                           MachineBasicBlock::iterator MergeMI,
                                           const LdStPairFlags &Flags) {
  assert(isPromotableZeroStoreInst(*I) && isPromotableZeroStoreInst(*MergeMI) &&
         "Expected promotable zero stores.");

  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == MergeMI)
    ++NextI;

  unsigned Opc = I->getOpcode();
  bool IsScaled = !TII->isUnscaledLdSt(Opc);
  int OffsetStride = IsScaled ? 1 : getMemScale(*I);

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? MergeMI : I;
  // Also based on MergeForward is from where we copy the base register operand
  // so we get the flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(*MergeMI) : getLdStBaseOp(*I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI;
  if (getLdStOffsetOp(*I).getImm() ==
      getLdStOffsetOp(*MergeMI).getImm() + OffsetStride)
    RtMI = &*MergeMI;
  else
    RtMI = &*I;

  int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
  // Change the scaled offset from small to large type.
  if (IsScaled) {
    assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
    OffsetImm /= 2;
  }

  // Construct the new instruction.
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MachineInstrBuilder MIB;
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
            .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
            .add(BaseRegOp)
            .addImm(OffsetImm)
            .setMemRefs(I->mergeMemRefsWith(*MergeMI));
  (void)MIB;

  DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(MergeMI->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  I->eraseFromParent();
  MergeMI->eraseFromParent();
  return NextI;
}
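
// Merge two adjacent loads or stores into a single paired instruction, e.g.
//   ldr x0, [x2]
//   ldr x1, [x2, #8]
// becomes "ldp x0, x1, [x2]".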
MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator NextI = I;
  ++NextI;
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way we merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    ++NextI;

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = TII->isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? getMemScale(*I) : 1;

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // Also based on MergeForward is from where we copy the base register operand
  // so we get the flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(*Paired) : getLdStBaseOp(*I);

  int Offset = getLdStOffsetOp(*I).getImm();
  int PairedOffset = getLdStOffsetOp(*Paired).getImm();
  bool PairedIsUnscaled = TII->isUnscaledLdSt(Paired->getOpcode());
  if (IsUnscaled != PairedIsUnscaled) {
    // We're trying to pair instructions that differ in how they are scaled. If
    // I is scaled then scale the offset of Paired accordingly. Otherwise, do
    // the opposite (i.e., make Paired's offset unscaled).
    int MemSize = getMemScale(*Paired);
    if (PairedIsUnscaled) {
      // If the unscaled offset isn't a multiple of the MemSize, we can't
      // pair the operations together.
      assert(!(PairedOffset % getMemScale(*Paired)) &&
             "Offset should be a multiple of the stride!");
      PairedOffset /= MemSize;
    } else {
      PairedOffset *= MemSize;
    }
  }

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (Offset == PairedOffset + OffsetStride) {
    RtMI = &*Paired;
    Rt2MI = &*I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = &*I;
    Rt2MI = &*Paired;
  }
  int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
  // Scale the immediate offset, if necessary.
  if (TII->isUnscaledLdSt(RtMI->getOpcode())) {
    assert(!(OffsetImm % getMemScale(*RtMI)) &&
           "Unscaled offset cannot be scaled.");
    OffsetImm /= getMemScale(*RtMI);
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MachineOperand RegOp0 = getLdStRegOp(*RtMI);
  MachineOperand RegOp1 = getLdStRegOp(*Rt2MI);
  // Kill flags may become invalid when moving stores for pairing.
  if (RegOp0.isUse()) {
    if (!MergeForward) {
      // Clear kill flags on store if moving upwards. Example:
      //   STRWui %w0, ...
      //   USE %w1
      //   STRWui kill %w1  ; need to clear kill flag when moving STRWui upwards
      RegOp0.setIsKill(false);
      RegOp1.setIsKill(false);
    } else {
      // Clear kill flags of the first stores register. Example:
      //   STRWui %w1, ...
      //   USE kill %w1   ; need to clear kill flag when moving STRWui downwards
      //   STRW %w0
      unsigned Reg = getLdStRegOp(*I).getReg();
      for (MachineInstr &MI : make_range(std::next(I), Paired))
        MI.clearRegisterKills(Reg, TRI);
    }
  }
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingPairOpcode(Opc)))
            .add(RegOp0)
            .add(RegOp1)
            .add(BaseRegOp)
            .addImm(OffsetImm)
            .setMemRefs(I->mergeMemRefsWith(*Paired));

  (void)MIB;

  DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Paired->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %W1<def> = KILL %W1, %X1<imp-def>
    // %X1<def> = SBFMXri %X1<kill>, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // SExt load.
    unsigned DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    DEBUG(dbgs() << "  Extend operand:\n    ");
    DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
  } else {
    DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  }
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();
  return NextI;
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                                          MachineBasicBlock::iterator StoreI) {
  MachineBasicBlock::iterator NextI = LoadI;
  ++NextI;

  int LoadSize = getMemScale(*LoadI);
  int StoreSize = getMemScale(*StoreI);
  unsigned LdRt = getLdStRegOp(*LoadI).getReg();
  const MachineOperand &StMO = getLdStRegOp(*StoreI);
  unsigned StRt = getLdStRegOp(*StoreI).getReg();
  bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);

  assert((IsStoreXReg ||
          TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
         "Unexpected RegClass");

  MachineInstr *BitExtMI;
  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load, if the destination register of the load is the same
    // register as the stored value.
    if (StRt == LdRt && LoadSize == 8) {
      for (MachineInstr &MI : make_range(StoreI->getIterator(),
                                         LoadI->getIterator())) {
        if (MI.killsRegister(StRt, TRI)) {
          MI.clearRegisterKills(StRt, TRI);
          break;
        }
      }
      DEBUG(dbgs() << "Remove load instruction:\n    ");
      DEBUG(LoadI->print(dbgs()));
      DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store are the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .add(StMO)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
  } else {
    // FIXME: Currently we disable this transformation in big-endian targets as
    // performance and correctness are verified only in little-endian.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = TII->isUnscaledLdSt(*LoadI);
    assert(IsUnscaled == TII->isUnscaledLdSt(*StoreI) &&
           "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset = IsUnscaled
                               ? getLdStOffsetOp(*LoadI).getImm()
                               : getLdStOffsetOp(*LoadI).getImm() * LoadSize;
    int UnscaledStOffset = IsUnscaled
                               ? getLdStOffsetOp(*StoreI).getImm()
                               : getLdStOffsetOp(*StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
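    // For example, promoting "ldrh w2, [x0, #6]" over "str w1, [x0, #4]"
    // gives Immr = 16 and Imms = 31: a UBFM extracting bits [31:16] of the
    // stored value, i.e. "lsr w2, w1, #16".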
    unsigned DestReg = IsStoreXReg
                           ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
                                                      &AArch64::GPR64RegClass)
                           : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    if (UnscaledLdOffset == UnscaledStOffset) {
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
                                | ((Immr) << 6)               // immr
                                | ((Imms) << 0);              // imms

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .add(StMO)
              .addImm(AndMaskEncoded);
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .add(StMO)
              .addImm(Immr)
              .addImm(Imms);
    }
  }

  // Clear kill flags between store and load.
  for (MachineInstr &MI : make_range(StoreI->getIterator(),
                                     BitExtMI->getIterator()))
    if (MI.killsRegister(StRt, TRI)) {
      MI.clearRegisterKills(StRt, TRI);
      break;
    }

  DEBUG(dbgs() << "Promoting load by replacing :\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(LoadI->print(dbgs()));
  DEBUG(dbgs() << "  with instructions:\n    ");
  DEBUG(StoreI->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG((BitExtMI)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  LoadI->eraseFromParent();
  return NextI;
}

/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
static void trackRegDefsUses(const MachineInstr &MI, BitVector &ModifiedRegs,
                             BitVector &UsedRegs,
                             const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isRegMask())
      ModifiedRegs.setBitsNotInMask(MO.getRegMask());

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (MO.isDef()) {
      // WZR/XZR are not modified even when used as a destination register.
      if (Reg != AArch64::WZR && Reg != AArch64::XZR)
        for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
          ModifiedRegs.set(*AI);
    } else {
      assert(MO.isUse() && "Reg operand not a def and not a use?!?");
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        UsedRegs.set(*AI);
    }
  }
}

static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
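  // A paired instruction's imm7 field is signed, so the element offset must
  // lie in [-64, 63], e.g. byte offsets of [-512, 504] for X-register pairs.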
  if (IsUnscaled) {
    // If the byte-offset isn't a multiple of the stride, there's no point
    // trying to match it.
    if (Offset % OffsetStride)
      return false;
    Offset /= OffsetStride;
  }
  return Offset <= 63 && Offset >= -64;
}

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
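// For example, alignTo(5, 4) == 8 and alignTo(8, 4) == 8.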
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}

static bool mayAlias(MachineInstr &MIa, MachineInstr &MIb,
                     AliasAnalysis *AA) {
  // One of the instructions must modify memory.
  if (!MIa.mayStore() && !MIb.mayStore())
    return false;

  // Both instructions must be memory operations.
  if (!MIa.mayLoadOrStore() && !MIb.mayLoadOrStore())
    return false;

  return MIa.mayAlias(AA, MIb, /*UseTBAA*/ false);
}

static bool mayAlias(MachineInstr &MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     AliasAnalysis *AA) {
  for (MachineInstr *MIb : MemInsns)
    if (mayAlias(MIa, *MIb, AA))
      return true;

  return false;
}

bool AArch64LoadStoreOpt::findMatchingStore(
    MachineBasicBlock::iterator I, unsigned Limit,
    MachineBasicBlock::iterator &StoreI) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr &LoadMI = *I;
  unsigned BaseReg = getLdStBaseOp(LoadMI).getReg();

  // If the load is the first instruction in the block, there's obviously
  // not any matching store.
  if (MBBI == B)
    return false;

  // Track which registers have been modified and used between the first insn
  // and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  unsigned Count = 0;
  do {
    --MBBI;
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If the load instruction reads directly from the address to which the
    // store instruction writes and the stored value is not modified, we can
    // promote the load. Since we do not handle stores with pre-/post-index,
    // it's unnecessary to check if BaseReg is modified by the store itself.
    if (MI.mayStore() && isMatchingStore(LoadMI, MI) &&
        BaseReg == getLdStBaseOp(MI).getReg() &&
        isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
        !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
      StoreI = MBBI;
      return true;
    }

    if (MI.isCall())
      return false;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return false;

    // If we encounter a store aliased with the load, return early.
    if (MI.mayStore() && mayAlias(LoadMI, MI, AA))
      return false;
  } while (MBBI != B && Count < Limit);
  return false;
}

// Returns true if FirstMI and MI are candidates for merging or pairing.
// Otherwise, returns false.
static bool areCandidatesToMergeOrPair(MachineInstr &FirstMI, MachineInstr &MI,
                                       LdStPairFlags &Flags,
                                       const AArch64InstrInfo *TII) {
  // If this is volatile or if pairing is suppressed, not a candidate.
  if (MI.hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
    return false;

  // We should have already checked FirstMI for pair suppression and volatility.
  assert(!FirstMI.hasOrderedMemoryRef() &&
         !TII->isLdStPairSuppressed(FirstMI) &&
         "FirstMI shouldn't get here if either of these checks are true.");

  unsigned OpcA = FirstMI.getOpcode();
  unsigned OpcB = MI.getOpcode();

  // Opcodes match: nothing more to check.
  if (OpcA == OpcB)
    return true;

  // Try to match a sign-extended load/store with a zero-extended load/store.
  bool IsValidLdStrOpc, PairIsValidLdStrOpc;
  unsigned NonSExtOpc = getMatchingNonSExtOpcode(OpcA, &IsValidLdStrOpc);
  assert(IsValidLdStrOpc &&
         "Given Opc should be a Load or Store with an immediate");
  // OpcA will be the first instruction in the pair.
  if (NonSExtOpc == getMatchingNonSExtOpcode(OpcB, &PairIsValidLdStrOpc)) {
    Flags.setSExtIdx(NonSExtOpc == (unsigned)OpcA ? 1 : 0);
    return true;
  }

  // If the second instruction isn't even a mergable/pairable load/store, bail
  // out.
  if (!PairIsValidLdStrOpc)
    return false;

  // FIXME: We don't support merging narrow stores with mixed scaled/unscaled
  // offsets.
  if (isNarrowStore(OpcA) || isNarrowStore(OpcB))
    return false;

  // Try to match an unscaled load/store with a scaled load/store.
  return TII->isUnscaledLdSt(OpcA) != TII->isUnscaledLdSt(OpcB) &&
         getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);

  // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
}

/// Scan the instructions looking for a load/store that can be combined with the
/// current instruction into a wider equivalent or a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
                                      LdStPairFlags &Flags, unsigned Limit,
                                      bool FindNarrowMerge) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr &FirstMI = *I;
  ++MBBI;

  bool MayLoad = FirstMI.mayLoad();
  bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
  unsigned Reg = getLdStRegOp(FirstMI).getReg();
  unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
  int Offset = getLdStOffsetOp(FirstMI).getImm();
  int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
  bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI);

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();

  // Remember any instructions that read/write memory between FirstMI and MI.
  SmallVector<MachineInstr *, 4> MemInsns;

  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    Flags.setSExtIdx(-1);
    if (areCandidatesToMergeOrPair(FirstMI, MI, Flags, TII) &&
        getLdStOffsetOp(MI).isImm()) {
      assert(MI.mayLoadOrStore() && "Expected memory operation.");
      // If we've found another instruction with the same opcode, check to see
      // if the base and offset are compatible with our starting instruction.
      // These instructions all have scaled immediate operands, so we just
      // check for +1/-1. Make sure to check the new instruction offset is
      // actually an immediate and not a symbolic reference destined for
      // a relocation.
      unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
      int MIOffset = getLdStOffsetOp(MI).getImm();
      bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
      if (IsUnscaled != MIIsUnscaled) {
        // We're trying to pair instructions that differ in how they are scaled.
        // If FirstMI is scaled then scale the offset of MI accordingly.
        // Otherwise, do the opposite (i.e., make MI's offset unscaled).
        int MemSize = getMemScale(MI);
        if (MIIsUnscaled) {
          // If the unscaled offset isn't a multiple of the MemSize, we can't
          // pair the operations together: bail and keep looking.
          if (MIOffset % MemSize) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
          MIOffset /= MemSize;
        } else {
          MIOffset *= MemSize;
        }
      }

      if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
                                   (Offset + OffsetStride == MIOffset))) {
        int MinOffset = Offset < MIOffset ? Offset : MIOffset;
        if (FindNarrowMerge) {
          // If the alignment requirements of the scaled wide load/store
          // instruction can't express the offset of the scaled narrow input,
          // bail and keep looking. For promotable zero stores, allow only when
          // the stored value is the same (i.e., WZR).
          if ((!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) ||
              (IsPromotableZeroStore && Reg != getLdStRegOp(MI).getReg())) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
        } else {
          // Pairwise instructions have a 7-bit signed offset field. Single
          // insns have a 12-bit unsigned offset field. If the resultant
          // immediate offset of merging these instructions is out of range for
          // a pairwise instruction, bail and keep looking.
          if (!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
          // If the alignment requirements of the paired (scaled) instruction
          // can't express the offset of the unscaled input, bail and keep
          // looking.
          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
            trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
        }
        // If the destination register of the loads is the same register, bail
        // and keep looking. A load-pair instruction with both destination
        // registers the same is UNPREDICTABLE and will result in an exception.
        if (MayLoad && Reg == getLdStRegOp(MI).getReg()) {
          trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
          MemInsns.push_back(&MI);
          continue;
        }

        // If the Rt of the second instruction was not modified or used between
        // the two instructions and none of the instructions between the second
        // and first alias with the second, we can combine the second into the
        // first.
        if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
            !(MI.mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
            !mayAlias(MI, MemInsns, AA)) {
          Flags.setMergeForward(false);
          return MBBI;
        }

        // Likewise, if the Rt of the first instruction is not modified or used
        // between the two instructions and none of the instructions between the
        // first and the second alias with the first, we can combine the first
        // into the second.
        if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
            !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
            !mayAlias(FirstMI, MemInsns, AA)) {
          Flags.setMergeForward(true);
          return MBBI;
        }
        // Unable to combine these instructions due to interference in between.
        // Keep looking.
      }
    }

    // The instruction wasn't a matching load or store. Stop searching if we
    // encounter a call instruction that might modify memory.
    if (MI.isCall())
      return E;

    // Update modified / uses register lists.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg])
      return E;

    // Update list of instructions that read/write memory.
    if (MI.mayLoadOrStore())
      MemInsns.push_back(&MI);
  }
  return E;
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator Update,
                                     bool IsPreIdx) {
  assert((Update->getOpcode() == AArch64::ADDXri ||
          Update->getOpcode() == AArch64::SUBXri) &&
         "Unexpected base register update instruction to merge!");
  MachineBasicBlock::iterator NextI = I;
  // Return the instruction following the merged instruction, which is
  // the instruction following our unmerged load. Unless that's the add/sub
  // instruction we're merging, in which case it's the one after that.
  if (++NextI == Update)
    ++NextI;

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;

  unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                             : getPostIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB;
  if (!isPairedLdSt(*I)) {
    // Non-paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .add(getLdStRegOp(*Update))
              .add(getLdStRegOp(*I))
              .add(getLdStBaseOp(*I))
              .addImm(Value)
              .setMemRefs(I->memoperands_begin(), I->memoperands_end());
  } else {
    // Paired instruction.
    int Scale = getMemScale(*I);
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .add(getLdStRegOp(*Update))
              .add(getLdStRegOp(*I, 0))
              .add(getLdStRegOp(*I, 1))
              .add(getLdStBaseOp(*I))
              .addImm(Value / Scale)
              .setMemRefs(I->memoperands_begin(), I->memoperands_end());
  }
  (void)MIB;

  if (IsPreIdx)
    DEBUG(dbgs() << "Creating pre-indexed load/store.");
  else
    DEBUG(dbgs() << "Creating post-indexed load/store.");
  DEBUG(dbgs() << "    Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Update->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}
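
// For example, "add x2, x2, #8" can be merged as a writeback of
// "ldr x0, [x2]": its source and destination are the base register, the
// immediate is unshifted, and 8 fits the signed 9-bit writeback range.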
bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
                                               MachineInstr &MI,
                                               unsigned BaseReg, int Offset) {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::SUBXri:
  case AArch64::ADDXri:
    // Make sure it's a vanilla immediate operand, not a relocation or
    // anything else we can't handle.
    if (!MI.getOperand(2).isImm())
      break;
    // Watch out for 1 << 12 shifted value.
    if (AArch64_AM::getShiftValue(MI.getOperand(3).getImm()))
      break;

    // The update instruction source and destination register must be the
    // same as the load/store base register.
    if (MI.getOperand(0).getReg() != BaseReg ||
        MI.getOperand(1).getReg() != BaseReg)
      break;

    bool IsPairedInsn = isPairedLdSt(MemMI);
    int UpdateOffset = MI.getOperand(2).getImm();
    if (MI.getOpcode() == AArch64::SUBXri)
      UpdateOffset = -UpdateOffset;

    // For non-paired load/store instructions, the immediate must fit in a
    // signed 9-bit integer.
    if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
      break;

    // For paired load/store instructions, the immediate must be a multiple of
    // the scaling factor. The scaled offset must also fit into a signed 7-bit
    // integer.
    if (IsPairedInsn) {
      int Scale = getMemScale(MemMI);
      if (UpdateOffset % Scale != 0)
        break;

      int ScaledOffset = UpdateOffset / Scale;
      if (ScaledOffset > 63 || ScaledOffset < -64)
        break;
    }

    // If we have a non-zero Offset, we check that it matches the amount
    // we're adding to the register.
    if (!Offset || Offset == UpdateOffset)
      return true;
    break;
  }
  return false;
}

MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
    MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr &MemMI = *I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);

  // Scan forward looking for post-index opportunities. Updating instructions
  // can't be formed if the memory instruction doesn't have the offset we're
  // looking for.
  if (MIUnscaledOffset != UnscaledOffset)
    return E;

  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();
  ++MBBI;
  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(*I, MI, BaseReg, UnscaledOffset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}

MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
    MachineBasicBlock::iterator I, unsigned Limit) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr &MemMI = *I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int Offset = getLdStOffsetOp(MemMI).getImm();

  // If the load/store is the first instruction in the block, there's obviously
  // not any matching update. Ditto if the memory offset isn't zero.
  if (MBBI == B || Offset != 0)
    return E;
  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  ModifiedRegs.reset();
  UsedRegs.reset();
  unsigned Count = 0;
  do {
    --MBBI;
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(*I, MI, BaseReg, Offset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  } while (MBBI != B && Count < Limit);
  return E;
}

bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  // If this is a volatile load, don't mess with it.
  if (MI.hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm.
  // FIXME: It is possible to extend it to handle reg+reg cases.
  if (!getLdStOffsetOp(MI).isImm())
    return false;

  // Look backward up to LdStLimit instructions.
  MachineBasicBlock::iterator StoreI;
  if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
    ++NumLoadsFromStoresPromoted;
    // Promote the load. Keeping the iterator straight is a
    // pain, so we let the merge routine tell us what the next instruction
    // is after it's done mucking about.
    MBBI = promoteLoadFromStore(MBBI, StoreI);
    return true;
  }
  return false;
}

// Merge adjacent zero stores into a wider store.
bool AArch64LoadStoreOpt::tryToMergeZeroStInst(
    MachineBasicBlock::iterator &MBBI) {
  assert(isPromotableZeroStoreInst(*MBBI) && "Expected narrow store.");
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();

  if (!TII->isCandidateToMergeOrPair(MI))
    return false;

  // Look ahead up to LdStLimit instructions for a mergable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator MergeMI =
      findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ true);
  if (MergeMI != E) {
    ++NumZeroStoresPromoted;

    // Keeping the iterator straight is a pain, so we let the merge routine tell
    // us what the next instruction is after it's done mucking about.
    MBBI = mergeNarrowZeroStores(MBBI, MergeMI, Flags);
    return true;
  }
  return false;
}

// Find loads and stores that can be merged into a single load or store pair
// instruction.
bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();

  if (!TII->isCandidateToMergeOrPair(MI))
    return false;

  // Early exit if the offset is not possible to match. (6 bits of positive
  // range, plus allow an extra one in case we find a later insn that matches
  // with Offset-1)
  bool IsUnscaled = TII->isUnscaledLdSt(MI);
  int Offset = getLdStOffsetOp(MI).getImm();
  int OffsetStride = IsUnscaled ? getMemScale(MI) : 1;
  // Allow one more for offset.
  if (Offset > 0)
    Offset -= OffsetStride;
  if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
    return false;

  // Look ahead up to LdStLimit instructions for a pairable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator Paired =
      findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false);
  if (Paired != E) {
    ++NumPairCreated;
    if (TII->isUnscaledLdSt(MI))
      ++NumUnscaledPairCreated;
    // Keeping the iterator straight is a pain, so we let the merge routine tell
    // us what the next instruction is after it's done mucking about.
    MBBI = mergePairedInsns(MBBI, Paired, Flags);
    return true;
  }
  return false;
}

bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                        bool EnableNarrowZeroStOpt) {
  bool Modified = false;
  // Four transformations to do here:
  // 1) Find loads that directly read from stores and promote them by
  //    replacing with mov instructions. If the store is wider than the load,
  //    the load will be replaced with a bitfield extract.
  //      e.g.,
  //        str w1, [x0, #4]
  //        ldrh w2, [x0, #6]
  //        ; becomes
  //        str w1, [x0, #4]
  //        lsr w2, w1, #16
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr &MI = *MBBI;
    switch (MI.getOpcode()) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::LDRBBui:
    case AArch64::LDRHHui:
    case AArch64::LDRWui:
    case AArch64::LDRXui:
    // Unscaled instructions.
    case AArch64::LDURBBi:
    case AArch64::LDURHHi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
      if (tryToPromoteLoadFromStore(MBBI)) {
        Modified = true;
        break;
      }
      ++MBBI;
      break;
    }
  }

  // 2) Merge adjacent zero stores into a wider store.
  //      e.g.,
  //        strh wzr, [x0]
  //        strh wzr, [x0, #2]
  //        ; becomes
  //        str wzr, [x0]
  //      e.g.,
  //        str wzr, [x0]
  //        str wzr, [x0, #4]
  //        ; becomes
  //        str xzr, [x0]
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       EnableNarrowZeroStOpt && MBBI != E;) {
    if (isPromotableZeroStoreInst(*MBBI)) {
      if (tryToMergeZeroStInst(MBBI)) {
        Modified = true;
      } else
        ++MBBI;
    } else
      ++MBBI;
  }

  // 3) Find loads and stores that can be merged into a single load or store
  //    pair instruction.
  //      e.g.,
  //        ldr x0, [x2]
  //        ldr x1, [x2, #8]
  //        ; becomes
  //        ldp x0, x1, [x2]
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    if (TII->isPairableLdStInst(*MBBI) && tryToPairLdStInst(MBBI))
      Modified = true;
    else
      ++MBBI;
  }

  // 4) Find base register updates that can be merged into the load or store
  //    as a base-reg writeback.
  //      e.g.,
  //        ldr x0, [x2]
  //        add x2, x2, #4
  //        ; becomes
  //        ldr x0, [x2], #4
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    MachineInstr &MI = *MBBI;
    // Do update merging. It's simpler to keep this separate from the above
    // switches, though not strictly necessary.
    unsigned Opc = MI.getOpcode();
    switch (Opc) {
    default:
      // Just move on to the next instruction.
      ++MBBI;
      break;
    // Scaled instructions.
    case AArch64::STRSui:
    case AArch64::STRDui:
    case AArch64::STRQui:
    case AArch64::STRXui:
    case AArch64::STRWui:
    case AArch64::STRHHui:
    case AArch64::STRBBui:
    case AArch64::LDRSui:
    case AArch64::LDRDui:
    case AArch64::LDRQui:
    case AArch64::LDRXui:
    case AArch64::LDRWui:
    case AArch64::LDRHHui:
    case AArch64::LDRBBui:
    // Unscaled instructions.
    case AArch64::STURSi:
    case AArch64::STURDi:
    case AArch64::STURQi:
    case AArch64::STURWi:
    case AArch64::STURXi:
    case AArch64::LDURSi:
    case AArch64::LDURDi:
    case AArch64::LDURQi:
    case AArch64::LDURWi:
    case AArch64::LDURXi:
    // Paired instructions.
    case AArch64::LDPSi:
    case AArch64::LDPSWi:
    case AArch64::LDPDi:
    case AArch64::LDPQi:
    case AArch64::LDPWi:
    case AArch64::LDPXi:
    case AArch64::STPSi:
    case AArch64::STPDi:
    case AArch64::STPQi:
    case AArch64::STPWi:
    case AArch64::STPXi: {
      // Make sure this is a reg+imm (as opposed to an address reloc).
      if (!getLdStOffsetOp(MI).isImm()) {
        ++MBBI;
        break;
      }
      // Look forward to try to form a post-index instruction. For example,
      // ldr x0, [x20]
      // add x20, x20, #32
      //   merged into:
      // ldr x0, [x20], #32
      MachineBasicBlock::iterator Update =
          findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
        Modified = true;
        ++NumPostFolded;
        break;
      }

      // Don't know how to handle pre/post-index versions, so move to the next
      // instruction.
      if (TII->isUnscaledLdSt(Opc)) {
        ++MBBI;
        break;
      }

      // Look back to try to find a pre-index instruction. For example,
      // add x0, x0, #8
      // ldr x1, [x0]
      //   merged into:
      // ldr x1, [x0, #8]!
      Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
        Modified = true;
        ++NumPreFolded;
        break;
      }

      // The immediate in the load/store is scaled by the size of the memory
      // operation. The immediate in the add we're looking for,
      // however, is not, so adjust here.
      int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);

      // Look forward to try to find a post-index instruction. For example,
      // ldr x1, [x0, #64]
      // add x0, x0, #64
      //   merged into:
      // ldr x1, [x0, #64]!
      Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
      if (Update != E) {
        // Merge the update into the ld/st.
        MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
        Modified = true;
        ++NumPreFolded;
        break;
      }

      // Nothing found. Just move to the next instruction.
      ++MBBI;
      break;
    }
    }
  }

  return Modified;
}

bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(*Fn.getFunction()))
    return false;

  Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
  TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
  TRI = Subtarget->getRegisterInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  // Resize the modified and used register bitfield trackers. We do this once
  // per function and then clear the bitfield each time we optimize a load or
  // store.
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());

  bool Modified = false;
  bool enableNarrowZeroStOpt = !Subtarget->requiresStrictAlign();
  for (auto &MBB : Fn)
    Modified |= optimizeBlock(MBB, enableNarrowZeroStOpt);

  return Modified;
}

// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep loads and
// stores near one another? Note: The pre-RA instruction scheduler already has
// hooks to try and schedule pairable loads/stores together to improve pairing
// opportunities. Thus, pre-RA pairing pass may not be worth the effort.

// FIXME: When pairing store instructions it's very possible for this pass to
// hoist a store with a KILL marker above another use (without a KILL marker).
// The resulting IR is invalid, but nothing uses the KILL markers after this
// pass, so it's never caused a problem in practice.

/// createAArch64LoadStoreOptimizationPass - returns an instance of the
/// load / store optimization pass.
FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
  return new AArch64LoadStoreOpt();
}