1 //===- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// \file This file contains a pass that performs load / store related peephole
11 /// optimizations. This pass should be run after register allocation.
13 //===----------------------------------------------------------------------===//
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMBaseRegisterInfo.h"
18 #include "ARMISelLowering.h"
19 #include "ARMMachineFunctionInfo.h"
20 #include "ARMSubtarget.h"
21 #include "MCTargetDesc/ARMAddressingModes.h"
22 #include "MCTargetDesc/ARMBaseInfo.h"
23 #include "Utils/ARMBaseInfo.h"
24 #include "llvm/ADT/ArrayRef.h"
25 #include "llvm/ADT/DenseMap.h"
26 #include "llvm/ADT/DenseSet.h"
27 #include "llvm/ADT/STLExtras.h"
28 #include "llvm/ADT/SmallPtrSet.h"
29 #include "llvm/ADT/SmallSet.h"
30 #include "llvm/ADT/SmallVector.h"
31 #include "llvm/ADT/Statistic.h"
32 #include "llvm/ADT/iterator_range.h"
33 #include "llvm/Analysis/AliasAnalysis.h"
34 #include "llvm/CodeGen/LivePhysRegs.h"
35 #include "llvm/CodeGen/MachineBasicBlock.h"
36 #include "llvm/CodeGen/MachineFunction.h"
37 #include "llvm/CodeGen/MachineFunctionPass.h"
38 #include "llvm/CodeGen/MachineInstr.h"
39 #include "llvm/CodeGen/MachineInstrBuilder.h"
40 #include "llvm/CodeGen/MachineMemOperand.h"
41 #include "llvm/CodeGen/MachineOperand.h"
42 #include "llvm/CodeGen/MachineRegisterInfo.h"
43 #include "llvm/CodeGen/RegisterClassInfo.h"
44 #include "llvm/CodeGen/TargetFrameLowering.h"
45 #include "llvm/CodeGen/TargetInstrInfo.h"
46 #include "llvm/CodeGen/TargetLowering.h"
47 #include "llvm/CodeGen/TargetRegisterInfo.h"
48 #include "llvm/CodeGen/TargetSubtargetInfo.h"
49 #include "llvm/IR/DataLayout.h"
50 #include "llvm/IR/DebugLoc.h"
51 #include "llvm/IR/DerivedTypes.h"
52 #include "llvm/IR/Function.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/MC/MCInstrDesc.h"
55 #include "llvm/Pass.h"
56 #include "llvm/Support/Allocator.h"
57 #include "llvm/Support/CommandLine.h"
58 #include "llvm/Support/Debug.h"
59 #include "llvm/Support/ErrorHandling.h"
60 #include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "arm-ldst-opt"
73 STATISTIC(NumLDMGened , "Number of ldm instructions generated");
74 STATISTIC(NumSTMGened , "Number of stm instructions generated");
75 STATISTIC(NumVLDMGened, "Number of vldm instructions generated");
76 STATISTIC(NumVSTMGened, "Number of vstm instructions generated");
77 STATISTIC(NumLdStMoved, "Number of load / store instructions moved");
78 STATISTIC(NumLDRDFormed,"Number of ldrd created before allocation");
79 STATISTIC(NumSTRDFormed,"Number of strd created before allocation");
80 STATISTIC(NumLDRD2LDM, "Number of ldrd instructions turned back into ldm");
81 STATISTIC(NumSTRD2STM, "Number of strd instructions turned back into stm");
82 STATISTIC(NumLDRD2LDR, "Number of ldrd instructions turned back into ldr's");
83 STATISTIC(NumSTRD2STR, "Number of strd instructions turned back into str's");
85 /// This switch disables formation of double/multi instructions that could
86 /// potentially lead to (new) alignment traps even with CCR.UNALIGN_TRP
87 /// disabled. This can be used to create libraries that are robust even when
88 /// users provoke undefined behaviour by supplying misaligned pointers.
89 /// \see mayCombineMisaligned()
static cl::opt<bool>
AssumeMisalignedLoadStores("arm-assume-misaligned-load-store", cl::Hidden,
    cl::init(false), cl::desc("Be more conservative in ARM load/store opt"));
94 #define ARM_LOAD_STORE_OPT_NAME "ARM load / store optimization pass"
namespace {

  /// Post-register-allocation pass that combines load / store instructions to
  /// form ldm / stm instructions.
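  /// For example (illustrative only; the actual merge depends on liveness,
  /// offsets and register ordering):
  ///   ldr r1, [r0]
  ///   ldr r2, [r0, #4]
  ///   ldr r3, [r0, #8]
  /// =>
  ///   ldmia r0, {r1, r2, r3}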
100 struct ARMLoadStoreOpt : public MachineFunctionPass {
103 const MachineFunction *MF;
104 const TargetInstrInfo *TII;
105 const TargetRegisterInfo *TRI;
106 const ARMSubtarget *STI;
107 const TargetLowering *TL;
108 ARMFunctionInfo *AFI;
109 LivePhysRegs LiveRegs;
110 RegisterClassInfo RegClassInfo;
111 MachineBasicBlock::const_iterator LiveRegPos;
    bool LiveRegsValid;
    bool RegClassInfoValid;
114 bool isThumb1, isThumb2;
116 ARMLoadStoreOpt() : MachineFunctionPass(ID) {}
118 bool runOnMachineFunction(MachineFunction &Fn) override;
120 MachineFunctionProperties getRequiredProperties() const override {
121 return MachineFunctionProperties().set(
122 MachineFunctionProperties::Property::NoVRegs);
125 StringRef getPassName() const override { return ARM_LOAD_STORE_OPT_NAME; }
    /// A set of load/store MachineInstrs with same base register sorted by
    /// offset.
    struct MemOpQueueEntry {
      MachineInstr *MI;
      int Offset;        ///< Load/Store offset.
      unsigned Position; ///< Position as counted from end of basic block.
135 MemOpQueueEntry(MachineInstr &MI, int Offset, unsigned Position)
136 : MI(&MI), Offset(Offset), Position(Position) {}
138 using MemOpQueue = SmallVector<MemOpQueueEntry, 8>;
140 /// A set of MachineInstrs that fulfill (nearly all) conditions to get
141 /// merged into a LDM/STM.
142 struct MergeCandidate {
143 /// List of instructions ordered by load/store offset.
144 SmallVector<MachineInstr*, 4> Instrs;
146 /// Index in Instrs of the instruction being latest in the schedule.
147 unsigned LatestMIIdx;
149 /// Index in Instrs of the instruction being earliest in the schedule.
150 unsigned EarliestMIIdx;
      /// Index into the basic block where the merged instruction will be
      /// inserted. (See MemOpQueueEntry.Position)
      unsigned InsertPos;
156 /// Whether the instructions can be merged into a ldm/stm instruction.
157 bool CanMergeToLSMulti;
159 /// Whether the instructions can be merged into a ldrd/strd instruction.
160 bool CanMergeToLSDouble;
162 SpecificBumpPtrAllocator<MergeCandidate> Allocator;
163 SmallVector<const MergeCandidate*,4> Candidates;
164 SmallVector<MachineInstr*,4> MergeBaseCandidates;
166 void moveLiveRegsBefore(const MachineBasicBlock &MBB,
167 MachineBasicBlock::const_iterator Before);
168 unsigned findFreeReg(const TargetRegisterClass &RegClass);
169 void UpdateBaseRegUses(MachineBasicBlock &MBB,
170 MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
171 unsigned Base, unsigned WordOffset,
172 ARMCC::CondCodes Pred, unsigned PredReg);
173 MachineInstr *CreateLoadStoreMulti(
174 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
175 int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
176 ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
177 ArrayRef<std::pair<unsigned, bool>> Regs);
178 MachineInstr *CreateLoadStoreDouble(
179 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
180 int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
181 ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
182 ArrayRef<std::pair<unsigned, bool>> Regs) const;
183 void FormCandidates(const MemOpQueue &MemOps);
184 MachineInstr *MergeOpsUpdate(const MergeCandidate &Cand);
185 bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
186 MachineBasicBlock::iterator &MBBI);
187 bool MergeBaseUpdateLoadStore(MachineInstr *MI);
188 bool MergeBaseUpdateLSMultiple(MachineInstr *MI);
189 bool MergeBaseUpdateLSDouble(MachineInstr &MI) const;
190 bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
191 bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
192 bool CombineMovBx(MachineBasicBlock &MBB);
195 } // end anonymous namespace
197 char ARMLoadStoreOpt::ID = 0;
INITIALIZE_PASS(ARMLoadStoreOpt, "arm-ldst-opt", ARM_LOAD_STORE_OPT_NAME, false,
                false)
202 static bool definesCPSR(const MachineInstr &MI) {
203 for (const auto &MO : MI.operands()) {
206 if (MO.isDef() && MO.getReg() == ARM::CPSR && !MO.isDead())
207 // If the instruction has live CPSR def, then it's not safe to fold it
208 // into load / store.
215 static int getMemoryOpOffset(const MachineInstr &MI) {
216 unsigned Opcode = MI.getOpcode();
217 bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
218 unsigned NumOperands = MI.getDesc().getNumOperands();
219 unsigned OffField = MI.getOperand(NumOperands - 3).getImm();
221 if (Opcode == ARM::t2LDRi12 || Opcode == ARM::t2LDRi8 ||
222 Opcode == ARM::t2STRi12 || Opcode == ARM::t2STRi8 ||
223 Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8 ||
      Opcode == ARM::LDRi12   || Opcode == ARM::STRi12)
    return OffField;
227 // Thumb1 immediate offsets are scaled by 4
228 if (Opcode == ARM::tLDRi || Opcode == ARM::tSTRi ||
      Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi)
    return OffField * 4;
232 int Offset = isAM3 ? ARM_AM::getAM3Offset(OffField)
233 : ARM_AM::getAM5Offset(OffField) * 4;
234 ARM_AM::AddrOpc Op = isAM3 ? ARM_AM::getAM3Op(OffField)
235 : ARM_AM::getAM5Op(OffField);
  if (Op == ARM_AM::sub)
    return -Offset;

  return Offset;
}
243 static const MachineOperand &getLoadStoreBaseOp(const MachineInstr &MI) {
244 return MI.getOperand(1);
247 static const MachineOperand &getLoadStoreRegOp(const MachineInstr &MI) {
248 return MI.getOperand(0);
251 static int getLoadStoreMultipleOpcode(unsigned Opcode, ARM_AM::AMSubMode Mode) {
253 default: llvm_unreachable("Unhandled opcode!");
257 default: llvm_unreachable("Unhandled submode!");
258 case ARM_AM::ia: return ARM::LDMIA;
259 case ARM_AM::da: return ARM::LDMDA;
260 case ARM_AM::db: return ARM::LDMDB;
261 case ARM_AM::ib: return ARM::LDMIB;
266 default: llvm_unreachable("Unhandled submode!");
267 case ARM_AM::ia: return ARM::STMIA;
268 case ARM_AM::da: return ARM::STMDA;
269 case ARM_AM::db: return ARM::STMDB;
270 case ARM_AM::ib: return ARM::STMIB;
    // tLDMIA is writeback-only - unless the base register is in the input
    // reglist.
278 default: llvm_unreachable("Unhandled submode!");
279 case ARM_AM::ia: return ARM::tLDMIA;
283 // There is no non-writeback tSTMIA either.
286 default: llvm_unreachable("Unhandled submode!");
287 case ARM_AM::ia: return ARM::tSTMIA_UPD;
293 default: llvm_unreachable("Unhandled submode!");
294 case ARM_AM::ia: return ARM::t2LDMIA;
295 case ARM_AM::db: return ARM::t2LDMDB;
301 default: llvm_unreachable("Unhandled submode!");
302 case ARM_AM::ia: return ARM::t2STMIA;
303 case ARM_AM::db: return ARM::t2STMDB;
308 default: llvm_unreachable("Unhandled submode!");
309 case ARM_AM::ia: return ARM::VLDMSIA;
310 case ARM_AM::db: return 0; // Only VLDMSDB_UPD exists.
315 default: llvm_unreachable("Unhandled submode!");
316 case ARM_AM::ia: return ARM::VSTMSIA;
317 case ARM_AM::db: return 0; // Only VSTMSDB_UPD exists.
322 default: llvm_unreachable("Unhandled submode!");
323 case ARM_AM::ia: return ARM::VLDMDIA;
324 case ARM_AM::db: return 0; // Only VLDMDDB_UPD exists.
329 default: llvm_unreachable("Unhandled submode!");
330 case ARM_AM::ia: return ARM::VSTMDIA;
331 case ARM_AM::db: return 0; // Only VSTMDDB_UPD exists.
336 static ARM_AM::AMSubMode getLoadStoreMultipleSubMode(unsigned Opcode) {
338 default: llvm_unreachable("Unhandled opcode!");
345 case ARM::tLDMIA_UPD:
346 case ARM::tSTMIA_UPD:
347 case ARM::t2LDMIA_RET:
349 case ARM::t2LDMIA_UPD:
351 case ARM::t2STMIA_UPD:
353 case ARM::VLDMSIA_UPD:
355 case ARM::VSTMSIA_UPD:
357 case ARM::VLDMDIA_UPD:
359 case ARM::VSTMDIA_UPD:
373 case ARM::t2LDMDB_UPD:
375 case ARM::t2STMDB_UPD:
376 case ARM::VLDMSDB_UPD:
377 case ARM::VSTMSDB_UPD:
378 case ARM::VLDMDDB_UPD:
379 case ARM::VSTMDDB_UPD:
390 static bool isT1i32Load(unsigned Opc) {
391 return Opc == ARM::tLDRi || Opc == ARM::tLDRspi;
394 static bool isT2i32Load(unsigned Opc) {
395 return Opc == ARM::t2LDRi12 || Opc == ARM::t2LDRi8;
398 static bool isi32Load(unsigned Opc) {
399 return Opc == ARM::LDRi12 || isT1i32Load(Opc) || isT2i32Load(Opc) ;
402 static bool isT1i32Store(unsigned Opc) {
403 return Opc == ARM::tSTRi || Opc == ARM::tSTRspi;
406 static bool isT2i32Store(unsigned Opc) {
407 return Opc == ARM::t2STRi12 || Opc == ARM::t2STRi8;
410 static bool isi32Store(unsigned Opc) {
411 return Opc == ARM::STRi12 || isT1i32Store(Opc) || isT2i32Store(Opc);
414 static bool isLoadSingle(unsigned Opc) {
415 return isi32Load(Opc) || Opc == ARM::VLDRS || Opc == ARM::VLDRD;
418 static unsigned getImmScale(unsigned Opc) {
420 default: llvm_unreachable("Unhandled opcode!");
435 static unsigned getLSMultipleTransferSize(const MachineInstr *MI) {
436 switch (MI->getOpcode()) {
463 case ARM::tLDMIA_UPD:
464 case ARM::tSTMIA_UPD:
471 return (MI->getNumOperands() - MI->getDesc().getNumOperands() + 1) * 4;
474 return (MI->getNumOperands() - MI->getDesc().getNumOperands() + 1) * 8;
478 /// Update future uses of the base register with the offset introduced
479 /// due to writeback. This function only works on Thumb1.
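/// For example (illustrative): after "ldmia r0, {r1, r2}" is rewritten to the
/// writeback form "ldmia r0!, {r1, r2}", a later "ldr r3, [r0, #8]" must be
/// rewritten to "ldr r3, [r0]", and uses that cannot be rewritten get a
/// compensating SUBS of WordOffset * 4 inserted in front of them.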
480 void ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
481 MachineBasicBlock::iterator MBBI,
482 const DebugLoc &DL, unsigned Base,
484 ARMCC::CondCodes Pred,
486 assert(isThumb1 && "Can only update base register uses for Thumb1!");
487 // Start updating any instructions with immediate offsets. Insert a SUB before
488 // the first non-updateable instruction (if any).
489 for (; MBBI != MBB.end(); ++MBBI) {
490 bool InsertSub = false;
491 unsigned Opc = MBBI->getOpcode();
493 if (MBBI->readsRegister(Base)) {
      int Offset;
      bool IsLoad =
          Opc == ARM::tLDRi || Opc == ARM::tLDRHi || Opc == ARM::tLDRBi;
      bool IsStore =
          Opc == ARM::tSTRi || Opc == ARM::tSTRHi || Opc == ARM::tSTRBi;
500 if (IsLoad || IsStore) {
501 // Loads and stores with immediate offsets can be updated, but only if
502 // the new offset isn't negative.
503 // The MachineOperand containing the offset immediate is the last one
504 // before predicates.
        MachineOperand &MO =
            MBBI->getOperand(MBBI->getDesc().getNumOperands() - 3);
507 // The offsets are scaled by 1, 2 or 4 depending on the Opcode.
508 Offset = MO.getImm() - WordOffset * getImmScale(Opc);
510 // If storing the base register, it needs to be reset first.
511 unsigned InstrSrcReg = getLoadStoreRegOp(*MBBI).getReg();
513 if (Offset >= 0 && !(IsStore && InstrSrcReg == Base))
517 } else if ((Opc == ARM::tSUBi8 || Opc == ARM::tADDi8) &&
518 !definesCPSR(*MBBI)) {
519 // SUBS/ADDS using this register, with a dead def of the CPSR.
520 // Merge it with the update; if the merged offset is too large,
521 // insert a new sub instead.
        MachineOperand &MO =
            MBBI->getOperand(MBBI->getDesc().getNumOperands() - 3);
524 Offset = (Opc == ARM::tSUBi8) ?
525 MO.getImm() + WordOffset * 4 :
526 MO.getImm() - WordOffset * 4 ;
527 if (Offset >= 0 && TL->isLegalAddImmediate(Offset)) {
528 // FIXME: Swap ADDS<->SUBS if Offset < 0, erase instruction if
531 // The base register has now been reset, so exit early.
537 // Can't update the instruction.
540 } else if (definesCPSR(*MBBI) || MBBI->isCall() || MBBI->isBranch()) {
541 // Since SUBS sets the condition flags, we can't place the base reset
542 // after an instruction that has a live CPSR def.
543 // The base register might also contain an argument for a function call.
548 // An instruction above couldn't be updated, so insert a sub.
549 BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBi8), Base)
550 .add(t1CondCodeOp(true))
552 .addImm(WordOffset * 4)
558 if (MBBI->killsRegister(Base) || MBBI->definesRegister(Base))
559 // Register got killed. Stop updating.
563 // End of block was reached.
564 if (MBB.succ_size() > 0) {
565 // FIXME: Because of a bug, live registers are sometimes missing from
566 // the successor blocks' live-in sets. This means we can't trust that
567 // information and *always* have to reset at the end of a block.
569 if (MBBI != MBB.end()) --MBBI;
570 BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBi8), Base)
571 .add(t1CondCodeOp(true))
573 .addImm(WordOffset * 4)
579 /// Return the first register of class \p RegClass that is not in \p Regs.
580 unsigned ARMLoadStoreOpt::findFreeReg(const TargetRegisterClass &RegClass) {
581 if (!RegClassInfoValid) {
582 RegClassInfo.runOnMachineFunction(*MF);
583 RegClassInfoValid = true;
586 for (unsigned Reg : RegClassInfo.getOrder(&RegClass))
587 if (!LiveRegs.contains(Reg))
592 /// Compute live registers just before instruction \p Before (in normal schedule
593 /// direction). Computes backwards so multiple queries in the same block must
594 /// come in reverse order.
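/// (Descriptive note: LiveRegs is seeded with the live-outs of \p MBB on the
/// first query in a block and then stepped backwards one instruction at a
/// time, which is why callers must visit positions from the bottom of the
/// block upwards.)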
595 void ARMLoadStoreOpt::moveLiveRegsBefore(const MachineBasicBlock &MBB,
596 MachineBasicBlock::const_iterator Before) {
597 // Initialize if we never queried in this block.
598 if (!LiveRegsValid) {
600 LiveRegs.addLiveOuts(MBB);
601 LiveRegPos = MBB.end();
602 LiveRegsValid = true;
604 // Move backward just before the "Before" position.
605 while (LiveRegPos != Before) {
607 LiveRegs.stepBackward(*LiveRegPos);
611 static bool ContainsReg(const ArrayRef<std::pair<unsigned, bool>> &Regs,
613 for (const std::pair<unsigned, bool> &R : Regs)
619 /// Create and insert a LDM or STM with Base as base register and registers in
/// Regs as the register operands that would be loaded / stored. Returns the
/// newly created instruction, or nullptr if no instruction could be formed.
622 MachineInstr *ARMLoadStoreOpt::CreateLoadStoreMulti(
623 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
624 int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
625 ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
626 ArrayRef<std::pair<unsigned, bool>> Regs) {
627 unsigned NumRegs = Regs.size();
630 // For Thumb1 targets, it might be necessary to clobber the CPSR to merge.
631 // Compute liveness information for that register to make the decision.
632 bool SafeToClobberCPSR = !isThumb1 ||
633 (MBB.computeRegisterLiveness(TRI, ARM::CPSR, InsertBefore, 20) ==
634 MachineBasicBlock::LQR_Dead);
636 bool Writeback = isThumb1; // Thumb1 LDM/STM have base reg writeback.
  // Exception: If the base register is in the input reglist, Thumb1 LDM is
  // non-writeback.
640 // It's also not possible to merge an STR of the base register in Thumb1.
641 if (isThumb1 && ContainsReg(Regs, Base)) {
642 assert(Base != ARM::SP && "Thumb1 does not allow SP in register list");
643 if (Opcode == ARM::tLDRi)
645 else if (Opcode == ARM::tSTRi)
649 ARM_AM::AMSubMode Mode = ARM_AM::ia;
650 // VFP and Thumb2 do not support IB or DA modes. Thumb1 only supports IA.
651 bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode);
652 bool haveIBAndDA = isNotVFP && !isThumb2 && !isThumb1;
654 if (Offset == 4 && haveIBAndDA) {
656 } else if (Offset == -4 * (int)NumRegs + 4 && haveIBAndDA) {
658 } else if (Offset == -4 * (int)NumRegs && isNotVFP && !isThumb1) {
659 // VLDM/VSTM do not support DB mode without also updating the base reg.
661 } else if (Offset != 0 || Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
662 // Check if this is a supported opcode before inserting instructions to
663 // calculate a new base register.
664 if (!getLoadStoreMultipleOpcode(Opcode, Mode)) return nullptr;
666 // If starting offset isn't zero, insert a MI to materialize a new base.
  // But only do so if it is cost effective, i.e. merging more than two
  // loads / stores.
672 // On Thumb1, it's not worth materializing a new base register without
673 // clobbering the CPSR (i.e. not using ADDS/SUBS).
674 if (!SafeToClobberCPSR)
678 if (isi32Load(Opcode)) {
679 // If it is a load, then just use one of the destination registers
680 // as the new base. Will no longer be writeback in Thumb1.
681 NewBase = Regs[NumRegs-1].first;
684 // Find a free register that we can use as scratch register.
685 moveLiveRegsBefore(MBB, InsertBefore);
    // The merged instruction does not exist yet but will use several Regs if
    // it is a Store.
688 if (!isLoadSingle(Opcode))
689 for (const std::pair<unsigned, bool> &R : Regs)
690 LiveRegs.addReg(R.first);
692 NewBase = findFreeReg(isThumb1 ? ARM::tGPRRegClass : ARM::GPRRegClass);
698 isThumb2 ? ARM::t2ADDri :
699 (isThumb1 && Base == ARM::SP) ? ARM::tADDrSPi :
700 (isThumb1 && Offset < 8) ? ARM::tADDi3 :
701 isThumb1 ? ARM::tADDi8 : ARM::ADDri;
706 isThumb2 ? ARM::t2SUBri :
707 (isThumb1 && Offset < 8 && Base != ARM::SP) ? ARM::tSUBi3 :
708 isThumb1 ? ARM::tSUBi8 : ARM::SUBri;
711 if (!TL->isLegalAddImmediate(Offset))
712 // FIXME: Try add with register operand?
713 return nullptr; // Probably not worth it then.
715 // We can only append a kill flag to the add/sub input if the value is not
716 // used in the register list of the stm as well.
717 bool KillOldBase = BaseKill &&
718 (!isi32Store(Opcode) || !ContainsReg(Regs, Base));
721 // Thumb1: depending on immediate size, use either
    //   ADDS NewBase, Base, #imm3
    // or
    //   MOV  NewBase, Base
    //   ADDS NewBase, #imm8
726 if (Base != NewBase &&
727 (BaseOpc == ARM::tADDi8 || BaseOpc == ARM::tSUBi8)) {
728 // Need to insert a MOV to the new base first.
729 if (isARMLowRegister(NewBase) && isARMLowRegister(Base) &&
731 // thumbv4t doesn't have lo->lo copies, and we can't predicate tMOVSr
732 if (Pred != ARMCC::AL)
734 BuildMI(MBB, InsertBefore, DL, TII->get(ARM::tMOVSr), NewBase)
735 .addReg(Base, getKillRegState(KillOldBase));
737 BuildMI(MBB, InsertBefore, DL, TII->get(ARM::tMOVr), NewBase)
738 .addReg(Base, getKillRegState(KillOldBase))
739 .add(predOps(Pred, PredReg));
741 // The following ADDS/SUBS becomes an update.
745 if (BaseOpc == ARM::tADDrSPi) {
746 assert(Offset % 4 == 0 && "tADDrSPi offset is scaled by 4");
747 BuildMI(MBB, InsertBefore, DL, TII->get(BaseOpc), NewBase)
748 .addReg(Base, getKillRegState(KillOldBase))
750 .add(predOps(Pred, PredReg));
752 BuildMI(MBB, InsertBefore, DL, TII->get(BaseOpc), NewBase)
753 .add(t1CondCodeOp(true))
754 .addReg(Base, getKillRegState(KillOldBase))
756 .add(predOps(Pred, PredReg));
758 BuildMI(MBB, InsertBefore, DL, TII->get(BaseOpc), NewBase)
759 .addReg(Base, getKillRegState(KillOldBase))
761 .add(predOps(Pred, PredReg))
765 BaseKill = true; // New base is always killed straight away.
768 bool isDef = isLoadSingle(Opcode);
770 // Get LS multiple opcode. Note that for Thumb1 this might be an opcode with
771 // base register writeback.
772 Opcode = getLoadStoreMultipleOpcode(Opcode, Mode);
776 // Check if a Thumb1 LDM/STM merge is safe. This is the case if:
777 // - There is no writeback (LDM of base register),
778 // - the base register is killed by the merged instruction,
779 // - or it's safe to overwrite the condition flags, i.e. to insert a SUBS
780 // to reset the base register.
781 // Otherwise, don't merge.
782 // It's safe to return here since the code to materialize a new base register
783 // above is also conditional on SafeToClobberCPSR.
784 if (isThumb1 && !SafeToClobberCPSR && Writeback && !BaseKill)
787 MachineInstrBuilder MIB;
    assert(isThumb1 && "expected Writeback only in Thumb1");
791 if (Opcode == ARM::tLDMIA) {
792 assert(!(ContainsReg(Regs, Base)) && "Thumb1 can't LDM ! with Base in Regs");
793 // Update tLDMIA with writeback if necessary.
794 Opcode = ARM::tLDMIA_UPD;
797 MIB = BuildMI(MBB, InsertBefore, DL, TII->get(Opcode));
799 // Thumb1: we might need to set base writeback when building the MI.
800 MIB.addReg(Base, getDefRegState(true))
801 .addReg(Base, getKillRegState(BaseKill));
803 // The base isn't dead after a merged instruction with writeback.
804 // Insert a sub instruction after the newly formed instruction to reset.
806 UpdateBaseRegUses(MBB, InsertBefore, DL, Base, NumRegs, Pred, PredReg);
808 // No writeback, simply build the MachineInstr.
809 MIB = BuildMI(MBB, InsertBefore, DL, TII->get(Opcode));
810 MIB.addReg(Base, getKillRegState(BaseKill));
813 MIB.addImm(Pred).addReg(PredReg);
815 for (const std::pair<unsigned, bool> &R : Regs)
816 MIB.addReg(R.first, getDefRegState(isDef) | getKillRegState(R.second));
818 return MIB.getInstr();
821 MachineInstr *ARMLoadStoreOpt::CreateLoadStoreDouble(
822 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
823 int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
824 ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
825 ArrayRef<std::pair<unsigned, bool>> Regs) const {
826 bool IsLoad = isi32Load(Opcode);
827 assert((IsLoad || isi32Store(Opcode)) && "Must have integer load or store");
828 unsigned LoadStoreOpcode = IsLoad ? ARM::t2LDRDi8 : ARM::t2STRDi8;
830 assert(Regs.size() == 2);
831 MachineInstrBuilder MIB = BuildMI(MBB, InsertBefore, DL,
832 TII->get(LoadStoreOpcode));
834 MIB.addReg(Regs[0].first, RegState::Define)
835 .addReg(Regs[1].first, RegState::Define);
837 MIB.addReg(Regs[0].first, getKillRegState(Regs[0].second))
838 .addReg(Regs[1].first, getKillRegState(Regs[1].second));
840 MIB.addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
841 return MIB.getInstr();
844 /// Call MergeOps and update MemOps and merges accordingly on success.
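/// (Descriptive note: on success this erases the merged loads/stores, inserts
/// the new LDM/STM or LDRD/STRD right after the latest of them, and patches up
/// kill flags and implicit super-register defs on the instructions in between.)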
845 MachineInstr *ARMLoadStoreOpt::MergeOpsUpdate(const MergeCandidate &Cand) {
846 const MachineInstr *First = Cand.Instrs.front();
847 unsigned Opcode = First->getOpcode();
848 bool IsLoad = isLoadSingle(Opcode);
849 SmallVector<std::pair<unsigned, bool>, 8> Regs;
850 SmallVector<unsigned, 4> ImpDefs;
851 DenseSet<unsigned> KilledRegs;
852 DenseSet<unsigned> UsedRegs;
853 // Determine list of registers and list of implicit super-register defs.
854 for (const MachineInstr *MI : Cand.Instrs) {
855 const MachineOperand &MO = getLoadStoreRegOp(*MI);
856 unsigned Reg = MO.getReg();
857 bool IsKill = MO.isKill();
859 KilledRegs.insert(Reg);
860 Regs.push_back(std::make_pair(Reg, IsKill));
861 UsedRegs.insert(Reg);
864 // Collect any implicit defs of super-registers, after merging we can't
865 // be sure anymore that we properly preserved these live ranges and must
    // remove these implicit operands.
867 for (const MachineOperand &MO : MI->implicit_operands()) {
868 if (!MO.isReg() || !MO.isDef() || MO.isDead())
870 assert(MO.isImplicit());
871 unsigned DefReg = MO.getReg();
873 if (is_contained(ImpDefs, DefReg))
875 // We can ignore cases where the super-reg is read and written.
876 if (MI->readsRegister(DefReg))
878 ImpDefs.push_back(DefReg);
883 // Attempt the merge.
884 using iterator = MachineBasicBlock::iterator;
886 MachineInstr *LatestMI = Cand.Instrs[Cand.LatestMIIdx];
887 iterator InsertBefore = std::next(iterator(LatestMI));
888 MachineBasicBlock &MBB = *LatestMI->getParent();
889 unsigned Offset = getMemoryOpOffset(*First);
890 unsigned Base = getLoadStoreBaseOp(*First).getReg();
891 bool BaseKill = LatestMI->killsRegister(Base);
892 unsigned PredReg = 0;
893 ARMCC::CondCodes Pred = getInstrPredicate(*First, PredReg);
894 DebugLoc DL = First->getDebugLoc();
895 MachineInstr *Merged = nullptr;
896 if (Cand.CanMergeToLSDouble)
897 Merged = CreateLoadStoreDouble(MBB, InsertBefore, Offset, Base, BaseKill,
898 Opcode, Pred, PredReg, DL, Regs);
899 if (!Merged && Cand.CanMergeToLSMulti)
900 Merged = CreateLoadStoreMulti(MBB, InsertBefore, Offset, Base, BaseKill,
901 Opcode, Pred, PredReg, DL, Regs);
905 // Determine earliest instruction that will get removed. We then keep an
  // iterator just above it so the following erases don't invalidate it.
907 iterator EarliestI(Cand.Instrs[Cand.EarliestMIIdx]);
908 bool EarliestAtBegin = false;
909 if (EarliestI == MBB.begin()) {
910 EarliestAtBegin = true;
912 EarliestI = std::prev(EarliestI);
915 // Remove instructions which have been merged.
916 for (MachineInstr *MI : Cand.Instrs)
919 // Determine range between the earliest removed instruction and the new one.
921 EarliestI = MBB.begin();
923 EarliestI = std::next(EarliestI);
924 auto FixupRange = make_range(EarliestI, iterator(Merged));
926 if (isLoadSingle(Opcode)) {
927 // If the previous loads defined a super-reg, then we have to mark earlier
928 // operands undef; Replicate the super-reg def on the merged instruction.
929 for (MachineInstr &MI : FixupRange) {
930 for (unsigned &ImpDefReg : ImpDefs) {
931 for (MachineOperand &MO : MI.implicit_operands()) {
932 if (!MO.isReg() || MO.getReg() != ImpDefReg)
942 MachineInstrBuilder MIB(*Merged->getParent()->getParent(), Merged);
943 for (unsigned ImpDef : ImpDefs)
944 MIB.addReg(ImpDef, RegState::ImplicitDefine);
946 // Remove kill flags: We are possibly storing the values later now.
947 assert(isi32Store(Opcode) || Opcode == ARM::VSTRS || Opcode == ARM::VSTRD);
948 for (MachineInstr &MI : FixupRange) {
949 for (MachineOperand &MO : MI.uses()) {
950 if (!MO.isReg() || !MO.isKill())
952 if (UsedRegs.count(MO.getReg()))
956 assert(ImpDefs.empty());
962 static bool isValidLSDoubleOffset(int Offset) {
963 unsigned Value = abs(Offset);
  // t2LDRDi8/t2STRDi8 supports an 8 bit immediate which is internally
  // multiplied by 4.
966 return (Value % 4) == 0 && Value < 1024;
969 /// Return true for loads/stores that can be combined to a double/multi
970 /// operation without increasing the requirements for alignment.
971 static bool mayCombineMisaligned(const TargetSubtargetInfo &STI,
972 const MachineInstr &MI) {
  // vldr/vstr trap on misaligned pointers anyway, forming vldm makes no
  // difference.
975 unsigned Opcode = MI.getOpcode();
976 if (!isi32Load(Opcode) && !isi32Store(Opcode))
  // Stack pointer alignment is out of the programmer's control, so we can trust
980 // SP-relative loads/stores.
981 if (getLoadStoreBaseOp(MI).getReg() == ARM::SP &&
982 STI.getFrameLowering()->getTransientStackAlignment() >= 4)
987 /// Find candidates for load/store multiple merge in list of MemOpQueueEntries.
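/// For example (illustrative), a queue containing
///   ldr r1, [r0]
///   ldr r2, [r0, #4]
/// yields one MergeCandidate that may later be rewritten as "ldmia r0, {r1, r2}"
/// or, on Thumb2, as "ldrd r1, r2, [r0]".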
988 void ARMLoadStoreOpt::FormCandidates(const MemOpQueue &MemOps) {
989 const MachineInstr *FirstMI = MemOps[0].MI;
990 unsigned Opcode = FirstMI->getOpcode();
991 bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode);
992 unsigned Size = getLSMultipleTransferSize(FirstMI);
995 unsigned EIndex = MemOps.size();
997 // Look at the first instruction.
998 const MachineInstr *MI = MemOps[SIndex].MI;
999 int Offset = MemOps[SIndex].Offset;
1000 const MachineOperand &PMO = getLoadStoreRegOp(*MI);
1001 unsigned PReg = PMO.getReg();
1002 unsigned PRegNum = PMO.isUndef() ? std::numeric_limits<unsigned>::max()
1003 : TRI->getEncodingValue(PReg);
1004 unsigned Latest = SIndex;
1005 unsigned Earliest = SIndex;
1007 bool CanMergeToLSDouble =
1008 STI->isThumb2() && isNotVFP && isValidLSDoubleOffset(Offset);
1009 // ARM errata 602117: LDRD with base in list may result in incorrect base
1010 // register when interrupted or faulted.
1011 if (STI->isCortexM3() && isi32Load(Opcode) &&
1012 PReg == getLoadStoreBaseOp(*MI).getReg())
1013 CanMergeToLSDouble = false;
1015 bool CanMergeToLSMulti = true;
    // On Swift, avoid vldm/vstm starting with an odd register number, as that
    // needs more uops than single vldrs.
1018 if (STI->hasSlowOddRegister() && !isNotVFP && (PRegNum % 2) == 1)
1019 CanMergeToLSMulti = false;
1021 // LDRD/STRD do not allow SP/PC. LDM/STM do not support it or have it
1022 // deprecated; LDM to PC is fine but cannot happen here.
1023 if (PReg == ARM::SP || PReg == ARM::PC)
1024 CanMergeToLSMulti = CanMergeToLSDouble = false;
1026 // Should we be conservative?
1027 if (AssumeMisalignedLoadStores && !mayCombineMisaligned(*STI, *MI))
1028 CanMergeToLSMulti = CanMergeToLSDouble = false;
    // vldm / vstm limits are 32 for S variants, 16 for D variants.
1042 // Merge following instructions where possible.
1043 for (unsigned I = SIndex+1; I < EIndex; ++I, ++Count) {
1044 int NewOffset = MemOps[I].Offset;
1045 if (NewOffset != Offset + (int)Size)
1047 const MachineOperand &MO = getLoadStoreRegOp(*MemOps[I].MI);
1048 unsigned Reg = MO.getReg();
1049 if (Reg == ARM::SP || Reg == ARM::PC)
1054 // See if the current load/store may be part of a multi load/store.
1055 unsigned RegNum = MO.isUndef() ? std::numeric_limits<unsigned>::max()
1056 : TRI->getEncodingValue(Reg);
1057 bool PartOfLSMulti = CanMergeToLSMulti;
1058 if (PartOfLSMulti) {
1059 // Register numbers must be in ascending order.
1060 if (RegNum <= PRegNum)
1061 PartOfLSMulti = false;
1062 // For VFP / NEON load/store multiples, the registers must be
1063 // consecutive and within the limit on the number of registers per
1065 else if (!isNotVFP && RegNum != PRegNum+1)
1066 PartOfLSMulti = false;
1068 // See if the current load/store may be part of a double load/store.
1069 bool PartOfLSDouble = CanMergeToLSDouble && Count <= 1;
1071 if (!PartOfLSMulti && !PartOfLSDouble)
1073 CanMergeToLSMulti &= PartOfLSMulti;
1074 CanMergeToLSDouble &= PartOfLSDouble;
1075 // Track MemOp with latest and earliest position (Positions are
1076 // counted in reverse).
1077 unsigned Position = MemOps[I].Position;
1078 if (Position < MemOps[Latest].Position)
1080 else if (Position > MemOps[Earliest].Position)
1082 // Prepare for next MemOp.
1087 // Form a candidate from the Ops collected so far.
1088 MergeCandidate *Candidate = new(Allocator.Allocate()) MergeCandidate;
1089 for (unsigned C = SIndex, CE = SIndex + Count; C < CE; ++C)
1090 Candidate->Instrs.push_back(MemOps[C].MI);
1091 Candidate->LatestMIIdx = Latest - SIndex;
1092 Candidate->EarliestMIIdx = Earliest - SIndex;
1093 Candidate->InsertPos = MemOps[Latest].Position;
1095 CanMergeToLSMulti = CanMergeToLSDouble = false;
1096 Candidate->CanMergeToLSMulti = CanMergeToLSMulti;
1097 Candidate->CanMergeToLSDouble = CanMergeToLSDouble;
1098 Candidates.push_back(Candidate);
1099 // Continue after the chain.
1101 } while (SIndex < EIndex);
1104 static unsigned getUpdatingLSMultipleOpcode(unsigned Opc,
1105 ARM_AM::AMSubMode Mode) {
1107 default: llvm_unreachable("Unhandled opcode!");
1113 default: llvm_unreachable("Unhandled submode!");
1114 case ARM_AM::ia: return ARM::LDMIA_UPD;
1115 case ARM_AM::ib: return ARM::LDMIB_UPD;
1116 case ARM_AM::da: return ARM::LDMDA_UPD;
1117 case ARM_AM::db: return ARM::LDMDB_UPD;
1124 default: llvm_unreachable("Unhandled submode!");
1125 case ARM_AM::ia: return ARM::STMIA_UPD;
1126 case ARM_AM::ib: return ARM::STMIB_UPD;
1127 case ARM_AM::da: return ARM::STMDA_UPD;
1128 case ARM_AM::db: return ARM::STMDB_UPD;
1133 default: llvm_unreachable("Unhandled submode!");
1134 case ARM_AM::ia: return ARM::t2LDMIA_UPD;
1135 case ARM_AM::db: return ARM::t2LDMDB_UPD;
1140 default: llvm_unreachable("Unhandled submode!");
1141 case ARM_AM::ia: return ARM::t2STMIA_UPD;
1142 case ARM_AM::db: return ARM::t2STMDB_UPD;
1146 default: llvm_unreachable("Unhandled submode!");
1147 case ARM_AM::ia: return ARM::VLDMSIA_UPD;
1148 case ARM_AM::db: return ARM::VLDMSDB_UPD;
1152 default: llvm_unreachable("Unhandled submode!");
1153 case ARM_AM::ia: return ARM::VLDMDIA_UPD;
1154 case ARM_AM::db: return ARM::VLDMDDB_UPD;
1158 default: llvm_unreachable("Unhandled submode!");
1159 case ARM_AM::ia: return ARM::VSTMSIA_UPD;
1160 case ARM_AM::db: return ARM::VSTMSDB_UPD;
1164 default: llvm_unreachable("Unhandled submode!");
1165 case ARM_AM::ia: return ARM::VSTMDIA_UPD;
1166 case ARM_AM::db: return ARM::VSTMDDB_UPD;
1171 /// Check if the given instruction increments or decrements a register and
1172 /// return the amount it is incremented/decremented. Returns 0 if the CPSR flags
1173 /// generated by the instruction are possibly read as well.
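/// For example (illustrative): with a matching predicate, an ARM-mode
/// "add r0, r0, #8" yields 8 and "sub r0, r0, #8" yields -8; any instruction
/// that is not a simple same-register add/sub of an immediate yields 0.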
1174 static int isIncrementOrDecrement(const MachineInstr &MI, unsigned Reg,
1175 ARMCC::CondCodes Pred, unsigned PredReg) {
1178 switch (MI.getOpcode()) {
1179 case ARM::tADDi8: Scale = 4; CheckCPSRDef = true; break;
1180 case ARM::tSUBi8: Scale = -4; CheckCPSRDef = true; break;
1182 case ARM::SUBri: Scale = -1; CheckCPSRDef = true; break;
1184 case ARM::ADDri: Scale = 1; CheckCPSRDef = true; break;
1185 case ARM::tADDspi: Scale = 4; CheckCPSRDef = false; break;
1186 case ARM::tSUBspi: Scale = -4; CheckCPSRDef = false; break;
1191 if (MI.getOperand(0).getReg() != Reg ||
1192 MI.getOperand(1).getReg() != Reg ||
1193 getInstrPredicate(MI, MIPredReg) != Pred ||
1194 MIPredReg != PredReg)
1197 if (CheckCPSRDef && definesCPSR(MI))
1199 return MI.getOperand(2).getImm() * Scale;
1202 /// Searches for an increment or decrement of \p Reg before \p MBBI.
1203 static MachineBasicBlock::iterator
1204 findIncDecBefore(MachineBasicBlock::iterator MBBI, unsigned Reg,
1205 ARMCC::CondCodes Pred, unsigned PredReg, int &Offset) {
1207 MachineBasicBlock &MBB = *MBBI->getParent();
1208 MachineBasicBlock::iterator BeginMBBI = MBB.begin();
1209 MachineBasicBlock::iterator EndMBBI = MBB.end();
1210 if (MBBI == BeginMBBI)
1213 // Skip debug values.
1214 MachineBasicBlock::iterator PrevMBBI = std::prev(MBBI);
1215 while (PrevMBBI->isDebugInstr() && PrevMBBI != BeginMBBI)
1218 Offset = isIncrementOrDecrement(*PrevMBBI, Reg, Pred, PredReg);
1219 return Offset == 0 ? EndMBBI : PrevMBBI;
/// Searches for an increment or decrement of \p Reg after \p MBBI.
1223 static MachineBasicBlock::iterator
1224 findIncDecAfter(MachineBasicBlock::iterator MBBI, unsigned Reg,
1225 ARMCC::CondCodes Pred, unsigned PredReg, int &Offset) {
1227 MachineBasicBlock &MBB = *MBBI->getParent();
1228 MachineBasicBlock::iterator EndMBBI = MBB.end();
1229 MachineBasicBlock::iterator NextMBBI = std::next(MBBI);
1230 // Skip debug values.
1231 while (NextMBBI != EndMBBI && NextMBBI->isDebugInstr())
1233 if (NextMBBI == EndMBBI)
1236 Offset = isIncrementOrDecrement(*NextMBBI, Reg, Pred, PredReg);
1237 return Offset == 0 ? EndMBBI : NextMBBI;
/// Fold preceding/trailing inc/dec of base register into the
1241 /// LDM/STM/VLDM{D|S}/VSTM{D|S} op when possible:
1243 /// stmia rn, <ra, rb, rc>
1244 /// rn := rn + 4 * 3;
1246 /// stmia rn!, <ra, rb, rc>
1248 /// rn := rn - 4 * 3;
1249 /// ldmia rn, <ra, rb, rc>
1251 /// ldmdb rn!, <ra, rb, rc>
1252 bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineInstr *MI) {
1253 // Thumb1 is already using updating loads/stores.
1254 if (isThumb1) return false;
1256 const MachineOperand &BaseOP = MI->getOperand(0);
1257 unsigned Base = BaseOP.getReg();
1258 bool BaseKill = BaseOP.isKill();
1259 unsigned PredReg = 0;
1260 ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
1261 unsigned Opcode = MI->getOpcode();
1262 DebugLoc DL = MI->getDebugLoc();
1264 // Can't use an updating ld/st if the base register is also a dest
1265 // register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
1266 for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i)
1267 if (MI->getOperand(i).getReg() == Base)
1270 int Bytes = getLSMultipleTransferSize(MI);
1271 MachineBasicBlock &MBB = *MI->getParent();
1272 MachineBasicBlock::iterator MBBI(MI);
1274 MachineBasicBlock::iterator MergeInstr
1275 = findIncDecBefore(MBBI, Base, Pred, PredReg, Offset);
1276 ARM_AM::AMSubMode Mode = getLoadStoreMultipleSubMode(Opcode);
1277 if (Mode == ARM_AM::ia && Offset == -Bytes) {
1279 } else if (Mode == ARM_AM::ib && Offset == -Bytes) {
1282 MergeInstr = findIncDecAfter(MBBI, Base, Pred, PredReg, Offset);
1283 if (((Mode != ARM_AM::ia && Mode != ARM_AM::ib) || Offset != Bytes) &&
1284 ((Mode != ARM_AM::da && Mode != ARM_AM::db) || Offset != -Bytes)) {
1286 // We couldn't find an inc/dec to merge. But if the base is dead, we
1287 // can still change to a writeback form as that will save us 2 bytes
1288 // of code size. It can create WAW hazards though, so only do it if
1289 // we're minimizing code size.
1290 if (!MBB.getParent()->getFunction().optForMinSize() || !BaseKill)
1293 bool HighRegsUsed = false;
1294 for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i)
1295 if (MI->getOperand(i).getReg() >= ARM::R8) {
1296 HighRegsUsed = true;
1301 MergeInstr = MBB.end();
1306 if (MergeInstr != MBB.end())
1307 MBB.erase(MergeInstr);
1309 unsigned NewOpc = getUpdatingLSMultipleOpcode(Opcode, Mode);
1310 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
1311 .addReg(Base, getDefRegState(true)) // WB base register
1312 .addReg(Base, getKillRegState(BaseKill))
1313 .addImm(Pred).addReg(PredReg);
1315 // Transfer the rest of operands.
1316 for (unsigned OpNum = 3, e = MI->getNumOperands(); OpNum != e; ++OpNum)
1317 MIB.add(MI->getOperand(OpNum));
1319 // Transfer memoperands.
1320 MIB.setMemRefs(MI->memoperands());
1326 static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc,
1327 ARM_AM::AddrOpc Mode) {
1330 return ARM::LDR_PRE_IMM;
1332 return ARM::STR_PRE_IMM;
1334 return Mode == ARM_AM::add ? ARM::VLDMSIA_UPD : ARM::VLDMSDB_UPD;
1336 return Mode == ARM_AM::add ? ARM::VLDMDIA_UPD : ARM::VLDMDDB_UPD;
1338 return Mode == ARM_AM::add ? ARM::VSTMSIA_UPD : ARM::VSTMSDB_UPD;
1340 return Mode == ARM_AM::add ? ARM::VSTMDIA_UPD : ARM::VSTMDDB_UPD;
1343 return ARM::t2LDR_PRE;
1346 return ARM::t2STR_PRE;
1347 default: llvm_unreachable("Unhandled opcode!");
1351 static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc,
1352 ARM_AM::AddrOpc Mode) {
1355 return ARM::LDR_POST_IMM;
1357 return ARM::STR_POST_IMM;
1359 return Mode == ARM_AM::add ? ARM::VLDMSIA_UPD : ARM::VLDMSDB_UPD;
1361 return Mode == ARM_AM::add ? ARM::VLDMDIA_UPD : ARM::VLDMDDB_UPD;
1363 return Mode == ARM_AM::add ? ARM::VSTMSIA_UPD : ARM::VSTMSDB_UPD;
1365 return Mode == ARM_AM::add ? ARM::VSTMDIA_UPD : ARM::VSTMDDB_UPD;
1368 return ARM::t2LDR_POST;
1371 return ARM::t2STR_POST;
1372 default: llvm_unreachable("Unhandled opcode!");
/// Fold preceding/trailing inc/dec of base register into the
1377 /// LDR/STR/FLD{D|S}/FST{D|S} op when possible:
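/// For example (illustrative):
///   ldr r1, [r0]
///   add r0, r0, #4
/// =>
///   ldr r1, [r0], #4      @ post-indexed form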
1378 bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineInstr *MI) {
1379 // Thumb1 doesn't have updating LDR/STR.
1380 // FIXME: Use LDM/STM with single register instead.
1381 if (isThumb1) return false;
1383 unsigned Base = getLoadStoreBaseOp(*MI).getReg();
1384 bool BaseKill = getLoadStoreBaseOp(*MI).isKill();
1385 unsigned Opcode = MI->getOpcode();
1386 DebugLoc DL = MI->getDebugLoc();
1387 bool isAM5 = (Opcode == ARM::VLDRD || Opcode == ARM::VLDRS ||
1388 Opcode == ARM::VSTRD || Opcode == ARM::VSTRS);
1389 bool isAM2 = (Opcode == ARM::LDRi12 || Opcode == ARM::STRi12);
1390 if (isi32Load(Opcode) || isi32Store(Opcode))
1391 if (MI->getOperand(2).getImm() != 0)
1393 if (isAM5 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0)
1396 // Can't do the merge if the destination register is the same as the would-be
1397 // writeback register.
1398 if (MI->getOperand(0).getReg() == Base)
1401 unsigned PredReg = 0;
1402 ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
1403 int Bytes = getLSMultipleTransferSize(MI);
1404 MachineBasicBlock &MBB = *MI->getParent();
1405 MachineBasicBlock::iterator MBBI(MI);
1407 MachineBasicBlock::iterator MergeInstr
1408 = findIncDecBefore(MBBI, Base, Pred, PredReg, Offset);
1410 if (!isAM5 && Offset == Bytes) {
1411 NewOpc = getPreIndexedLoadStoreOpcode(Opcode, ARM_AM::add);
1412 } else if (Offset == -Bytes) {
1413 NewOpc = getPreIndexedLoadStoreOpcode(Opcode, ARM_AM::sub);
1415 MergeInstr = findIncDecAfter(MBBI, Base, Pred, PredReg, Offset);
1416 if (Offset == Bytes) {
1417 NewOpc = getPostIndexedLoadStoreOpcode(Opcode, ARM_AM::add);
1418 } else if (!isAM5 && Offset == -Bytes) {
1419 NewOpc = getPostIndexedLoadStoreOpcode(Opcode, ARM_AM::sub);
1423 MBB.erase(MergeInstr);
1425 ARM_AM::AddrOpc AddSub = Offset < 0 ? ARM_AM::sub : ARM_AM::add;
1427 bool isLd = isLoadSingle(Opcode);
1429 // VLDM[SD]_UPD, VSTM[SD]_UPD
1430 // (There are no base-updating versions of VLDR/VSTR instructions, but the
    //  updating load/store-multiple instructions can be used with only one
    //  register.)
1433 MachineOperand &MO = MI->getOperand(0);
1434 BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
1435 .addReg(Base, getDefRegState(true)) // WB base register
1436 .addReg(Base, getKillRegState(isLd ? BaseKill : false))
1437 .addImm(Pred).addReg(PredReg)
1438 .addReg(MO.getReg(), (isLd ? getDefRegState(true) :
1439 getKillRegState(MO.isKill())));
1442 // LDR_PRE, LDR_POST
1443 if (NewOpc == ARM::LDR_PRE_IMM || NewOpc == ARM::LDRB_PRE_IMM) {
1444 BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg())
1445 .addReg(Base, RegState::Define)
1446 .addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
1448 int Imm = ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
1449 BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg())
1450 .addReg(Base, RegState::Define)
1454 .add(predOps(Pred, PredReg));
1457 // t2LDR_PRE, t2LDR_POST
1458 BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg())
1459 .addReg(Base, RegState::Define)
1462 .add(predOps(Pred, PredReg));
1465 MachineOperand &MO = MI->getOperand(0);
1466 // FIXME: post-indexed stores use am2offset_imm, which still encodes
    // the vestigial zero-reg offset register. When that's fixed, this clause
1468 // can be removed entirely.
1469 if (isAM2 && NewOpc == ARM::STR_POST_IMM) {
1470 int Imm = ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
1471 // STR_PRE, STR_POST
1472 BuildMI(MBB, MBBI, DL, TII->get(NewOpc), Base)
1473 .addReg(MO.getReg(), getKillRegState(MO.isKill()))
1477 .add(predOps(Pred, PredReg));
1479 // t2STR_PRE, t2STR_POST
1480 BuildMI(MBB, MBBI, DL, TII->get(NewOpc), Base)
1481 .addReg(MO.getReg(), getKillRegState(MO.isKill()))
1484 .add(predOps(Pred, PredReg));
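/// Fold a preceding/trailing increment or decrement of the base register into
/// a t2LDRDi8 / t2STRDi8, producing the pre- or post-indexed t2LDRD / t2STRD
/// form. (Descriptive comment: the add/sub must be by +/-8 and use the same
/// predicate as the memory op.)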
1492 bool ARMLoadStoreOpt::MergeBaseUpdateLSDouble(MachineInstr &MI) const {
1493 unsigned Opcode = MI.getOpcode();
1494 assert((Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8) &&
1495 "Must have t2STRDi8 or t2LDRDi8");
1496 if (MI.getOperand(3).getImm() != 0)
  // Behaviour for writeback is undefined if base register is the same as one
  // of the others.
1501 const MachineOperand &BaseOp = MI.getOperand(2);
1502 unsigned Base = BaseOp.getReg();
1503 const MachineOperand &Reg0Op = MI.getOperand(0);
1504 const MachineOperand &Reg1Op = MI.getOperand(1);
1505 if (Reg0Op.getReg() == Base || Reg1Op.getReg() == Base)
1509 ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
1510 MachineBasicBlock::iterator MBBI(MI);
1511 MachineBasicBlock &MBB = *MI.getParent();
1513 MachineBasicBlock::iterator MergeInstr = findIncDecBefore(MBBI, Base, Pred,
1516 if (Offset == 8 || Offset == -8) {
1517 NewOpc = Opcode == ARM::t2LDRDi8 ? ARM::t2LDRD_PRE : ARM::t2STRD_PRE;
1519 MergeInstr = findIncDecAfter(MBBI, Base, Pred, PredReg, Offset);
1520 if (Offset == 8 || Offset == -8) {
1521 NewOpc = Opcode == ARM::t2LDRDi8 ? ARM::t2LDRD_POST : ARM::t2STRD_POST;
1525 MBB.erase(MergeInstr);
1527 DebugLoc DL = MI.getDebugLoc();
1528 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
1529 if (NewOpc == ARM::t2LDRD_PRE || NewOpc == ARM::t2LDRD_POST) {
1530 MIB.add(Reg0Op).add(Reg1Op).addReg(BaseOp.getReg(), RegState::Define);
1532 assert(NewOpc == ARM::t2STRD_PRE || NewOpc == ARM::t2STRD_POST);
1533 MIB.addReg(BaseOp.getReg(), RegState::Define).add(Reg0Op).add(Reg1Op);
1535 MIB.addReg(BaseOp.getReg(), RegState::Kill)
1536 .addImm(Offset).addImm(Pred).addReg(PredReg);
1537 assert(TII->get(Opcode).getNumOperands() == 6 &&
1538 TII->get(NewOpc).getNumOperands() == 7 &&
1539 "Unexpected number of operands in Opcode specification.");
1541 // Transfer implicit operands.
1542 for (const MachineOperand &MO : MI.implicit_operands())
1544 MIB.setMemRefs(MI.memoperands());
1550 /// Returns true if instruction is a memory operation that this pass is capable
1551 /// of operating on.
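/// (Descriptive note: this accepts the i32 and VFP load/store opcodes handled
/// by this pass, with a register base and a single, non-volatile memory
/// operand that is at least 4-byte aligned, and whose register operands are
/// not undef.)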
1552 static bool isMemoryOp(const MachineInstr &MI) {
1553 unsigned Opcode = MI.getOpcode();
1573 if (!MI.getOperand(1).isReg())
1576 // When no memory operands are present, conservatively assume unaligned,
1577 // volatile, unfoldable.
1578 if (!MI.hasOneMemOperand())
1581 const MachineMemOperand &MMO = **MI.memoperands_begin();
1583 // Don't touch volatile memory accesses - we may be changing their order.
1584 if (MMO.isVolatile())
  // Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
  // not.
1589 if (MMO.getAlignment() < 4)
1592 // str <undef> could probably be eliminated entirely, but for now we just want
1593 // to avoid making a mess of it.
1594 // FIXME: Use str <undef> as a wildcard to enable better stm folding.
1595 if (MI.getOperand(0).isReg() && MI.getOperand(0).isUndef())
1598 // Likewise don't mess with references to undefined addresses.
1599 if (MI.getOperand(1).isUndef())
1605 static void InsertLDR_STR(MachineBasicBlock &MBB,
1606 MachineBasicBlock::iterator &MBBI, int Offset,
1607 bool isDef, unsigned NewOpc, unsigned Reg,
1608 bool RegDeadKill, bool RegUndef, unsigned BaseReg,
1609 bool BaseKill, bool BaseUndef, ARMCC::CondCodes Pred,
1610 unsigned PredReg, const TargetInstrInfo *TII) {
1612 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
1614 .addReg(Reg, getDefRegState(true) | getDeadRegState(RegDeadKill))
1615 .addReg(BaseReg, getKillRegState(BaseKill)|getUndefRegState(BaseUndef));
1616 MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
1618 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
1620 .addReg(Reg, getKillRegState(RegDeadKill) | getUndefRegState(RegUndef))
1621 .addReg(BaseReg, getKillRegState(BaseKill)|getUndefRegState(BaseUndef));
1622 MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
1626 bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
1627 MachineBasicBlock::iterator &MBBI) {
1628 MachineInstr *MI = &*MBBI;
1629 unsigned Opcode = MI->getOpcode();
1630 // FIXME: Code/comments below check Opcode == t2STRDi8, but this check returns
1631 // if we see this opcode.
1632 if (Opcode != ARM::LDRD && Opcode != ARM::STRD && Opcode != ARM::t2LDRDi8)
1635 const MachineOperand &BaseOp = MI->getOperand(2);
1636 unsigned BaseReg = BaseOp.getReg();
1637 unsigned EvenReg = MI->getOperand(0).getReg();
1638 unsigned OddReg = MI->getOperand(1).getReg();
1639 unsigned EvenRegNum = TRI->getDwarfRegNum(EvenReg, false);
1640 unsigned OddRegNum = TRI->getDwarfRegNum(OddReg, false);
1642 // ARM errata 602117: LDRD with base in list may result in incorrect base
1643 // register when interrupted or faulted.
1644 bool Errata602117 = EvenReg == BaseReg &&
1645 (Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8) && STI->isCortexM3();
1646 // ARM LDRD/STRD needs consecutive registers.
1647 bool NonConsecutiveRegs = (Opcode == ARM::LDRD || Opcode == ARM::STRD) &&
1648 (EvenRegNum % 2 != 0 || EvenRegNum + 1 != OddRegNum);
1650 if (!Errata602117 && !NonConsecutiveRegs)
1653 bool isT2 = Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8;
1654 bool isLd = Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8;
1655 bool EvenDeadKill = isLd ?
1656 MI->getOperand(0).isDead() : MI->getOperand(0).isKill();
1657 bool EvenUndef = MI->getOperand(0).isUndef();
1658 bool OddDeadKill = isLd ?
1659 MI->getOperand(1).isDead() : MI->getOperand(1).isKill();
1660 bool OddUndef = MI->getOperand(1).isUndef();
1661 bool BaseKill = BaseOp.isKill();
1662 bool BaseUndef = BaseOp.isUndef();
1663 assert((isT2 || MI->getOperand(3).getReg() == ARM::NoRegister) &&
1664 "register offset not handled below");
1665 int OffImm = getMemoryOpOffset(*MI);
1666 unsigned PredReg = 0;
1667 ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
1669 if (OddRegNum > EvenRegNum && OffImm == 0) {
    // Ascending register numbers and no offset. It's safe to change it to a
    // ldm or stm.
1672 unsigned NewOpc = (isLd)
1673 ? (isT2 ? ARM::t2LDMIA : ARM::LDMIA)
1674 : (isT2 ? ARM::t2STMIA : ARM::STMIA);
1676 BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
1677 .addReg(BaseReg, getKillRegState(BaseKill))
1678 .addImm(Pred).addReg(PredReg)
1679 .addReg(EvenReg, getDefRegState(isLd) | getDeadRegState(EvenDeadKill))
1680 .addReg(OddReg, getDefRegState(isLd) | getDeadRegState(OddDeadKill));
1683 BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
1684 .addReg(BaseReg, getKillRegState(BaseKill))
1685 .addImm(Pred).addReg(PredReg)
1687 getKillRegState(EvenDeadKill) | getUndefRegState(EvenUndef))
1689 getKillRegState(OddDeadKill) | getUndefRegState(OddUndef));
1693 // Split into two instructions.
1694 unsigned NewOpc = (isLd)
1695 ? (isT2 ? (OffImm < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDRi12)
1696 : (isT2 ? (OffImm < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STRi12);
1697 // Be extra careful for thumb2. t2LDRi8 can't reference a zero offset,
1698 // so adjust and use t2LDRi12 here for that.
1699 unsigned NewOpc2 = (isLd)
1700 ? (isT2 ? (OffImm+4 < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDRi12)
1701 : (isT2 ? (OffImm+4 < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STRi12);
1702 // If this is a load, make sure the first load does not clobber the base
1703 // register before the second load reads it.
1704 if (isLd && TRI->regsOverlap(EvenReg, BaseReg)) {
1705 assert(!TRI->regsOverlap(OddReg, BaseReg));
1706 InsertLDR_STR(MBB, MBBI, OffImm + 4, isLd, NewOpc2, OddReg, OddDeadKill,
1707 false, BaseReg, false, BaseUndef, Pred, PredReg, TII);
1708 InsertLDR_STR(MBB, MBBI, OffImm, isLd, NewOpc, EvenReg, EvenDeadKill,
1709 false, BaseReg, BaseKill, BaseUndef, Pred, PredReg, TII);
1711 if (OddReg == EvenReg && EvenDeadKill) {
1712 // If the two source operands are the same, the kill marker is
1713 // probably on the first one. e.g.
1714 // t2STRDi8 killed %r5, %r5, killed %r9, 0, 14, %reg0
1715 EvenDeadKill = false;
1718 // Never kill the base register in the first instruction.
1719 if (EvenReg == BaseReg)
1720 EvenDeadKill = false;
1721 InsertLDR_STR(MBB, MBBI, OffImm, isLd, NewOpc, EvenReg, EvenDeadKill,
1722 EvenUndef, BaseReg, false, BaseUndef, Pred, PredReg, TII);
1723 InsertLDR_STR(MBB, MBBI, OffImm + 4, isLd, NewOpc2, OddReg, OddDeadKill,
1724 OddUndef, BaseReg, BaseKill, BaseUndef, Pred, PredReg, TII);
1732 MBBI = MBB.erase(MBBI);
1736 /// An optimization pass to turn multiple LDR / STR ops of the same base and
1737 /// incrementing offset into LDM / STM ops.
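/// For example (illustrative):
///   str r1, [r0]
///   str r2, [r0, #4]
///   str r3, [r0, #8]
/// =>
///   stmia r0, {r1, r2, r3}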
1738 bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
1740 unsigned CurrBase = 0;
1741 unsigned CurrOpc = ~0u;
1742 ARMCC::CondCodes CurrPred = ARMCC::AL;
1743 unsigned Position = 0;
1744 assert(Candidates.size() == 0);
1745 assert(MergeBaseCandidates.size() == 0);
1746 LiveRegsValid = false;
1748 for (MachineBasicBlock::iterator I = MBB.end(), MBBI; I != MBB.begin();
1750 // The instruction in front of the iterator is the one we look at.
1751 MBBI = std::prev(I);
1752 if (FixInvalidRegPairOp(MBB, MBBI))
1756 if (isMemoryOp(*MBBI)) {
1757 unsigned Opcode = MBBI->getOpcode();
1758 const MachineOperand &MO = MBBI->getOperand(0);
1759 unsigned Reg = MO.getReg();
1760 unsigned Base = getLoadStoreBaseOp(*MBBI).getReg();
1761 unsigned PredReg = 0;
1762 ARMCC::CondCodes Pred = getInstrPredicate(*MBBI, PredReg);
1763 int Offset = getMemoryOpOffset(*MBBI);
1764 if (CurrBase == 0) {
1765 // Start of a new chain.
1769 MemOps.push_back(MemOpQueueEntry(*MBBI, Offset, Position));
1772 // Note: No need to match PredReg in the next if.
1773 if (CurrOpc == Opcode && CurrBase == Base && CurrPred == Pred) {
        // Watch out for:
        //   r4 := ldr [r0, #8]
1776 // r4 := ldr [r0, #4]
1779 // If a load overrides the base register or a register loaded by
1780 // another load in our chain, we cannot take this instruction.
1781 bool Overlap = false;
1782 if (isLoadSingle(Opcode)) {
1783 Overlap = (Base == Reg);
1785 for (const MemOpQueueEntry &E : MemOps) {
1786 if (TRI->regsOverlap(Reg, E.MI->getOperand(0).getReg())) {
1795 // Check offset and sort memory operation into the current chain.
1796 if (Offset > MemOps.back().Offset) {
1797 MemOps.push_back(MemOpQueueEntry(*MBBI, Offset, Position));
1800 MemOpQueue::iterator MI, ME;
1801 for (MI = MemOps.begin(), ME = MemOps.end(); MI != ME; ++MI) {
1802 if (Offset < MI->Offset) {
1803 // Found a place to insert.
1806 if (Offset == MI->Offset) {
1807 // Collision, abort.
1812 if (MI != MemOps.end()) {
1813 MemOps.insert(MI, MemOpQueueEntry(*MBBI, Offset, Position));
1820 // Don't advance the iterator; The op will start a new chain next.
1823 // Fallthrough to look into existing chain.
1824 } else if (MBBI->isDebugInstr()) {
1826 } else if (MBBI->getOpcode() == ARM::t2LDRDi8 ||
1827 MBBI->getOpcode() == ARM::t2STRDi8) {
      // ARMPreAllocLoadStoreOpt has already formed some LDRD/STRD instructions;
      // remember them because we may still be able to merge add/sub into them.
1830 MergeBaseCandidates.push_back(&*MBBI);
1833 // If we are here then the chain is broken; Extract candidates for a merge.
1834 if (MemOps.size() > 0) {
1835 FormCandidates(MemOps);
1836 // Reset for the next chain.
1839 CurrPred = ARMCC::AL;
1843 if (MemOps.size() > 0)
1844 FormCandidates(MemOps);
  // Sort candidates so they get processed from the end to the beginning of the
  // basic block later; this is necessary for liveness calculation.
1848 auto LessThan = [](const MergeCandidate* M0, const MergeCandidate *M1) {
1849 return M0->InsertPos < M1->InsertPos;
1851 llvm::sort(Candidates, LessThan);
1853 // Go through list of candidates and merge.
1854 bool Changed = false;
1855 for (const MergeCandidate *Candidate : Candidates) {
1856 if (Candidate->CanMergeToLSMulti || Candidate->CanMergeToLSDouble) {
1857 MachineInstr *Merged = MergeOpsUpdate(*Candidate);
1858 // Merge preceding/trailing base inc/dec into the merged op.
1861 unsigned Opcode = Merged->getOpcode();
1862 if (Opcode == ARM::t2STRDi8 || Opcode == ARM::t2LDRDi8)
1863 MergeBaseUpdateLSDouble(*Merged);
1865 MergeBaseUpdateLSMultiple(Merged);
1867 for (MachineInstr *MI : Candidate->Instrs) {
1868 if (MergeBaseUpdateLoadStore(MI))
1873 assert(Candidate->Instrs.size() == 1);
1874 if (MergeBaseUpdateLoadStore(Candidate->Instrs.front()))
1879 // Try to fold add/sub into the LDRD/STRD formed by ARMPreAllocLoadStoreOpt.
1880 for (MachineInstr *MI : MergeBaseCandidates)
1881 MergeBaseUpdateLSDouble(*MI);
1882 MergeBaseCandidates.clear();
/// If this is an exit BB, try merging the return ops ("bx lr" and "mov pc, lr")
/// into the preceding stack restore so it directly restores the value of LR
/// into pc.
///   ldmfd sp!, {..., lr}
///   bx lr
/// or
///   ldmfd sp!, {..., lr}
///   mov pc, lr
/// =>
///   ldmfd sp!, {..., pc}
1897 bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
1898 // Thumb1 LDM doesn't allow high registers.
1899 if (isThumb1) return false;
1900 if (MBB.empty()) return false;
1902 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
1903 if (MBBI != MBB.begin() && MBBI != MBB.end() &&
1904 (MBBI->getOpcode() == ARM::BX_RET ||
1905 MBBI->getOpcode() == ARM::tBX_RET ||
1906 MBBI->getOpcode() == ARM::MOVPCLR)) {
1907 MachineBasicBlock::iterator PrevI = std::prev(MBBI);
1908 // Ignore any debug instructions.
1909 while (PrevI->isDebugInstr() && PrevI != MBB.begin())
1911 MachineInstr &PrevMI = *PrevI;
1912 unsigned Opcode = PrevMI.getOpcode();
1913 if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD ||
1914 Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD ||
1915 Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
1916 MachineOperand &MO = PrevMI.getOperand(PrevMI.getNumOperands() - 1);
1917 if (MO.getReg() != ARM::LR)
1919 unsigned NewOpc = (isThumb2 ? ARM::t2LDMIA_RET : ARM::LDMIA_RET);
1920 assert(((isThumb2 && Opcode == ARM::t2LDMIA_UPD) ||
1921 Opcode == ARM::LDMIA_UPD) && "Unsupported multiple load-return!");
1922 PrevMI.setDesc(TII->get(NewOpc));
1924 PrevMI.copyImplicitOps(*MBB.getParent(), *MBBI);
1926 // We now restore LR into PC so it is not live-out of the return block
1927 // anymore: Clear the CSI Restored bit.
1928 MachineFrameInfo &MFI = MBB.getParent()->getFrameInfo();
1929 // CSI should be fixed after PrologEpilog Insertion
1930 assert(MFI.isCalleeSavedInfoValid() && "CSI should be valid");
1931 for (CalleeSavedInfo &Info : MFI.getCalleeSavedInfo()) {
1932 if (Info.getReg() == ARM::LR) {
1933 Info.setRestored(false);
1943 bool ARMLoadStoreOpt::CombineMovBx(MachineBasicBlock &MBB) {
1944 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
1945 if (MBBI == MBB.begin() || MBBI == MBB.end() ||
1946 MBBI->getOpcode() != ARM::tBX_RET)
1949 MachineBasicBlock::iterator Prev = MBBI;
1951 if (Prev->getOpcode() != ARM::tMOVr || !Prev->definesRegister(ARM::LR))
1954 for (auto Use : Prev->uses())
1956 assert(STI->hasV4TOps());
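// Sketch of the intended rewrite (example register only):
//   tMOVr lr, r3
//   tBX_RET
// becomes
//   tBX r3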
1957 BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(ARM::tBX))
1958 .addReg(Use.getReg(), RegState::Kill)
1959 .add(predOps(ARMCC::AL))
1960 .copyImplicitOps(*MBBI);
1966 llvm_unreachable("tMOVr doesn't kill a reg before tBX_RET?");
1969 bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
1970 if (skipFunction(Fn.getFunction()))
1974 STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
1975 TL = STI->getTargetLowering();
1976 AFI = Fn.getInfo<ARMFunctionInfo>();
1977 TII = STI->getInstrInfo();
1978 TRI = STI->getRegisterInfo();
1980 RegClassInfoValid = false;
1981 isThumb2 = AFI->isThumb2Function();
1982 isThumb1 = AFI->isThumbFunction() && !isThumb2;
1984 bool Modified = false;
1985 for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
1987 MachineBasicBlock &MBB = *MFI;
1988 Modified |= LoadStoreMultipleOpti(MBB);
1989 if (STI->hasV5TOps())
1990 Modified |= MergeReturnIntoLDM(MBB);
1992 Modified |= CombineMovBx(MBB);
1995 Allocator.DestroyAll();
1999 #define ARM_PREALLOC_LOAD_STORE_OPT_NAME \
2000 "ARM pre- register allocation load / store optimization pass"
2004 /// Pre- register allocation pass that moves loads / stores from consecutive
2005 /// locations closer together to make it more likely they will be combined later.
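/// For example (illustrative registers/offsets only): two loads off the same
/// base,
///   ldr r1, [r0, #4]
///   ... unrelated instructions ...
///   ldr r2, [r0, #8]
/// are moved next to each other so the post-RA pass can turn them into an
/// LDRD or LDM.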
2006 struct ARMPreAllocLoadStoreOpt : public MachineFunctionPass {
2010 const DataLayout *TD;
2011 const TargetInstrInfo *TII;
2012 const TargetRegisterInfo *TRI;
2013 const ARMSubtarget *STI;
2014 MachineRegisterInfo *MRI;
2015 MachineFunction *MF;
2017 ARMPreAllocLoadStoreOpt() : MachineFunctionPass(ID) {}
2019 bool runOnMachineFunction(MachineFunction &Fn) override;
2021 StringRef getPassName() const override {
2022 return ARM_PREALLOC_LOAD_STORE_OPT_NAME;
2025 void getAnalysisUsage(AnalysisUsage &AU) const override {
2026 AU.addRequired<AAResultsWrapperPass>();
2027 MachineFunctionPass::getAnalysisUsage(AU);
2031 bool CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1, DebugLoc &dl,
2032 unsigned &NewOpc, unsigned &EvenReg,
2033 unsigned &OddReg, unsigned &BaseReg,
2035 unsigned &PredReg, ARMCC::CondCodes &Pred,
2037 bool RescheduleOps(MachineBasicBlock *MBB,
2038 SmallVectorImpl<MachineInstr *> &Ops,
2039 unsigned Base, bool isLd,
2040 DenseMap<MachineInstr*, unsigned> &MI2LocMap);
2041 bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
2044 } // end anonymous namespace
2046 char ARMPreAllocLoadStoreOpt::ID = 0;
2048 INITIALIZE_PASS(ARMPreAllocLoadStoreOpt, "arm-prera-ldst-opt",
2049 ARM_PREALLOC_LOAD_STORE_OPT_NAME, false, false)
2051 bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
2052 if (AssumeMisalignedLoadStores || skipFunction(Fn.getFunction()))
2055 TD = &Fn.getDataLayout();
2056 STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
2057 TII = STI->getInstrInfo();
2058 TRI = STI->getRegisterInfo();
2059 MRI = &Fn.getRegInfo();
2061 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2063 bool Modified = false;
2064 for (MachineBasicBlock &MFI : Fn)
2065 Modified |= RescheduleLoadStoreInstrs(&MFI);
2070 static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
2071 MachineBasicBlock::iterator I,
2072 MachineBasicBlock::iterator E,
2073 SmallPtrSetImpl<MachineInstr*> &MemOps,
2074 SmallSet<unsigned, 4> &MemRegs,
2075 const TargetRegisterInfo *TRI,
2076 AliasAnalysis *AA) {
2077 // Are there stores / loads / calls between them?
2078 SmallSet<unsigned, 4> AddedRegPressure;
2080 if (I->isDebugInstr() || MemOps.count(&*I))
2082 if (I->isCall() || I->isTerminator() || I->hasUnmodeledSideEffects())
2084 if (I->mayStore() || (!isLd && I->mayLoad()))
2085 for (MachineInstr *MemOp : MemOps)
2086 if (I->mayAlias(AA, *MemOp, /*UseTBAA*/ false))
2088 for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
2089 MachineOperand &MO = I->getOperand(j);
2092 unsigned Reg = MO.getReg();
2093 if (MO.isDef() && TRI->regsOverlap(Reg, Base))
2095 if (Reg != Base && !MemRegs.count(Reg))
2096 AddedRegPressure.insert(Reg);
2100 // Estimate register pressure increase due to the transformation.
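// Rough numeric illustration (added, not from the original source): if the ops
// being moved use at most four distinct data registers the move is always
// considered cheap; with, say, six distinct registers it is rejected once more
// than twelve other registers become live across the skipped region.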
2101 if (MemRegs.size() <= 4)
2102 // Ok if we are moving a small number of instructions.
2103 return true;
2104 return AddedRegPressure.size() <= MemRegs.size() * 2;
2108 ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
2109 DebugLoc &dl, unsigned &NewOpc,
2111 unsigned &SecondReg,
2112 unsigned &BaseReg, int &Offset,
2114 ARMCC::CondCodes &Pred,
2116 // Make sure we're allowed to generate LDRD/STRD.
2117 if (!STI->hasV5TEOps())
2120 // FIXME: VLDRS / VSTRS -> VLDRD / VSTRD
2122 unsigned Opcode = Op0->getOpcode();
2123 if (Opcode == ARM::LDRi12) {
2125 } else if (Opcode == ARM::STRi12) {
2127 } else if (Opcode == ARM::t2LDRi8 || Opcode == ARM::t2LDRi12) {
2128 NewOpc = ARM::t2LDRDi8;
2131 } else if (Opcode == ARM::t2STRi8 || Opcode == ARM::t2STRi12) {
2132 NewOpc = ARM::t2STRDi8;
2139 // Make sure the base address satisfies the i64 ld / st alignment requirement.
2140 // At the moment, we ignore the memory operand's value.
2141 // If we want to use AliasAnalysis, we should check it accordingly.
2142 if (!Op0->hasOneMemOperand() ||
2143 (*Op0->memoperands_begin())->isVolatile())
2146 unsigned Align = (*Op0->memoperands_begin())->getAlignment();
2147 const Function &Func = MF->getFunction();
2148 unsigned ReqAlign = STI->hasV6Ops()
2149 ? TD->getABITypeAlignment(Type::getInt64Ty(Func.getContext()))
2150 : 8; // Pre-v6 needs 8-byte alignment
2151 if (Align < ReqAlign)
2154 // Then make sure the immediate offset fits.
2155 int OffImm = getMemoryOpOffset(*Op0);
2157 int Limit = (1 << 8) * Scale;
2158 if (OffImm >= Limit || (OffImm <= -Limit) || (OffImm & (Scale-1)))
2162 ARM_AM::AddrOpc AddSub = ARM_AM::add;
2164 AddSub = ARM_AM::sub;
2167 int Limit = (1 << 8) * Scale;
2168 if (OffImm >= Limit || (OffImm & (Scale-1)))
2170 Offset = ARM_AM::getAM3Opc(AddSub, OffImm);
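// Illustrative ranges (assuming the Scale values chosen when NewOpc was
// selected above): the ARM-mode LDRD/STRD addrmode3 immediate is an 8-bit
// magnitude (0..255), while the Thumb2 t2LDRDi8/t2STRDi8 offset must be a
// multiple of 4 in (-1024, 1024).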
2172 FirstReg = Op0->getOperand(0).getReg();
2173 SecondReg = Op1->getOperand(0).getReg();
2174 if (FirstReg == SecondReg)
2176 BaseReg = Op0->getOperand(1).getReg();
2177 Pred = getInstrPredicate(*Op0, PredReg);
2178 dl = Op0->getDebugLoc();
2182 bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
2183 SmallVectorImpl<MachineInstr *> &Ops,
2184 unsigned Base, bool isLd,
2185 DenseMap<MachineInstr*, unsigned> &MI2LocMap) {
2186 bool RetVal = false;
2188 // Sort by offset (in reverse order).
2189 llvm::sort(Ops, [](const MachineInstr *LHS, const MachineInstr *RHS) {
2190 int LOffset = getMemoryOpOffset(*LHS);
2191 int ROffset = getMemoryOpOffset(*RHS);
2192 assert(LHS == RHS || LOffset != ROffset);
2193 return LOffset > ROffset;
2196 // The loads / stores of the same base are in order. Scan them from first to
2197 // last and check for the following:
2198 // 1. Any def of base.
2199 // 2. Any gaps.
2200 while (Ops.size() > 1) {
2201 unsigned FirstLoc = ~0U;
2202 unsigned LastLoc = 0;
2203 MachineInstr *FirstOp = nullptr;
2204 MachineInstr *LastOp = nullptr;
2206 unsigned LastOpcode = 0;
2207 unsigned LastBytes = 0;
2208 unsigned NumMove = 0;
2209 for (int i = Ops.size() - 1; i >= 0; --i) {
2210 // Make sure each operation has the same kind.
2211 MachineInstr *Op = Ops[i];
2213 = getLoadStoreMultipleOpcode(Op->getOpcode(), ARM_AM::ia);
2214 if (LastOpcode && LSMOpcode != LastOpcode)
2217 // Check that we have a continuous set of offsets.
2218 int Offset = getMemoryOpOffset(*Op);
2219 unsigned Bytes = getLSMultipleTransferSize(Op);
2221 if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
2225 // Don't try to reschedule too many instructions.
2226 if (NumMove == 8) // FIXME: Tune this limit.
2229 // Found a mergeable instruction; save information about it.
2231 LastOffset = Offset;
2233 LastOpcode = LSMOpcode;
2235 unsigned Loc = MI2LocMap[Op];
2236 if (Loc <= FirstLoc) {
2240 if (Loc >= LastLoc) {
2249 SmallPtrSet<MachineInstr*, 4> MemOps;
2250 SmallSet<unsigned, 4> MemRegs;
2251 for (size_t i = Ops.size() - NumMove, e = Ops.size(); i != e; ++i) {
2252 MemOps.insert(Ops[i]);
2253 MemRegs.insert(Ops[i]->getOperand(0).getReg());
2256 // Be conservative: if the instructions are too far apart, don't
2257 // move them. We want to limit the increase of register pressure.
2258 bool DoMove = (LastLoc - FirstLoc) <= NumMove*4; // FIXME: Tune this.
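// E.g. (illustrative): when moving two ops, they must lie within eight
// instruction positions of each other before the more expensive safety check
// below is even attempted.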
2260 DoMove = IsSafeAndProfitableToMove(isLd, Base, FirstOp, LastOp,
2261 MemOps, MemRegs, TRI, AA);
2263 for (unsigned i = 0; i != NumMove; ++i)
2266 // This is the new location for the loads / stores.
2267 MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
2268 while (InsertPos != MBB->end() &&
2269 (MemOps.count(&*InsertPos) || InsertPos->isDebugInstr()))
2272 // If we are moving a pair of loads / stores, see if it makes sense
2273 // to try to allocate a pair of registers that can form register pairs.
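// Sketch of the pairing goal (example registers only):
//   ldr r4, [r2]
//   ldr r5, [r2, #4]
// may be rewritten as
//   ldrd r4, r5, [r2]
// provided CanFormLdStDWord is satisfied below.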
2274 MachineInstr *Op0 = Ops.back();
2275 MachineInstr *Op1 = Ops[Ops.size()-2];
2276 unsigned FirstReg = 0, SecondReg = 0;
2277 unsigned BaseReg = 0, PredReg = 0;
2278 ARMCC::CondCodes Pred = ARMCC::AL;
2280 unsigned NewOpc = 0;
2283 if (NumMove == 2 && CanFormLdStDWord(Op0, Op1, dl, NewOpc,
2284 FirstReg, SecondReg, BaseReg,
2285 Offset, PredReg, Pred, isT2)) {
2289 const MCInstrDesc &MCID = TII->get(NewOpc);
2290 const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI, *MF);
2291 MRI->constrainRegClass(FirstReg, TRC);
2292 MRI->constrainRegClass(SecondReg, TRC);
2294 // Form the pair instruction.
2296 MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
2297 .addReg(FirstReg, RegState::Define)
2298 .addReg(SecondReg, RegState::Define)
2300 // FIXME: We're converting from LDRi12 to an insn that still
2301 // uses addrmode2, so we need an explicit offset reg. It should
2302 // always be reg0 since we're transforming LDRi12s.
2305 MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
2306 MIB.cloneMergedMemRefs({Op0, Op1});
2307 LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n");
2310 MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
2314 // FIXME: We're converting from STRi12 to an insn that still
2315 // uses addrmode2, so we need an explicit offset reg. It should
2316 // always be reg0 since we're transforming STRi12s.
2319 MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
2320 MIB.cloneMergedMemRefs({Op0, Op1});
2321 LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n");
2328 // Add register allocation hints to form register pairs.
2329 MRI->setRegAllocationHint(FirstReg, ARMRI::RegPairEven, SecondReg);
2330 MRI->setRegAllocationHint(SecondReg, ARMRI::RegPairOdd, FirstReg);
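// Note (added for clarity): ARM-mode LDRD/STRD requires an even/odd
// consecutive register pair (e.g. r4/r5); the hints above steer the register
// allocator toward producing such a pair.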
2333 for (unsigned i = 0; i != NumMove; ++i) {
2334 MachineInstr *Op = Ops.back();
2336 MBB->splice(InsertPos, MBB, Op);
2340 NumLdStMoved += NumMove;
2350 ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
2351 bool RetVal = false;
2353 DenseMap<MachineInstr*, unsigned> MI2LocMap;
2354 DenseMap<unsigned, SmallVector<MachineInstr *, 4>> Base2LdsMap;
2355 DenseMap<unsigned, SmallVector<MachineInstr *, 4>> Base2StsMap;
2356 SmallVector<unsigned, 4> LdBases;
2357 SmallVector<unsigned, 4> StBases;
2360 MachineBasicBlock::iterator MBBI = MBB->begin();
2361 MachineBasicBlock::iterator E = MBB->end();
2363 for (; MBBI != E; ++MBBI) {
2364 MachineInstr &MI = *MBBI;
2365 if (MI.isCall() || MI.isTerminator()) {
2366 // Stop at barriers.
2371 if (!MI.isDebugInstr())
2372 MI2LocMap[&MI] = ++Loc;
2374 if (!isMemoryOp(MI))
2376 unsigned PredReg = 0;
2377 if (getInstrPredicate(MI, PredReg) != ARMCC::AL)
2380 int Opc = MI.getOpcode();
2381 bool isLd = isLoadSingle(Opc);
2382 unsigned Base = MI.getOperand(1).getReg();
2383 int Offset = getMemoryOpOffset(MI);
2385 bool StopHere = false;
2387 DenseMap<unsigned, SmallVector<MachineInstr *, 4>>::iterator BI =
2388 Base2LdsMap.find(Base);
2389 if (BI != Base2LdsMap.end()) {
2390 for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
2391 if (Offset == getMemoryOpOffset(*BI->second[i])) {
2397 BI->second.push_back(&MI);
2399 Base2LdsMap[Base].push_back(&MI);
2400 LdBases.push_back(Base);
2403 DenseMap<unsigned, SmallVector<MachineInstr *, 4>>::iterator BI =
2404 Base2StsMap.find(Base);
2405 if (BI != Base2StsMap.end()) {
2406 for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
2407 if (Offset == getMemoryOpOffset(*BI->second[i])) {
2413 BI->second.push_back(&MI);
2415 Base2StsMap[Base].push_back(&MI);
2416 StBases.push_back(Base);
2421 // Found a duplicate (a base+offset combination that's seen earlier).
2428 // Re-schedule loads.
2429 for (unsigned i = 0, e = LdBases.size(); i != e; ++i) {
2430 unsigned Base = LdBases[i];
2431 SmallVectorImpl<MachineInstr *> &Lds = Base2LdsMap[Base];
2433 RetVal |= RescheduleOps(MBB, Lds, Base, true, MI2LocMap);
2436 // Re-schedule stores.
2437 for (unsigned i = 0, e = StBases.size(); i != e; ++i) {
2438 unsigned Base = StBases[i];
2439 SmallVectorImpl<MachineInstr *> &Sts = Base2StsMap[Base];
2441 RetVal |= RescheduleOps(MBB, Sts, Base, false, MI2LocMap);
2445 Base2LdsMap.clear();
2446 Base2StsMap.clear();
2455 /// Returns an instance of the load / store optimization pass.
2456 FunctionPass *llvm::createARMLoadStoreOptimizationPass(bool PreAlloc) {
2457 if (PreAlloc)
2458 return new ARMPreAllocLoadStoreOpt();
2459 return new ARMLoadStoreOpt();