//===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This file contains a pass that performs load / store related peephole
/// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "ThumbRegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;

#define DEBUG_TYPE "arm-ldst-opt"

STATISTIC(NumLDMGened , "Number of ldm instructions generated");
STATISTIC(NumSTMGened , "Number of stm instructions generated");
STATISTIC(NumVLDMGened, "Number of vldm instructions generated");
STATISTIC(NumVSTMGened, "Number of vstm instructions generated");
STATISTIC(NumLdStMoved, "Number of load / store instructions moved");
STATISTIC(NumLDRDFormed,"Number of ldrd created before allocation");
STATISTIC(NumSTRDFormed,"Number of strd created before allocation");
STATISTIC(NumLDRD2LDM,  "Number of ldrd instructions turned back into ldm");
STATISTIC(NumSTRD2STM,  "Number of strd instructions turned back into stm");
STATISTIC(NumLDRD2LDR,  "Number of ldrd instructions turned back into ldr's");
STATISTIC(NumSTRD2STR,  "Number of strd instructions turned back into str's");

/// This switch disables formation of double/multi instructions that could
/// potentially lead to (new) alignment traps even with CCR.UNALIGN_TRP
/// disabled. This can be used to create libraries that are robust even when
/// users provoke undefined behaviour by supplying misaligned pointers.
/// \see mayCombineMisaligned()
static cl::opt<bool>
AssumeMisalignedLoadStores("arm-assume-misaligned-load-store", cl::Hidden,
    cl::init(false), cl::desc("Be more conservative in ARM load/store opt"));

#define ARM_LOAD_STORE_OPT_NAME "ARM load / store optimization pass"

namespace {
  /// Post- register allocation pass that combines load / store instructions
  /// to form ldm / stm instructions.
  struct ARMLoadStoreOpt : public MachineFunctionPass {
    static char ID;
    ARMLoadStoreOpt() : MachineFunctionPass(ID) {}

    const MachineFunction *MF;
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    const ARMSubtarget *STI;
    const TargetLowering *TL;
    const ARMFunctionInfo *AFI;
    LivePhysRegs LiveRegs;
    RegisterClassInfo RegClassInfo;
    MachineBasicBlock::const_iterator LiveRegPos;
    bool LiveRegsValid;
    bool RegClassInfoValid;
    bool isThumb1, isThumb2;

    bool runOnMachineFunction(MachineFunction &Fn) override;

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    StringRef getPassName() const override { return ARM_LOAD_STORE_OPT_NAME; }

  private:
    /// A set of load/store MachineInstrs with same base register sorted by
    /// offset.
    struct MemOpQueueEntry {
      MachineInstr *MI;
      int Offset;        ///< Load/Store offset.
      unsigned Position; ///< Position as counted from end of basic block.
      MemOpQueueEntry(MachineInstr &MI, int Offset, unsigned Position)
          : MI(&MI), Offset(Offset), Position(Position) {}
    };
    typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;

    /// A set of MachineInstrs that fulfill (nearly all) conditions to get
    /// merged into a LDM/STM.
    struct MergeCandidate {
      /// List of instructions ordered by load/store offset.
      SmallVector<MachineInstr*, 4> Instrs;
      /// Index in Instrs of the instruction being latest in the schedule.
      unsigned LatestMIIdx;
      /// Index in Instrs of the instruction being earliest in the schedule.
      unsigned EarliestMIIdx;
      /// Index into the basic block where the merged instruction will be
      /// inserted. (See MemOpQueueEntry.Position)
      unsigned InsertPos;
      /// Whether the instructions can be merged into a ldm/stm instruction.
      bool CanMergeToLSMulti;
      /// Whether the instructions can be merged into a ldrd/strd instruction.
      bool CanMergeToLSDouble;
    };
    SpecificBumpPtrAllocator<MergeCandidate> Allocator;
    SmallVector<const MergeCandidate*,4> Candidates;
    SmallVector<MachineInstr*,4> MergeBaseCandidates;

    void moveLiveRegsBefore(const MachineBasicBlock &MBB,
                            MachineBasicBlock::const_iterator Before);
    unsigned findFreeReg(const TargetRegisterClass &RegClass);
    void UpdateBaseRegUses(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
                           unsigned Base, unsigned WordOffset,
                           ARMCC::CondCodes Pred, unsigned PredReg);
    MachineInstr *CreateLoadStoreMulti(
        MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
        int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
        ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
        ArrayRef<std::pair<unsigned, bool>> Regs);
    MachineInstr *CreateLoadStoreDouble(
        MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
        int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
        ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
        ArrayRef<std::pair<unsigned, bool>> Regs) const;
    void FormCandidates(const MemOpQueue &MemOps);
    MachineInstr *MergeOpsUpdate(const MergeCandidate &Cand);
    bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &MBBI);
    bool MergeBaseUpdateLoadStore(MachineInstr *MI);
    bool MergeBaseUpdateLSMultiple(MachineInstr *MI);
    bool MergeBaseUpdateLSDouble(MachineInstr &MI) const;
    bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
    bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
    bool CombineMovBx(MachineBasicBlock &MBB);
  };
  char ARMLoadStoreOpt::ID = 0;
} // end anonymous namespace

INITIALIZE_PASS(ARMLoadStoreOpt, "arm-ldst-opt", ARM_LOAD_STORE_OPT_NAME, false,
                false)

static bool definesCPSR(const MachineInstr &MI) {
  for (const auto &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef() && MO.getReg() == ARM::CPSR && !MO.isDead())
      // If the instruction has live CPSR def, then it's not safe to fold it
      // into load / store.
      return true;
  }
  return false;
}

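// Example for definesCPSR() above (an illustrative sketch, not from the test
// suite): a Thumb1 "tADDi8" whose flags result is still read by a later
// conditional instruction has a live CPSR def, so definesCPSR() returns true;
// the same add with a dead CPSR def returns false and may be folded.
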
static int getMemoryOpOffset(const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
  unsigned NumOperands = MI.getDesc().getNumOperands();
  unsigned OffField = MI.getOperand(NumOperands - 3).getImm();

  if (Opcode == ARM::t2LDRi12 || Opcode == ARM::t2LDRi8 ||
      Opcode == ARM::t2STRi12 || Opcode == ARM::t2STRi8 ||
      Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8 ||
      Opcode == ARM::LDRi12 || Opcode == ARM::STRi12)
    return OffField;

  // Thumb1 immediate offsets are scaled by 4
  if (Opcode == ARM::tLDRi || Opcode == ARM::tSTRi ||
      Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi)
    return OffField * 4;

  int Offset = isAM3 ? ARM_AM::getAM3Offset(OffField)
                     : ARM_AM::getAM5Offset(OffField) * 4;
  ARM_AM::AddrOpc Op = isAM3 ? ARM_AM::getAM3Op(OffField)
                             : ARM_AM::getAM5Op(OffField);

  if (Op == ARM_AM::sub)
    return -Offset;

  return Offset;
}

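// Example for getMemoryOpOffset() (illustrative operands, MIR simplified):
// a "t2STRi8" with immediate -4 yields -4 directly, while for an ARM::LDRD
// the magnitude and the add/sub direction are unpacked from the packed AM3
// offset field, so an encoding of "sub, 4" yields -4.
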
static const MachineOperand &getLoadStoreBaseOp(const MachineInstr &MI) {
  return MI.getOperand(1);
}

static const MachineOperand &getLoadStoreRegOp(const MachineInstr &MI) {
  return MI.getOperand(0);
}

static int getLoadStoreMultipleOpcode(unsigned Opcode, ARM_AM::AMSubMode Mode) {
  switch (Opcode) {
  default: llvm_unreachable("Unhandled opcode!");
  case ARM::LDRi12:
    ++NumLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::LDMIA;
    case ARM_AM::da: return ARM::LDMDA;
    case ARM_AM::db: return ARM::LDMDB;
    case ARM_AM::ib: return ARM::LDMIB;
    }
  case ARM::STRi12:
    ++NumSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::STMIA;
    case ARM_AM::da: return ARM::STMDA;
    case ARM_AM::db: return ARM::STMDB;
    case ARM_AM::ib: return ARM::STMIB;
    }
  case ARM::tLDRi:
  case ARM::tLDRspi:
    // tLDMIA is writeback-only - unless the base register is in the input
    // reglist.
    ++NumLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::tLDMIA;
    }
  case ARM::tSTRi:
  case ARM::tSTRspi:
    // There is no non-writeback tSTMIA either.
    ++NumSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::tSTMIA_UPD;
    }
  case ARM::t2LDRi8:
  case ARM::t2LDRi12:
    ++NumLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::t2LDMIA;
    case ARM_AM::db: return ARM::t2LDMDB;
    }
  case ARM::t2STRi8:
  case ARM::t2STRi12:
    ++NumSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::t2STMIA;
    case ARM_AM::db: return ARM::t2STMDB;
    }
  case ARM::VLDRS:
    ++NumVLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VLDMSIA;
    case ARM_AM::db: return 0; // Only VLDMSDB_UPD exists.
    }
  case ARM::VSTRS:
    ++NumVSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VSTMSIA;
    case ARM_AM::db: return 0; // Only VSTMSDB_UPD exists.
    }
  case ARM::VLDRD:
    ++NumVLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VLDMDIA;
    case ARM_AM::db: return 0; // Only VLDMDDB_UPD exists.
    }
  case ARM::VSTRD:
    ++NumVSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VSTMDIA;
    case ARM_AM::db: return 0; // Only VSTMDDB_UPD exists.
    }
  }
}

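// Example (illustrative): getLoadStoreMultipleOpcode(ARM::LDRi12, ARM_AM::ia)
// returns ARM::LDMIA, i.e. a run of "ldr rX, [rN, #imm]" instructions can be
// rewritten as a single "ldmia rN, {rX, rY, ...}".
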
static ARM_AM::AMSubMode getLoadStoreMultipleSubMode(unsigned Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Unhandled opcode!");
  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMIA_UPD:
  case ARM::STMIA:
  case ARM::STMIA_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMIA_UPD:
  case ARM::t2STMIA:
  case ARM::t2STMIA_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
    return ARM_AM::ia;

  case ARM::LDMDA:
  case ARM::LDMDA_UPD:
  case ARM::STMDA:
  case ARM::STMDA_UPD:
    return ARM_AM::da;

  case ARM::LDMDB:
  case ARM::LDMDB_UPD:
  case ARM::STMDB:
  case ARM::STMDB_UPD:
  case ARM::t2LDMDB:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMDB:
  case ARM::t2STMDB_UPD:
  case ARM::VLDMSDB_UPD:
  case ARM::VSTMSDB_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VSTMDDB_UPD:
    return ARM_AM::db;

  case ARM::LDMIB:
  case ARM::LDMIB_UPD:
  case ARM::STMIB:
  case ARM::STMIB_UPD:
    return ARM_AM::ib;
  }
}

static bool isT1i32Load(unsigned Opc) {
  return Opc == ARM::tLDRi || Opc == ARM::tLDRspi;
}

static bool isT2i32Load(unsigned Opc) {
  return Opc == ARM::t2LDRi12 || Opc == ARM::t2LDRi8;
}

static bool isi32Load(unsigned Opc) {
  return Opc == ARM::LDRi12 || isT1i32Load(Opc) || isT2i32Load(Opc);
}

static bool isT1i32Store(unsigned Opc) {
  return Opc == ARM::tSTRi || Opc == ARM::tSTRspi;
}

static bool isT2i32Store(unsigned Opc) {
  return Opc == ARM::t2STRi12 || Opc == ARM::t2STRi8;
}

static bool isi32Store(unsigned Opc) {
  return Opc == ARM::STRi12 || isT1i32Store(Opc) || isT2i32Store(Opc);
}

static bool isLoadSingle(unsigned Opc) {
  return isi32Load(Opc) || Opc == ARM::VLDRS || Opc == ARM::VLDRD;
}

static unsigned getImmScale(unsigned Opc) {
  switch (Opc) {
  default: llvm_unreachable("Unhandled opcode!");
  case ARM::tLDRi:
  case ARM::tSTRi:
  case ARM::tLDRspi:
  case ARM::tSTRspi:
    return 1;
  case ARM::tLDRHi:
  case ARM::tSTRHi:
    return 2;
  case ARM::tLDRBi:
  case ARM::tSTRBi:
    return 4;
  }
}

static unsigned getLSMultipleTransferSize(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default: return 0;
  case ARM::LDRi12:
  case ARM::STRi12:
  case ARM::tLDRi:
  case ARM::tSTRi:
  case ARM::tLDRspi:
  case ARM::tSTRspi:
  case ARM::t2LDRi8:
  case ARM::t2LDRi12:
  case ARM::t2STRi8:
  case ARM::t2STRi12:
  case ARM::VLDRS:
  case ARM::VSTRS:
    return 4;
  case ARM::VLDRD:
  case ARM::VSTRD:
    return 8;
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::VLDMSIA:
  case ARM::VSTMSIA:
    return (MI->getNumOperands() - MI->getDesc().getNumOperands() + 1) * 4;
  case ARM::VLDMDIA:
  case ARM::VSTMDIA:
    return (MI->getNumOperands() - MI->getDesc().getNumOperands() + 1) * 8;
  }
}

/// Update future uses of the base register with the offset introduced
/// due to writeback. This function only works on Thumb1.
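///
/// Example (a sketch, not from a real test): after forming
///   tSTMIA_UPD r0!, {r1, r2}
/// a following "tLDRi r3, r0, #2" (offset counted in words) must be rewritten
/// as "tLDRi r3, r0, #0", because the base was advanced by two words.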
void ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI,
                                        const DebugLoc &DL, unsigned Base,
                                        unsigned WordOffset,
                                        ARMCC::CondCodes Pred,
                                        unsigned PredReg) {
  assert(isThumb1 && "Can only update base register uses for Thumb1!");
  // Start updating any instructions with immediate offsets. Insert a SUB before
  // the first non-updateable instruction (if any).
  for (; MBBI != MBB.end(); ++MBBI) {
    bool InsertSub = false;
    unsigned Opc = MBBI->getOpcode();

    if (MBBI->readsRegister(Base)) {
      int Offset;
      bool IsLoad =
        Opc == ARM::tLDRi || Opc == ARM::tLDRHi || Opc == ARM::tLDRBi;
      bool IsStore =
        Opc == ARM::tSTRi || Opc == ARM::tSTRHi || Opc == ARM::tSTRBi;

      if (IsLoad || IsStore) {
        // Loads and stores with immediate offsets can be updated, but only if
        // the new offset isn't negative.
        // The MachineOperand containing the offset immediate is the last one
        // before predicates.
        MachineOperand &MO =
          MBBI->getOperand(MBBI->getDesc().getNumOperands() - 3);
        // The offsets are scaled by 1, 2 or 4 depending on the Opcode.
        Offset = MO.getImm() - WordOffset * getImmScale(Opc);

        // If storing the base register, it needs to be reset first.
        unsigned InstrSrcReg = getLoadStoreRegOp(*MBBI).getReg();

        if (Offset >= 0 && !(IsStore && InstrSrcReg == Base))
          MO.setImm(Offset);
        else
          InsertSub = true;
      } else if ((Opc == ARM::tSUBi8 || Opc == ARM::tADDi8) &&
                 !definesCPSR(*MBBI)) {
        // SUBS/ADDS using this register, with a dead def of the CPSR.
        // Merge it with the update; if the merged offset is too large,
        // insert a new sub instead.
        MachineOperand &MO =
          MBBI->getOperand(MBBI->getDesc().getNumOperands() - 3);
        Offset = (Opc == ARM::tSUBi8) ?
          MO.getImm() + WordOffset * 4 :
          MO.getImm() - WordOffset * 4 ;
        if (Offset >= 0 && TL->isLegalAddImmediate(Offset)) {
          // FIXME: Swap ADDS<->SUBS if Offset < 0, erase instruction if
          // Offset == 0.
          MO.setImm(Offset);
          // The base register has now been reset, so exit early.
          return;
        } else {
          InsertSub = true;
        }
      } else {
        // Can't update the instruction.
        InsertSub = true;
      }
    } else if (definesCPSR(*MBBI) || MBBI->isCall() || MBBI->isBranch()) {
      // Since SUBS sets the condition flags, we can't place the base reset
      // after an instruction that has a live CPSR def.
      // The base register might also contain an argument for a function call.
      InsertSub = true;
    }

    if (InsertSub) {
      // An instruction above couldn't be updated, so insert a sub.
      AddDefaultT1CC(BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBi8), Base), true)
        .addReg(Base).addImm(WordOffset * 4).addImm(Pred).addReg(PredReg);
      return;
    }

    if (MBBI->killsRegister(Base) || MBBI->definesRegister(Base))
      // Register got killed. Stop updating.
      return;
  }

  // End of block was reached.
  if (MBB.succ_size() > 0) {
    // FIXME: Because of a bug, live registers are sometimes missing from
    // the successor blocks' live-in sets. This means we can't trust that
    // information and *always* have to reset at the end of a block.
    if (MBBI != MBB.end()) --MBBI;
    AddDefaultT1CC(
      BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBi8), Base), true)
      .addReg(Base).addImm(WordOffset * 4).addImm(Pred).addReg(PredReg);
  }
}

/// Return the first register of class \p RegClass that is not currently live.
unsigned ARMLoadStoreOpt::findFreeReg(const TargetRegisterClass &RegClass) {
  if (!RegClassInfoValid) {
    RegClassInfo.runOnMachineFunction(*MF);
    RegClassInfoValid = true;
  }

  for (unsigned Reg : RegClassInfo.getOrder(&RegClass))
    if (!LiveRegs.contains(Reg))
      return Reg;
  return 0;
}

/// Compute live registers just before instruction \p Before (in normal schedule
/// direction). Computes backwards so multiple queries in the same block must
/// come in reverse order.
void ARMLoadStoreOpt::moveLiveRegsBefore(const MachineBasicBlock &MBB,
                                         MachineBasicBlock::const_iterator Before) {
  // Initialize if we never queried in this block.
  if (!LiveRegsValid) {
    LiveRegs.init(*TRI);
    LiveRegs.addLiveOuts(MBB);
    LiveRegPos = MBB.end();
    LiveRegsValid = true;
  }
  // Move backward just before the "Before" position.
  while (LiveRegPos != Before) {
    --LiveRegPos;
    LiveRegs.stepBackward(*LiveRegPos);
  }
}

static bool ContainsReg(const ArrayRef<std::pair<unsigned, bool>> &Regs,
                        unsigned Reg) {
  for (const std::pair<unsigned, bool> &R : Regs)
    if (R.first == Reg)
      return true;
  return false;
}

/// Create and insert a LDM or STM with Base as base register and registers in
/// Regs as the register operands that would be loaded / stored. Returns the
/// newly created instruction, or nullptr if no transformation is performed.
MachineInstr *ARMLoadStoreOpt::CreateLoadStoreMulti(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
    int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
    ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
    ArrayRef<std::pair<unsigned, bool>> Regs) {
  unsigned NumRegs = Regs.size();
  assert(NumRegs > 1);

  // For Thumb1 targets, it might be necessary to clobber the CPSR to merge.
  // Compute liveness information for that register to make the decision.
  bool SafeToClobberCPSR = !isThumb1 ||
    (MBB.computeRegisterLiveness(TRI, ARM::CPSR, InsertBefore, 20) ==
     MachineBasicBlock::LQR_Dead);

  bool Writeback = isThumb1; // Thumb1 LDM/STM have base reg writeback.

  // Exception: If the base register is in the input reglist, Thumb1 LDM is
  // non-writeback.
  // It's also not possible to merge an STR of the base register in Thumb1.
  if (isThumb1 && isi32Load(Opcode) && ContainsReg(Regs, Base)) {
    assert(Base != ARM::SP && "Thumb1 does not allow SP in register list");
    if (Opcode == ARM::tLDRi) {
      Writeback = false;
    } else if (Opcode == ARM::tSTRi) {
      return nullptr;
    }
  }

  ARM_AM::AMSubMode Mode = ARM_AM::ia;
  // VFP and Thumb2 do not support IB or DA modes. Thumb1 only supports IA.
  bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode);
  bool haveIBAndDA = isNotVFP && !isThumb2 && !isThumb1;

  if (Offset == 4 && haveIBAndDA) {
    Mode = ARM_AM::ib;
  } else if (Offset == -4 * (int)NumRegs + 4 && haveIBAndDA) {
    Mode = ARM_AM::da;
  } else if (Offset == -4 * (int)NumRegs && isNotVFP && !isThumb1) {
    // VLDM/VSTM do not support DB mode without also updating the base reg.
    Mode = ARM_AM::db;
  } else if (Offset != 0 || Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
    // Check if this is a supported opcode before inserting instructions to
    // calculate a new base register.
    if (!getLoadStoreMultipleOpcode(Opcode, Mode)) return nullptr;

    // If starting offset isn't zero, insert a MI to materialize a new base.
    // But only do so if it is cost effective, i.e. merging more than two
    // loads / stores.
    if (NumRegs <= 2)
      return nullptr;

    // On Thumb1, it's not worth materializing a new base register without
    // clobbering the CPSR (i.e. not using ADDS/SUBS).
    if (!SafeToClobberCPSR)
      return nullptr;

    unsigned NewBase;
    if (isi32Load(Opcode)) {
      // If it is a load, then just use one of the destination registers
      // as the new base. Will no longer be writeback in Thumb1.
      NewBase = Regs[NumRegs-1].first;
      Writeback = false;
    } else {
      // Find a free register that we can use as scratch register.
      moveLiveRegsBefore(MBB, InsertBefore);
      // The merged instruction does not exist yet but will use several Regs if
      // it is a Store.
      if (!isLoadSingle(Opcode))
        for (const std::pair<unsigned, bool> &R : Regs)
          LiveRegs.addReg(R.first);

      NewBase = findFreeReg(isThumb1 ? ARM::tGPRRegClass : ARM::GPRRegClass);
      if (NewBase == 0)
        return nullptr;
    }

    int BaseOpc =
      isThumb2 ? ARM::t2ADDri :
      (isThumb1 && Base == ARM::SP) ? ARM::tADDrSPi :
      (isThumb1 && Offset < 8) ? ARM::tADDi3 :
      isThumb1 ? ARM::tADDi8 : ARM::ADDri;

    if (Offset < 0) {
      Offset = -Offset;
      BaseOpc =
        isThumb2 ? ARM::t2SUBri :
        (isThumb1 && Offset < 8 && Base != ARM::SP) ? ARM::tSUBi3 :
        isThumb1 ? ARM::tSUBi8 : ARM::SUBri;
    }

    if (!TL->isLegalAddImmediate(Offset))
      // FIXME: Try add with register operand?
      return nullptr; // Probably not worth it then.

    // We can only append a kill flag to the add/sub input if the value is not
    // used in the register list of the stm as well.
    bool KillOldBase = BaseKill &&
      (!isi32Store(Opcode) || !ContainsReg(Regs, Base));

    if (isThumb1) {
      // Thumb1: depending on immediate size, use either
      //   ADDS NewBase, Base, #imm3
      // or
      //   MOV  NewBase, Base
      //   ADDS NewBase, #imm8.
      if (Base != NewBase &&
          (BaseOpc == ARM::tADDi8 || BaseOpc == ARM::tSUBi8)) {
        // Need to insert a MOV to the new base first.
        if (isARMLowRegister(NewBase) && isARMLowRegister(Base) &&
            !STI->hasV6Ops()) {
          // thumbv4t doesn't have lo->lo copies, and we can't predicate tMOVSr
          if (Pred != ARMCC::AL)
            return nullptr;
          BuildMI(MBB, InsertBefore, DL, TII->get(ARM::tMOVSr), NewBase)
            .addReg(Base, getKillRegState(KillOldBase));
        } else
          BuildMI(MBB, InsertBefore, DL, TII->get(ARM::tMOVr), NewBase)
            .addReg(Base, getKillRegState(KillOldBase))
            .addImm(Pred).addReg(PredReg);

        // The following ADDS/SUBS becomes an update.
        Base = NewBase;
        KillOldBase = true;
      }
      if (BaseOpc == ARM::tADDrSPi) {
        assert(Offset % 4 == 0 && "tADDrSPi offset is scaled by 4");
        BuildMI(MBB, InsertBefore, DL, TII->get(BaseOpc), NewBase)
          .addReg(Base, getKillRegState(KillOldBase)).addImm(Offset/4)
          .addImm(Pred).addReg(PredReg);
      } else
        AddDefaultT1CC(
          BuildMI(MBB, InsertBefore, DL, TII->get(BaseOpc), NewBase), true)
          .addReg(Base, getKillRegState(KillOldBase)).addImm(Offset)
          .addImm(Pred).addReg(PredReg);
    } else
      BuildMI(MBB, InsertBefore, DL, TII->get(BaseOpc), NewBase)
        .addReg(Base, getKillRegState(KillOldBase)).addImm(Offset)
        .addImm(Pred).addReg(PredReg).addReg(0);

    Offset = 0;
    Base = NewBase;
    BaseKill = true; // New base is always killed straight away.
  }

  bool isDef = isLoadSingle(Opcode);

  // Get LS multiple opcode. Note that for Thumb1 this might be an opcode with
  // base register writeback.
  Opcode = getLoadStoreMultipleOpcode(Opcode, Mode);
  if (!Opcode)
    return nullptr;

  // Check if a Thumb1 LDM/STM merge is safe. This is the case if:
  // - There is no writeback (LDM of base register),
  // - the base register is killed by the merged instruction,
  // - or it's safe to overwrite the condition flags, i.e. to insert a SUBS
  //   to reset the base register.
  // Otherwise, don't merge.
  // It's safe to return here since the code to materialize a new base register
  // above is also conditional on SafeToClobberCPSR.
  if (isThumb1 && !SafeToClobberCPSR && Writeback && !BaseKill)
    return nullptr;

  MachineInstrBuilder MIB;

  if (Writeback) {
    assert(isThumb1 && "expected Writeback only in Thumb1");
    if (Opcode == ARM::tLDMIA) {
      assert(!(ContainsReg(Regs, Base)) && "Thumb1 can't LDM ! with Base in Regs");
      // Update tLDMIA with writeback if necessary.
      Opcode = ARM::tLDMIA_UPD;
    }

    MIB = BuildMI(MBB, InsertBefore, DL, TII->get(Opcode));

    // Thumb1: we might need to set base writeback when building the MI.
    MIB.addReg(Base, getDefRegState(true))
       .addReg(Base, getKillRegState(BaseKill));

    // The base isn't dead after a merged instruction with writeback.
    // Insert a sub instruction after the newly formed instruction to reset.
    if (!isLoadSingle(Opcode))
      UpdateBaseRegUses(MBB, InsertBefore, DL, Base, NumRegs, Pred, PredReg);
  } else {
    // No writeback, simply build the MachineInstr.
    MIB = BuildMI(MBB, InsertBefore, DL, TII->get(Opcode));
    MIB.addReg(Base, getKillRegState(BaseKill));
  }

  MIB.addImm(Pred).addReg(PredReg);

  for (const std::pair<unsigned, bool> &R : Regs)
    MIB.addReg(R.first, getDefRegState(isDef) | getKillRegState(R.second));

  return MIB.getInstr();
}

MachineInstr *ARMLoadStoreOpt::CreateLoadStoreDouble(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
    int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
    ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
    ArrayRef<std::pair<unsigned, bool>> Regs) const {
  bool IsLoad = isi32Load(Opcode);
  assert((IsLoad || isi32Store(Opcode)) && "Must have integer load or store");
  unsigned LoadStoreOpcode = IsLoad ? ARM::t2LDRDi8 : ARM::t2STRDi8;

  assert(Regs.size() == 2);
  MachineInstrBuilder MIB = BuildMI(MBB, InsertBefore, DL,
                                    TII->get(LoadStoreOpcode));
  if (IsLoad) {
    MIB.addReg(Regs[0].first, RegState::Define)
       .addReg(Regs[1].first, RegState::Define);
  } else {
    MIB.addReg(Regs[0].first, getKillRegState(Regs[0].second))
       .addReg(Regs[1].first, getKillRegState(Regs[1].second));
  }
  MIB.addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
  return MIB.getInstr();
}

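// Example for CreateLoadStoreDouble() (illustrative, register numbers made
// up): the adjacent pair "t2STRi12 r2, [r0, #8]" and "t2STRi12 r3, [r0, #12]"
// can be emitted as the single instruction "t2STRDi8 r2, r3, [r0, #8]".
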
/// Call MergeOps and update MemOps and merges accordingly on success.
MachineInstr *ARMLoadStoreOpt::MergeOpsUpdate(const MergeCandidate &Cand) {
  const MachineInstr *First = Cand.Instrs.front();
  unsigned Opcode = First->getOpcode();
  bool IsLoad = isLoadSingle(Opcode);
  SmallVector<std::pair<unsigned, bool>, 8> Regs;
  SmallVector<unsigned, 4> ImpDefs;
  DenseSet<unsigned> KilledRegs;
  DenseSet<unsigned> UsedRegs;
  // Determine list of registers and list of implicit super-register defs.
  for (const MachineInstr *MI : Cand.Instrs) {
    const MachineOperand &MO = getLoadStoreRegOp(*MI);
    unsigned Reg = MO.getReg();
    bool IsKill = MO.isKill();
    if (IsKill)
      KilledRegs.insert(Reg);
    Regs.push_back(std::make_pair(Reg, IsKill));
    UsedRegs.insert(Reg);

    if (IsLoad) {
      // Collect any implicit defs of super-registers, after merging we can't
      // be sure anymore that we properly preserved these live ranges and must
      // remove these implicit operands.
      for (const MachineOperand &MO : MI->implicit_operands()) {
        if (!MO.isReg() || !MO.isDef() || MO.isDead())
          continue;
        assert(MO.isImplicit());
        unsigned DefReg = MO.getReg();

        if (is_contained(ImpDefs, DefReg))
          continue;
        // We can ignore cases where the super-reg is read and written.
        if (MI->readsRegister(DefReg))
          continue;
        ImpDefs.push_back(DefReg);
      }
    }
  }

  // Attempt the merge.
  typedef MachineBasicBlock::iterator iterator;
  MachineInstr *LatestMI = Cand.Instrs[Cand.LatestMIIdx];
  iterator InsertBefore = std::next(iterator(LatestMI));
  MachineBasicBlock &MBB = *LatestMI->getParent();
  unsigned Offset = getMemoryOpOffset(*First);
  unsigned Base = getLoadStoreBaseOp(*First).getReg();
  bool BaseKill = LatestMI->killsRegister(Base);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(*First, PredReg);
  DebugLoc DL = First->getDebugLoc();
  MachineInstr *Merged = nullptr;
  if (Cand.CanMergeToLSDouble)
    Merged = CreateLoadStoreDouble(MBB, InsertBefore, Offset, Base, BaseKill,
                                   Opcode, Pred, PredReg, DL, Regs);
  if (!Merged && Cand.CanMergeToLSMulti)
    Merged = CreateLoadStoreMulti(MBB, InsertBefore, Offset, Base, BaseKill,
                                  Opcode, Pred, PredReg, DL, Regs);
  if (!Merged)
    return nullptr;

  // Determine earliest instruction that will get removed. We then keep an
  // iterator just above it so the following erases don't invalidate it.
  iterator EarliestI(Cand.Instrs[Cand.EarliestMIIdx]);
  bool EarliestAtBegin = false;
  if (EarliestI == MBB.begin()) {
    EarliestAtBegin = true;
  } else {
    EarliestI = std::prev(EarliestI);
  }

  // Remove instructions which have been merged.
  for (MachineInstr *MI : Cand.Instrs)
    MBB.erase(MI);

  // Determine range between the earliest removed instruction and the new one.
  if (EarliestAtBegin)
    EarliestI = MBB.begin();
  else
    EarliestI = std::next(EarliestI);
  auto FixupRange = make_range(EarliestI, iterator(Merged));

  if (isLoadSingle(Opcode)) {
    // If the previous loads defined a super-reg, then we have to mark earlier
    // operands undef; Replicate the super-reg def on the merged instruction.
    for (MachineInstr &MI : FixupRange) {
      for (unsigned &ImpDefReg : ImpDefs) {
        for (MachineOperand &MO : MI.implicit_operands()) {
          if (!MO.isReg() || MO.getReg() != ImpDefReg)
            continue;
          if (MO.readsReg())
            MO.setIsUndef();
          else if (MO.isDef())
            ImpDefReg = 0;
        }
      }
    }

    MachineInstrBuilder MIB(*Merged->getParent()->getParent(), Merged);
    for (unsigned ImpDef : ImpDefs)
      MIB.addReg(ImpDef, RegState::ImplicitDefine);
  } else {
    // Remove kill flags: We are possibly storing the values later now.
    assert(isi32Store(Opcode) || Opcode == ARM::VSTRS || Opcode == ARM::VSTRD);
    for (MachineInstr &MI : FixupRange) {
      for (MachineOperand &MO : MI.uses()) {
        if (!MO.isReg() || !MO.isKill())
          continue;
        if (UsedRegs.count(MO.getReg()))
          MO.setIsKill(false);
      }
    }
    assert(ImpDefs.empty());
  }

  return Merged;
}

static bool isValidLSDoubleOffset(int Offset) {
  unsigned Value = abs(Offset);
  // t2LDRDi8/t2STRDi8 supports an 8 bit immediate which is internally
  // multiplied by 4.
  return (Value % 4) == 0 && Value < 1024;
}
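
// Example (derived directly from the check above): magnitudes 0, 4, ...,
// 1020 are accepted; 1024 (out of range) and 6 (not a multiple of 4) are
// rejected.
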
/// Return true for loads/stores that can be combined to a double/multi
/// operation without increasing the requirements for alignment.
static bool mayCombineMisaligned(const TargetSubtargetInfo &STI,
                                 const MachineInstr &MI) {
  // vldr/vstr trap on misaligned pointers anyway, forming vldm makes no
  // difference.
  unsigned Opcode = MI.getOpcode();
  if (!isi32Load(Opcode) && !isi32Store(Opcode))
    return true;

  // Stack pointer alignment is out of the programmers control so we can trust
  // SP-relative loads/stores.
  if (getLoadStoreBaseOp(MI).getReg() == ARM::SP &&
      STI.getFrameLowering()->getTransientStackAlignment() >= 4)
    return true;
  return false;
}

/// Find candidates for load/store multiple merge in list of MemOpQueueEntries.
void ARMLoadStoreOpt::FormCandidates(const MemOpQueue &MemOps) {
  const MachineInstr *FirstMI = MemOps[0].MI;
  unsigned Opcode = FirstMI->getOpcode();
  bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode);
  unsigned Size = getLSMultipleTransferSize(FirstMI);

  unsigned SIndex = 0;
  unsigned EIndex = MemOps.size();
  do {
    // Look at the first instruction.
    const MachineInstr *MI = MemOps[SIndex].MI;
    int Offset = MemOps[SIndex].Offset;
    const MachineOperand &PMO = getLoadStoreRegOp(*MI);
    unsigned PReg = PMO.getReg();
    unsigned PRegNum = PMO.isUndef() ? UINT_MAX : TRI->getEncodingValue(PReg);
    unsigned Latest = SIndex;
    unsigned Earliest = SIndex;
    unsigned Count = 1;
    bool CanMergeToLSDouble =
      STI->isThumb2() && isNotVFP && isValidLSDoubleOffset(Offset);
    // ARM errata 602117: LDRD with base in list may result in incorrect base
    // register when interrupted or faulted.
    if (STI->isCortexM3() && isi32Load(Opcode) &&
        PReg == getLoadStoreBaseOp(*MI).getReg())
      CanMergeToLSDouble = false;

    bool CanMergeToLSMulti = true;
    // On Swift, avoid vldm/vstm starting with an odd register number, as that
    // needs more uops than single vldrs.
    if (STI->hasSlowOddRegister() && !isNotVFP && (PRegNum % 2) == 1)
      CanMergeToLSMulti = false;

    // LDRD/STRD do not allow SP/PC. LDM/STM do not support it or have it
    // deprecated; LDM to PC is fine but cannot happen here.
    if (PReg == ARM::SP || PReg == ARM::PC)
      CanMergeToLSMulti = CanMergeToLSDouble = false;

    // Should we be conservative?
    if (AssumeMisalignedLoadStores && !mayCombineMisaligned(*STI, *MI))
      CanMergeToLSMulti = CanMergeToLSDouble = false;

    // Merge following instructions where possible.
    for (unsigned I = SIndex+1; I < EIndex; ++I, ++Count) {
      int NewOffset = MemOps[I].Offset;
      if (NewOffset != Offset + (int)Size)
        break;
      const MachineOperand &MO = getLoadStoreRegOp(*MemOps[I].MI);
      unsigned Reg = MO.getReg();
      if (Reg == ARM::SP || Reg == ARM::PC)
        break;

      // See if the current load/store may be part of a multi load/store.
      unsigned RegNum = MO.isUndef() ? UINT_MAX : TRI->getEncodingValue(Reg);
      bool PartOfLSMulti = CanMergeToLSMulti;
      if (PartOfLSMulti) {
        // Register numbers must be in ascending order.
        if (RegNum <= PRegNum)
          PartOfLSMulti = false;
        // For VFP / NEON load/store multiples, the registers must be
        // consecutive and within the limit on the number of registers per
        // instruction.
        else if (!isNotVFP && RegNum != PRegNum+1)
          PartOfLSMulti = false;
      }
      // See if the current load/store may be part of a double load/store.
      bool PartOfLSDouble = CanMergeToLSDouble && Count <= 1;

      if (!PartOfLSMulti && !PartOfLSDouble)
        break;
      CanMergeToLSMulti &= PartOfLSMulti;
      CanMergeToLSDouble &= PartOfLSDouble;
      // Track MemOp with latest and earliest position (Positions are
      // counted in reverse).
      unsigned Position = MemOps[I].Position;
      if (Position < MemOps[Latest].Position)
        Latest = I;
      else if (Position > MemOps[Earliest].Position)
        Earliest = I;
      // Prepare for next MemOp.
      Offset += Size;
      PRegNum = RegNum;
    }

    // Form a candidate from the Ops collected so far.
    MergeCandidate *Candidate = new(Allocator.Allocate()) MergeCandidate;
    for (unsigned C = SIndex, CE = SIndex + Count; C < CE; ++C)
      Candidate->Instrs.push_back(MemOps[C].MI);
    Candidate->LatestMIIdx = Latest - SIndex;
    Candidate->EarliestMIIdx = Earliest - SIndex;
    Candidate->InsertPos = MemOps[Latest].Position;
    if (Count == 1)
      CanMergeToLSMulti = CanMergeToLSDouble = false;
    Candidate->CanMergeToLSMulti = CanMergeToLSMulti;
    Candidate->CanMergeToLSDouble = CanMergeToLSDouble;
    Candidates.push_back(Candidate);
    // Continue after the chain.
    SIndex += Count;
  } while (SIndex < EIndex);
}

static unsigned getUpdatingLSMultipleOpcode(unsigned Opc,
                                            ARM_AM::AMSubMode Mode) {
  switch (Opc) {
  default: llvm_unreachable("Unhandled opcode!");
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::LDMIA_UPD;
    case ARM_AM::ib: return ARM::LDMIB_UPD;
    case ARM_AM::da: return ARM::LDMDA_UPD;
    case ARM_AM::db: return ARM::LDMDB_UPD;
    }
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::STMIA_UPD;
    case ARM_AM::ib: return ARM::STMIB_UPD;
    case ARM_AM::da: return ARM::STMDA_UPD;
    case ARM_AM::db: return ARM::STMDB_UPD;
    }
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::t2LDMIA_UPD;
    case ARM_AM::db: return ARM::t2LDMDB_UPD;
    }
  case ARM::t2STMIA:
  case ARM::t2STMDB:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::t2STMIA_UPD;
    case ARM_AM::db: return ARM::t2STMDB_UPD;
    }
  case ARM::VLDMSIA:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VLDMSIA_UPD;
    case ARM_AM::db: return ARM::VLDMSDB_UPD;
    }
  case ARM::VLDMDIA:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VLDMDIA_UPD;
    case ARM_AM::db: return ARM::VLDMDDB_UPD;
    }
  case ARM::VSTMSIA:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VSTMSIA_UPD;
    case ARM_AM::db: return ARM::VSTMSDB_UPD;
    }
  case ARM::VSTMDIA:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VSTMDIA_UPD;
    case ARM_AM::db: return ARM::VSTMDDB_UPD;
    }
  }
}

/// Check if the given instruction increments or decrements a register and
/// return the amount it is incremented/decremented. Returns 0 if the CPSR flags
/// generated by the instruction are possibly read as well.
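///
/// Example (illustrative): with \p Reg = r1 and a matching predicate,
/// "SUBri r1, r1, 8" yields -8; with \p Reg = sp, "tADDspi sp, sp, 2" yields
/// 8 (the immediate is scaled by 4); an add writing a different register
/// yields 0.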
static int isIncrementOrDecrement(const MachineInstr &MI, unsigned Reg,
                                  ARMCC::CondCodes Pred, unsigned PredReg) {
  bool CheckCPSRDef;
  int Scale;
  switch (MI.getOpcode()) {
  case ARM::tADDi8:  Scale =  4; CheckCPSRDef = true; break;
  case ARM::tSUBi8:  Scale = -4; CheckCPSRDef = true; break;
  case ARM::t2SUBri:
  case ARM::SUBri:   Scale = -1; CheckCPSRDef = true; break;
  case ARM::t2ADDri:
  case ARM::ADDri:   Scale =  1; CheckCPSRDef = true; break;
  case ARM::tADDspi: Scale =  4; CheckCPSRDef = false; break;
  case ARM::tSUBspi: Scale = -4; CheckCPSRDef = false; break;
  default: return 0;
  }

  unsigned MIPredReg;
  if (MI.getOperand(0).getReg() != Reg ||
      MI.getOperand(1).getReg() != Reg ||
      getInstrPredicate(MI, MIPredReg) != Pred ||
      MIPredReg != PredReg)
    return 0;

  if (CheckCPSRDef && definesCPSR(MI))
    return 0;
  return MI.getOperand(2).getImm() * Scale;
}

/// Searches for an increment or decrement of \p Reg before \p MBBI.
static MachineBasicBlock::iterator
findIncDecBefore(MachineBasicBlock::iterator MBBI, unsigned Reg,
                 ARMCC::CondCodes Pred, unsigned PredReg, int &Offset) {
  Offset = 0;
  MachineBasicBlock &MBB = *MBBI->getParent();
  MachineBasicBlock::iterator BeginMBBI = MBB.begin();
  MachineBasicBlock::iterator EndMBBI = MBB.end();
  if (MBBI == BeginMBBI)
    return EndMBBI;

  // Skip debug values.
  MachineBasicBlock::iterator PrevMBBI = std::prev(MBBI);
  while (PrevMBBI->isDebugValue() && PrevMBBI != BeginMBBI)
    --PrevMBBI;

  Offset = isIncrementOrDecrement(*PrevMBBI, Reg, Pred, PredReg);
  return Offset == 0 ? EndMBBI : PrevMBBI;
}

/// Searches for an increment or decrement of \p Reg after \p MBBI.
static MachineBasicBlock::iterator
findIncDecAfter(MachineBasicBlock::iterator MBBI, unsigned Reg,
                ARMCC::CondCodes Pred, unsigned PredReg, int &Offset) {
  Offset = 0;
  MachineBasicBlock &MBB = *MBBI->getParent();
  MachineBasicBlock::iterator EndMBBI = MBB.end();
  MachineBasicBlock::iterator NextMBBI = std::next(MBBI);
  // Skip debug values.
  while (NextMBBI != EndMBBI && NextMBBI->isDebugValue())
    ++NextMBBI;
  if (NextMBBI == EndMBBI)
    return EndMBBI;

  Offset = isIncrementOrDecrement(*NextMBBI, Reg, Pred, PredReg);
  return Offset == 0 ? EndMBBI : NextMBBI;
}

/// Fold proceeding/trailing inc/dec of base register into the
/// LDM/STM/VLDM{D|S}/VSTM{D|S} op when possible:
///
/// stmia rn, <ra, rb, rc>
/// rn := rn + 4 * 3;
/// =>
/// stmia rn!, <ra, rb, rc>
///
/// rn := rn - 4 * 3;
/// ldmia rn, <ra, rb, rc>
/// =>
/// ldmdb rn!, <ra, rb, rc>
bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineInstr *MI) {
  // Thumb1 is already using updating loads/stores.
  if (isThumb1) return false;

  const MachineOperand &BaseOP = MI->getOperand(0);
  unsigned Base = BaseOP.getReg();
  bool BaseKill = BaseOP.isKill();
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
  unsigned Opcode = MI->getOpcode();
  DebugLoc DL = MI->getDebugLoc();

  // Can't use an updating ld/st if the base register is also a dest
  // register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
  for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i)
    if (MI->getOperand(i).getReg() == Base)
      return false;

  int Bytes = getLSMultipleTransferSize(MI);
  MachineBasicBlock &MBB = *MI->getParent();
  MachineBasicBlock::iterator MBBI(MI);
  int Offset;
  MachineBasicBlock::iterator MergeInstr
    = findIncDecBefore(MBBI, Base, Pred, PredReg, Offset);
  ARM_AM::AMSubMode Mode = getLoadStoreMultipleSubMode(Opcode);
  if (Mode == ARM_AM::ia && Offset == -Bytes) {
    Mode = ARM_AM::db;
  } else if (Mode == ARM_AM::ib && Offset == -Bytes) {
    Mode = ARM_AM::da;
  } else {
    MergeInstr = findIncDecAfter(MBBI, Base, Pred, PredReg, Offset);
    if (((Mode != ARM_AM::ia && Mode != ARM_AM::ib) || Offset != Bytes) &&
        ((Mode != ARM_AM::da && Mode != ARM_AM::db) || Offset != -Bytes)) {

      // We couldn't find an inc/dec to merge. But if the base is dead, we
      // can still change to a writeback form as that will save us 2 bytes
      // of code size. It can create WAW hazards though, so only do it if
      // we're minimizing code size.
      if (!MBB.getParent()->getFunction()->optForMinSize() || !BaseKill)
        return false;

      bool HighRegsUsed = false;
      for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i)
        if (MI->getOperand(i).getReg() >= ARM::R8) {
          HighRegsUsed = true;
          break;
        }

      if (!HighRegsUsed)
        MergeInstr = MBB.end();
      else
        return false;
    }
  }
  if (MergeInstr != MBB.end())
    MBB.erase(MergeInstr);

  unsigned NewOpc = getUpdatingLSMultipleOpcode(Opcode, Mode);
  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
    .addReg(Base, getDefRegState(true)) // WB base register
    .addReg(Base, getKillRegState(BaseKill))
    .addImm(Pred).addReg(PredReg);

  // Transfer the rest of operands.
  for (unsigned OpNum = 3, e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.addOperand(MI->getOperand(OpNum));

  // Transfer memoperands.
  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  MBB.erase(MBBI);
  return true;
}

static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc,
                                             ARM_AM::AddrOpc Mode) {
  switch (Opc) {
  case ARM::LDRi12:
    return ARM::LDR_PRE_IMM;
  case ARM::STRi12:
    return ARM::STR_PRE_IMM;
  case ARM::VLDRS:
    return Mode == ARM_AM::add ? ARM::VLDMSIA_UPD : ARM::VLDMSDB_UPD;
  case ARM::VLDRD:
    return Mode == ARM_AM::add ? ARM::VLDMDIA_UPD : ARM::VLDMDDB_UPD;
  case ARM::VSTRS:
    return Mode == ARM_AM::add ? ARM::VSTMSIA_UPD : ARM::VSTMSDB_UPD;
  case ARM::VSTRD:
    return Mode == ARM_AM::add ? ARM::VSTMDIA_UPD : ARM::VSTMDDB_UPD;
  case ARM::t2LDRi8:
  case ARM::t2LDRi12:
    return ARM::t2LDR_PRE;
  case ARM::t2STRi8:
  case ARM::t2STRi12:
    return ARM::t2STR_PRE;
  default: llvm_unreachable("Unhandled opcode!");
  }
}

static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc,
                                              ARM_AM::AddrOpc Mode) {
  switch (Opc) {
  case ARM::LDRi12:
    return ARM::LDR_POST_IMM;
  case ARM::STRi12:
    return ARM::STR_POST_IMM;
  case ARM::VLDRS:
    return Mode == ARM_AM::add ? ARM::VLDMSIA_UPD : ARM::VLDMSDB_UPD;
  case ARM::VLDRD:
    return Mode == ARM_AM::add ? ARM::VLDMDIA_UPD : ARM::VLDMDDB_UPD;
  case ARM::VSTRS:
    return Mode == ARM_AM::add ? ARM::VSTMSIA_UPD : ARM::VSTMSDB_UPD;
  case ARM::VSTRD:
    return Mode == ARM_AM::add ? ARM::VSTMDIA_UPD : ARM::VSTMDDB_UPD;
  case ARM::t2LDRi8:
  case ARM::t2LDRi12:
    return ARM::t2LDR_POST;
  case ARM::t2STRi8:
  case ARM::t2STRi12:
    return ARM::t2STR_POST;
  default: llvm_unreachable("Unhandled opcode!");
  }
}

/// Fold proceeding/trailing inc/dec of base register into the
/// LDR/STR/FLD{D|S}/FST{D|S} op when possible:
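///
/// Example (illustrative):
///   add r1, r1, #4
///   ldr r0, [r1]        =>   ldr r0, [r1, #4]!
/// and
///   ldr r0, [r1]
///   add r1, r1, #4      =>   ldr r0, [r1], #4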
bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineInstr *MI) {
  // Thumb1 doesn't have updating LDR/STR.
  // FIXME: Use LDM/STM with single register instead.
  if (isThumb1) return false;

  unsigned Base = getLoadStoreBaseOp(*MI).getReg();
  bool BaseKill = getLoadStoreBaseOp(*MI).isKill();
  unsigned Opcode = MI->getOpcode();
  DebugLoc DL = MI->getDebugLoc();
  bool isAM5 = (Opcode == ARM::VLDRD || Opcode == ARM::VLDRS ||
                Opcode == ARM::VSTRD || Opcode == ARM::VSTRS);
  bool isAM2 = (Opcode == ARM::LDRi12 || Opcode == ARM::STRi12);
  if (isi32Load(Opcode) || isi32Store(Opcode))
    if (MI->getOperand(2).getImm() != 0)
      return false;
  if (isAM5 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0)
    return false;

  // Can't do the merge if the destination register is the same as the would-be
  // writeback register.
  if (MI->getOperand(0).getReg() == Base)
    return false;

  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
  int Bytes = getLSMultipleTransferSize(MI);
  MachineBasicBlock &MBB = *MI->getParent();
  MachineBasicBlock::iterator MBBI(MI);
  int Offset;
  MachineBasicBlock::iterator MergeInstr
    = findIncDecBefore(MBBI, Base, Pred, PredReg, Offset);
  unsigned NewOpc;
  if (!isAM5 && Offset == Bytes) {
    NewOpc = getPreIndexedLoadStoreOpcode(Opcode, ARM_AM::add);
  } else if (Offset == -Bytes) {
    NewOpc = getPreIndexedLoadStoreOpcode(Opcode, ARM_AM::sub);
  } else {
    MergeInstr = findIncDecAfter(MBBI, Base, Pred, PredReg, Offset);
    if (Offset == Bytes) {
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode, ARM_AM::add);
    } else if (!isAM5 && Offset == -Bytes) {
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode, ARM_AM::sub);
    } else
      return false;
  }
  MBB.erase(MergeInstr);

  ARM_AM::AddrOpc AddSub = Offset < 0 ? ARM_AM::sub : ARM_AM::add;

  bool isLd = isLoadSingle(Opcode);
  if (isAM5) {
    // VLDM[SD]_UPD, VSTM[SD]_UPD
    // (There are no base-updating versions of VLDR/VSTR instructions, but the
    // updating load/store-multiple instructions can be used with only one
    // register.)
    MachineOperand &MO = MI->getOperand(0);
    BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
      .addReg(Base, getDefRegState(true)) // WB base register
      .addReg(Base, getKillRegState(isLd ? BaseKill : false))
      .addImm(Pred).addReg(PredReg)
      .addReg(MO.getReg(), (isLd ? getDefRegState(true) :
                            getKillRegState(MO.isKill())));
  } else if (isLd) {
    if (isAM2) {
      // LDR_PRE, LDR_POST
      if (NewOpc == ARM::LDR_PRE_IMM || NewOpc == ARM::LDRB_PRE_IMM) {
        BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg())
          .addReg(Base, RegState::Define)
          .addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
      } else {
        int Imm = ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
        BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg())
          .addReg(Base, RegState::Define)
          .addReg(Base).addReg(0).addImm(Imm).addImm(Pred).addReg(PredReg);
      }
    } else {
      // t2LDR_PRE, t2LDR_POST
      BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg())
        .addReg(Base, RegState::Define)
        .addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
    }
  } else {
    MachineOperand &MO = MI->getOperand(0);
    // FIXME: post-indexed stores use am2offset_imm, which still encodes
    // the vestigal zero-reg offset register. When that's fixed, this clause
    // can be removed entirely.
    if (isAM2 && NewOpc == ARM::STR_POST_IMM) {
      int Imm = ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
      // STR_PRE, STR_POST
      BuildMI(MBB, MBBI, DL, TII->get(NewOpc), Base)
        .addReg(MO.getReg(), getKillRegState(MO.isKill()))
        .addReg(Base).addReg(0).addImm(Imm).addImm(Pred).addReg(PredReg);
    } else {
      // t2STR_PRE, t2STR_POST
      BuildMI(MBB, MBBI, DL, TII->get(NewOpc), Base)
        .addReg(MO.getReg(), getKillRegState(MO.isKill()))
        .addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
    }
  }
  MBB.erase(MBBI);

  return true;
}

bool ARMLoadStoreOpt::MergeBaseUpdateLSDouble(MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8) &&
         "Must have t2STRDi8 or t2LDRDi8");
  if (MI.getOperand(3).getImm() != 0)
    return false;

  // Behaviour for writeback is undefined if base register is the same as one
  // of the others.
  const MachineOperand &BaseOp = MI.getOperand(2);
  unsigned Base = BaseOp.getReg();
  const MachineOperand &Reg0Op = MI.getOperand(0);
  const MachineOperand &Reg1Op = MI.getOperand(1);
  if (Reg0Op.getReg() == Base || Reg1Op.getReg() == Base)
    return false;

  unsigned PredReg;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  MachineBasicBlock::iterator MBBI(MI);
  MachineBasicBlock &MBB = *MI.getParent();
  int Offset;
  MachineBasicBlock::iterator MergeInstr = findIncDecBefore(MBBI, Base, Pred,
                                                            PredReg, Offset);
  unsigned NewOpc;
  if (Offset == 8 || Offset == -8) {
    NewOpc = Opcode == ARM::t2LDRDi8 ? ARM::t2LDRD_PRE : ARM::t2STRD_PRE;
  } else {
    MergeInstr = findIncDecAfter(MBBI, Base, Pred, PredReg, Offset);
    if (Offset == 8 || Offset == -8) {
      NewOpc = Opcode == ARM::t2LDRDi8 ? ARM::t2LDRD_POST : ARM::t2STRD_POST;
    } else
      return false;
  }
  MBB.erase(MergeInstr);

  DebugLoc DL = MI.getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
  if (NewOpc == ARM::t2LDRD_PRE || NewOpc == ARM::t2LDRD_POST) {
    MIB.addOperand(Reg0Op).addOperand(Reg1Op)
      .addReg(BaseOp.getReg(), RegState::Define);
  } else {
    assert(NewOpc == ARM::t2STRD_PRE || NewOpc == ARM::t2STRD_POST);
    MIB.addReg(BaseOp.getReg(), RegState::Define)
      .addOperand(Reg0Op).addOperand(Reg1Op);
  }
  MIB.addReg(BaseOp.getReg(), RegState::Kill)
     .addImm(Offset).addImm(Pred).addReg(PredReg);
  assert(TII->get(Opcode).getNumOperands() == 6 &&
         TII->get(NewOpc).getNumOperands() == 7 &&
         "Unexpected number of operands in Opcode specification.");

  // Transfer implicit operands.
  for (const MachineOperand &MO : MI.implicit_operands())
    MIB.addOperand(MO);
  MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());

  MBB.erase(MBBI);
  return true;
}

/// Returns true if instruction is a memory operation that this pass is capable
/// of operating on.
static bool isMemoryOp(const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  case ARM::VLDRS:
  case ARM::VSTRS:
  case ARM::VLDRD:
  case ARM::VSTRD:
  case ARM::LDRi12:
  case ARM::STRi12:
  case ARM::tLDRi:
  case ARM::tSTRi:
  case ARM::tLDRspi:
  case ARM::tSTRspi:
  case ARM::t2LDRi8:
  case ARM::t2LDRi12:
  case ARM::t2STRi8:
  case ARM::t2STRi12:
    break;
  default:
    return false;
  }
  if (!MI.getOperand(1).isReg())
    return false;

  // When no memory operands are present, conservatively assume unaligned,
  // volatile, unfoldable.
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand &MMO = **MI.memoperands_begin();

  // Don't touch volatile memory accesses - we may be changing their order.
  if (MMO.isVolatile())
    return false;

  // Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
  // not.
  if (MMO.getAlignment() < 4)
    return false;

  // str <undef> could probably be eliminated entirely, but for now we just
  // want to avoid making a mess of it.
  // FIXME: Use str <undef> as a wildcard to enable better stm folding.
  if (MI.getOperand(0).isReg() && MI.getOperand(0).isUndef())
    return false;

  // Likewise don't mess with references to undefined addresses.
  if (MI.getOperand(1).isUndef())
    return false;

  return true;
}

static void InsertLDR_STR(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI, int Offset,
                          bool isDef, const DebugLoc &DL, unsigned NewOpc,
                          unsigned Reg, bool RegDeadKill, bool RegUndef,
                          unsigned BaseReg, bool BaseKill, bool BaseUndef,
                          bool OffKill, bool OffUndef, ARMCC::CondCodes Pred,
                          unsigned PredReg, const TargetInstrInfo *TII,
                          bool isT2) {
  if (isDef) {
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
                                      TII->get(NewOpc))
      .addReg(Reg, getDefRegState(true) | getDeadRegState(RegDeadKill))
      .addReg(BaseReg, getKillRegState(BaseKill)|getUndefRegState(BaseUndef));
    MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
  } else {
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
                                      TII->get(NewOpc))
      .addReg(Reg, getKillRegState(RegDeadKill) | getUndefRegState(RegUndef))
      .addReg(BaseReg, getKillRegState(BaseKill)|getUndefRegState(BaseUndef));
    MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
  }
}

bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator &MBBI) {
  MachineInstr *MI = &*MBBI;
  unsigned Opcode = MI->getOpcode();
  if (Opcode != ARM::LDRD && Opcode != ARM::STRD && Opcode != ARM::t2LDRDi8)
    return false;

  const MachineOperand &BaseOp = MI->getOperand(2);
  unsigned BaseReg = BaseOp.getReg();
  unsigned EvenReg = MI->getOperand(0).getReg();
  unsigned OddReg = MI->getOperand(1).getReg();
  unsigned EvenRegNum = TRI->getDwarfRegNum(EvenReg, false);
  unsigned OddRegNum = TRI->getDwarfRegNum(OddReg, false);

  // ARM errata 602117: LDRD with base in list may result in incorrect base
  // register when interrupted or faulted.
  bool Errata602117 = EvenReg == BaseReg &&
    (Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8) && STI->isCortexM3();
  // ARM LDRD/STRD needs consecutive registers.
  bool NonConsecutiveRegs = (Opcode == ARM::LDRD || Opcode == ARM::STRD) &&
    (EvenRegNum % 2 != 0 || EvenRegNum + 1 != OddRegNum);

  if (!Errata602117 && !NonConsecutiveRegs)
    return false;

  bool isT2 = Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8;
  bool isLd = Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8;
  bool EvenDeadKill = isLd ?
    MI->getOperand(0).isDead() : MI->getOperand(0).isKill();
  bool EvenUndef = MI->getOperand(0).isUndef();
  bool OddDeadKill = isLd ?
    MI->getOperand(1).isDead() : MI->getOperand(1).isKill();
  bool OddUndef = MI->getOperand(1).isUndef();
  bool BaseKill = BaseOp.isKill();
  bool BaseUndef = BaseOp.isUndef();
  bool OffKill = isT2 ? false : MI->getOperand(3).isKill();
  bool OffUndef = isT2 ? false : MI->getOperand(3).isUndef();
  int OffImm = getMemoryOpOffset(*MI);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);

  if (OddRegNum > EvenRegNum && OffImm == 0) {
    // Ascending register numbers and no offset. It's safe to change it to a
    // ldm or stm.
    unsigned NewOpc = (isLd)
      ? (isT2 ? ARM::t2LDMIA : ARM::LDMIA)
      : (isT2 ? ARM::t2STMIA : ARM::STMIA);
    if (isLd) {
      BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
        .addReg(BaseReg, getKillRegState(BaseKill))
        .addImm(Pred).addReg(PredReg)
        .addReg(EvenReg, getDefRegState(isLd) | getDeadRegState(EvenDeadKill))
        .addReg(OddReg, getDefRegState(isLd) | getDeadRegState(OddDeadKill));
      ++NumLDRD2LDM;
    } else {
      BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
        .addReg(BaseReg, getKillRegState(BaseKill))
        .addImm(Pred).addReg(PredReg)
        .addReg(EvenReg,
                getKillRegState(EvenDeadKill) | getUndefRegState(EvenUndef))
        .addReg(OddReg,
                getKillRegState(OddDeadKill) | getUndefRegState(OddUndef));
      ++NumSTRD2STM;
    }
  } else {
    // Split into two instructions.
    unsigned NewOpc = (isLd)
      ? (isT2 ? (OffImm < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDRi12)
      : (isT2 ? (OffImm < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STRi12);
    // Be extra careful for thumb2. t2LDRi8 can't reference a zero offset,
    // so adjust and use t2LDRi12 here for that.
    unsigned NewOpc2 = (isLd)
      ? (isT2 ? (OffImm+4 < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDRi12)
      : (isT2 ? (OffImm+4 < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STRi12);
    DebugLoc dl = MBBI->getDebugLoc();
    // If this is a load and base register is killed, it may have been
    // re-defed by the load, make sure the first load does not clobber it.
    if (isLd &&
        (BaseKill || OffKill) &&
        (TRI->regsOverlap(EvenReg, BaseReg))) {
      assert(!TRI->regsOverlap(OddReg, BaseReg));
      InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc2,
                    OddReg, OddDeadKill, false,
                    BaseReg, false, BaseUndef, false, OffUndef,
                    Pred, PredReg, TII, isT2);
      InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
                    EvenReg, EvenDeadKill, false,
                    BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
                    Pred, PredReg, TII, isT2);
    } else {
      if (OddReg == EvenReg && EvenDeadKill) {
        // If the two source operands are the same, the kill marker is
        // probably on the first one. e.g.
        // t2STRDi8 %R5<kill>, %R5, %R9<kill>, 0, 14, %reg0
        EvenDeadKill = false;
        OddDeadKill = true;
      }
      // Never kill the base register in the first instruction.
      if (EvenReg == BaseReg)
        EvenDeadKill = false;
      InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
                    EvenReg, EvenDeadKill, EvenUndef,
                    BaseReg, false, BaseUndef, false, OffUndef,
                    Pred, PredReg, TII, isT2);
      InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc2,
                    OddReg, OddDeadKill, OddUndef,
                    BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
                    Pred, PredReg, TII, isT2);
    }
    if (isLd)
      ++NumLDRD2LDR;
    else
      ++NumSTRD2STR;
  }

  MBBI = MBB.erase(MBBI);
  return true;
}

/// An optimization pass to turn multiple LDR / STR ops of the same base and
/// incrementing offset into LDM / STM ops.
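///
/// Example (illustrative):
///   ldr r4, [r0]
///   ldr r5, [r0, #4]    =>   ldmia r0, {r4, r5, r6}
///   ldr r6, [r0, #8]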
bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
  MemOpQueue MemOps;
  unsigned CurrBase = 0;
  unsigned CurrOpc = ~0u;
  ARMCC::CondCodes CurrPred = ARMCC::AL;
  unsigned Position = 0;
  assert(Candidates.size() == 0);
  assert(MergeBaseCandidates.size() == 0);
  LiveRegsValid = false;

  for (MachineBasicBlock::iterator I = MBB.end(), MBBI; I != MBB.begin();
       I = MBBI) {
    // The instruction in front of the iterator is the one we look at.
    MBBI = std::prev(I);
    if (FixInvalidRegPairOp(MBB, MBBI))
      continue;
    ++Position;

    if (isMemoryOp(*MBBI)) {
      unsigned Opcode = MBBI->getOpcode();
      const MachineOperand &MO = MBBI->getOperand(0);
      unsigned Reg = MO.getReg();
      unsigned Base = getLoadStoreBaseOp(*MBBI).getReg();
      unsigned PredReg = 0;
      ARMCC::CondCodes Pred = getInstrPredicate(*MBBI, PredReg);
      int Offset = getMemoryOpOffset(*MBBI);
      if (CurrBase == 0) {
        // Start of a new chain.
        CurrBase = Base;
        CurrOpc  = Opcode;
        CurrPred = Pred;
        MemOps.push_back(MemOpQueueEntry(*MBBI, Offset, Position));
        continue;
      }
      // Note: No need to match PredReg in the next if.
      if (CurrOpc == Opcode && CurrBase == Base && CurrPred == Pred) {
        // Watch out for:
        //   r4 := ldr [r0, #8]
        //   r4 := ldr [r0, #4]
        // or
        //   r0 := ldr [r0]
        // If a load overrides the base register or a register loaded by
        // another load in our chain, we cannot take this instruction.
        bool Overlap = false;
        if (isLoadSingle(Opcode)) {
          Overlap = (Base == Reg);
          if (!Overlap) {
            for (const MemOpQueueEntry &E : MemOps) {
              if (TRI->regsOverlap(Reg, E.MI->getOperand(0).getReg())) {
                Overlap = true;
                break;
              }
            }
          }
        }

        if (!Overlap) {
          // Check offset and sort memory operation into the current chain.
          if (Offset > MemOps.back().Offset) {
            MemOps.push_back(MemOpQueueEntry(*MBBI, Offset, Position));
            continue;
          } else {
            MemOpQueue::iterator MI, ME;
            for (MI = MemOps.begin(), ME = MemOps.end(); MI != ME; ++MI) {
              if (Offset < MI->Offset) {
                // Found a place to insert.
                break;
              }
              if (Offset == MI->Offset) {
                // Collision, abort.
                MI = ME;
                break;
              }
            }
            if (MI != MemOps.end()) {
              MemOps.insert(MI, MemOpQueueEntry(*MBBI, Offset, Position));
              continue;
            }
          }
        }
      }

      // Don't advance the iterator; The op will start a new chain next.
      MBBI = I;
      --Position;
      // Fallthrough to look into existing chain.
    } else if (MBBI->isDebugValue()) {
      continue;
    } else if (MBBI->getOpcode() == ARM::t2LDRDi8 ||
               MBBI->getOpcode() == ARM::t2STRDi8) {
      // ARMPreAllocLoadStoreOpt has already formed some LDRD/STRD instructions;
      // remember them because we may still be able to merge add/sub into them.
      MergeBaseCandidates.push_back(&*MBBI);
    }

    // If we are here then the chain is broken; Extract candidates for a merge.
    if (MemOps.size() > 0) {
      FormCandidates(MemOps);
      // Reset for the next chain.
      CurrBase = 0;
      CurrOpc = ~0u;
      CurrPred = ARMCC::AL;
      MemOps.clear();
    }
  }
  if (MemOps.size() > 0)
    FormCandidates(MemOps);

  // Sort candidates so they get processed from end to begin of the basic
  // block later; This is necessary for liveness calculation.
  auto LessThan = [](const MergeCandidate* M0, const MergeCandidate *M1) {
    return M0->InsertPos < M1->InsertPos;
  };
  std::sort(Candidates.begin(), Candidates.end(), LessThan);

  // Go through list of candidates and merge.
  bool Changed = false;
  for (const MergeCandidate *Candidate : Candidates) {
    if (Candidate->CanMergeToLSMulti || Candidate->CanMergeToLSDouble) {
      MachineInstr *Merged = MergeOpsUpdate(*Candidate);
      // Merge preceding/trailing base inc/dec into the merged op.
      if (Merged) {
        Changed = true;
        unsigned Opcode = Merged->getOpcode();
        if (Opcode == ARM::t2STRDi8 || Opcode == ARM::t2LDRDi8)
          MergeBaseUpdateLSDouble(*Merged);
        else
          MergeBaseUpdateLSMultiple(Merged);
      } else {
        for (MachineInstr *MI : Candidate->Instrs) {
          if (MergeBaseUpdateLoadStore(MI))
            Changed = true;
        }
      }
    } else {
      assert(Candidate->Instrs.size() == 1);
      if (MergeBaseUpdateLoadStore(Candidate->Instrs.front()))
        Changed = true;
    }
  }
  Candidates.clear();
  // Try to fold add/sub into the LDRD/STRD formed by ARMPreAllocLoadStoreOpt.
  for (MachineInstr *MI : MergeBaseCandidates)
    MergeBaseUpdateLSDouble(*MI);
  MergeBaseCandidates.clear();

  return Changed;
}

/// If this is an exit BB, try merging the return ops ("bx lr" and "mov pc, lr")
/// into the preceding stack restore so it directly restores the value of LR
/// into pc.
///   ldmfd sp!, {..., lr}
///   bx lr
/// or
///   ldmfd sp!, {..., lr}
///   mov pc, lr
/// =>
///   ldmfd sp!, {..., pc}
bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
  // Thumb1 LDM doesn't allow high registers.
  if (isThumb1) return false;
  if (MBB.empty()) return false;

  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  if (MBBI != MBB.begin() && MBBI != MBB.end() &&
      (MBBI->getOpcode() == ARM::BX_RET ||
       MBBI->getOpcode() == ARM::tBX_RET ||
       MBBI->getOpcode() == ARM::MOVPCLR)) {
    MachineBasicBlock::iterator PrevI = std::prev(MBBI);
    // Ignore any DBG_VALUE instructions.
    while (PrevI->isDebugValue() && PrevI != MBB.begin())
      --PrevI;
    MachineInstr &PrevMI = *PrevI;
    unsigned Opcode = PrevMI.getOpcode();
    if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD ||
        Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD ||
        Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
      MachineOperand &MO = PrevMI.getOperand(PrevMI.getNumOperands() - 1);
      if (MO.getReg() != ARM::LR)
        return false;
      unsigned NewOpc = (isThumb2 ? ARM::t2LDMIA_RET : ARM::LDMIA_RET);
      assert(((isThumb2 && Opcode == ARM::t2LDMIA_UPD) ||
              Opcode == ARM::LDMIA_UPD) && "Unsupported multiple load-return!");
      PrevMI.setDesc(TII->get(NewOpc));
      MO.setReg(ARM::PC);
      PrevMI.copyImplicitOps(*MBB.getParent(), *MBBI);
      MBB.erase(MBBI);
      return true;
    }
  }
  return false;
}

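/// Fold a Thumb1 "tMOVr lr, rX" followed by "tBX_RET" into a single "tBX rX",
/// provided the tMOVr kills its source register, i.e. the copy exists only to
/// feed the return.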
bool ARMLoadStoreOpt::CombineMovBx(MachineBasicBlock &MBB) {
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  if (MBBI == MBB.begin() || MBBI == MBB.end() ||
      MBBI->getOpcode() != ARM::tBX_RET)
    return false;

  MachineBasicBlock::iterator Prev = MBBI;
  --Prev;
  if (Prev->getOpcode() != ARM::tMOVr || !Prev->definesRegister(ARM::LR))
    return false;

  for (auto Use : Prev->uses())
    if (Use.isKill()) {
      AddDefaultPred(BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(ARM::tBX))
                         .addReg(Use.getReg(), RegState::Kill))
          .copyImplicitOps(*MBBI);
      MBB.erase(MBBI);
      MBB.erase(Prev);
      return true;
    }

  llvm_unreachable("tMOVr doesn't kill a reg before tBX_RET?");
}
bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(*Fn.getFunction()))
    return false;

  MF = &Fn;
  STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
  TL = STI->getTargetLowering();
  AFI = Fn.getInfo<ARMFunctionInfo>();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  MRI = &Fn.getRegInfo();
  RegClassInfoValid = false;
  isThumb2 = AFI->isThumb2Function();
  isThumb1 = AFI->isThumbFunction() && !isThumb2;

  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
       ++MFI) {
    MachineBasicBlock &MBB = *MFI;
    Modified |= LoadStoreMultipleOpti(MBB);
    if (STI->hasV5TOps())
      Modified |= MergeReturnIntoLDM(MBB);
    if (isThumb1)
      Modified |= CombineMovBx(MBB);
  }

  Allocator.DestroyAll();
  return Modified;
}
#define ARM_PREALLOC_LOAD_STORE_OPT_NAME                                       \
  "ARM pre- register allocation load / store optimization pass"

namespace {
/// Pre- register allocation pass that moves loads / stores from consecutive
/// locations close together to make it more likely they will be combined
/// later.
struct ARMPreAllocLoadStoreOpt : public MachineFunctionPass {
  static char ID;
  ARMPreAllocLoadStoreOpt() : MachineFunctionPass(ID) {}

  const DataLayout *TD;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const ARMSubtarget *STI;
  MachineRegisterInfo *MRI;
  MachineFunction *MF;

  bool runOnMachineFunction(MachineFunction &Fn) override;

  StringRef getPassName() const override {
    return ARM_PREALLOC_LOAD_STORE_OPT_NAME;
  }

private:
  bool CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1, DebugLoc &dl,
                        unsigned &NewOpc, unsigned &FirstReg,
                        unsigned &SecondReg, unsigned &BaseReg,
                        int &Offset,
                        unsigned &PredReg, ARMCC::CondCodes &Pred,
                        bool &isT2);
  bool RescheduleOps(MachineBasicBlock *MBB,
                     SmallVectorImpl<MachineInstr *> &Ops,
                     unsigned Base, bool isLd,
                     DenseMap<MachineInstr*, unsigned> &MI2LocMap);
  bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
};
char ARMPreAllocLoadStoreOpt::ID = 0;
}

INITIALIZE_PASS(ARMPreAllocLoadStoreOpt, "arm-prera-ldst-opt",
                ARM_PREALLOC_LOAD_STORE_OPT_NAME, false, false)
bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  if (AssumeMisalignedLoadStores || skipFunction(*Fn.getFunction()))
    return false;

  TD = &Fn.getDataLayout();
  STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  MRI = &Fn.getRegInfo();
  MF = &Fn;

  bool Modified = false;
  for (MachineBasicBlock &MFI : Fn)
    Modified |= RescheduleLoadStoreInstrs(&MFI);

  return Modified;
}

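/// Return true if it is safe (no intervening instruction clobbers the base
/// register or performs a conflicting memory access) and profitable (bounded
/// increase in register pressure) to move the loads / stores in MemOps next
/// to each other; the scan covers the instructions between I and E.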
static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
                                      MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator E,
                                      SmallPtrSetImpl<MachineInstr*> &MemOps,
                                      SmallSet<unsigned, 4> &MemRegs,
                                      const TargetRegisterInfo *TRI) {
  // Are there stores / loads / calls between them?
  // FIXME: This is overly conservative. We should make use of alias information
  // some day.
  SmallSet<unsigned, 4> AddedRegPressure;
  while (++I != E) {
    if (I->isDebugValue() || MemOps.count(&*I))
      continue;
    if (I->isCall() || I->isTerminator() || I->hasUnmodeledSideEffects())
      return false;
    if (isLd && I->mayStore())
      return false;
    if (!isLd) {
      if (I->mayLoad())
        return false;
      // It's not safe to move the first 'str' down.
      // str r1, [r0]
      // strh r5, [r0]
      // str r4, [r0, #+4]
      if (I->mayStore())
        return false;
    }
    for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
      MachineOperand &MO = I->getOperand(j);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (MO.isDef() && TRI->regsOverlap(Reg, Base))
        return false;
      if (Reg != Base && !MemRegs.count(Reg))
        AddedRegPressure.insert(Reg);
    }
  }

  // Estimate register pressure increase due to the transformation.
  if (MemRegs.size() <= 4)
    // Ok if we are moving a small number of instructions.
    return true;
  return AddedRegPressure.size() <= MemRegs.size() * 2;
}

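/// Check whether Op0 and Op1, two loads or two stores off the same base
/// register, can be combined into a single LDRD/STRD. On success, fill in the
/// new opcode, the two transfer registers, the base register, the encoded
/// offset, and the predicate.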
bool
ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
                                          DebugLoc &dl, unsigned &NewOpc,
                                          unsigned &FirstReg,
                                          unsigned &SecondReg,
                                          unsigned &BaseReg, int &Offset,
                                          unsigned &PredReg,
                                          ARMCC::CondCodes &Pred,
                                          bool &isT2) {
  // Make sure we're allowed to generate LDRD/STRD.
  if (!STI->hasV5TEOps())
    return false;

  // FIXME: VLDRS / VSTRS -> VLDRD / VSTRD
  unsigned Scale = 1;
  unsigned Opcode = Op0->getOpcode();
  if (Opcode == ARM::LDRi12) {
    NewOpc = ARM::LDRD;
  } else if (Opcode == ARM::STRi12) {
    NewOpc = ARM::STRD;
  } else if (Opcode == ARM::t2LDRi8 || Opcode == ARM::t2LDRi12) {
    NewOpc = ARM::t2LDRDi8;
    Scale = 4;
    isT2 = true;
  } else if (Opcode == ARM::t2STRi8 || Opcode == ARM::t2STRi12) {
    NewOpc = ARM::t2STRDi8;
    Scale = 4;
    isT2 = true;
  } else {
    return false;
  }

  // Make sure the base address satisfies i64 ld / st alignment requirement.
  // At the moment, we ignore the memoryoperand's value.
  // If we want to use AliasAnalysis, we should check it accordingly.
  if (!Op0->hasOneMemOperand() ||
      (*Op0->memoperands_begin())->isVolatile())
    return false;

  unsigned Align = (*Op0->memoperands_begin())->getAlignment();
  const Function *Func = MF->getFunction();
  unsigned ReqAlign = STI->hasV6Ops()
    ? TD->getABITypeAlignment(Type::getInt64Ty(Func->getContext()))
    : 8;  // Pre-v6 needs 8-byte alignment.
  if (Align < ReqAlign)
    return false;

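  // Note on encodings: ARM LDRD/STRD (addrmode3) takes an 8-bit offset
  // magnitude, i.e. +/-255 bytes, while t2LDRDi8/t2STRDi8 take an 8-bit
  // offset scaled by 4, i.e. multiples of 4 in [-1020, 1020]. Scale and Limit
  // below implement exactly these ranges.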
  // Then make sure the immediate offset fits.
  int OffImm = getMemoryOpOffset(*Op0);
  if (isT2) {
    int Limit = (1 << 8) * Scale;
    if (OffImm >= Limit || (OffImm <= -Limit) || (OffImm & (Scale-1)))
      return false;
    Offset = OffImm;
  } else {
    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (OffImm < 0) {
      AddSub = ARM_AM::sub;
      OffImm = -OffImm;
    }
    int Limit = (1 << 8) * Scale;
    if (OffImm >= Limit || (OffImm & (Scale-1)))
      return false;
    Offset = ARM_AM::getAM3Opc(AddSub, OffImm);
  }
  FirstReg = Op0->getOperand(0).getReg();
  SecondReg = Op1->getOperand(0).getReg();
  if (FirstReg == SecondReg)
    return false;
  BaseReg = Op0->getOperand(1).getReg();
  Pred = getInstrPredicate(*Op0, PredReg);
  dl = Op0->getDebugLoc();
  return true;
}

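/// Given the loads / stores in Ops that share the base register Base, find
/// runs at consecutive, ascending offsets and move them next to each other so
/// a later pass can merge them; a run of exactly two may be combined into an
/// LDRD/STRD directly.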
bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
                                 SmallVectorImpl<MachineInstr *> &Ops,
                                 unsigned Base, bool isLd,
                                 DenseMap<MachineInstr*, unsigned> &MI2LocMap) {
  bool RetVal = false;

  // Sort by offset (in reverse order).
  std::sort(Ops.begin(), Ops.end(),
            [](const MachineInstr *LHS, const MachineInstr *RHS) {
              int LOffset = getMemoryOpOffset(*LHS);
              int ROffset = getMemoryOpOffset(*RHS);
              assert(LHS == RHS || LOffset != ROffset);
              return LOffset > ROffset;
            });

  // The loads / stores of the same base are in order. Scan them from first to
  // last and check for the following:
  // 1. Any def of base.
  // 2. Any gaps.
  while (Ops.size() > 1) {
    unsigned FirstLoc = ~0U;
    unsigned LastLoc = 0;
    MachineInstr *FirstOp = nullptr;
    MachineInstr *LastOp = nullptr;
    int LastOffset = 0;
    unsigned LastOpcode = 0;
    unsigned LastBytes = 0;
    unsigned NumMove = 0;
    for (int i = Ops.size() - 1; i >= 0; --i) {
      MachineInstr *Op = Ops[i];
      unsigned Loc = MI2LocMap[Op];
      if (Loc <= FirstLoc) {
        FirstLoc = Loc;
        FirstOp = Op;
      }
      if (Loc >= LastLoc) {
        LastLoc = Loc;
        LastOp = Op;
      }

      unsigned LSMOpcode
        = getLoadStoreMultipleOpcode(Op->getOpcode(), ARM_AM::ia);
      if (LastOpcode && LSMOpcode != LastOpcode)
        break;

      int Offset = getMemoryOpOffset(*Op);
      unsigned Bytes = getLSMultipleTransferSize(Op);
      if (LastBytes) {
        if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
          break;
      }
      LastOffset = Offset;
      LastBytes = Bytes;
      LastOpcode = LSMOpcode;
      if (++NumMove == 8) // FIXME: Tune this limit.
        break;
    }
    if (NumMove <= 1)
      Ops.pop_back();
    else {
      SmallPtrSet<MachineInstr*, 4> MemOps;
      SmallSet<unsigned, 4> MemRegs;
      for (int i = NumMove-1; i >= 0; --i) {
        MemOps.insert(Ops[i]);
        MemRegs.insert(Ops[i]->getOperand(0).getReg());
      }

      // Be conservative, if the instructions are too far apart, don't
      // move them. We want to limit the increase of register pressure.
      bool DoMove = (LastLoc - FirstLoc) <= NumMove*4; // FIXME: Tune this.
      if (DoMove)
        DoMove = IsSafeAndProfitableToMove(isLd, Base, FirstOp, LastOp,
                                           MemOps, MemRegs, TRI);
      if (!DoMove) {
        for (unsigned i = 0; i != NumMove; ++i)
          Ops.pop_back();
      } else {
        // This is the new location for the loads / stores.
        MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
        while (InsertPos != MBB->end() &&
               (MemOps.count(&*InsertPos) || InsertPos->isDebugValue()))
          ++InsertPos;

        // If we are moving a pair of loads / stores, see if it makes sense
        // to try to allocate a pair of registers that can form register pairs.
        MachineInstr *Op0 = Ops.back();
        MachineInstr *Op1 = Ops[Ops.size()-2];
        unsigned FirstReg = 0, SecondReg = 0;
        unsigned BaseReg = 0, PredReg = 0;
        ARMCC::CondCodes Pred = ARMCC::AL;
        bool isT2 = false;
        unsigned NewOpc = 0;
        int Offset = 0;
        DebugLoc dl;
        if (NumMove == 2 && CanFormLdStDWord(Op0, Op1, dl, NewOpc,
                                             FirstReg, SecondReg, BaseReg,
                                             Offset, PredReg, Pred, isT2)) {
          Ops.pop_back();
          Ops.pop_back();

          const MCInstrDesc &MCID = TII->get(NewOpc);
          const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI, *MF);
          MRI->constrainRegClass(FirstReg, TRC);
          MRI->constrainRegClass(SecondReg, TRC);

          // Form the pair instruction.
          if (isLd) {
            MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
              .addReg(FirstReg, RegState::Define)
              .addReg(SecondReg, RegState::Define)
              .addReg(BaseReg);
            // FIXME: We're converting from LDRi12 to an insn that still
            // uses addrmode2, so we need an explicit offset reg. It should
            // always be reg0 since we're transforming LDRi12s.
            if (!isT2)
              MIB.addReg(0);
            MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
            MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1));
            DEBUG(dbgs() << "Formed " << *MIB << "\n");
            ++NumLDRDFormed;
          } else {
            MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
              .addReg(FirstReg)
              .addReg(SecondReg)
              .addReg(BaseReg);
            // FIXME: We're converting from STRi12 to an insn that still
            // uses addrmode2, so we need an explicit offset reg. It should
            // always be reg0 since we're transforming STRi12s.
            if (!isT2)
              MIB.addReg(0);
            MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
            MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1));
            DEBUG(dbgs() << "Formed " << *MIB << "\n");
            ++NumSTRDFormed;
          }
          MBB->erase(Op0);
          MBB->erase(Op1);

          if (!isT2) {
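            // In ARM mode, LDRD/STRD needs an even/odd consecutive physical
            // register pair (e.g. r0/r1); Thumb2 has no such restriction, so
            // the hints are only needed for the ARM-mode form.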
            // Add register allocation hints to form register pairs.
            MRI->setRegAllocationHint(FirstReg, ARMRI::RegPairEven, SecondReg);
            MRI->setRegAllocationHint(SecondReg, ARMRI::RegPairOdd, FirstReg);
          }
        } else {
          for (unsigned i = 0; i != NumMove; ++i) {
            MachineInstr *Op = Ops.back();
            Ops.pop_back();
            MBB->splice(InsertPos, MBB, Op);
          }
        }

        NumLdStMoved += NumMove;
        RetVal = true;
      }
    }
  }

  return RetVal;
}

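/// Scan the basic block, bucketing unpredicated loads and stores by their
/// base register, then hand each bucket with more than one member to
/// RescheduleOps. Calls, terminators, and repeated base+offset accesses act
/// as barriers: the buckets collected so far are processed and the scan
/// restarts after the barrier.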
bool
ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
  bool RetVal = false;

  DenseMap<MachineInstr*, unsigned> MI2LocMap;
  DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2LdsMap;
  DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2StsMap;
  SmallVector<unsigned, 4> LdBases;
  SmallVector<unsigned, 4> StBases;

  unsigned Loc = 0;
  MachineBasicBlock::iterator MBBI = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  while (MBBI != E) {
    for (; MBBI != E; ++MBBI) {
      MachineInstr &MI = *MBBI;
      if (MI.isCall() || MI.isTerminator()) {
        // Stop at barriers.
        ++MBBI;
        break;
      }

      if (!MI.isDebugValue())
        MI2LocMap[&MI] = ++Loc;

      if (!isMemoryOp(MI))
        continue;
      unsigned PredReg = 0;
      if (getInstrPredicate(MI, PredReg) != ARMCC::AL)
        continue;

      int Opc = MI.getOpcode();
      bool isLd = isLoadSingle(Opc);
      unsigned Base = MI.getOperand(1).getReg();
      int Offset = getMemoryOpOffset(MI);

      bool StopHere = false;
      if (isLd) {
        DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
          Base2LdsMap.find(Base);
        if (BI != Base2LdsMap.end()) {
          for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
            if (Offset == getMemoryOpOffset(*BI->second[i])) {
              StopHere = true;
              break;
            }
          }
          if (!StopHere)
            BI->second.push_back(&MI);
        } else {
          Base2LdsMap[Base].push_back(&MI);
          LdBases.push_back(Base);
        }
      } else {
        DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
          Base2StsMap.find(Base);
        if (BI != Base2StsMap.end()) {
          for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
            if (Offset == getMemoryOpOffset(*BI->second[i])) {
              StopHere = true;
              break;
            }
          }
          if (!StopHere)
            BI->second.push_back(&MI);
        } else {
          Base2StsMap[Base].push_back(&MI);
          StBases.push_back(Base);
        }
      }

      if (StopHere) {
        // Found a duplicate (a base+offset combination that's seen earlier).
        // Backtrack.
        --Loc;
        break;
      }
    }

    // Re-schedule loads.
    for (unsigned i = 0, e = LdBases.size(); i != e; ++i) {
      unsigned Base = LdBases[i];
      SmallVectorImpl<MachineInstr *> &Lds = Base2LdsMap[Base];
      if (Lds.size() > 1)
        RetVal |= RescheduleOps(MBB, Lds, Base, true, MI2LocMap);
    }

    // Re-schedule stores.
    for (unsigned i = 0, e = StBases.size(); i != e; ++i) {
      unsigned Base = StBases[i];
      SmallVectorImpl<MachineInstr *> &Sts = Base2StsMap[Base];
      if (Sts.size() > 1)
        RetVal |= RescheduleOps(MBB, Sts, Base, false, MI2LocMap);
    }

    if (MBBI != E) {
      Base2LdsMap.clear();
      Base2StsMap.clear();
      LdBases.clear();
      StBases.clear();
    }
  }

  return RetVal;
}
/// Returns an instance of the load / store optimization pass.
FunctionPass *llvm::createARMLoadStoreOptimizationPass(bool PreAlloc) {
  if (PreAlloc)
    return new ARMPreAllocLoadStoreOpt();
  return new ARMLoadStoreOpt();
}