1 //===- SILoadStoreOptimizer.cpp -------------------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This pass tries to fuse DS instructions with nearby immediate offsets.
10 // This will fuse operations such as
11 // ds_read_b32 v0, v2 offset:16
12 // ds_read_b32 v1, v2 offset:32
13 // ==>
14 // ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
16 // The same is done for certain SMEM and VMEM opcodes, e.g.:
17 // s_buffer_load_dword s4, s[0:3], 4
18 // s_buffer_load_dword s5, s[0:3], 8
19 // ==>
20 // s_buffer_load_dwordx2 s[4:5], s[0:3], 4
22 // This pass also tries to promote constant offsets into the immediate by
23 // adjusting the base. It tries to use a base from nearby instructions that
24 // allows it to have a 13-bit constant offset, which is then promoted
25 // to the immediate:
27 // s_movk_i32 s0, 0x1800
28 // v_add_co_u32_e32 v0, vcc, s0, v2
29 // v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
31 // s_movk_i32 s0, 0x1000
32 // v_add_co_u32_e32 v5, vcc, s0, v2
33 // v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
34 // global_load_dwordx2 v[5:6], v[5:6], off
35 // global_load_dwordx2 v[0:1], v[0:1], off
36 // ==>
37 // s_movk_i32 s0, 0x1000
38 // v_add_co_u32_e32 v5, vcc, s0, v2
39 // v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
40 // global_load_dwordx2 v[5:6], v[5:6], off
41 // global_load_dwordx2 v[0:1], v[5:6], off offset:2048
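//
// In the rewritten code the second load reuses the v[5:6] address (base + 0x1000)
// and folds the remaining 0x800 bytes (2048) into its immediate offset, so the
// s_movk/v_add/v_addc sequence that computed v[0:1] becomes dead (assuming it
// has no other uses).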
43 // Future improvements:
45 // - This is currently missing stores of constants because loading
46 // the constant into the data register is placed between the stores, although
47 // this is arguably a scheduling problem.
49 // - Live interval recomputing seems inefficient. This currently only matches
50 // one pair, and recomputes live intervals and moves on to the next pair. It
51 // would be better to compute a list of all merges that need to occur.
53 // - With a list of instructions to process, we can also merge more. If a
54 // cluster of loads has offsets that are too large to fit in the 8-bit
55 // offset fields, but are close enough together that their deltas do fit, we
56 // can add to the base pointer and use the new, reduced offsets.
58 //===----------------------------------------------------------------------===//
61 #include "AMDGPUSubtarget.h"
62 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
63 #include "SIInstrInfo.h"
64 #include "SIRegisterInfo.h"
65 #include "Utils/AMDGPUBaseInfo.h"
66 #include "llvm/ADT/ArrayRef.h"
67 #include "llvm/ADT/SmallVector.h"
68 #include "llvm/ADT/StringRef.h"
69 #include "llvm/Analysis/AliasAnalysis.h"
70 #include "llvm/CodeGen/MachineBasicBlock.h"
71 #include "llvm/CodeGen/MachineFunction.h"
72 #include "llvm/CodeGen/MachineFunctionPass.h"
73 #include "llvm/CodeGen/MachineInstr.h"
74 #include "llvm/CodeGen/MachineInstrBuilder.h"
75 #include "llvm/CodeGen/MachineOperand.h"
76 #include "llvm/CodeGen/MachineRegisterInfo.h"
77 #include "llvm/IR/DebugLoc.h"
78 #include "llvm/InitializePasses.h"
79 #include "llvm/Pass.h"
80 #include "llvm/Support/Debug.h"
81 #include "llvm/Support/MathExtras.h"
82 #include "llvm/Support/raw_ostream.h"
91 #define DEBUG_TYPE "si-load-store-opt"
107 unsigned char NumVAddrs = 0;
110 bool SOffset = false;
116 // GFX10 image_sample instructions can have 12 vaddrs + srsrc + ssamp.
117 const unsigned MaxAddressRegs = 12 + 1 + 1;
119 class SILoadStoreOptimizer : public MachineFunctionPass {
121 MachineBasicBlock::iterator I;
128 InstClassEnum InstClass;
133 int AddrIdx[MaxAddressRegs];
134 const MachineOperand *AddrReg[MaxAddressRegs];
135 unsigned NumAddresses;
138 bool hasSameBaseAddress(const MachineInstr &MI) {
139 for (unsigned i = 0; i < NumAddresses; i++) {
140 const MachineOperand &AddrRegNext = MI.getOperand(AddrIdx[i]);
142 if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
143 if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
144 AddrReg[i]->getImm() != AddrRegNext.getImm()) {
150 // Check same base pointer. Be careful of subregisters, which can occur
151 // with vectors of pointers.
152 if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
153 AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
160 bool hasMergeableAddress(const MachineRegisterInfo &MRI) {
161 for (unsigned i = 0; i < NumAddresses; ++i) {
162 const MachineOperand *AddrOp = AddrReg[i];
163 // Immediates are always OK.
167 // Don't try to merge addresses that aren't either immediates or registers.
168 // TODO: Should be possible to merge FrameIndexes and maybe some other
170 if (!AddrOp->isReg())
173 // TODO: We should be able to merge physical reg addresses.
174 if (Register::isPhysicalRegister(AddrOp->getReg()))
177 // If an address has only one use then there will be no other
178 // instructions with the same address, so we can't merge this one.
179 if (MRI.hasOneNonDBGUse(AddrOp->getReg()))
185 void setMI(MachineBasicBlock::iterator MI, const SIInstrInfo &TII,
186 const GCNSubtarget &STM);
189 struct BaseRegisters {
193 unsigned LoSubReg = 0;
194 unsigned HiSubReg = 0;
202 using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;
205 const GCNSubtarget *STM = nullptr;
206 const SIInstrInfo *TII = nullptr;
207 const SIRegisterInfo *TRI = nullptr;
208 MachineRegisterInfo *MRI = nullptr;
209 AliasAnalysis *AA = nullptr;
212 static bool dmasksCanBeCombined(const CombineInfo &CI,
213 const SIInstrInfo &TII,
214 const CombineInfo &Paired);
215 static bool offsetsCanBeCombined(CombineInfo &CI, const GCNSubtarget &STI,
216 CombineInfo &Paired, bool Modify = false);
217 static bool widthsFit(const GCNSubtarget &STI, const CombineInfo &CI,
218 const CombineInfo &Paired);
219 static unsigned getNewOpcode(const CombineInfo &CI, const CombineInfo &Paired);
220 static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI,
221 const CombineInfo &Paired);
222 const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI,
223 const CombineInfo &Paired);
225 bool checkAndPrepareMerge(CombineInfo &CI, CombineInfo &Paired,
226 SmallVectorImpl<MachineInstr *> &InstsToMove);
228 unsigned read2Opcode(unsigned EltSize) const;
229 unsigned read2ST64Opcode(unsigned EltSize) const;
230 MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI,
232 const SmallVectorImpl<MachineInstr *> &InstsToMove);
234 unsigned write2Opcode(unsigned EltSize) const;
235 unsigned write2ST64Opcode(unsigned EltSize) const;
236 MachineBasicBlock::iterator
237 mergeWrite2Pair(CombineInfo &CI, CombineInfo &Paired,
238 const SmallVectorImpl<MachineInstr *> &InstsToMove);
239 MachineBasicBlock::iterator
240 mergeImagePair(CombineInfo &CI, CombineInfo &Paired,
241 const SmallVectorImpl<MachineInstr *> &InstsToMove);
242 MachineBasicBlock::iterator
243 mergeSBufferLoadImmPair(CombineInfo &CI, CombineInfo &Paired,
244 const SmallVectorImpl<MachineInstr *> &InstsToMove);
245 MachineBasicBlock::iterator
246 mergeBufferLoadPair(CombineInfo &CI, CombineInfo &Paired,
247 const SmallVectorImpl<MachineInstr *> &InstsToMove);
248 MachineBasicBlock::iterator
249 mergeBufferStorePair(CombineInfo &CI, CombineInfo &Paired,
250 const SmallVectorImpl<MachineInstr *> &InstsToMove);
251 MachineBasicBlock::iterator
252 mergeTBufferLoadPair(CombineInfo &CI, CombineInfo &Paired,
253 const SmallVectorImpl<MachineInstr *> &InstsToMove);
254 MachineBasicBlock::iterator
255 mergeTBufferStorePair(CombineInfo &CI, CombineInfo &Paired,
256 const SmallVectorImpl<MachineInstr *> &InstsToMove);
258 void updateBaseAndOffset(MachineInstr &I, Register NewBase,
259 int32_t NewOffset) const;
260 Register computeBase(MachineInstr &MI, const MemAddress &Addr) const;
261 MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const;
262 Optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
263 void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr) const;
264 /// Promotes constant offset to the immediate by adjusting the base. It
265 /// tries to use a base from the nearby instructions that allows it to have
266 /// a 13-bit constant offset which gets promoted to the immediate.
267 bool promoteConstantOffsetToImm(MachineInstr &CI,
269 SmallPtrSet<MachineInstr *, 4> &Promoted) const;
270 void addInstToMergeableList(const CombineInfo &CI,
271 std::list<std::list<CombineInfo> > &MergeableInsts) const;
273 std::pair<MachineBasicBlock::iterator, bool> collectMergeableInsts(
274 MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End,
275 MemInfoMap &Visited, SmallPtrSet<MachineInstr *, 4> &AnchorList,
276 std::list<std::list<CombineInfo>> &MergeableInsts) const;
281 SILoadStoreOptimizer() : MachineFunctionPass(ID) {
282 initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
285 bool optimizeInstsWithSameBaseAddr(std::list<CombineInfo> &MergeList,
286 bool &OptimizeListAgain);
287 bool optimizeBlock(std::list<std::list<CombineInfo> > &MergeableInsts);
289 bool runOnMachineFunction(MachineFunction &MF) override;
291 StringRef getPassName() const override { return "SI Load Store Optimizer"; }
293 void getAnalysisUsage(AnalysisUsage &AU) const override {
294 AU.setPreservesCFG();
295 AU.addRequired<AAResultsWrapperPass>();
297 MachineFunctionPass::getAnalysisUsage(AU);
300 MachineFunctionProperties getRequiredProperties() const override {
301 return MachineFunctionProperties()
302 .set(MachineFunctionProperties::Property::IsSSA);
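// Returns the data width of MI in dwords: the MUBUF/MTBUF element count, the
// number of set dmask bits for MIMG, or 1/2/4 for the S_BUFFER_LOAD_DWORD[Xn]
// opcodes handled below.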
306 static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
307 const unsigned Opc = MI.getOpcode();
309 if (TII.isMUBUF(Opc)) {
310 // FIXME: Handle d16 correctly
311 return AMDGPU::getMUBUFElements(Opc);
313 if (TII.isMIMG(MI)) {
315 TII.getNamedOperand(MI, AMDGPU::OpName::dmask)->getImm();
316 return countPopulation(DMaskImm);
318 if (TII.isMTBUF(Opc)) {
319 return AMDGPU::getMTBUFElements(Opc);
323 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
325 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
327 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
334 /// Maps instruction opcode to enum InstClassEnum.
335 static InstClassEnum getInstClass(unsigned Opc, const SIInstrInfo &TII) {
338 if (TII.isMUBUF(Opc)) {
339 switch (AMDGPU::getMUBUFBaseOpcode(Opc)) {
342 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
343 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
344 case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
345 case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
347 case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
348 case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
349 case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
350 case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
354 if (TII.isMIMG(Opc)) {
355 // Ignore instructions encoded without vaddr.
356 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr) == -1 &&
357 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0) == -1)
359 // TODO: Support IMAGE_GET_RESINFO and IMAGE_GET_LOD.
360 if (TII.get(Opc).mayStore() || !TII.get(Opc).mayLoad() ||
365 if (TII.isMTBUF(Opc)) {
366 switch (AMDGPU::getMTBUFBaseOpcode(Opc)) {
369 case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFEN:
370 case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFEN_exact:
371 case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFSET:
372 case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFSET_exact:
374 case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFEN:
375 case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFEN_exact:
376 case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFSET:
377 case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFSET_exact:
378 return TBUFFER_STORE;
382 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
383 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
384 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
385 return S_BUFFER_LOAD_IMM;
386 case AMDGPU::DS_READ_B32:
387 case AMDGPU::DS_READ_B32_gfx9:
388 case AMDGPU::DS_READ_B64:
389 case AMDGPU::DS_READ_B64_gfx9:
391 case AMDGPU::DS_WRITE_B32:
392 case AMDGPU::DS_WRITE_B32_gfx9:
393 case AMDGPU::DS_WRITE_B64:
394 case AMDGPU::DS_WRITE_B64_gfx9:
399 /// Determines instruction subclass from opcode. Only instructions
400 /// of the same subclass can be merged together.
401 static unsigned getInstSubclass(unsigned Opc, const SIInstrInfo &TII) {
404 if (TII.isMUBUF(Opc))
405 return AMDGPU::getMUBUFBaseOpcode(Opc);
406 if (TII.isMIMG(Opc)) {
407 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
409 return Info->BaseOpcode;
411 if (TII.isMTBUF(Opc))
412 return AMDGPU::getMTBUFBaseOpcode(Opc);
414 case AMDGPU::DS_READ_B32:
415 case AMDGPU::DS_READ_B32_gfx9:
416 case AMDGPU::DS_READ_B64:
417 case AMDGPU::DS_READ_B64_gfx9:
418 case AMDGPU::DS_WRITE_B32:
419 case AMDGPU::DS_WRITE_B32_gfx9:
420 case AMDGPU::DS_WRITE_B64:
421 case AMDGPU::DS_WRITE_B64_gfx9:
423 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
424 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
425 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
426 return AMDGPU::S_BUFFER_LOAD_DWORD_IMM;
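// Determine which address operands (vaddr/vaddr0..N, srsrc, soffset, addr,
// sbase, ssamp) are present for the given opcode, so setMI() can record them.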
430 static AddressRegs getRegs(unsigned Opc, const SIInstrInfo &TII) {
433 if (TII.isMUBUF(Opc)) {
434 if (AMDGPU::getMUBUFHasVAddr(Opc))
436 if (AMDGPU::getMUBUFHasSrsrc(Opc))
438 if (AMDGPU::getMUBUFHasSoffset(Opc))
439 Result.SOffset = true;
444 if (TII.isMIMG(Opc)) {
445 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
446 if (VAddr0Idx >= 0) {
447 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
448 Result.NumVAddrs = SRsrcIdx - VAddr0Idx;
453 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
454 if (Info && AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode)->Sampler)
459 if (TII.isMTBUF(Opc)) {
460 if (AMDGPU::getMTBUFHasVAddr(Opc))
462 if (AMDGPU::getMTBUFHasSrsrc(Opc))
464 if (AMDGPU::getMTBUFHasSoffset(Opc))
465 Result.SOffset = true;
473 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
474 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
475 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
478 case AMDGPU::DS_READ_B32:
479 case AMDGPU::DS_READ_B64:
480 case AMDGPU::DS_READ_B32_gfx9:
481 case AMDGPU::DS_READ_B64_gfx9:
482 case AMDGPU::DS_WRITE_B32:
483 case AMDGPU::DS_WRITE_B64:
484 case AMDGPU::DS_WRITE_B32_gfx9:
485 case AMDGPU::DS_WRITE_B64_gfx9:
491 void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
492 const SIInstrInfo &TII,
493 const GCNSubtarget &STM) {
495 unsigned Opc = MI->getOpcode();
496 InstClass = getInstClass(Opc, TII);
498 if (InstClass == UNKNOWN)
504 (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
509 (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
512 case S_BUFFER_LOAD_IMM:
513 EltSize = AMDGPU::convertSMRDOffsetUnits(STM, 4);
520 if (InstClass == MIMG) {
521 DMask = TII.getNamedOperand(*I, AMDGPU::OpName::dmask)->getImm();
522 // Offset is not considered for MIMG instructions.
525 int OffsetIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::offset);
526 Offset = I->getOperand(OffsetIdx).getImm();
529 if (InstClass == TBUFFER_LOAD || InstClass == TBUFFER_STORE)
530 Format = TII.getNamedOperand(*I, AMDGPU::OpName::format)->getImm();
532 Width = getOpcodeWidth(*I, TII);
534 if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) {
536 } else if (InstClass != MIMG) {
537 GLC = TII.getNamedOperand(*I, AMDGPU::OpName::glc)->getImm();
538 if (InstClass != S_BUFFER_LOAD_IMM) {
539 SLC = TII.getNamedOperand(*I, AMDGPU::OpName::slc)->getImm();
541 DLC = TII.getNamedOperand(*I, AMDGPU::OpName::dlc)->getImm();
544 AddressRegs Regs = getRegs(Opc, TII);
547 for (unsigned J = 0; J < Regs.NumVAddrs; J++)
548 AddrIdx[NumAddresses++] =
549 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0) + J;
551 AddrIdx[NumAddresses++] =
552 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::addr);
554 AddrIdx[NumAddresses++] =
555 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sbase);
557 AddrIdx[NumAddresses++] =
558 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
560 AddrIdx[NumAddresses++] =
561 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset);
563 AddrIdx[NumAddresses++] =
564 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
566 AddrIdx[NumAddresses++] =
567 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::ssamp);
568 assert(NumAddresses <= MaxAddressRegs);
570 for (unsigned J = 0; J < NumAddresses; J++)
571 AddrReg[J] = &I->getOperand(AddrIdx[J]);
574 } // end anonymous namespace.
576 INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
577 "SI Load Store Optimizer", false, false)
578 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
579 INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
582 char SILoadStoreOptimizer::ID = 0;
584 char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;
586 FunctionPass *llvm::createSILoadStoreOptimizerPass() {
587 return new SILoadStoreOptimizer();
590 static void moveInstsAfter(MachineBasicBlock::iterator I,
591 ArrayRef<MachineInstr *> InstsToMove) {
592 MachineBasicBlock *MBB = I->getParent();
594 for (MachineInstr *MI : InstsToMove) {
595 MI->removeFromParent();
600 static void addDefsUsesToList(const MachineInstr &MI,
601 DenseSet<Register> &RegDefs,
602 DenseSet<Register> &PhysRegUses) {
603 for (const MachineOperand &Op : MI.operands()) {
606 RegDefs.insert(Op.getReg());
607 else if (Op.readsReg() && Register::isPhysicalRegister(Op.getReg()))
608 PhysRegUses.insert(Op.getReg());
613 static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
614 MachineBasicBlock::iterator B,
616 // RAW or WAR - cannot reorder
617 // WAW - cannot reorder
618 // RAR - safe to reorder
619 return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
622 // Add MI and its defs to the lists if MI reads one of the defs that are
623 // already in the list. Returns true in that case.
624 static bool addToListsIfDependent(MachineInstr &MI, DenseSet<Register> &RegDefs,
625 DenseSet<Register> &PhysRegUses,
626 SmallVectorImpl<MachineInstr *> &Insts) {
627 for (MachineOperand &Use : MI.operands()) {
628 // If one of the defs is read, then there is a use of Def between I and the
629 // instruction that I will potentially be merged with. We will need to move
630 // this instruction after the merged instructions.
632 // Similarly, if there is a def which is read by an instruction that is to
633 // be moved for merging, then we need to move the def-instruction as well.
634 // This can only happen for physical registers such as M0; virtual
635 // registers are in SSA form.
637 ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
638 (Use.isDef() && RegDefs.count(Use.getReg())) ||
639 (Use.isDef() && Register::isPhysicalRegister(Use.getReg()) &&
640 PhysRegUses.count(Use.getReg())))) {
641 Insts.push_back(&MI);
642 addDefsUsesToList(MI, RegDefs, PhysRegUses);
650 static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
651 ArrayRef<MachineInstr *> InstsToMove,
653 assert(MemOp.mayLoadOrStore());
655 for (MachineInstr *InstToMove : InstsToMove) {
656 if (!InstToMove->mayLoadOrStore())
658 if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
664 // This function assumes that \p A and \p B are identical except for size and
665 // offset, and that they reference adjacent memory.
666 static MachineMemOperand *combineKnownAdjacentMMOs(MachineFunction &MF,
667 const MachineMemOperand *A,
668 const MachineMemOperand *B) {
669 unsigned MinOffset = std::min(A->getOffset(), B->getOffset());
670 unsigned Size = A->getSize() + B->getSize();
671 // This function adds the offset parameter to the existing offset for A,
672 // so we pass 0 here as the offset and then manually set it to the correct
673 // value after the call.
674 MachineMemOperand *MMO = MF.getMachineMemOperand(A, 0, Size);
675 MMO->setOffset(MinOffset);
679 bool SILoadStoreOptimizer::dmasksCanBeCombined(const CombineInfo &CI,
680 const SIInstrInfo &TII,
681 const CombineInfo &Paired) {
682 assert(CI.InstClass == MIMG);
684 // Ignore instructions with tfe/lwe set.
685 const auto *TFEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::tfe);
686 const auto *LWEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::lwe);
688 if ((TFEOp && TFEOp->getImm()) || (LWEOp && LWEOp->getImm()))
691 // Check other optional immediate operands for equality.
692 unsigned OperandsToMatch[] = {AMDGPU::OpName::glc, AMDGPU::OpName::slc,
693 AMDGPU::OpName::d16, AMDGPU::OpName::unorm,
694 AMDGPU::OpName::da, AMDGPU::OpName::r128,
695 AMDGPU::OpName::a16, AMDGPU::OpName::dlc};
697 for (auto op : OperandsToMatch) {
698 int Idx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), op);
699 if (AMDGPU::getNamedOperandIdx(Paired.I->getOpcode(), op) != Idx)
702 CI.I->getOperand(Idx).getImm() != Paired.I->getOperand(Idx).getImm())
706 // Check DMask for overlaps.
707 unsigned MaxMask = std::max(CI.DMask, Paired.DMask);
708 unsigned MinMask = std::min(CI.DMask, Paired.DMask);
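// The masks are only combinable if the smaller one fits entirely below the
// lowest set bit of the larger one, e.g. dmasks 0x3 and 0xc can be merged,
// while 0x5 and 0xa interleave and cannot.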
710 unsigned AllowedBitsForMin = llvm::countTrailingZeros(MaxMask);
711 if ((1u << AllowedBitsForMin) <= MinMask)
717 static unsigned getBufferFormatWithCompCount(unsigned OldFormat,
718 unsigned ComponentCount,
719 const GCNSubtarget &STI) {
720 if (ComponentCount > 4)
723 const llvm::AMDGPU::GcnBufferFormatInfo *OldFormatInfo =
724 llvm::AMDGPU::getGcnBufferFormatInfo(OldFormat, STI);
728 const llvm::AMDGPU::GcnBufferFormatInfo *NewFormatInfo =
729 llvm::AMDGPU::getGcnBufferFormatInfo(OldFormatInfo->BitsPerComp,
731 OldFormatInfo->NumFormat, STI);
736 assert(NewFormatInfo->NumFormat == OldFormatInfo->NumFormat &&
737 NewFormatInfo->BitsPerComp == OldFormatInfo->BitsPerComp);
739 return NewFormatInfo->Format;
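// Check whether the offsets of CI and Paired allow the two accesses to be
// merged: adjacent element ranges for SMEM/VMEM, and offsets encodable as
// offset0/offset1 (possibly via the ST64 forms or a shifted base) for DS.
// When Modify is true, also rewrite the CombineInfo fields (Offset, BaseOff,
// UseST64) to the values the merged instruction will use.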
742 bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI,
743 const GCNSubtarget &STI,
746 assert(CI.InstClass != MIMG);
748 // XXX - Would the same offset be OK? Is there any reason this would happen or
750 if (CI.Offset == Paired.Offset)
753 // This won't be valid if the offset isn't aligned.
754 if ((CI.Offset % CI.EltSize != 0) || (Paired.Offset % CI.EltSize != 0))
757 if (CI.InstClass == TBUFFER_LOAD || CI.InstClass == TBUFFER_STORE) {
759 const llvm::AMDGPU::GcnBufferFormatInfo *Info0 =
760 llvm::AMDGPU::getGcnBufferFormatInfo(CI.Format, STI);
763 const llvm::AMDGPU::GcnBufferFormatInfo *Info1 =
764 llvm::AMDGPU::getGcnBufferFormatInfo(Paired.Format, STI);
768 if (Info0->BitsPerComp != Info1->BitsPerComp ||
769 Info0->NumFormat != Info1->NumFormat)
772 // TODO: Should be possible to support more formats, but if format loads
773 // are not dword-aligned, the merged load might not be valid.
774 if (Info0->BitsPerComp != 32)
777 if (getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, STI) == 0)
781 unsigned EltOffset0 = CI.Offset / CI.EltSize;
782 unsigned EltOffset1 = Paired.Offset / CI.EltSize;
786 // Handle SMEM and VMEM instructions.
787 if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
788 return (EltOffset0 + CI.Width == EltOffset1 ||
789 EltOffset1 + Paired.Width == EltOffset0) &&
790 CI.GLC == Paired.GLC && CI.DLC == Paired.DLC &&
791 (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC == Paired.SLC);
794 // Handle DS instructions.
795 // If the offset in elements doesn't fit in 8-bits, we might be able to use
796 // the stride 64 versions.
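// For example, with 4-byte elements, accesses at byte offsets 0 and 8192 have
// element offsets 0 and 2048; both are multiples of 64 and 2048/64 = 32 fits
// in 8 bits, so the pair can use the ST64 form:
//   ds_read_b32 v0, v2
//   ds_read_b32 v1, v2 offset:8192
// ==>
//   ds_read2st64_b32 v[0:1], v2 offset0:0 offset1:32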
797 if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
798 isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
800 CI.Offset = EltOffset0 / 64;
801 Paired.Offset = EltOffset1 / 64;
807 // Check if the new offsets fit in the reduced 8-bit range.
808 if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
810 CI.Offset = EltOffset0;
811 Paired.Offset = EltOffset1;
816 // Try to shift base address to decrease offsets.
817 unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
818 CI.BaseOff = std::min(CI.Offset, Paired.Offset);
820 if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
822 CI.Offset = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
823 Paired.Offset = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
829 if (isUInt<8>(OffsetDiff)) {
831 CI.Offset = EltOffset0 - CI.BaseOff / CI.EltSize;
832 Paired.Offset = EltOffset1 - CI.BaseOff / CI.EltSize;
840 bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
841 const CombineInfo &CI,
842 const CombineInfo &Paired) {
843 const unsigned Width = (CI.Width + Paired.Width);
844 switch (CI.InstClass) {
846 return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
847 case S_BUFFER_LOAD_IMM:
858 /// This function assumes that CI comes before Paired in a basic block.
859 bool SILoadStoreOptimizer::checkAndPrepareMerge(
860 CombineInfo &CI, CombineInfo &Paired,
861 SmallVectorImpl<MachineInstr *> &InstsToMove) {
863 // Check both offsets (or masks for MIMG) can be combined and fit in the
865 if (CI.InstClass == MIMG && !dmasksCanBeCombined(CI, *TII, Paired))
868 if (CI.InstClass != MIMG &&
869 (!widthsFit(*STM, CI, Paired) || !offsetsCanBeCombined(CI, *STM, Paired)))
872 const unsigned Opc = CI.I->getOpcode();
873 const InstClassEnum InstClass = getInstClass(Opc, *TII);
875 if (InstClass == UNKNOWN) {
878 const unsigned InstSubclass = getInstSubclass(Opc, *TII);
880 // Do not merge VMEM buffer instructions with "swizzled" bit set.
882 AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::swz);
883 if (Swizzled != -1 && CI.I->getOperand(Swizzled).getImm())
886 DenseSet<Register> RegDefsToMove;
887 DenseSet<Register> PhysRegUsesToMove;
888 addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);
890 MachineBasicBlock::iterator E = std::next(Paired.I);
891 MachineBasicBlock::iterator MBBI = std::next(CI.I);
892 MachineBasicBlock::iterator MBBE = CI.I->getParent()->end();
893 for (; MBBI != E; ++MBBI) {
896 // CombineInfo::Order is a hint on the instruction ordering within the
897 // basic block. This hint suggests that CI precedes Paired, which is
898 // true most of the time. However, moveInstsAfter() processing a
899 // previous list may have changed this order in a situation when it
900 // moves an instruction which exists in some other merge list.
901 // In this case it must be dependent.
905 if ((getInstClass(MBBI->getOpcode(), *TII) != InstClass) ||
906 (getInstSubclass(MBBI->getOpcode(), *TII) != InstSubclass)) {
907 // This is not a matching instruction, but we can keep looking as
908 // long as one of these conditions is met:
909 // 1. It is safe to move I down past MBBI.
910 // 2. It is safe to move MBBI down past the instruction that I will
913 if (MBBI->hasUnmodeledSideEffects()) {
914 // We can't re-order this instruction with respect to other memory
915 // operations, so we fail both conditions mentioned above.
919 if (MBBI->mayLoadOrStore() &&
920 (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
921 !canMoveInstsAcrossMemOp(*MBBI, InstsToMove, AA))) {
922 // We fail condition #1, but we may still be able to satisfy condition
923 // #2. Add this instruction to the move list and then we will check
924 // if condition #2 holds once we have selected the matching instruction.
925 InstsToMove.push_back(&*MBBI);
926 addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
930 // When we match I with another memory instruction we will be moving I down
931 // to the location of the matched instruction, so any uses of I will need to
932 // be moved down as well.
933 addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
939 // Don't merge volatile or otherwise ordered memory accesses.
939 if (MBBI->hasOrderedMemoryRef())
943 AMDGPU::getNamedOperandIdx(MBBI->getOpcode(), AMDGPU::OpName::swz);
944 if (Swizzled != -1 && MBBI->getOperand(Swizzled).getImm())
947 // Handle a case like
948 // DS_WRITE_B32 addr, v, idx0
949 // w = DS_READ_B32 addr, idx0
950 // DS_WRITE_B32 addr, f(w), idx1
951 // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
952 // merging of the two writes.
953 if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
957 if (&*MBBI == &*Paired.I) {
958 // We need to go through the list of instructions that we plan to
959 // move and make sure they are all safe to move down past the merged
961 if (canMoveInstsAcrossMemOp(*MBBI, InstsToMove, AA)) {
963 // Call offsetsCanBeCombined with modify = true so that the offsets are
964 // correct for the new instruction. This should return true, because
965 // this function should only be called on CombineInfo objects that
966 // have already been confirmed to be mergeable.
967 if (CI.InstClass != MIMG)
968 offsetsCanBeCombined(CI, *STM, Paired, true);
974 // We've found a load/store that we couldn't merge for some reason.
975 // We could potentially keep looking, but we'd need to make sure that
976 // it was safe to move I and also all the instructions in InstsToMove
977 // down past this instruction.
978 // Check if we can move I across MBBI and if we can move all of I's users too.
979 if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
980 !canMoveInstsAcrossMemOp(*MBBI, InstsToMove, AA))
986 unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
987 if (STM->ldsRequiresM0Init())
988 return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
989 return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
992 unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
993 if (STM->ldsRequiresM0Init())
994 return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
996 return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
997 : AMDGPU::DS_READ2ST64_B64_gfx9;
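// Fuse CI and Paired into a single DS_READ2* (or DS_READ2ST64*) instruction
// and copy the two halves of its result back into the original destination
// registers. If a base offset was chosen, it is materialized and added to the
// address first.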
1000 MachineBasicBlock::iterator
1001 SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI, CombineInfo &Paired,
1002 const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1003 MachineBasicBlock *MBB = CI.I->getParent();
1005 // Be careful, since the addresses could be subregisters themselves in weird
1006 // cases, like vectors of pointers.
1007 const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
1009 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
1010 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdst);
1012 unsigned NewOffset0 = CI.Offset;
1013 unsigned NewOffset1 = Paired.Offset;
1015 CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);
1017 unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
1018 unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
1020 if (NewOffset0 > NewOffset1) {
1021 // Canonicalize the merged instruction so the smaller offset comes first.
1022 std::swap(NewOffset0, NewOffset1);
1023 std::swap(SubRegIdx0, SubRegIdx1);
1026 assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
1027 (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
1029 const MCInstrDesc &Read2Desc = TII->get(Opc);
1031 const TargetRegisterClass *SuperRC =
1032 (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
1033 Register DestReg = MRI->createVirtualRegister(SuperRC);
1035 DebugLoc DL = CI.I->getDebugLoc();
1037 Register BaseReg = AddrReg->getReg();
1038 unsigned BaseSubReg = AddrReg->getSubReg();
1039 unsigned BaseRegFlags = 0;
1041 Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1042 BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
1043 .addImm(CI.BaseOff);
1045 BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1046 BaseRegFlags = RegState::Kill;
1048 TII->getAddNoCarry(*MBB, Paired.I, DL, BaseReg)
1050 .addReg(AddrReg->getReg(), 0, BaseSubReg)
1051 .addImm(0); // clamp bit
1055 MachineInstrBuilder Read2 =
1056 BuildMI(*MBB, Paired.I, DL, Read2Desc, DestReg)
1057 .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
1058 .addImm(NewOffset0) // offset0
1059 .addImm(NewOffset1) // offset1
1061 .cloneMergedMemRefs({&*CI.I, &*Paired.I});
1065 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1067 // Copy to the old destination registers.
1068 BuildMI(*MBB, Paired.I, DL, CopyDesc)
1069 .add(*Dest0) // Copy to same destination including flags and sub reg.
1070 .addReg(DestReg, 0, SubRegIdx0);
1071 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1073 .addReg(DestReg, RegState::Kill, SubRegIdx1);
1075 moveInstsAfter(Copy1, InstsToMove);
1077 CI.I->eraseFromParent();
1078 Paired.I->eraseFromParent();
1080 LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
1084 unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
1085 if (STM->ldsRequiresM0Init())
1086 return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
1087 return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
1088 : AMDGPU::DS_WRITE2_B64_gfx9;
1091 unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
1092 if (STM->ldsRequiresM0Init())
1093 return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
1094 : AMDGPU::DS_WRITE2ST64_B64;
1096 return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
1097 : AMDGPU::DS_WRITE2ST64_B64_gfx9;
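// Fuse CI and Paired into a single DS_WRITE2* (or DS_WRITE2ST64*) instruction,
// reusing the original data operands; the base offset, if any, is materialized
// and added to the address just as in the read2 case.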
1100 MachineBasicBlock::iterator
1101 SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI, CombineInfo &Paired,
1102 const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1103 MachineBasicBlock *MBB = CI.I->getParent();
1105 // Be sure to use .add(), and not .addReg(), with these operands. We want to
1106 // preserve the subregister index and any register flags set on them.
1107 const MachineOperand *AddrReg =
1108 TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
1109 const MachineOperand *Data0 =
1110 TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
1111 const MachineOperand *Data1 =
1112 TII->getNamedOperand(*Paired.I, AMDGPU::OpName::data0);
1114 unsigned NewOffset0 = CI.Offset;
1115 unsigned NewOffset1 = Paired.Offset;
1117 CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);
1119 if (NewOffset0 > NewOffset1) {
1120 // Canonicalize the merged instruction so the smaller offset comes first.
1121 std::swap(NewOffset0, NewOffset1);
1122 std::swap(Data0, Data1);
1125 assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
1126 (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
1128 const MCInstrDesc &Write2Desc = TII->get(Opc);
1129 DebugLoc DL = CI.I->getDebugLoc();
1131 Register BaseReg = AddrReg->getReg();
1132 unsigned BaseSubReg = AddrReg->getSubReg();
1133 unsigned BaseRegFlags = 0;
1135 Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1136 BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
1137 .addImm(CI.BaseOff);
1139 BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1140 BaseRegFlags = RegState::Kill;
1142 TII->getAddNoCarry(*MBB, Paired.I, DL, BaseReg)
1144 .addReg(AddrReg->getReg(), 0, BaseSubReg)
1145 .addImm(0); // clamp bit
1149 MachineInstrBuilder Write2 =
1150 BuildMI(*MBB, Paired.I, DL, Write2Desc)
1151 .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
1152 .add(*Data0) // data0
1153 .add(*Data1) // data1
1154 .addImm(NewOffset0) // offset0
1155 .addImm(NewOffset1) // offset1
1157 .cloneMergedMemRefs({&*CI.I, &*Paired.I});
1159 moveInstsAfter(Write2, InstsToMove);
1161 CI.I->eraseFromParent();
1162 Paired.I->eraseFromParent();
1164 LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
1168 MachineBasicBlock::iterator
1169 SILoadStoreOptimizer::mergeImagePair(CombineInfo &CI, CombineInfo &Paired,
1170 const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1171 MachineBasicBlock *MBB = CI.I->getParent();
1172 DebugLoc DL = CI.I->getDebugLoc();
1173 const unsigned Opcode = getNewOpcode(CI, Paired);
1175 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1177 Register DestReg = MRI->createVirtualRegister(SuperRC);
1178 unsigned MergedDMask = CI.DMask | Paired.DMask;
1180 AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::dmask);
1182 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
1183 for (unsigned I = 1, E = (*CI.I).getNumOperands(); I != E; ++I) {
1185 MIB.addImm(MergedDMask);
1187 MIB.add((*CI.I).getOperand(I));
1190 // It shouldn't be possible to get this far if the two instructions
1191 // don't have a single memoperand, because MachineInstr::mayAlias()
1192 // will return true if this is the case.
1193 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1195 const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1196 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1198 MachineInstr *New = MIB.addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1200 unsigned SubRegIdx0, SubRegIdx1;
1201 std::tie(SubRegIdx0, SubRegIdx1) = getSubRegIdxs(CI, Paired);
1203 // Copy to the old destination registers.
1204 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1205 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1206 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1208 BuildMI(*MBB, Paired.I, DL, CopyDesc)
1209 .add(*Dest0) // Copy to same destination including flags and sub reg.
1210 .addReg(DestReg, 0, SubRegIdx0);
1211 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1213 .addReg(DestReg, RegState::Kill, SubRegIdx1);
1215 moveInstsAfter(Copy1, InstsToMove);
1217 CI.I->eraseFromParent();
1218 Paired.I->eraseFromParent();
1222 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair(
1223 CombineInfo &CI, CombineInfo &Paired,
1224 const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1225 MachineBasicBlock *MBB = CI.I->getParent();
1226 DebugLoc DL = CI.I->getDebugLoc();
1227 const unsigned Opcode = getNewOpcode(CI, Paired);
1229 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1231 Register DestReg = MRI->createVirtualRegister(SuperRC);
1232 unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);
1234 // It shouldn't be possible to get this far if the two instructions
1235 // don't have a single memoperand, because MachineInstr::mayAlias()
1236 // will return true if this is the case.
1237 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1239 const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1240 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1243 BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg)
1244 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
1245 .addImm(MergedOffset) // offset
1246 .addImm(CI.GLC) // glc
1247 .addImm(CI.DLC) // dlc
1248 .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1250 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1251 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1252 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1254 // Copy to the old destination registers.
1255 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1256 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
1257 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::sdst);
1259 BuildMI(*MBB, Paired.I, DL, CopyDesc)
1260 .add(*Dest0) // Copy to same destination including flags and sub reg.
1261 .addReg(DestReg, 0, SubRegIdx0);
1262 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1264 .addReg(DestReg, RegState::Kill, SubRegIdx1);
1266 moveInstsAfter(Copy1, InstsToMove);
1268 CI.I->eraseFromParent();
1269 Paired.I->eraseFromParent();
1273 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
1274 CombineInfo &CI, CombineInfo &Paired,
1275 const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1276 MachineBasicBlock *MBB = CI.I->getParent();
1277 DebugLoc DL = CI.I->getDebugLoc();
1279 const unsigned Opcode = getNewOpcode(CI, Paired);
1281 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1283 // Create the new, merged destination register.
1284 Register DestReg = MRI->createVirtualRegister(SuperRC);
1285 unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);
1287 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
1289 AddressRegs Regs = getRegs(Opcode, *TII);
1292 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1294 // It shouldn't be possible to get this far if the two instructions
1295 // don't have a single memoperand, because MachineInstr::mayAlias()
1296 // will return true if this is the case.
1297 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1299 const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1300 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1303 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1304 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
1305 .addImm(MergedOffset) // offset
1306 .addImm(CI.GLC) // glc
1307 .addImm(CI.SLC) // slc
1309 .addImm(CI.DLC) // dlc
1311 .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1313 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1314 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1315 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1317 // Copy to the old destination registers.
1318 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1319 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1320 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1322 BuildMI(*MBB, Paired.I, DL, CopyDesc)
1323 .add(*Dest0) // Copy to same destination including flags and sub reg.
1324 .addReg(DestReg, 0, SubRegIdx0);
1325 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1327 .addReg(DestReg, RegState::Kill, SubRegIdx1);
1329 moveInstsAfter(Copy1, InstsToMove);
1331 CI.I->eraseFromParent();
1332 Paired.I->eraseFromParent();
1336 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeTBufferLoadPair(
1337 CombineInfo &CI, CombineInfo &Paired,
1338 const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1339 MachineBasicBlock *MBB = CI.I->getParent();
1340 DebugLoc DL = CI.I->getDebugLoc();
1342 const unsigned Opcode = getNewOpcode(CI, Paired);
1344 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1346 // Create the new, merged destination register.
1347 Register DestReg = MRI->createVirtualRegister(SuperRC);
1348 unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);
1350 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
1352 AddressRegs Regs = getRegs(Opcode, *TII);
1355 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1357 unsigned JoinedFormat =
1358 getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, *STM);
1360 // It shouldn't be possible to get this far if the two instructions
1361 // don't have a single memoperand, because MachineInstr::mayAlias()
1362 // will return true if this is the case.
1363 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1365 const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1366 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1369 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1370 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
1371 .addImm(MergedOffset) // offset
1372 .addImm(JoinedFormat) // format
1373 .addImm(CI.GLC) // glc
1374 .addImm(CI.SLC) // slc
1376 .addImm(CI.DLC) // dlc
1379 combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1381 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1382 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1383 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1385 // Copy to the old destination registers.
1386 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1387 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1388 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1390 BuildMI(*MBB, Paired.I, DL, CopyDesc)
1391 .add(*Dest0) // Copy to same destination including flags and sub reg.
1392 .addReg(DestReg, 0, SubRegIdx0);
1393 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1395 .addReg(DestReg, RegState::Kill, SubRegIdx1);
1397 moveInstsAfter(Copy1, InstsToMove);
1399 CI.I->eraseFromParent();
1400 Paired.I->eraseFromParent();
1404 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeTBufferStorePair(
1405 CombineInfo &CI, CombineInfo &Paired,
1406 const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1407 MachineBasicBlock *MBB = CI.I->getParent();
1408 DebugLoc DL = CI.I->getDebugLoc();
1410 const unsigned Opcode = getNewOpcode(CI, Paired);
1412 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1413 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1414 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1416 // Copy to the new source register.
1417 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1418 Register SrcReg = MRI->createVirtualRegister(SuperRC);
1420 const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1421 const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1423 BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
1427 .addImm(SubRegIdx1);
1429 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode))
1430 .addReg(SrcReg, RegState::Kill);
1432 AddressRegs Regs = getRegs(Opcode, *TII);
1435 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1437 unsigned JoinedFormat =
1438 getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, *STM);
1440 // It shouldn't be possible to get this far if the two instructions
1441 // don't have a single memoperand, because MachineInstr::mayAlias()
1442 // will return true if this is the case.
1443 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1445 const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1446 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1449 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1450 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
1451 .addImm(std::min(CI.Offset, Paired.Offset)) // offset
1452 .addImm(JoinedFormat) // format
1453 .addImm(CI.GLC) // glc
1454 .addImm(CI.SLC) // slc
1456 .addImm(CI.DLC) // dlc
1459 combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1461 moveInstsAfter(MIB, InstsToMove);
1463 CI.I->eraseFromParent();
1464 Paired.I->eraseFromParent();
1468 unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI,
1469 const CombineInfo &Paired) {
1470 const unsigned Width = CI.Width + Paired.Width;
1472 switch (CI.InstClass) {
1474 assert(CI.InstClass == BUFFER_LOAD || CI.InstClass == BUFFER_STORE);
1475 // FIXME: Handle d16 correctly
1476 return AMDGPU::getMUBUFOpcode(AMDGPU::getMUBUFBaseOpcode(CI.I->getOpcode()),
1480 return AMDGPU::getMTBUFOpcode(AMDGPU::getMTBUFBaseOpcode(CI.I->getOpcode()),
1484 llvm_unreachable("Unknown instruction class");
1485 case S_BUFFER_LOAD_IMM:
1490 return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
1492 return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
1495 assert("No overlaps" && (countPopulation(CI.DMask | Paired.DMask) == Width));
1496 return AMDGPU::getMaskedMIMGOp(CI.I->getOpcode(), Width);
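// Compute the subregister indices that select CI's and Paired's parts of the
// merged register; the order depends on which instruction covers the lower
// offset (or the lower dmask bits for MIMG). For example, CI.Width = 1 and
// Paired.Width = 2 in offset order yields sub0 for CI and sub1_sub2 for Paired.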
1500 std::pair<unsigned, unsigned>
1501 SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI, const CombineInfo &Paired) {
1503 if (CI.Width == 0 || Paired.Width == 0 || CI.Width + Paired.Width > 4)
1504 return std::make_pair(0, 0);
1507 if (CI.InstClass == MIMG) {
1508 assert((countPopulation(CI.DMask | Paired.DMask) == CI.Width + Paired.Width) &&
1510 ReverseOrder = CI.DMask > Paired.DMask;
1512 ReverseOrder = CI.Offset > Paired.Offset;
1514 static const unsigned Idxs[4][4] = {
1515 {AMDGPU::sub0, AMDGPU::sub0_sub1, AMDGPU::sub0_sub1_sub2, AMDGPU::sub0_sub1_sub2_sub3},
1516 {AMDGPU::sub1, AMDGPU::sub1_sub2, AMDGPU::sub1_sub2_sub3, 0},
1517 {AMDGPU::sub2, AMDGPU::sub2_sub3, 0, 0},
1518 {AMDGPU::sub3, 0, 0, 0},
1523 assert(CI.Width >= 1 && CI.Width <= 3);
1524 assert(Paired.Width >= 1 && Paired.Width <= 3);
1527 Idx1 = Idxs[0][Paired.Width - 1];
1528 Idx0 = Idxs[Paired.Width][CI.Width - 1];
1530 Idx0 = Idxs[0][CI.Width - 1];
1531 Idx1 = Idxs[CI.Width][Paired.Width - 1];
1534 return std::make_pair(Idx0, Idx1);
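// Select the register class for the merged value: SGPR classes for
// S_BUFFER_LOAD_IMM, VGPR classes otherwise, sized by the combined width in
// dwords.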
1537 const TargetRegisterClass *
1538 SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI,
1539 const CombineInfo &Paired) {
1540 if (CI.InstClass == S_BUFFER_LOAD_IMM) {
1541 switch (CI.Width + Paired.Width) {
1545 return &AMDGPU::SReg_64_XEXECRegClass;
1547 return &AMDGPU::SGPR_128RegClass;
1549 return &AMDGPU::SGPR_256RegClass;
1551 return &AMDGPU::SGPR_512RegClass;
1554 switch (CI.Width + Paired.Width) {
1558 return &AMDGPU::VReg_64RegClass;
1560 return &AMDGPU::VReg_96RegClass;
1562 return &AMDGPU::VReg_128RegClass;
1567 MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
1568 CombineInfo &CI, CombineInfo &Paired,
1569 const SmallVectorImpl<MachineInstr *> &InstsToMove) {
1570 MachineBasicBlock *MBB = CI.I->getParent();
1571 DebugLoc DL = CI.I->getDebugLoc();
1573 const unsigned Opcode = getNewOpcode(CI, Paired);
1575 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1576 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1577 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1579 // Copy to the new source register.
1580 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1581 Register SrcReg = MRI->createVirtualRegister(SuperRC);
1583 const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1584 const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1586 BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
1590 .addImm(SubRegIdx1);
1592 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode))
1593 .addReg(SrcReg, RegState::Kill);
1595 AddressRegs Regs = getRegs(Opcode, *TII);
1598 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1601 // It shouldn't be possible to get this far if the two instructions
1602 // don't have a single memoperand, because MachineInstr::mayAlias()
1603 // will return true if this is the case.
1604 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1606 const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1607 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1610 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1611 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
1612 .addImm(std::min(CI.Offset, Paired.Offset)) // offset
1613 .addImm(CI.GLC) // glc
1614 .addImm(CI.SLC) // slc
1616 .addImm(CI.DLC) // dlc
1618 .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1620 moveInstsAfter(MIB, InstsToMove);
1622 CI.I->eraseFromParent();
1623 Paired.I->eraseFromParent();
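// Return Val as an immediate operand if it is a legal inline constant;
// otherwise materialize it into a fresh SGPR with S_MOV_B32 and return that
// register.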
1628 SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) const {
1629 APInt V(32, Val, true);
1630 if (TII->isInlineConstant(V))
1631 return MachineOperand::CreateImm(Val);
1633 Register Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1635 BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
1636 TII->get(AMDGPU::S_MOV_B32), Reg)
1639 LLVM_DEBUG(dbgs() << " "; Mov->dump());
1640 return MachineOperand::CreateReg(Reg, false);
1643 // Compute base address using Addr and return the final register.
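// The 64-bit add is expanded as a V_ADD_I32_e64 producing a carry, a
// V_ADDC_U32_e64 consuming it, and a REG_SEQUENCE recombining the two halves.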
1644 Register SILoadStoreOptimizer::computeBase(MachineInstr &MI,
1645 const MemAddress &Addr) const {
1646 MachineBasicBlock *MBB = MI.getParent();
1647 MachineBasicBlock::iterator MBBI = MI.getIterator();
1648 DebugLoc DL = MI.getDebugLoc();
1650 assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
1651 Addr.Base.LoSubReg) &&
1652 "Expected 32-bit Base-Register-Low!!");
1654 assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
1655 Addr.Base.HiSubReg) &&
1656 "Expected 32-bit Base-Register-Hi!!");
1658 LLVM_DEBUG(dbgs() << " Re-Computed Anchor-Base:\n");
1659 MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
1660 MachineOperand OffsetHi =
1661 createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);
1663 const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
1664 Register CarryReg = MRI->createVirtualRegister(CarryRC);
1665 Register DeadCarryReg = MRI->createVirtualRegister(CarryRC);
1667 Register DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1668 Register DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1669 MachineInstr *LoHalf =
1670 BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
1671 .addReg(CarryReg, RegState::Define)
1672 .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
1674 .addImm(0); // clamp bit
1676 LLVM_DEBUG(dbgs() << " "; LoHalf->dump(););
1678 MachineInstr *HiHalf =
1679 BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
1680 .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
1681 .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
1683 .addReg(CarryReg, RegState::Kill)
1684 .addImm(0); // clamp bit
1686 LLVM_DEBUG(dbgs() << " "; HiHalf->dump(););
1688 Register FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
1689 MachineInstr *FullBase =
1690 BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
1692 .addImm(AMDGPU::sub0)
1694 .addImm(AMDGPU::sub1);
1696 LLVM_DEBUG(dbgs() << " "; FullBase->dump(); dbgs() << "\n";);
1701 // Update base and offset with the NewBase and NewOffset in MI.
1702 void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
1704 int32_t NewOffset) const {
1705 auto Base = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
1706 Base->setReg(NewBase);
1707 Base->setIsKill(false);
1708 TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
1712 SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const {
1719 MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
1720 if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
1721 !Def->getOperand(1).isImm())
1724 return Def->getOperand(1).getImm();
1727 // Analyzes Base and extracts:
1728 // - 32-bit base registers and their subregisters
1729 // - a 64-bit constant offset
1730 // Expecting base computation as:
1731 // %OFFSET0:sgpr_32 = S_MOV_B32 8000
1732 // %LO:vgpr_32, %c:sreg_64_xexec =
1733 // V_ADD_I32_e64 %BASE_LO:vgpr_32, %103:sgpr_32,
1734 // %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
1736 // REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
1737 void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
1738 MemAddress &Addr) const {
1742 MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
1743 if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
1744 || Def->getNumOperands() != 5)
1747 MachineOperand BaseLo = Def->getOperand(1);
1748 MachineOperand BaseHi = Def->getOperand(3);
1749 if (!BaseLo.isReg() || !BaseHi.isReg())
1752 MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
1753 MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());
1755 if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
1756 !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
1759 const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
1760 const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);
1762 auto Offset0P = extractConstOffset(*Src0);
1766 if (!(Offset0P = extractConstOffset(*Src1)))
1771 Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
1772 Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);
1775 std::swap(Src0, Src1);
1780 uint64_t Offset1 = Src1->getImm();
1783 Addr.Base.LoReg = BaseLo.getReg();
1784 Addr.Base.HiReg = BaseHi.getReg();
1785 Addr.Base.LoSubReg = BaseLo.getSubReg();
1786 Addr.Base.HiSubReg = BaseHi.getSubReg();
1787 Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
1790 bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
1792 MemInfoMap &Visited,
1793 SmallPtrSet<MachineInstr *, 4> &AnchorList) const {
1795 if (!(MI.mayLoad() ^ MI.mayStore()))
1798 // TODO: Support flat and scratch.
1799 if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0)
1802 if (MI.mayLoad() && TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != NULL)
1805 if (AnchorList.count(&MI))
1808 LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());
1810 if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
1811 LLVM_DEBUG(dbgs() << " Const-offset is already promoted.\n";);
1815 // Step1: Find the base registers and a 64-bit constant offset.
1816 MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
1818 if (Visited.find(&MI) == Visited.end()) {
1819 processBaseWithConstOffset(Base, MAddr);
1820 Visited[&MI] = MAddr;
1822 MAddr = Visited[&MI];
1824 if (MAddr.Offset == 0) {
1825 LLVM_DEBUG(dbgs() << " Failed to extract constant-offset or there are no"
1826 " constant offsets that can be promoted.\n";);
1830 LLVM_DEBUG(dbgs() << " BASE: {" << MAddr.Base.HiReg << ", "
1831 << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);
1833 // Step2: Traverse through MI's basic block and find an anchor (one that has
1834 // the same base registers) with the highest 13-bit distance from MI's offset.
1835 // E.g. (64bit loads)
1837 // addr1 = &a + 4096; load1 = load(addr1, 0)
1838 // addr2 = &a + 6144; load2 = load(addr2, 0)
1839 // addr3 = &a + 8192; load3 = load(addr3, 0)
1840 // addr4 = &a + 10240; load4 = load(addr4, 0)
1841 // addr5 = &a + 12288; load5 = load(addr5, 0)
1843 // Starting from the first load, the optimization will try to find a new base
1844 // from which (&a + 4096) has 13 bit distance. Both &a + 6144 and &a + 8192
1845 // has 13bit distance from &a + 4096. The heuristic considers &a + 8192
1846 // as the new-base(anchor) because of the maximum distance which can
1847 // accomodate more intermediate bases presumeably.
1849 // Step3: move (&a + 8192) above load1. Compute and promote offsets from
1850 // (&a + 8192) for load1, load2, load4.
1852 // load1 = load(addr, -4096)
1853 // load2 = load(addr, -2048)
1854 // load3 = load(addr, 0)
1855 // load4 = load(addr, 2048)
1856 // addr5 = &a + 12288; load5 = load(addr5, 0)
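  // For the example above, the promoted offsets are simply
  //   new_offset = old_total_offset - anchor_offset,
  // i.e. load1 gets -4096, load2 gets -2048, load3 gets 0 and load4 gets 2048.
  // load5 would need 12288 - 8192 = 4096, which falls outside the signed
  // 13-bit range assumed in this example (checked below via
  // TLI->isLegalGlobalAddressingMode), so its address computation is left
  // untouched.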
  MachineInstr *AnchorInst = nullptr;
  MemAddress AnchorAddr;
  uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
  SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  ++MBBI;
  const SITargetLowering *TLI =
    static_cast<const SITargetLowering *>(STM->getTargetLowering());
  for ( ; MBBI != E; ++MBBI) {
    MachineInstr &MINext = *MBBI;
    // TODO: Support finding an anchor (with the same base) from store addresses
    // or any other load addresses where the opcodes are different.
    if (MINext.getOpcode() != MI.getOpcode() ||
        TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
      continue;

    const MachineOperand &BaseNext =
      *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
    MemAddress MAddrNext;
    if (Visited.find(&MINext) == Visited.end()) {
      processBaseWithConstOffset(BaseNext, MAddrNext);
      Visited[&MINext] = MAddrNext;
    } else
      MAddrNext = Visited[&MINext];

    if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
        MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
        MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
        MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
      continue;

    InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));

    int64_t Dist = MAddr.Offset - MAddrNext.Offset;
    TargetLoweringBase::AddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = Dist;
    if (TLI->isLegalGlobalAddressingMode(AM) &&
        (uint32_t)std::abs(Dist) > MaxDist) {
      MaxDist = std::abs(Dist);
      AnchorAddr = MAddrNext;
      AnchorInst = &MINext;
    }
  }
  if (AnchorInst) {
    LLVM_DEBUG(dbgs() << "  Anchor-Inst(with max-distance from Offset): ";
               AnchorInst->dump());
    LLVM_DEBUG(dbgs() << "  Anchor-Offset from BASE: "
               << AnchorAddr.Offset << "\n\n");

    // Instead of moving up, just re-compute the anchor instruction's base
    // address.
    Register Base = computeBase(MI, AnchorAddr);

    updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
    LLVM_DEBUG(dbgs() << "  After promotion: "; MI.dump(););

    for (auto P : InstsWCommonBase) {
      TargetLoweringBase::AddrMode AM;
      AM.HasBaseReg = true;
      AM.BaseOffs = P.second - AnchorAddr.Offset;

      if (TLI->isLegalGlobalAddressingMode(AM)) {
        LLVM_DEBUG(dbgs() << "  Promote Offset(" << P.second;
                   dbgs() << ")"; P.first->dump());
        updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
        LLVM_DEBUG(dbgs() << "    After promotion: "; P.first->dump());
      }
    }
    AnchorList.insert(AnchorInst);
    return true;
  }

  return false;
}
void SILoadStoreOptimizer::addInstToMergeableList(const CombineInfo &CI,
                 std::list<std::list<CombineInfo> > &MergeableInsts) const {
  for (std::list<CombineInfo> &AddrList : MergeableInsts) {
    if (AddrList.front().InstClass == CI.InstClass &&
        AddrList.front().hasSameBaseAddress(*CI.I)) {
      AddrList.emplace_back(CI);
      return;
    }
  }

  // Base address not found, so add a new list.
  MergeableInsts.emplace_back(1, CI);
}
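// MergeableInsts therefore ends up holding one inner list per (instruction
// class, base address) pair; collectMergeableInsts() below sorts each inner
// list by offset, and optimizeBlock() later merges its entries pairwise.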
std::pair<MachineBasicBlock::iterator, bool>
SILoadStoreOptimizer::collectMergeableInsts(
    MachineBasicBlock::iterator Begin, MachineBasicBlock::iterator End,
    MemInfoMap &Visited, SmallPtrSet<MachineInstr *, 4> &AnchorList,
    std::list<std::list<CombineInfo>> &MergeableInsts) const {
  bool Modified = false;

  // Sort potentially mergeable instructions into lists, one list per base
  // address.
  unsigned Order = 0;
  MachineBasicBlock::iterator BlockI = Begin;
  for (; BlockI != End; ++BlockI) {
    MachineInstr &MI = *BlockI;

    // We run this before checking if an address is mergeable, because it can
    // produce better code even if the instructions aren't mergeable.
    if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
      Modified = true;

    // Don't combine if volatile. We also won't be able to merge across this, so
    // break the search. We can look after this barrier for separate merges.
    if (MI.hasOrderedMemoryRef()) {
      LLVM_DEBUG(dbgs() << "Breaking search on memory fence: " << MI);

      // Search will resume after this instruction in a separate merge list.
      ++BlockI;
      break;
    }

    const InstClassEnum InstClass = getInstClass(MI.getOpcode(), *TII);
    if (InstClass == UNKNOWN)
      continue;

    CombineInfo CI;
    CI.setMI(MI, *TII, *STM);
    CI.Order = Order++;

    if (!CI.hasMergeableAddress(*MRI))
      continue;

    LLVM_DEBUG(dbgs() << "Mergeable: " << MI);

    addInstToMergeableList(CI, MergeableInsts);
  }

  // At this point we have lists of mergeable instructions.
  //
  // Part 2: Sort each list by offset so that candidates with adjacent offsets
  // end up next to each other, and discard lists with fewer than two entries,
  // since a merge needs at least two instructions. The actual pairing is done
  // later, in optimizeInstsWithSameBaseAddr().
  for (std::list<std::list<CombineInfo>>::iterator I = MergeableInsts.begin(),
       E = MergeableInsts.end(); I != E;) {

    std::list<CombineInfo> &MergeList = *I;
    if (MergeList.size() <= 1) {
      // This means we have found only one instruction with a given address
      // that can be merged, and we need at least 2 instructions to do a merge,
      // so this list can be discarded.
      I = MergeableInsts.erase(I);
      continue;
    }

    // Sort the lists by offsets; this way mergeable instructions will be
    // adjacent to each other in the list, which will make it easier to find
    // matches.
    MergeList.sort(
        [] (const CombineInfo &A, const CombineInfo &B) {
          return A.Offset < B.Offset;
        });
    ++I;
  }

  return std::make_pair(BlockI, Modified);
}
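// The returned iterator points just past the last instruction scanned (i.e.
// after a fence that stopped the search, or at End), so the caller can resume
// collecting a fresh set of merge lists from there. The bool only reflects
// changes made by promoteConstantOffsetToImm(); merging itself is reported by
// optimizeBlock().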
// Scan through looking for adjacent memory operations (DS, SMEM, buffer, and
// image) with constant offsets from the same base register. We rely on the
// scheduler to do the hard work of clustering nearby loads, and assume these
// are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(
                       std::list<std::list<CombineInfo> > &MergeableInsts) {
  bool Modified = false;

  for (std::list<std::list<CombineInfo>>::iterator I = MergeableInsts.begin(),
       E = MergeableInsts.end(); I != E;) {
    std::list<CombineInfo> &MergeList = *I;

    bool OptimizeListAgain = false;
    if (!optimizeInstsWithSameBaseAddr(MergeList, OptimizeListAgain)) {
      // We weren't able to make any changes, so delete the list so we don't
      // process the same instructions the next time we try to optimize this
      // block.
      I = MergeableInsts.erase(I);
      continue;
    }

    Modified = true;

    // We made changes, but also determined that there were no more optimization
    // opportunities, so we don't need to reprocess the list.
    if (!OptimizeListAgain) {
      I = MergeableInsts.erase(I);
      continue;
    }
    OptimizeAgain = true;
  }
  return Modified;
}
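// OptimizeAgain is a member flag read by the do/while loop in
// runOnMachineFunction(); together with OptimizeListAgain it allows a list to
// be revisited, e.g. so that two dword x2 results produced by one round of
// merging can themselves be combined into a single wider access, as permitted
// by the Width checks in optimizeInstsWithSameBaseAddr().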
bool
SILoadStoreOptimizer::optimizeInstsWithSameBaseAddr(
                                          std::list<CombineInfo> &MergeList,
                                          bool &OptimizeListAgain) {
  if (MergeList.empty())
    return false;

  bool Modified = false;

  for (auto I = MergeList.begin(), Next = std::next(I); Next != MergeList.end();
       Next = std::next(I)) {

    auto First = I;
    auto Second = Next;

    if ((*First).Order > (*Second).Order)
      std::swap(First, Second);
    CombineInfo &CI = *First;
    CombineInfo &Paired = *Second;

    SmallVector<MachineInstr *, 8> InstsToMove;
    if (!checkAndPrepareMerge(CI, Paired, InstsToMove)) {
      ++I;
      continue;
    }

    Modified = true;

    LLVM_DEBUG(dbgs() << "Merging: " << *CI.I << "   with: " << *Paired.I);

    switch (CI.InstClass) {
    default:
      llvm_unreachable("unknown InstClass");
      break;
    case DS_READ: {
      MachineBasicBlock::iterator NewMI =
          mergeRead2Pair(CI, Paired, InstsToMove);
      CI.setMI(NewMI, *TII, *STM);
      break;
    }
    case DS_WRITE: {
      MachineBasicBlock::iterator NewMI =
          mergeWrite2Pair(CI, Paired, InstsToMove);
      CI.setMI(NewMI, *TII, *STM);
      break;
    }
    case S_BUFFER_LOAD_IMM: {
      MachineBasicBlock::iterator NewMI =
          mergeSBufferLoadImmPair(CI, Paired, InstsToMove);
      CI.setMI(NewMI, *TII, *STM);
      OptimizeListAgain |= (CI.Width + Paired.Width) < 16;
      break;
    }
    case BUFFER_LOAD: {
      MachineBasicBlock::iterator NewMI =
          mergeBufferLoadPair(CI, Paired, InstsToMove);
      CI.setMI(NewMI, *TII, *STM);
      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
      break;
    }
    case BUFFER_STORE: {
      MachineBasicBlock::iterator NewMI =
          mergeBufferStorePair(CI, Paired, InstsToMove);
      CI.setMI(NewMI, *TII, *STM);
      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
      break;
    }
    case MIMG: {
      MachineBasicBlock::iterator NewMI =
          mergeImagePair(CI, Paired, InstsToMove);
      CI.setMI(NewMI, *TII, *STM);
      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
      break;
    }
    case TBUFFER_LOAD: {
      MachineBasicBlock::iterator NewMI =
          mergeTBufferLoadPair(CI, Paired, InstsToMove);
      CI.setMI(NewMI, *TII, *STM);
      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
      break;
    }
    case TBUFFER_STORE: {
      MachineBasicBlock::iterator NewMI =
          mergeTBufferStorePair(CI, Paired, InstsToMove);
      CI.setMI(NewMI, *TII, *STM);
      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
      break;
    }
    }
    CI.Order = Paired.Order;
    if (I == Second)
      I = Next;

    MergeList.erase(Second);
  }

  return Modified;
}
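// The Width thresholds in the switch above reflect the widest encodable result
// for each class: s_buffer_load goes up to dwordx16 (hence "< 16"), while the
// buffer, image and tbuffer forms top out at four dwords/components (hence
// "< 4"). ds_read2/ds_write2 pairs have no wider form, so those cases never
// request another round.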
bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<GCNSubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  // Contains the list of instructions for which constant offsets are being
  // promoted to the IMM. This is tracked for an entire block at a time.
  SmallPtrSet<MachineInstr *, 4> AnchorList;
  MemInfoMap Visited;

  for (MachineBasicBlock &MBB : MF) {
    MachineBasicBlock::iterator SectionEnd;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
         I = SectionEnd) {
      bool CollectModified;
      std::list<std::list<CombineInfo>> MergeableInsts;

      // First pass: Collect a list of all instructions we know how to merge in
      // a subset of the block.
      std::tie(SectionEnd, CollectModified) =
          collectMergeableInsts(I, E, Visited, AnchorList, MergeableInsts);

      Modified |= CollectModified;

      do {
        OptimizeAgain = false;
        Modified |= optimizeBlock(MergeableInsts);
      } while (OptimizeAgain);