//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//  ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// This pass also tries to promote constant offsets to the immediate by
// adjusting the base. It tries to use a base from the nearby instructions that
// allows it to have a 13-bit constant offset and then promotes the 13-bit
// offset to the immediate. E.g.
//  s_movk_i32 s0, 0x1800
//  v_add_co_u32_e32 v0, vcc, s0, v2
//  v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
//
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[0:1], off
// =>
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[5:6], off offset:2048
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, and recomputes live intervals and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads have offsets that are too large to fit in the 8-bit
//   offsets, but are close enough together to fit in 8 bits once the base
//   pointer is adjusted, we can add to the base pointer and use the new
//   reduced offsets.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"
namespace {

enum InstClassEnum {
  UNKNOWN,
  DS_READ,
  DS_WRITE,
  S_BUFFER_LOAD_IMM,
  BUFFER_LOAD_OFFEN = AMDGPU::BUFFER_LOAD_DWORD_OFFEN,
  BUFFER_LOAD_OFFSET = AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
  BUFFER_STORE_OFFEN = AMDGPU::BUFFER_STORE_DWORD_OFFEN,
  BUFFER_STORE_OFFSET = AMDGPU::BUFFER_STORE_DWORD_OFFSET,
  BUFFER_LOAD_OFFEN_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact,
  BUFFER_LOAD_OFFSET_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact,
  BUFFER_STORE_OFFEN_exact = AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact,
  BUFFER_STORE_OFFSET_exact = AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact,
};

enum RegisterEnum {
  SBASE = 0x1,
  SRSRC = 0x2,
  SOFFSET = 0x4,
  VADDR = 0x8,
  ADDR = 0x10,
};
class SILoadStoreOptimizer : public MachineFunctionPass {
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned Width0;
    unsigned Width1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool DLC0;
    bool DLC1;
    bool UseST64;
    SmallVector<MachineInstr *, 8> InstsToMove;
  };

  struct BaseRegisters {
    unsigned LoReg = 0;
    unsigned HiReg = 0;

    unsigned LoSubReg = 0;
    unsigned HiSubReg = 0;
  };

  struct MemAddress {
    BaseRegisters Base;
    int64_t Offset = 0;
  };

  using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;
private:
  const GCNSubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  bool OptimizeAgain;

  static bool offsetsCanBeCombined(CombineInfo &CI);
  static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI);
  static unsigned getNewOpcode(const CombineInfo &CI);
  static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI);
  const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI);
  unsigned getOpcodeWidth(const MachineInstr &MI);
  InstClassEnum getInstClass(unsigned Opc);
  unsigned getRegs(unsigned Opc);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

  void updateBaseAndOffset(MachineInstr &I, unsigned NewBase,
                           int32_t NewOffset);
  unsigned computeBase(MachineInstr &MI, const MemAddress &Addr);
  MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI);
  Optional<int32_t> extractConstOffset(const MachineOperand &Op);
  void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr);
  /// Promotes constant offset to the immediate by adjusting the base. It
  /// tries to use a base from the nearby instructions that allows it to have
  /// a 13-bit constant offset which gets promoted to the immediate.
  bool promoteConstantOffsetToImm(MachineInstr &CI,
                                  MemInfoMap &Visited,
                                  SmallPtrSet<MachineInstr *, 4> &Promoted);
public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.
INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
                    false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr *> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}
static void addDefsUsesToList(const MachineInstr &MI,
                              DenseSet<unsigned> &RegDefs,
                              DenseSet<unsigned> &PhysRegUses) {
  for (const MachineOperand &Op : MI.operands()) {
    if (Op.isReg()) {
      if (Op.isDef())
        RegDefs.insert(Op.getReg());
      else if (Op.readsReg() &&
               TargetRegisterInfo::isPhysicalRegister(Op.getReg()))
        PhysRegUses.insert(Op.getReg());
    }
  }
}
static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
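  // For example, two loads (RAR) can always be exchanged, while a store
  // followed by a load of a possibly aliasing address (RAW) cannot.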
  return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
}
// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
                                  DenseSet<unsigned> &PhysRegUses,
                                  SmallVectorImpl<MachineInstr *> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.
    //
    // Similarly, if there is a def which is read by an instruction that is to
    // be moved for merging, then we need to move the def-instruction as well.
    // This can only happen for physical registers such as M0; virtual
    // registers are in SSA form.
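    // For instance, an S_MOV_B32 defining M0 between the two candidate
    // memory instructions must travel with any moved instruction that
    // reads M0.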
    if (Use.isReg() &&
        ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && TargetRegisterInfo::isPhysicalRegister(Use.getReg()) &&
          PhysRegUses.count(Use.getReg())))) {
      Insts.push_back(&MI);
      addDefsUsesToList(MI, RegDefs, PhysRegUses);
      return true;
    }
  }

  return false;
}
static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                                    ArrayRef<MachineInstr *> InstsToMove,
                                    AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
      return false;
  }
  return true;
}
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
    return (EltOffset0 + CI.Width0 == EltOffset1 ||
            EltOffset1 + CI.Width1 == EltOffset0) &&
           CI.GLC0 == CI.GLC1 && CI.DLC0 == CI.DLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
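  // For example, ds_read_b32 at byte offsets 1024 and 2048 has element
  // offsets 256 and 512, which overflow the 8-bit fields, but both are
  // multiples of 64 and encode as 4 and 8 in the ST64 forms.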
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
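  // For example, element offsets 1024 and 1025 (EltSize 4, byte offsets 4096
  // and 4100) do not fit in 8 bits, but with BaseOff = 4096 folded into the
  // base register the new element offsets become 0 and 1.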
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}
bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
                                     const CombineInfo &CI) {
  const unsigned Width = (CI.Width0 + CI.Width1);
  switch (CI.InstClass) {
  default:
    return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return false;
    case 2:
    case 4:
      return true;
    }
  }
}
unsigned SILoadStoreOptimizer::getOpcodeWidth(const MachineInstr &MI) {
  const unsigned Opc = MI.getOpcode();

  if (TII->isMUBUF(MI)) {
    return AMDGPU::getMUBUFDwords(Opc);
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
    return 1;
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
    return 2;
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return 4;
  }
}
InstClassEnum SILoadStoreOptimizer::getInstClass(unsigned Opc) {
  if (TII->isMUBUF(Opc)) {
    const int baseOpcode = AMDGPU::getMUBUFBaseOpcode(Opc);

    // If we couldn't identify the opcode, bail out.
    if (baseOpcode == -1) {
      return UNKNOWN;
    }

    switch (baseOpcode) {
    default:
      return UNKNOWN;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
      return BUFFER_LOAD_OFFEN;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
      return BUFFER_LOAD_OFFSET;
    case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
      return BUFFER_STORE_OFFEN;
    case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
      return BUFFER_STORE_OFFSET;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
      return BUFFER_LOAD_OFFEN_exact;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
      return BUFFER_LOAD_OFFSET_exact;
    case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
      return BUFFER_STORE_OFFEN_exact;
    case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
      return BUFFER_STORE_OFFSET_exact;
    }
  }

  switch (Opc) {
  default:
    return UNKNOWN;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return S_BUFFER_LOAD_IMM;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
    return DS_READ;
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return DS_WRITE;
  }
}
unsigned SILoadStoreOptimizer::getRegs(unsigned Opc) {
  if (TII->isMUBUF(Opc)) {
    unsigned result = 0;

    if (AMDGPU::getMUBUFHasVAddr(Opc)) {
      result |= VADDR;
    }

    if (AMDGPU::getMUBUFHasSrsrc(Opc)) {
      result |= SRSRC;
    }

    if (AMDGPU::getMUBUFHasSoffset(Opc)) {
      result |= SOFFSET;
    }

    return result;
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return SBASE;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return ADDR;
  }
}
bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  const unsigned Opc = CI.I->getOpcode();
  const InstClassEnum InstClass = getInstClass(Opc);

  if (InstClass == UNKNOWN) {
    return false;
  }

  const unsigned Regs = getRegs(Opc);

  unsigned AddrOpName[5] = {0};
  int AddrIdx[5];
  const MachineOperand *AddrReg[5];
  unsigned NumAddresses = 0;

  if (Regs & ADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
  }

  if (Regs & SBASE) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
  }

  if (Regs & SRSRC) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
  }

  if (Regs & SOFFSET) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
  }

  if (Regs & VADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> RegDefsToMove;
  DenseSet<unsigned> PhysRegUsesToMove;
  addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);

  for (; MBBI != E; ++MBBI) {
    const bool IsDS = (InstClass == DS_READ) || (InstClass == DS_WRITE);

    if ((getInstClass(MBBI->getOpcode()) != InstClass) ||
        (IsDS && (MBBI->getOpcode() != Opc))) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    potentially be merged with.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction; any uses of I will need to
      // be moved down as well.
      addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                            CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                              CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }

    if (Match) {
      int OffsetIdx =
          AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Width0 = getOpcodeWidth(*CI.I);
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Width1 = getOpcodeWidth(*MBBI);
      CI.Paired = MBBI;

      if ((CI.InstClass == DS_READ) || (CI.InstClass == DS_WRITE)) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
        CI.DLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::dlc)->getImm();
        CI.DLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::dlc)->getImm();
      }

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (widthsFit(*STM, CI) && offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
      break;
  }

  return false;
}
unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
                        : AMDGPU::DS_READ2ST64_B64_gfx9;
}
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC =
      (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Read2 =
      BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}
unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
                        : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                          : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
                        : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .addOperand(), and not .addReg() with these. We want to be
  // sure we preserve the subregister index and any register flags set on them.
  const MachineOperand *AddrReg =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1 =
      TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Write2 =
      BuildMI(*MBB, CI.Paired, DL, Write2Desc)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .add(*Data0)                               // data0
          .add(*Data1)                               // data1
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.DLC0)      // dlc
      .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  // Copy to the new source register.
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  const unsigned Regs = getRegs(Opcode);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .addImm(CI.DLC0)      // dlc
      .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}
unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI) {
  const unsigned Width = CI.Width0 + CI.Width1;
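  // For example, merging two s_buffer_load_dword (Width0 = Width1 = 1) gives
  // Width == 2 and selects S_BUFFER_LOAD_DWORDX2_IMM below.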
  switch (CI.InstClass) {
  default:
    return AMDGPU::getMUBUFOpcode(CI.InstClass, Width);
  case UNKNOWN:
    llvm_unreachable("Unknown instruction class");
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return 0;
    case 2:
      return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
    case 4:
      return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
    }
  }
}
std::pair<unsigned, unsigned>
SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI) {
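  // The two indexes say where each original result lives in the merged
  // register. For example, a dword load paired below a dwordx2 (Width0 = 1,
  // Width1 = 2, Offset0 < Offset1) is copied out through sub0 and sub1_sub2.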
  if (CI.Offset0 > CI.Offset1) {
    switch (CI.Width0) {
    default:
      return std::make_pair(0, 0);
    case 1:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1, AMDGPU::sub0);
      case 2:
        return std::make_pair(AMDGPU::sub2, AMDGPU::sub0_sub1);
      case 3:
        return std::make_pair(AMDGPU::sub3, AMDGPU::sub0_sub1_sub2);
      }
    case 2:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1_sub2, AMDGPU::sub0);
      case 2:
        return std::make_pair(AMDGPU::sub2_sub3, AMDGPU::sub0_sub1);
      }
    case 3:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1_sub2_sub3, AMDGPU::sub0);
      }
    }
  } else {
    switch (CI.Width0) {
    default:
      return std::make_pair(0, 0);
    case 1:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1);
      case 2:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2);
      case 3:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2_sub3);
      }
    case 2:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2);
      case 2:
        return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2_sub3);
      }
    case 3:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0_sub1_sub2, AMDGPU::sub3);
      }
    }
  }
}
const TargetRegisterClass *
SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI) {
  if (CI.InstClass == S_BUFFER_LOAD_IMM) {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::SReg_64_XEXECRegClass;
    case 4:
      return &AMDGPU::SReg_128RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16:
      return &AMDGPU::SReg_512RegClass;
    }
  } else {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    }
  }
}
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  const unsigned Regs = getRegs(Opcode);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)                          // glc
      .addImm(CI.SLC0)                          // slc
      .addImm(0)                                // tfe
      .addImm(CI.DLC0)                          // dlc
      .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}
MachineOperand
SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) {
  APInt V(32, Val, true);
  if (TII->isInlineConstant(V))
    return MachineOperand::CreateImm(Val);

  unsigned Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  MachineInstr *Mov =
      BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
              TII->get(AMDGPU::S_MOV_B32), Reg)
          .addImm(Val);
  LLVM_DEBUG(dbgs() << "    "; Mov->dump());
  return MachineOperand::CreateReg(Reg, false);
}
// Compute base address using Addr and return the final register.
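// For an anchor offset of e.g. 0x1000 this emits the equivalent of:
//   %lo, %carry     = V_ADD_I32_e64 %base_lo, 0x1000
//   %hi, %deadcarry = V_ADDC_U32_e64 %base_hi, 0, %carry
//   %newbase        = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1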
unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
                                           const MemAddress &Addr) {
  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  DebugLoc DL = MI.getDebugLoc();

  assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
          Addr.Base.LoSubReg) &&
         "Expected 32-bit Base-Register-Low!!");

  assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
          Addr.Base.HiSubReg) &&
         "Expected 32-bit Base-Register-Hi!!");

  LLVM_DEBUG(dbgs() << "  Re-Computed Anchor-Base:\n");
  MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
  MachineOperand OffsetHi =
      createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);

  const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  unsigned CarryReg = MRI->createVirtualRegister(CarryRC);
  unsigned DeadCarryReg = MRI->createVirtualRegister(CarryRC);

  unsigned DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *LoHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
          .addReg(CarryReg, RegState::Define)
          .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
          .add(OffsetLo)
          .addImm(0); // clamp bit
  LLVM_DEBUG(dbgs() << "    "; LoHalf->dump(););

  MachineInstr *HiHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
          .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
          .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
          .add(OffsetHi)
          .addReg(CarryReg, RegState::Kill)
          .addImm(0); // clamp bit
  LLVM_DEBUG(dbgs() << "    "; HiHalf->dump(););

  unsigned FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
  MachineInstr *FullBase =
      BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
          .addReg(DestSub0)
          .addImm(AMDGPU::sub0)
          .addReg(DestSub1)
          .addImm(AMDGPU::sub1);
  LLVM_DEBUG(dbgs() << "    "; FullBase->dump(); dbgs() << "\n";);

  return FullDestReg;
}
// Update base and offset with the NewBase and NewOffset in MI.
void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
                                               unsigned NewBase,
                                               int32_t NewOffset) {
  TII->getNamedOperand(MI, AMDGPU::OpName::vaddr)->setReg(NewBase);
  TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
}
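// Return the immediate value of Op, either directly or from the unique
// S_MOV_B32 that defines Op's register; otherwise return None.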
Optional<int32_t>
SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) {
  if (Op.isImm())
    return Op.getImm();

  if (!Op.isReg())
    return None;

  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
      !Def->getOperand(1).isImm())
    return None;

  return Def->getOperand(1).getImm();
}
// Analyze Base and extract:
//  - 32-bit base registers and subregisters
//  - a 64-bit constant offset
// Expecting base computation as:
//   %OFFSET0:sgpr_32 = S_MOV_B32 8000
//   %LO:vgpr_32, %c:sreg_64_xexec =
//       V_ADD_I32_e64 %BASE_LO:vgpr_32, %OFFSET0:sgpr_32,
//   %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
//   %Base:vreg_64 =
//       REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
                                                      MemAddress &Addr) {
  if (!Base.isReg())
    return;

  MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
      || Def->getNumOperands() != 5)
    return;

  MachineOperand BaseLo = Def->getOperand(1);
  MachineOperand BaseHi = Def->getOperand(3);
  if (!BaseLo.isReg() || !BaseHi.isReg())
    return;

  MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
  MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());

  if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
      !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
    return;

  const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
  const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);

  auto Offset0P = extractConstOffset(*Src0);
  if (Offset0P)
    BaseLo = *Src1;
  else {
    if (!(Offset0P = extractConstOffset(*Src1)))
      return;
    BaseLo = *Src0;
  }

  Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
  Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);

  if (Src0->isImm())
    std::swap(Src0, Src1);

  if (!Src1->isImm())
    return;

  uint64_t Offset1 = Src1->getImm();
  BaseHi = *Src0;

  Addr.Base.LoReg = BaseLo.getReg();
  Addr.Base.HiReg = BaseHi.getReg();
  Addr.Base.LoSubReg = BaseLo.getSubReg();
  Addr.Base.HiSubReg = BaseHi.getSubReg();
  Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
}
bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
    MachineInstr &MI,
    MemInfoMap &Visited,
    SmallPtrSet<MachineInstr *, 4> &AnchorList) {

  // TODO: Support flat and scratch.
  if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0 ||
      TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != NULL)
    return false;

  // TODO: Support Store.
  if (!MI.mayLoad())
    return false;

  if (AnchorList.count(&MI))
    return false;

  LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());

  if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
    LLVM_DEBUG(dbgs() << "  Const-offset is already promoted.\n";);
    return false;
  }

  // Step1: Find the base-registers and a 64-bit constant offset.
  MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  MemAddress MAddr;
  if (Visited.find(&MI) == Visited.end()) {
    processBaseWithConstOffset(Base, MAddr);
    Visited[&MI] = MAddr;
  } else
    MAddr = Visited[&MI];

  if (MAddr.Offset == 0) {
    LLVM_DEBUG(dbgs() << "  Failed to extract constant-offset or there are no"
                         " constant offsets that can be promoted.\n";);
    return false;
  }

  LLVM_DEBUG(dbgs() << "  BASE: {" << MAddr.Base.HiReg << ", "
                    << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);

  // Step2: Traverse through MI's basic block and find an anchor (that has the
  // same base-registers) with the highest 13-bit distance from MI's offset.
  // E.g. (64-bit loads)
  // addr1 = &a + 4096;   load1 = load(addr1,  0)
  // addr2 = &a + 6144;   load2 = load(addr2,  0)
  // addr3 = &a + 8192;   load3 = load(addr3,  0)
  // addr4 = &a + 10240;  load4 = load(addr4,  0)
  // addr5 = &a + 12288;  load5 = load(addr5,  0)
  //
  // Starting from the first load, the optimization will try to find a new base
  // from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and &a + 8192
  // have a 13-bit distance from &a + 4096. The heuristic considers &a + 8192
  // as the new base (anchor) because of the maximum distance, which can
  // presumably accommodate more intermediate bases.
  //
  // Step3: move (&a + 8192) above load1. Compute and promote offsets from
  // (&a + 8192) for load1, load2, load4.
  //
  // load1 = load(addr, -4096)
  // load2 = load(addr, -2048)
  // load3 = load(addr, 0)
  // load4 = load(addr, 2048)
  // addr5 = &a + 12288; load5 = load(addr5, 0)

  MachineInstr *AnchorInst = nullptr;
  MemAddress AnchorAddr;
  uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
  SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  ++MBBI;
  const SITargetLowering *TLI =
      static_cast<const SITargetLowering *>(STM->getTargetLowering());

  for ( ; MBBI != E; ++MBBI) {
    MachineInstr &MINext = *MBBI;
    // TODO: Support finding an anchor (with same base) from store addresses or
    // any other load addresses where the opcodes are different.
    if (MINext.getOpcode() != MI.getOpcode() ||
        TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
      continue;

    const MachineOperand &BaseNext =
        *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
    MemAddress MAddrNext;
    if (Visited.find(&MINext) == Visited.end()) {
      processBaseWithConstOffset(BaseNext, MAddrNext);
      Visited[&MINext] = MAddrNext;
    } else
      MAddrNext = Visited[&MINext];

    if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
        MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
        MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
        MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
      continue;

    InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));

    int64_t Dist = MAddr.Offset - MAddrNext.Offset;
    TargetLoweringBase::AddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = Dist;
    if (TLI->isLegalGlobalAddressingMode(AM) &&
        (uint32_t)std::abs(Dist) > MaxDist) {
      MaxDist = std::abs(Dist);

      AnchorAddr = MAddrNext;
      AnchorInst = &MINext;
    }
  }

  if (AnchorInst) {
    LLVM_DEBUG(dbgs() << "  Anchor-Inst(with max-distance from Offset): ";
               AnchorInst->dump());
    LLVM_DEBUG(dbgs() << "  Anchor-Offset from BASE: "
               <<  AnchorAddr.Offset << "\n\n");

    // Instead of moving up, just re-compute anchor-instruction's base address.
    unsigned Base = computeBase(MI, AnchorAddr);

    updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
    LLVM_DEBUG(dbgs() << "  After promotion: "; MI.dump(););

    for (auto P : InstsWCommonBase) {
      TargetLoweringBase::AddrMode AM;
      AM.HasBaseReg = true;
      AM.BaseOffs = P.second - AnchorAddr.Offset;

      if (TLI->isLegalGlobalAddressingMode(AM)) {
        LLVM_DEBUG(dbgs() << "  Promote Offset(" << P.second;
                   dbgs() << ")"; P.first->dump());
        updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
        LLVM_DEBUG(dbgs() << "     After promotion: "; P.first->dump());
      }
    }
    AnchorList.insert(AnchorInst);
    return true;
  }

  return false;
}
// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  MemInfoMap Visited;
  // Contains the list of instructions for which constant offsets are being
  // promoted to the IMM.
  SmallPtrSet<MachineInstr *, 4> AnchorList;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
      Modified = true;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    const unsigned Opc = MI.getOpcode();

    CombineInfo CI;
    CI.I = I;
    CI.InstClass = getInstClass(Opc);

    switch (CI.InstClass) {
    default:
      break;
    case DS_READ:
      CI.EltSize =
          (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
                                                                          : 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }
      continue;
    case DS_WRITE:
      CI.EltSize =
          (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
                                                                            : 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }
      continue;
    case S_BUFFER_LOAD_IMM:
      CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeSBufferLoadImmPair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 16;
      } else {
        ++I;
      }
      continue;
    case BUFFER_LOAD_OFFEN:
    case BUFFER_LOAD_OFFSET:
    case BUFFER_LOAD_OFFEN_exact:
    case BUFFER_LOAD_OFFSET_exact:
      CI.EltSize = 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferLoadPair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
      } else {
        ++I;
      }
      continue;
    case BUFFER_STORE_OFFEN:
    case BUFFER_STORE_OFFSET:
    case BUFFER_STORE_OFFEN_exact:
    case BUFFER_STORE_OFFSET_exact:
      CI.EltSize = 4;
      if (findMatchingInst(CI)) {
        Modified = true;
        I = mergeBufferStorePair(CI);
        OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
      } else {
        ++I;
      }
      continue;
    }

    ++I;
  }

  return Modified;
}
bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<GCNSubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
    do {
      OptimizeAgain = false;
      Modified |= optimizeBlock(MBB);
    } while (OptimizeAgain);
  }

  return Modified;
}