//===-- SILoadStoreOptimizer.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with close-by immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//  ds_read2_b32 v[0:1], v2 offset0:4 offset1:8
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly run
//   before scheduling. It currently misses stores of constants because loading
//   the constant into the data register is placed between the stores, although
//   this is arguably a scheduling problem.
//
// - Recomputing live intervals seems inefficient. This currently only matches
//   one pair, then recomputes live intervals and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields but are close enough together, we can add to the base
//   pointer and use the new, reduced offsets, as in the sketch below.
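//
//   For example (illustrative only; the constants are made up), a cluster like
//     ds_read_b32 v0, v2 offset:2048
//     ds_read_b32 v1, v2 offset:2052
//   has element offsets (512 and 513) too large for the 8-bit fields, but
//   after adding the smaller offset to the base pointer it could become
//     v3 = v2 + 2048
//     ds_read2_b32 v[0:1], v3 offset0:0 offset1:1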
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    bool UseST64;
    SmallVector<MachineInstr*, 8> InstsToMove;
  };

private:
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingDSInst(CombineInfo &CI);

  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load / Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  // Reinsert the instructions, in their original order, immediately after I.
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsToList(const MachineInstr &MI,
                          SmallVectorImpl<const MachineOperand *> &Defs) {
  for (const MachineOperand &Def : MI.defs()) {
    Defs.push_back(&Def);
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  return (TII->areMemAccessesTriviallyDisjoint(*A, *B, AA) ||
          // RAW or WAR - cannot reorder
          // WAW - cannot reorder
          // RAR - safe to reorder
          !(A->mayStore() || B->mayStore()));
}
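
// A minimal illustration of the classification above (hypothetical MIR):
// two reads such as %a = DS_READ_B32 %ptr, 0 and %b = DS_READ_B32 %ptr, 4
// are RAR and always reorderable, while DS_WRITE_B32 %ptr, %v, 0 followed by
// %a = DS_READ_B32 %ptr, 0 is RAW and may only be reordered when alias
// analysis proves the two accesses disjoint.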

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool
addToListsIfDependent(MachineInstr &MI,
                      SmallVectorImpl<const MachineOperand *> &Defs,
                      SmallVectorImpl<MachineInstr*> &Insts) {
  for (const MachineOperand *Def : Defs) {
    bool ReadDef = MI.readsVirtualRegister(Def->getReg());
    // If ReadDef is true, then there is a use of Def between I
    // and the instruction that I will potentially be merged with. We
    // will need to move this instruction after the merged instructions.
    if (ReadDef) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr*> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }

  return true;
}

bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen
  // or be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }
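
  // Stride-64 worked example (values illustrative): with EltSize = 4, byte
  // offsets 0 and 8192 become element offsets 0 and 2048. Both are multiples
  // of 64, and 2048 / 64 = 32 fits in 8 bits, so the pair above can be
  // encoded as ds_read2st64_b32 with offset0:0 offset1:32.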

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift the base address to decrease the offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }
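
  // Base-shift worked example (values illustrative): byte offsets 1024 and
  // 1032 with EltSize = 4 give element offsets 256 and 258. Neither fits in
  // 8 bits, but their difference (2) does, so with BaseOff = 1024 the
  // rewritten offsets below become 0 and 2 relative to the new base.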

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

bool SILoadStoreOptimizer::findMatchingDSInst(CombineInfo &CI) {
  MachineBasicBlock::iterator E = CI.I->getParent()->end();
  MachineBasicBlock::iterator MBBI = CI.I;
  ++MBBI;

  SmallVector<const MachineOperand *, 8> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects())
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;

      if (MBBI->mayLoadOrStore() &&
          !memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA)) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction; any uses of I will need
      // to be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    int AddrIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                             AMDGPU::OpName::addr);
    const MachineOperand &AddrReg0 = CI.I->getOperand(AddrIdx);
    const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

    // Check for the same base pointer. Be careful of subregisters, which can
    // occur with vectors of pointers.
    if (AddrReg0.getReg() == AddrReg1.getReg() &&
        AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Paired = MBBI;

      // Check that both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }

  return false;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2_B32
                                   : AMDGPU::DS_READ2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2ST64_B32
                            : AMDGPU::DS_READ2ST64_B64;

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
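
  // Illustration (registers hypothetical): a merged b32 pair packs its two
  // results into sub0/sub1 of a 64-bit tuple such as v[0:1]; a b64 pair packs
  // them into sub0_sub1/sub2_sub3 of a 128-bit tuple such as v[0:3].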

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    // Materialize the shifted base: BaseReg = BaseOff + addr.
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
        .addImm(CI.BaseOff)
        .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
        .addReg(BaseReg, BaseRegFlags) // addr
        .addImm(NewOffset0)            // offset0
        .addImm(NewOffset1)            // offset1
        .addImm(0)                     // gds
        .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);
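
  // The merged sequence now looks like this (illustrative MIR):
  //   %dest:vreg_64 = DS_READ2_B32 %addr, offset0, offset1
  //   %Dest0 = COPY %dest.sub0
  //   %Dest1 = COPY %dest.sub1  (last use of %dest)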

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add() and not .addReg() with these operands, so that the
  // subregister index and any register flags set on them are preserved.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2_B32
                                   : AMDGPU::DS_WRITE2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                            : AMDGPU::DS_WRITE2ST64_B64;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    // Materialize the shifted base: BaseReg = BaseOff + addr.
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
        .addImm(CI.BaseOff)
        .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
        .addReg(BaseReg, BaseRegFlags) // addr
        .add(*Data0)                   // data0
        .add(*Data1)                   // data1
        .addImm(NewOffset0)            // offset0
        .addImm(NewOffset1)            // offset1
        .addImm(0)                     // gds
        .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
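
  // e.g. (illustrative): ds_write_b32 a, v0 offset:16 and
  // ds_write_b32 a, v1 offset:20 become
  // ds_write2_b32 a, v0, v1 offset0:4 offset1:5 (element offsets, EltSize 4).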

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  if (!STM.loadStoreOptEnabled())
    return false;

  TII = STM.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}