//===- X86AvoidStoreForwardingBlocks.cpp - Avoid HW Store Forward Block --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// If a load follows a store and reloads data that the store has written to
// memory, Intel microarchitectures can in many cases forward the data
// directly from the store to the load. This "store forwarding" saves cycles
// by enabling the load to obtain the data directly instead of reading it
// from cache or memory.
// A "store forward block" occurs when a store cannot be forwarded to the
// load. The most typical case on the Intel Core microarchitecture is when a
// small store cannot be forwarded to a larger load. The estimated penalty
// for a store forward block is ~13 cycles.
//
// This pass tries to recognize and handle cases where a "store forward
// block" is created by the compiler when lowering memcpy calls to a
// sequence of a load and a store.
//
// The pass currently only handles cases where the memcpy is lowered to
// XMM/YMM registers; it tries to break the memcpy into smaller copies.
// Breaking the memcpy should be possible since there is no atomicity
// guarantee for loads and stores to XMM/YMM.
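//
// For example (an illustrative sketch; the pass actually operates on machine
// IR with virtual registers, and the registers/offsets below are made up):
// given a 4-byte store to [rdi + 4] followed by a 16-byte copy
//   movups xmm0, [rdi]
//   movups [rsi], xmm0
// the copy would be rewritten roughly as
//   mov    eax, [rdi]         ; bytes 0..3, ahead of the blocking store
//   mov    [rsi], eax
//   mov    eax, [rdi + 4]     ; bytes 4..7, the blocked region
//   mov    [rsi + 4], eax
//   mov    rax, [rdi + 8]     ; bytes 8..15, the remaining tail
//   mov    [rsi + 8], rax
//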
// It could be better for performance to solve the problem by loading to
// XMM/YMM and then inserting the partial store before storing back from
// XMM/YMM to memory, but this would be a more conservative optimization
// since it requires proving, for every memory access between the blocking
// store and the load, whether or not it aliases the store before the store
// can be moved, whereas the transformation done here is correct regardless
// of other memory accesses.
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include <map>

using namespace llvm;

#define DEBUG_TYPE "x86-avoid-SFB"

static cl::opt<bool> DisableX86AvoidStoreForwardBlocks(
    "x86-disable-avoid-SFB", cl::Hidden,
    cl::desc("X86: Disable Store Forwarding Blocks fixup."), cl::init(false));

static cl::opt<unsigned> X86AvoidSFBInspectionLimit(
    "x86-sfb-inspection-limit",
    cl::desc("X86: Number of instructions backward to "
             "inspect for store forwarding blocks."),
    cl::init(20), cl::Hidden);

namespace {

using DisplacementSizeMap = std::map<int64_t, unsigned>;

class X86AvoidSFBPass : public MachineFunctionPass {
public:
  static char ID;
  X86AvoidSFBPass() : MachineFunctionPass(ID) {
    initializeX86AvoidSFBPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override {
    return "X86 Avoid Store Forwarding Blocks";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AAResultsWrapperPass>();
  }

private:
  MachineRegisterInfo *MRI;
  const X86InstrInfo *TII;
  const X86RegisterInfo *TRI;
  SmallVector<std::pair<MachineInstr *, MachineInstr *>, 2>
      BlockedLoadsStoresPairs;
  SmallVector<MachineInstr *, 2> ForRemoval;
  AliasAnalysis *AA;

  /// Returns pairs of Load then Store to memory which look
  /// like a memcpy.
  void findPotentiallylBlockedCopies(MachineFunction &MF);
  /// Break the memcpy's load and store into smaller copies
  /// such that each memory load that was blocked by a smaller store
  /// would now be copied separately.
  void breakBlockedCopies(MachineInstr *LoadInst, MachineInstr *StoreInst,
                          const DisplacementSizeMap &BlockingStoresDispSizeMap);
  /// Break a copy of size Size into smaller copies.
  void buildCopies(int Size, MachineInstr *LoadInst, int64_t LdDispImm,
                   MachineInstr *StoreInst, int64_t StDispImm,
                   int64_t LMMOffset, int64_t SMMOffset);

  void buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode, int64_t LoadDisp,
                 MachineInstr *StoreInst, unsigned NStoreOpcode,
                 int64_t StoreDisp, unsigned Size, int64_t LMMOffset,
                 int64_t SMMOffset);

  bool alias(const MachineMemOperand &Op1, const MachineMemOperand &Op2) const;

  unsigned getRegSizeInBytes(MachineInstr *Inst);
};

} // end anonymous namespace

char X86AvoidSFBPass::ID = 0;

INITIALIZE_PASS_BEGIN(X86AvoidSFBPass, DEBUG_TYPE, "Machine code sinking",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(X86AvoidSFBPass, DEBUG_TYPE, "Machine code sinking", false,
                    false)

FunctionPass *llvm::createX86AvoidStoreForwardingBlocks() {
  return new X86AvoidSFBPass();
}

static bool isXMMLoadOpcode(unsigned Opcode) {
  return Opcode == X86::MOVUPSrm || Opcode == X86::MOVAPSrm ||
         Opcode == X86::VMOVUPSrm || Opcode == X86::VMOVAPSrm ||
         Opcode == X86::VMOVUPDrm || Opcode == X86::VMOVAPDrm ||
         Opcode == X86::VMOVDQUrm || Opcode == X86::VMOVDQArm ||
         Opcode == X86::VMOVUPSZ128rm || Opcode == X86::VMOVAPSZ128rm ||
         Opcode == X86::VMOVUPDZ128rm || Opcode == X86::VMOVAPDZ128rm ||
         Opcode == X86::VMOVDQU64Z128rm || Opcode == X86::VMOVDQA64Z128rm ||
         Opcode == X86::VMOVDQU32Z128rm || Opcode == X86::VMOVDQA32Z128rm;
}
static bool isYMMLoadOpcode(unsigned Opcode) {
  return Opcode == X86::VMOVUPSYrm || Opcode == X86::VMOVAPSYrm ||
         Opcode == X86::VMOVUPDYrm || Opcode == X86::VMOVAPDYrm ||
         Opcode == X86::VMOVDQUYrm || Opcode == X86::VMOVDQAYrm ||
         Opcode == X86::VMOVUPSZ256rm || Opcode == X86::VMOVAPSZ256rm ||
         Opcode == X86::VMOVUPDZ256rm || Opcode == X86::VMOVAPDZ256rm ||
         Opcode == X86::VMOVDQU64Z256rm || Opcode == X86::VMOVDQA64Z256rm ||
         Opcode == X86::VMOVDQU32Z256rm || Opcode == X86::VMOVDQA32Z256rm;
}

static bool isPotentialBlockedMemCpyLd(unsigned Opcode) {
  return isXMMLoadOpcode(Opcode) || isYMMLoadOpcode(Opcode);
}

static bool isPotentialBlockedMemCpyPair(int LdOpcode, int StOpcode) {
  switch (LdOpcode) {
  case X86::MOVUPSrm:
  case X86::MOVAPSrm:
    return StOpcode == X86::MOVUPSmr || StOpcode == X86::MOVAPSmr;
  case X86::VMOVUPSrm:
  case X86::VMOVAPSrm:
    return StOpcode == X86::VMOVUPSmr || StOpcode == X86::VMOVAPSmr;
  case X86::VMOVUPDrm:
  case X86::VMOVAPDrm:
    return StOpcode == X86::VMOVUPDmr || StOpcode == X86::VMOVAPDmr;
  case X86::VMOVDQUrm:
  case X86::VMOVDQArm:
    return StOpcode == X86::VMOVDQUmr || StOpcode == X86::VMOVDQAmr;
  case X86::VMOVUPSZ128rm:
  case X86::VMOVAPSZ128rm:
    return StOpcode == X86::VMOVUPSZ128mr || StOpcode == X86::VMOVAPSZ128mr;
  case X86::VMOVUPDZ128rm:
  case X86::VMOVAPDZ128rm:
    return StOpcode == X86::VMOVUPDZ128mr || StOpcode == X86::VMOVAPDZ128mr;
  case X86::VMOVUPSYrm:
  case X86::VMOVAPSYrm:
    return StOpcode == X86::VMOVUPSYmr || StOpcode == X86::VMOVAPSYmr;
  case X86::VMOVUPDYrm:
  case X86::VMOVAPDYrm:
    return StOpcode == X86::VMOVUPDYmr || StOpcode == X86::VMOVAPDYmr;
  case X86::VMOVDQUYrm:
  case X86::VMOVDQAYrm:
    return StOpcode == X86::VMOVDQUYmr || StOpcode == X86::VMOVDQAYmr;
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm:
    return StOpcode == X86::VMOVUPSZ256mr || StOpcode == X86::VMOVAPSZ256mr;
  case X86::VMOVUPDZ256rm:
  case X86::VMOVAPDZ256rm:
    return StOpcode == X86::VMOVUPDZ256mr || StOpcode == X86::VMOVAPDZ256mr;
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVDQA64Z128rm:
    return StOpcode == X86::VMOVDQU64Z128mr || StOpcode == X86::VMOVDQA64Z128mr;
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQA32Z128rm:
    return StOpcode == X86::VMOVDQU32Z128mr || StOpcode == X86::VMOVDQA32Z128mr;
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQA64Z256rm:
    return StOpcode == X86::VMOVDQU64Z256mr || StOpcode == X86::VMOVDQA64Z256mr;
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA32Z256rm:
    return StOpcode == X86::VMOVDQU32Z256mr || StOpcode == X86::VMOVDQA32Z256mr;
  default:
    return false;
  }
}

static bool isPotentialBlockingStoreInst(int Opcode, int LoadOpcode) {
  bool PBlock = false;
  PBlock |= Opcode == X86::MOV64mr || Opcode == X86::MOV64mi32 ||
            Opcode == X86::MOV32mr || Opcode == X86::MOV32mi ||
            Opcode == X86::MOV16mr || Opcode == X86::MOV16mi ||
            Opcode == X86::MOV8mr || Opcode == X86::MOV8mi;
  if (isYMMLoadOpcode(LoadOpcode))
    PBlock |= Opcode == X86::VMOVUPSmr || Opcode == X86::VMOVAPSmr ||
              Opcode == X86::VMOVUPDmr || Opcode == X86::VMOVAPDmr ||
              Opcode == X86::VMOVDQUmr || Opcode == X86::VMOVDQAmr ||
              Opcode == X86::VMOVUPSZ128mr || Opcode == X86::VMOVAPSZ128mr ||
              Opcode == X86::VMOVUPDZ128mr || Opcode == X86::VMOVAPDZ128mr ||
              Opcode == X86::VMOVDQU64Z128mr ||
              Opcode == X86::VMOVDQA64Z128mr ||
              Opcode == X86::VMOVDQU32Z128mr || Opcode == X86::VMOVDQA32Z128mr;
  return PBlock;
}
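
// Chunk sizes (in bytes) used by buildCopies() below when breaking a blocked
// copy into a sequence of smaller load/store pairs.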
static const int MOV128SZ = 16;
static const int MOV64SZ = 8;
static const int MOV32SZ = 4;
static const int MOV16SZ = 2;
static const int MOV8SZ = 1;

static unsigned getYMMtoXMMLoadOpcode(unsigned LoadOpcode) {
  switch (LoadOpcode) {
  case X86::VMOVUPSYrm:
  case X86::VMOVAPSYrm:
    return X86::VMOVUPSrm;
  case X86::VMOVUPDYrm:
  case X86::VMOVAPDYrm:
    return X86::VMOVUPDrm;
  case X86::VMOVDQUYrm:
  case X86::VMOVDQAYrm:
    return X86::VMOVDQUrm;
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm:
    return X86::VMOVUPSZ128rm;
  case X86::VMOVUPDZ256rm:
  case X86::VMOVAPDZ256rm:
    return X86::VMOVUPDZ128rm;
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQA64Z256rm:
    return X86::VMOVDQU64Z128rm;
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA32Z256rm:
    return X86::VMOVDQU32Z128rm;
  default:
    llvm_unreachable("Unexpected Load Instruction Opcode");
  }
  return 0;
}

static unsigned getYMMtoXMMStoreOpcode(unsigned StoreOpcode) {
  switch (StoreOpcode) {
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
    return X86::VMOVUPSmr;
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
    return X86::VMOVUPDmr;
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
    return X86::VMOVDQUmr;
  case X86::VMOVUPSZ256mr:
  case X86::VMOVAPSZ256mr:
    return X86::VMOVUPSZ128mr;
  case X86::VMOVUPDZ256mr:
  case X86::VMOVAPDZ256mr:
    return X86::VMOVUPDZ128mr;
  case X86::VMOVDQU64Z256mr:
  case X86::VMOVDQA64Z256mr:
    return X86::VMOVDQU64Z128mr;
  case X86::VMOVDQU32Z256mr:
  case X86::VMOVDQA32Z256mr:
    return X86::VMOVDQU32Z128mr;
  default:
    llvm_unreachable("Unexpected Store Instruction Opcode");
  }
  return 0;
}
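
// An X86 memory reference is encoded as five consecutive machine operands:
// base register, scale, index register, displacement and segment register
// (see X86::AddrBaseReg .. X86::AddrSegmentReg). The helpers below locate
// these operands within a memory-accessing instruction.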
static int getAddrOffset(MachineInstr *MI) {
  const MCInstrDesc &Descl = MI->getDesc();
  int AddrOffset = X86II::getMemoryOperandNo(Descl.TSFlags);
  assert(AddrOffset != -1 && "Expected Memory Operand");
  AddrOffset += X86II::getOperandBias(Descl);
  return AddrOffset;
}

static MachineOperand &getBaseOperand(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  return MI->getOperand(AddrOffset + X86::AddrBaseReg);
}

static MachineOperand &getDispOperand(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  return MI->getOperand(AddrOffset + X86::AddrDisp);
}

// Relevant addressing modes contain only a base register and an immediate
// displacement, or a frame index and an immediate displacement.
// TODO: Consider expanding to other addressing modes in the future.
static bool isRelevantAddressingMode(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  MachineOperand &Base = getBaseOperand(MI);
  MachineOperand &Disp = getDispOperand(MI);
  MachineOperand &Scale = MI->getOperand(AddrOffset + X86::AddrScaleAmt);
  MachineOperand &Index = MI->getOperand(AddrOffset + X86::AddrIndexReg);
  MachineOperand &Segment = MI->getOperand(AddrOffset + X86::AddrSegmentReg);

  if (!((Base.isReg() && Base.getReg() != X86::NoRegister) || Base.isFI()))
    return false;
  if (!Disp.isImm())
    return false;
  if (Scale.getImm() != 1)
    return false;
  if (!(Index.isReg() && Index.getReg() == X86::NoRegister))
    return false;
  if (!(Segment.isReg() && Segment.getReg() == X86::NoRegister))
    return false;
  return true;
}

// Collect potentially blocking stores.
// Limit the number of instructions backwards we want to inspect
// since the effect of a store forwarding block won't be visible if the store
// and load instructions have enough instructions in between to
// keep the core busy.
static SmallVector<MachineInstr *, 2>
findPotentialBlockers(MachineInstr *LoadInst) {
  SmallVector<MachineInstr *, 2> PotentialBlockers;
  unsigned BlockCount = 0;
  const unsigned InspectionLimit = X86AvoidSFBInspectionLimit;
  for (auto PBInst = std::next(MachineBasicBlock::reverse_iterator(LoadInst)),
            E = LoadInst->getParent()->rend();
       PBInst != E; ++PBInst) {
    BlockCount++;
    if (BlockCount >= InspectionLimit)
      break;
    MachineInstr &MI = *PBInst;
    if (MI.getDesc().isCall())
      return PotentialBlockers;
    PotentialBlockers.push_back(&MI);
  }
  // If we didn't reach the instruction limit, try the predecessor blocks.
  // Ideally we should traverse the predecessor blocks in depth with some
  // coloring algorithm, but for now let's just look at the first order
  // predecessors.
  if (BlockCount < InspectionLimit) {
    MachineBasicBlock *MBB = LoadInst->getParent();
    int LimitLeft = InspectionLimit - BlockCount;
    for (MachineBasicBlock::pred_iterator PB = MBB->pred_begin(),
                                          PE = MBB->pred_end();
         PB != PE; ++PB) {
      MachineBasicBlock *PMBB = *PB;
      int PredCount = 0;
      for (MachineBasicBlock::reverse_iterator PBInst = PMBB->rbegin(),
                                               PME = PMBB->rend();
           PBInst != PME; ++PBInst) {
        PredCount++;
        if (PredCount >= LimitLeft)
          break;
        if (PBInst->getDesc().isCall())
          break;
        PotentialBlockers.push_back(&*PBInst);
      }
    }
  }
  return PotentialBlockers;
}

void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
                                int64_t LoadDisp, MachineInstr *StoreInst,
                                unsigned NStoreOpcode, int64_t StoreDisp,
                                unsigned Size, int64_t LMMOffset,
                                int64_t SMMOffset) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  MachineBasicBlock *MBB = LoadInst->getParent();
  MachineMemOperand *LMMO = *LoadInst->memoperands_begin();
  MachineMemOperand *SMMO = *StoreInst->memoperands_begin();
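
  // Emit a narrower load of Size bytes into a fresh virtual register, reusing
  // the original base operand with the adjusted displacement, and attach a
  // memory operand derived from the original one at offset LMMOffset.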
  unsigned Reg1 = MRI->createVirtualRegister(
      TII->getRegClass(TII->get(NLoadOpcode), 0, TRI, *(MBB->getParent())));
  MachineInstr *NewLoad =
      BuildMI(*MBB, LoadInst, LoadInst->getDebugLoc(), TII->get(NLoadOpcode),
              Reg1)
          .add(LoadBase)
          .addImm(1)
          .addReg(X86::NoRegister)
          .addImm(LoadDisp)
          .addReg(X86::NoRegister)
          .addMemOperand(
              MBB->getParent()->getMachineMemOperand(LMMO, LMMOffset, Size));
  if (LoadBase.isReg())
    getBaseOperand(NewLoad).setIsKill(false);
  LLVM_DEBUG(NewLoad->dump());
  // If the load and store are consecutive, use the loadInst location to
  // reduce register pressure.
  MachineInstr *StInst = StoreInst;
  if (StoreInst->getPrevNode() == LoadInst)
    StInst = LoadInst;
  MachineInstr *NewStore =
      BuildMI(*MBB, StInst, StInst->getDebugLoc(), TII->get(NStoreOpcode))
          .add(StoreBase)
          .addImm(1)
          .addReg(X86::NoRegister)
          .addImm(StoreDisp)
          .addReg(X86::NoRegister)
          .addReg(Reg1)
          .addMemOperand(
              MBB->getParent()->getMachineMemOperand(SMMO, SMMOffset, Size));
  if (StoreBase.isReg())
    getBaseOperand(NewStore).setIsKill(false);
  MachineOperand &StoreSrcVReg = StoreInst->getOperand(X86::AddrNumOperands);
  assert(StoreSrcVReg.isReg() && "Expected virtual register");
  NewStore->getOperand(X86::AddrNumOperands).setIsKill(StoreSrcVReg.isKill());
  LLVM_DEBUG(NewStore->dump());
}

void X86AvoidSFBPass::buildCopies(int Size, MachineInstr *LoadInst,
                                  int64_t LdDispImm, MachineInstr *StoreInst,
                                  int64_t StDispImm, int64_t LMMOffset,
                                  int64_t SMMOffset) {
  int LdDisp = LdDispImm;
  int StDisp = StDispImm;
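  // Greedily peel off the largest chunk that still fits: 16 bytes (via an XMM
  // copy) when the original access was a YMM one, then 8, 4, 2 and finally 1
  // byte. For example, a 10-byte region becomes an 8-byte copy followed by a
  // 2-byte copy.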
  while (Size > 0) {
    if ((Size - MOV128SZ >= 0) && isYMMLoadOpcode(LoadInst->getOpcode())) {
      Size = Size - MOV128SZ;
      buildCopy(LoadInst, getYMMtoXMMLoadOpcode(LoadInst->getOpcode()), LdDisp,
                StoreInst, getYMMtoXMMStoreOpcode(StoreInst->getOpcode()),
                StDisp, MOV128SZ, LMMOffset, SMMOffset);
      LdDisp += MOV128SZ;
      StDisp += MOV128SZ;
      LMMOffset += MOV128SZ;
      SMMOffset += MOV128SZ;
      continue;
    }
    if (Size - MOV64SZ >= 0) {
      Size = Size - MOV64SZ;
      buildCopy(LoadInst, X86::MOV64rm, LdDisp, StoreInst, X86::MOV64mr, StDisp,
                MOV64SZ, LMMOffset, SMMOffset);
      LdDisp += MOV64SZ;
      StDisp += MOV64SZ;
      LMMOffset += MOV64SZ;
      SMMOffset += MOV64SZ;
      continue;
    }
    if (Size - MOV32SZ >= 0) {
      Size = Size - MOV32SZ;
      buildCopy(LoadInst, X86::MOV32rm, LdDisp, StoreInst, X86::MOV32mr, StDisp,
                MOV32SZ, LMMOffset, SMMOffset);
      LdDisp += MOV32SZ;
      StDisp += MOV32SZ;
      LMMOffset += MOV32SZ;
      SMMOffset += MOV32SZ;
      continue;
    }
    if (Size - MOV16SZ >= 0) {
      Size = Size - MOV16SZ;
      buildCopy(LoadInst, X86::MOV16rm, LdDisp, StoreInst, X86::MOV16mr, StDisp,
                MOV16SZ, LMMOffset, SMMOffset);
      LdDisp += MOV16SZ;
      StDisp += MOV16SZ;
      LMMOffset += MOV16SZ;
      SMMOffset += MOV16SZ;
      continue;
    }
    if (Size - MOV8SZ >= 0) {
      Size = Size - MOV8SZ;
      buildCopy(LoadInst, X86::MOV8rm, LdDisp, StoreInst, X86::MOV8mr, StDisp,
                MOV8SZ, LMMOffset, SMMOffset);
      LdDisp += MOV8SZ;
      StDisp += MOV8SZ;
      LMMOffset += MOV8SZ;
      SMMOffset += MOV8SZ;
      continue;
    }
  }
  assert(Size == 0 && "Wrong size division");
}

static void updateKillStatus(MachineInstr *LoadInst, MachineInstr *StoreInst) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  if (LoadBase.isReg()) {
    MachineInstr *LastLoad = LoadInst->getPrevNode();
    // If the original load and store to xmm/ymm were consecutive
    // then the partial copies were also created in
    // a consecutive order to reduce register pressure,
    // and the location of the last load is before the last store.
    if (StoreInst->getPrevNode() == LoadInst)
      LastLoad = LoadInst->getPrevNode()->getPrevNode();
    getBaseOperand(LastLoad).setIsKill(LoadBase.isKill());
  }
  if (StoreBase.isReg()) {
    MachineInstr *StInst = StoreInst;
    if (StoreInst->getPrevNode() == LoadInst)
      StInst = LoadInst;
    getBaseOperand(StInst->getPrevNode()).setIsKill(StoreBase.isKill());
  }
}
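
// Conservatively decide whether two memory operands may reference the same
// memory: if either operand has no IR value attached, assume they alias;
// otherwise widen each query so it spans from the smaller of the two offsets
// to the end of the corresponding access before asking alias analysis.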
bool X86AvoidSFBPass::alias(const MachineMemOperand &Op1,
                            const MachineMemOperand &Op2) const {
  if (!Op1.getValue() || !Op2.getValue())
    return true;

  int64_t MinOffset = std::min(Op1.getOffset(), Op2.getOffset());
  int64_t Overlapa = Op1.getSize() + Op1.getOffset() - MinOffset;
  int64_t Overlapb = Op2.getSize() + Op2.getOffset() - MinOffset;

  AliasResult AAResult =
      AA->alias(MemoryLocation(Op1.getValue(), Overlapa, Op1.getAAInfo()),
                MemoryLocation(Op2.getValue(), Overlapb, Op2.getAAInfo()));
  return AAResult != NoAlias;
}

void X86AvoidSFBPass::findPotentiallylBlockedCopies(MachineFunction &MF) {
  for (auto &MBB : MF)
    for (auto &MI : MBB) {
      if (!isPotentialBlockedMemCpyLd(MI.getOpcode()))
        continue;
      int DefVR = MI.getOperand(0).getReg();
      if (!MRI->hasOneUse(DefVR))
        continue;
      for (auto UI = MRI->use_nodbg_begin(DefVR), UE = MRI->use_nodbg_end();
           UI != UE;) {
        MachineOperand &StoreMO = *UI++;
        MachineInstr &StoreMI = *StoreMO.getParent();
        // Skip cases where the memcpy may overlap.
        if (StoreMI.getParent() == MI.getParent() &&
            isPotentialBlockedMemCpyPair(MI.getOpcode(), StoreMI.getOpcode()) &&
            isRelevantAddressingMode(&MI) &&
            isRelevantAddressingMode(&StoreMI)) {
          assert(MI.hasOneMemOperand() &&
                 "Expected one memory operand for load instruction");
          assert(StoreMI.hasOneMemOperand() &&
                 "Expected one memory operand for store instruction");
          if (!alias(**MI.memoperands_begin(), **StoreMI.memoperands_begin()))
            BlockedLoadsStoresPairs.push_back(std::make_pair(&MI, &StoreMI));
        }
      }
    }
}

unsigned X86AvoidSFBPass::getRegSizeInBytes(MachineInstr *LoadInst) {
  auto TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0, TRI,
                              *LoadInst->getParent()->getParent());
  return TRI->getRegSizeInBits(*TRC) / 8;
}

void X86AvoidSFBPass::breakBlockedCopies(
    MachineInstr *LoadInst, MachineInstr *StoreInst,
    const DisplacementSizeMap &BlockingStoresDispSizeMap) {
  int64_t LdDispImm = getDispOperand(LoadInst).getImm();
  int64_t StDispImm = getDispOperand(StoreInst).getImm();
  int64_t LMMOffset = 0;
  int64_t SMMOffset = 0;

  int64_t LdDisp1 = LdDispImm;
  int64_t LdDisp2 = 0;
  int64_t StDisp1 = StDispImm;
  int64_t StDisp2 = 0;
  unsigned Size1 = 0;
  unsigned Size2 = 0;
  int64_t LdStDelta = StDispImm - LdDispImm;
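
  // Walk the blocking stores in increasing displacement order. For each one,
  // first copy the bytes between the end of the previously handled region and
  // the blocking store (Size1), then copy the bytes the blocking store wrote
  // (Size2). After the loop, copy whatever remains of the original access
  // (Size3).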
  for (auto DispSizePair : BlockingStoresDispSizeMap) {
    LdDisp2 = DispSizePair.first;
    StDisp2 = DispSizePair.first + LdStDelta;
    Size2 = DispSizePair.second;
    // Avoid copying overlapping areas.
    if (LdDisp2 < LdDisp1) {
      int OverlapDelta = LdDisp1 - LdDisp2;
      LdDisp2 += OverlapDelta;
      StDisp2 += OverlapDelta;
      Size2 -= OverlapDelta;
    }
    Size1 = LdDisp2 - LdDisp1;

    // Build a copy for the region up to the current blocking store's
    // displacement.
    buildCopies(Size1, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset,
                SMMOffset);
    // Build a copy for the current blocking store.
    buildCopies(Size2, LoadInst, LdDisp2, StoreInst, StDisp2, LMMOffset + Size1,
                SMMOffset + Size1);
    LdDisp1 = LdDisp2 + Size2;
    StDisp1 = StDisp2 + Size2;
    LMMOffset += Size1 + Size2;
    SMMOffset += Size1 + Size2;
  }
  unsigned Size3 = (LdDispImm + getRegSizeInBytes(LoadInst)) - LdDisp1;
  buildCopies(Size3, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset,
              SMMOffset);
}

static bool hasSameBaseOpValue(MachineInstr *LoadInst,
                               MachineInstr *StoreInst) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  if (LoadBase.isReg() != StoreBase.isReg())
    return false;
  if (LoadBase.isReg())
    return LoadBase.getReg() == StoreBase.getReg();
  return LoadBase.getIndex() == StoreBase.getIndex();
}
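
// A store blocks the load if the bytes it writes lie entirely within the
// bytes the load reads, i.e. its displacement is in
// [LoadDispImm, LoadDispImm + LoadSize - StoreSize]. For example, a 1-byte
// store at displacement 20 blocks a 16-byte load at displacement 16, since
// 16 <= 20 <= 16 + (16 - 1).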
static bool isBlockingStore(int64_t LoadDispImm, unsigned LoadSize,
                            int64_t StoreDispImm, unsigned StoreSize) {
  return ((StoreDispImm >= LoadDispImm) &&
          (StoreDispImm <= LoadDispImm + (LoadSize - StoreSize)));
}

// Keep track of all stores blocking a load.
static void
updateBlockingStoresDispSizeMap(DisplacementSizeMap &BlockingStoresDispSizeMap,
                                int64_t DispImm, unsigned Size) {
  if (BlockingStoresDispSizeMap.count(DispImm)) {
    // Choose the smallest blocking store starting at this displacement.
    if (BlockingStoresDispSizeMap[DispImm] > Size)
      BlockingStoresDispSizeMap[DispImm] = Size;
  } else
    BlockingStoresDispSizeMap[DispImm] = Size;
}

// Remove blocking stores contained in each other.
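// For example, if the map holds (disp 0, size 16) and (disp 4, size 4), the
// 16-byte entry contains the 4-byte one and is dropped, keeping only the
// smaller store (consistent with keeping the smallest store per displacement
// above).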
static void
removeRedundantBlockingStores(DisplacementSizeMap &BlockingStoresDispSizeMap) {
  if (BlockingStoresDispSizeMap.size() <= 1)
    return;

  SmallVector<std::pair<int64_t, unsigned>, 0> DispSizeStack;
  for (auto DispSizePair : BlockingStoresDispSizeMap) {
    int64_t CurrDisp = DispSizePair.first;
    unsigned CurrSize = DispSizePair.second;
    while (DispSizeStack.size()) {
      int64_t PrevDisp = DispSizeStack.back().first;
      unsigned PrevSize = DispSizeStack.back().second;
      if (CurrDisp + CurrSize > PrevDisp + PrevSize)
        break;
      DispSizeStack.pop_back();
    }
    DispSizeStack.push_back(DispSizePair);
  }
  BlockingStoresDispSizeMap.clear();
  for (auto Disp : DispSizeStack)
    BlockingStoresDispSizeMap.insert(Disp);
}

bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;

  if (DisableX86AvoidStoreForwardBlocks || skipFunction(MF.getFunction()) ||
      !MF.getSubtarget<X86Subtarget>().is64Bit())
    return false;

  MRI = &MF.getRegInfo();
  assert(MRI->isSSA() && "Expected MIR to be in SSA form");
  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  LLVM_DEBUG(dbgs() << "Start X86AvoidStoreForwardBlocks\n";);
  // Look for a load then a store to XMM/YMM that looks like a memcpy.
  findPotentiallylBlockedCopies(MF);

  for (auto LoadStoreInstPair : BlockedLoadsStoresPairs) {
    MachineInstr *LoadInst = LoadStoreInstPair.first;
    int64_t LdDispImm = getDispOperand(LoadInst).getImm();
    DisplacementSizeMap BlockingStoresDispSizeMap;

    SmallVector<MachineInstr *, 2> PotentialBlockers =
        findPotentialBlockers(LoadInst);
    for (auto PBInst : PotentialBlockers) {
      if (!isPotentialBlockingStoreInst(PBInst->getOpcode(),
                                        LoadInst->getOpcode()) ||
          !isRelevantAddressingMode(PBInst))
        continue;
      int64_t PBstDispImm = getDispOperand(PBInst).getImm();
      assert(PBInst->hasOneMemOperand() && "Expected One Memory Operand");
      unsigned PBstSize = (*PBInst->memoperands_begin())->getSize();
      // This check doesn't cover all cases, but it will suffice for now.
      // TODO: take branch probability into consideration; if the blocking
      // store is in an unreached block, breaking the memcpy could lose
      // performance in even more cases.
      if (hasSameBaseOpValue(LoadInst, PBInst) &&
          isBlockingStore(LdDispImm, getRegSizeInBytes(LoadInst), PBstDispImm,
                          PBstSize))
        updateBlockingStoresDispSizeMap(BlockingStoresDispSizeMap, PBstDispImm,
                                        PBstSize);
    }

    if (BlockingStoresDispSizeMap.empty())
      continue;

    // We found a store forward block. Break the memcpy's load and store
    // into smaller copies such that each smaller store that was causing
    // a store block would now be copied separately.
    MachineInstr *StoreInst = LoadStoreInstPair.second;
    LLVM_DEBUG(dbgs() << "Blocked load and store instructions: \n");
    LLVM_DEBUG(LoadInst->dump());
    LLVM_DEBUG(StoreInst->dump());
    LLVM_DEBUG(dbgs() << "Replaced with:\n");
    removeRedundantBlockingStores(BlockingStoresDispSizeMap);
    breakBlockedCopies(LoadInst, StoreInst, BlockingStoresDispSizeMap);
    updateKillStatus(LoadInst, StoreInst);
    ForRemoval.push_back(LoadInst);
    ForRemoval.push_back(StoreInst);
    Changed = true;
  }
  for (auto RemovedInst : ForRemoval) {
    RemovedInst->eraseFromParent();
  }
  ForRemoval.clear();
  BlockedLoadsStoresPairs.clear();
  LLVM_DEBUG(dbgs() << "End X86AvoidStoreForwardBlocks\n";);

  return Changed;
}