1 //===-- RISCVExpandAtomicPseudoInsts.cpp - Expand atomic pseudo instrs. ---===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file contains a pass that expands atomic pseudo instructions into
10 // target instructions. This pass should be run at the last possible moment,
11 // avoiding the possibility for other passes to break the requirements for
12 // forward progress in the LR/SC block.
14 //===----------------------------------------------------------------------===//
17 #include "RISCVInstrInfo.h"
18 #include "RISCVTargetMachine.h"
20 #include "llvm/CodeGen/LivePhysRegs.h"
21 #include "llvm/CodeGen/MachineFunctionPass.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #define RISCV_EXPAND_ATOMIC_PSEUDO_NAME \
27 "RISCV atomic pseudo instruction expansion pass"
31 class RISCVExpandAtomicPseudo : public MachineFunctionPass {
33 const RISCVInstrInfo *TII;
36 RISCVExpandAtomicPseudo() : MachineFunctionPass(ID) {
37 initializeRISCVExpandAtomicPseudoPass(*PassRegistry::getPassRegistry());
40 bool runOnMachineFunction(MachineFunction &MF) override;
42 StringRef getPassName() const override {
43 return RISCV_EXPAND_ATOMIC_PSEUDO_NAME;
47 bool expandMBB(MachineBasicBlock &MBB);
48 bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
49 MachineBasicBlock::iterator &NextMBBI);
50 bool expandAtomicBinOp(MachineBasicBlock &MBB,
51 MachineBasicBlock::iterator MBBI, AtomicRMWInst::BinOp,
52 bool IsMasked, int Width,
53 MachineBasicBlock::iterator &NextMBBI);
54 bool expandAtomicMinMaxOp(MachineBasicBlock &MBB,
55 MachineBasicBlock::iterator MBBI,
56 AtomicRMWInst::BinOp, bool IsMasked, int Width,
57 MachineBasicBlock::iterator &NextMBBI);
58 bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
59 MachineBasicBlock::iterator MBBI, bool IsMasked,
60 int Width, MachineBasicBlock::iterator &NextMBBI);
// Static pass ID; its address uniquely identifies this pass to the registry.
63 char RISCVExpandAtomicPseudo::ID = 0;
65 bool RISCVExpandAtomicPseudo::runOnMachineFunction(MachineFunction &MF) {
66 TII = static_cast<const RISCVInstrInfo *>(MF.getSubtarget().getInstrInfo());
67 bool Modified = false;
69 Modified |= expandMBB(MBB);
73 bool RISCVExpandAtomicPseudo::expandMBB(MachineBasicBlock &MBB) {
74 bool Modified = false;
76 MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
78 MachineBasicBlock::iterator NMBBI = std::next(MBBI);
79 Modified |= expandMI(MBB, MBBI, NMBBI);
86 bool RISCVExpandAtomicPseudo::expandMI(MachineBasicBlock &MBB,
87 MachineBasicBlock::iterator MBBI,
88 MachineBasicBlock::iterator &NextMBBI) {
89 // RISCVInstrInfo::getInstSizeInBytes hard-codes the number of expanded
90 // instructions for each pseudo, and must be updated when adding new pseudos
91 // or changing existing ones.
92 switch (MBBI->getOpcode()) {
93 case RISCV::PseudoAtomicLoadNand32:
94 return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
96 case RISCV::PseudoAtomicLoadNand64:
97 return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
99 case RISCV::PseudoMaskedAtomicSwap32:
100 return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
102 case RISCV::PseudoMaskedAtomicLoadAdd32:
103 return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
104 case RISCV::PseudoMaskedAtomicLoadSub32:
105 return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
106 case RISCV::PseudoMaskedAtomicLoadNand32:
107 return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32,
109 case RISCV::PseudoMaskedAtomicLoadMax32:
110 return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32,
112 case RISCV::PseudoMaskedAtomicLoadMin32:
113 return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32,
115 case RISCV::PseudoMaskedAtomicLoadUMax32:
116 return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
118 case RISCV::PseudoMaskedAtomicLoadUMin32:
119 return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32,
121 case RISCV::PseudoCmpXchg32:
122 return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
123 case RISCV::PseudoCmpXchg64:
124 return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
125 case RISCV::PseudoMaskedCmpXchg32:
126 return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
132 static unsigned getLRForRMW32(AtomicOrdering Ordering) {
135 llvm_unreachable("Unexpected AtomicOrdering");
136 case AtomicOrdering::Monotonic:
138 case AtomicOrdering::Acquire:
139 return RISCV::LR_W_AQ;
140 case AtomicOrdering::Release:
142 case AtomicOrdering::AcquireRelease:
143 return RISCV::LR_W_AQ;
144 case AtomicOrdering::SequentiallyConsistent:
145 return RISCV::LR_W_AQ_RL;
149 static unsigned getSCForRMW32(AtomicOrdering Ordering) {
152 llvm_unreachable("Unexpected AtomicOrdering");
153 case AtomicOrdering::Monotonic:
155 case AtomicOrdering::Acquire:
157 case AtomicOrdering::Release:
158 return RISCV::SC_W_RL;
159 case AtomicOrdering::AcquireRelease:
160 return RISCV::SC_W_RL;
161 case AtomicOrdering::SequentiallyConsistent:
162 return RISCV::SC_W_AQ_RL;
166 static unsigned getLRForRMW64(AtomicOrdering Ordering) {
169 llvm_unreachable("Unexpected AtomicOrdering");
170 case AtomicOrdering::Monotonic:
172 case AtomicOrdering::Acquire:
173 return RISCV::LR_D_AQ;
174 case AtomicOrdering::Release:
176 case AtomicOrdering::AcquireRelease:
177 return RISCV::LR_D_AQ;
178 case AtomicOrdering::SequentiallyConsistent:
179 return RISCV::LR_D_AQ_RL;
183 static unsigned getSCForRMW64(AtomicOrdering Ordering) {
186 llvm_unreachable("Unexpected AtomicOrdering");
187 case AtomicOrdering::Monotonic:
189 case AtomicOrdering::Acquire:
191 case AtomicOrdering::Release:
192 return RISCV::SC_D_RL;
193 case AtomicOrdering::AcquireRelease:
194 return RISCV::SC_D_RL;
195 case AtomicOrdering::SequentiallyConsistent:
196 return RISCV::SC_D_AQ_RL;
200 static unsigned getLRForRMW(AtomicOrdering Ordering, int Width) {
202 return getLRForRMW32(Ordering);
204 return getLRForRMW64(Ordering);
205 llvm_unreachable("Unexpected LR width\n");
208 static unsigned getSCForRMW(AtomicOrdering Ordering, int Width) {
210 return getSCForRMW32(Ordering);
212 return getSCForRMW64(Ordering);
213 llvm_unreachable("Unexpected SC width\n");
216 static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
217 DebugLoc DL, MachineBasicBlock *ThisMBB,
218 MachineBasicBlock *LoopMBB,
219 MachineBasicBlock *DoneMBB,
220 AtomicRMWInst::BinOp BinOp, int Width) {
221 Register DestReg = MI.getOperand(0).getReg();
222 Register ScratchReg = MI.getOperand(1).getReg();
223 Register AddrReg = MI.getOperand(2).getReg();
224 Register IncrReg = MI.getOperand(3).getReg();
225 AtomicOrdering Ordering =
226 static_cast<AtomicOrdering>(MI.getOperand(4).getImm());
229 // lr.[w|d] dest, (addr)
230 // binop scratch, dest, val
231 // sc.[w|d] scratch, scratch, (addr)
232 // bnez scratch, loop
233 BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
237 llvm_unreachable("Unexpected AtomicRMW BinOp");
238 case AtomicRMWInst::Nand:
239 BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
242 BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
247 BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
250 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
256 static void insertMaskedMerge(const RISCVInstrInfo *TII, DebugLoc DL,
257 MachineBasicBlock *MBB, Register DestReg,
258 Register OldValReg, Register NewValReg,
259 Register MaskReg, Register ScratchReg) {
260 assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
261 assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
262 assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");
264 // We select bits from newval and oldval using:
265 // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
266 // r = oldval ^ ((oldval ^ newval) & masktargetdata);
267 BuildMI(MBB, DL, TII->get(RISCV::XOR), ScratchReg)
270 BuildMI(MBB, DL, TII->get(RISCV::AND), ScratchReg)
273 BuildMI(MBB, DL, TII->get(RISCV::XOR), DestReg)
278 static void doMaskedAtomicBinOpExpansion(
279 const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
280 MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
281 MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
282 assert(Width == 32 && "Should never need to expand masked 64-bit operations");
283 Register DestReg = MI.getOperand(0).getReg();
284 Register ScratchReg = MI.getOperand(1).getReg();
285 Register AddrReg = MI.getOperand(2).getReg();
286 Register IncrReg = MI.getOperand(3).getReg();
287 Register MaskReg = MI.getOperand(4).getReg();
288 AtomicOrdering Ordering =
289 static_cast<AtomicOrdering>(MI.getOperand(5).getImm());
292 // lr.w destreg, (alignedaddr)
293 // binop scratch, destreg, incr
294 // xor scratch, destreg, scratch
295 // and scratch, scratch, masktargetdata
296 // xor scratch, destreg, scratch
297 // sc.w scratch, scratch, (alignedaddr)
298 // bnez scratch, loop
299 BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
303 llvm_unreachable("Unexpected AtomicRMW BinOp");
304 case AtomicRMWInst::Xchg:
305 BuildMI(LoopMBB, DL, TII->get(RISCV::ADDI), ScratchReg)
309 case AtomicRMWInst::Add:
310 BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
314 case AtomicRMWInst::Sub:
315 BuildMI(LoopMBB, DL, TII->get(RISCV::SUB), ScratchReg)
319 case AtomicRMWInst::Nand:
320 BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
323 BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
329 insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,
332 BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
335 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
341 bool RISCVExpandAtomicPseudo::expandAtomicBinOp(
342 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
343 AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
344 MachineBasicBlock::iterator &NextMBBI) {
345 MachineInstr &MI = *MBBI;
346 DebugLoc DL = MI.getDebugLoc();
348 MachineFunction *MF = MBB.getParent();
349 auto LoopMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
350 auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
353 MF->insert(++MBB.getIterator(), LoopMBB);
354 MF->insert(++LoopMBB->getIterator(), DoneMBB);
356 // Set up successors and transfer remaining instructions to DoneMBB.
357 LoopMBB->addSuccessor(LoopMBB);
358 LoopMBB->addSuccessor(DoneMBB);
359 DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
360 DoneMBB->transferSuccessors(&MBB);
361 MBB.addSuccessor(LoopMBB);
364 doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width);
366 doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
369 NextMBBI = MBB.end();
370 MI.eraseFromParent();
372 LivePhysRegs LiveRegs;
373 computeAndAddLiveIns(LiveRegs, *LoopMBB);
374 computeAndAddLiveIns(LiveRegs, *DoneMBB);
379 static void insertSext(const RISCVInstrInfo *TII, DebugLoc DL,
380 MachineBasicBlock *MBB, Register ValReg,
382 BuildMI(MBB, DL, TII->get(RISCV::SLL), ValReg)
385 BuildMI(MBB, DL, TII->get(RISCV::SRA), ValReg)
390 bool RISCVExpandAtomicPseudo::expandAtomicMinMaxOp(
391 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
392 AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
393 MachineBasicBlock::iterator &NextMBBI) {
394 assert(IsMasked == true &&
395 "Should only need to expand masked atomic max/min");
396 assert(Width == 32 && "Should never need to expand masked 64-bit operations");
398 MachineInstr &MI = *MBBI;
399 DebugLoc DL = MI.getDebugLoc();
400 MachineFunction *MF = MBB.getParent();
401 auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
402 auto LoopIfBodyMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
403 auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
404 auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
407 MF->insert(++MBB.getIterator(), LoopHeadMBB);
408 MF->insert(++LoopHeadMBB->getIterator(), LoopIfBodyMBB);
409 MF->insert(++LoopIfBodyMBB->getIterator(), LoopTailMBB);
410 MF->insert(++LoopTailMBB->getIterator(), DoneMBB);
412 // Set up successors and transfer remaining instructions to DoneMBB.
413 LoopHeadMBB->addSuccessor(LoopIfBodyMBB);
414 LoopHeadMBB->addSuccessor(LoopTailMBB);
415 LoopIfBodyMBB->addSuccessor(LoopTailMBB);
416 LoopTailMBB->addSuccessor(LoopHeadMBB);
417 LoopTailMBB->addSuccessor(DoneMBB);
418 DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
419 DoneMBB->transferSuccessors(&MBB);
420 MBB.addSuccessor(LoopHeadMBB);
422 Register DestReg = MI.getOperand(0).getReg();
423 Register Scratch1Reg = MI.getOperand(1).getReg();
424 Register Scratch2Reg = MI.getOperand(2).getReg();
425 Register AddrReg = MI.getOperand(3).getReg();
426 Register IncrReg = MI.getOperand(4).getReg();
427 Register MaskReg = MI.getOperand(5).getReg();
428 bool IsSigned = BinOp == AtomicRMWInst::Min || BinOp == AtomicRMWInst::Max;
429 AtomicOrdering Ordering =
430 static_cast<AtomicOrdering>(MI.getOperand(IsSigned ? 7 : 6).getImm());
434 // lr.w destreg, (alignedaddr)
435 // and scratch2, destreg, mask
436 // mv scratch1, destreg
437 // [sext scratch2 if signed min/max]
438 // ifnochangeneeded scratch2, incr, .looptail
439 BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
441 BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)
444 BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)
450 llvm_unreachable("Unexpected AtomicRMW BinOp");
451 case AtomicRMWInst::Max: {
452 insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
453 BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
456 .addMBB(LoopTailMBB);
459 case AtomicRMWInst::Min: {
460 insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
461 BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
464 .addMBB(LoopTailMBB);
467 case AtomicRMWInst::UMax:
468 BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
471 .addMBB(LoopTailMBB);
473 case AtomicRMWInst::UMin:
474 BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
477 .addMBB(LoopTailMBB);
482 // xor scratch1, destreg, incr
483 // and scratch1, scratch1, mask
484 // xor scratch1, destreg, scratch1
485 insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
486 MaskReg, Scratch1Reg);
489 // sc.w scratch1, scratch1, (addr)
490 // bnez scratch1, loop
491 BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), Scratch1Reg)
493 .addReg(Scratch1Reg);
494 BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
497 .addMBB(LoopHeadMBB);
499 NextMBBI = MBB.end();
500 MI.eraseFromParent();
502 LivePhysRegs LiveRegs;
503 computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
504 computeAndAddLiveIns(LiveRegs, *LoopIfBodyMBB);
505 computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
506 computeAndAddLiveIns(LiveRegs, *DoneMBB);
511 bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
512 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
513 int Width, MachineBasicBlock::iterator &NextMBBI) {
514 MachineInstr &MI = *MBBI;
515 DebugLoc DL = MI.getDebugLoc();
516 MachineFunction *MF = MBB.getParent();
517 auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
518 auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
519 auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
522 MF->insert(++MBB.getIterator(), LoopHeadMBB);
523 MF->insert(++LoopHeadMBB->getIterator(), LoopTailMBB);
524 MF->insert(++LoopTailMBB->getIterator(), DoneMBB);
526 // Set up successors and transfer remaining instructions to DoneMBB.
527 LoopHeadMBB->addSuccessor(LoopTailMBB);
528 LoopHeadMBB->addSuccessor(DoneMBB);
529 LoopTailMBB->addSuccessor(DoneMBB);
530 LoopTailMBB->addSuccessor(LoopHeadMBB);
531 DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
532 DoneMBB->transferSuccessors(&MBB);
533 MBB.addSuccessor(LoopHeadMBB);
535 Register DestReg = MI.getOperand(0).getReg();
536 Register ScratchReg = MI.getOperand(1).getReg();
537 Register AddrReg = MI.getOperand(2).getReg();
538 Register CmpValReg = MI.getOperand(3).getReg();
539 Register NewValReg = MI.getOperand(4).getReg();
540 AtomicOrdering Ordering =
541 static_cast<AtomicOrdering>(MI.getOperand(IsMasked ? 6 : 5).getImm());
545 // lr.[w|d] dest, (addr)
546 // bne dest, cmpval, done
547 BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
549 BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
554 // sc.[w|d] scratch, newval, (addr)
555 // bnez scratch, loophead
556 BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
559 BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
562 .addMBB(LoopHeadMBB);
566 // and scratch, dest, mask
567 // bne scratch, cmpval, done
568 Register MaskReg = MI.getOperand(5).getReg();
569 BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
571 BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
574 BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
580 // xor scratch, dest, newval
581 // and scratch, scratch, mask
582 // xor scratch, dest, scratch
583 // sc.w scratch, scratch, (adrr)
584 // bnez scratch, loophead
585 insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
586 MaskReg, ScratchReg);
587 BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
590 BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
593 .addMBB(LoopHeadMBB);
596 NextMBBI = MBB.end();
597 MI.eraseFromParent();
599 LivePhysRegs LiveRegs;
600 computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
601 computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
602 computeAndAddLiveIns(LiveRegs, *DoneMBB);
607 } // end of anonymous namespace

// Register the pass with LLVM's pass registry under the command-line name
// "riscv-expand-atomic-pseudo" and the display name defined at the top of
// the file.
609 INITIALIZE_PASS(RISCVExpandAtomicPseudo, "riscv-expand-atomic-pseudo",
610                 RISCV_EXPAND_ATOMIC_PSEUDO_NAME, false, false)
614 FunctionPass *createRISCVExpandAtomicPseudoPass() {
615 return new RISCVExpandAtomicPseudo();
618 } // end of namespace llvm