1 //===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file contains the RISCV implementation of the TargetInstrInfo class.
11 //===----------------------------------------------------------------------===//
13 #include "RISCVInstrInfo.h"
15 #include "RISCVSubtarget.h"
16 #include "RISCVTargetMachine.h"
17 #include "Utils/RISCVMatInt.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/MachineFunctionPass.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/RegisterScavenging.h"
24 #include "llvm/Support/ErrorHandling.h"
25 #include "llvm/Support/TargetRegistry.h"
29 #define GEN_CHECK_COMPRESS_INSTR
30 #include "RISCVGenCompressInstEmitter.inc"
32 #define GET_INSTRINFO_CTOR_DTOR
33 #include "RISCVGenInstrInfo.inc"
// Constructor: registers the call-frame setup/teardown pseudo opcodes with the
// TableGen-generated base class. (The remainder of the initializer list is not
// visible in this view.)
35 RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
36     : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
// If MI is a direct load from a stack slot, set FrameIndex to that slot and
// return the loaded (destination) register; otherwise the function reports no
// match. The switch cases selecting which load opcodes qualify are elided in
// this view.
39 unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
40                                              int &FrameIndex) const {
41   switch (MI.getOpcode()) {
// A qualifying load has operand 1 = frame index and operand 2 = immediate 0
// (i.e. no additional offset from the slot's base address).
56     if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
57         MI.getOperand(2).getImm() == 0) {
58       FrameIndex = MI.getOperand(1).getIndex();
59       return MI.getOperand(0).getReg();
// If MI is a direct store to a stack slot, set FrameIndex to that slot and
// return the stored (source) register; otherwise the function reports no
// match. The switch cases selecting which store opcodes qualify are elided in
// this view.
65 unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
66                                             int &FrameIndex) const {
67   switch (MI.getOpcode()) {
// A qualifying store addresses the slot with a zero immediate offset.
79     if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
80         MI.getOperand(2).getImm() == 0) {
81       FrameIndex = MI.getOperand(1).getIndex();
82       return MI.getOperand(0).getReg();
// Emit a physical register-to-register copy before MBBI.
// GPR copies lower to "ADDI DstReg, SrcReg, 0"; FP copies lower to a
// sign-injection instruction (FSGNJ_S/FSGNJ_D) with both source operands equal
// to SrcReg, which is the canonical RISC-V FP move idiom.
88 void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
89                                  MachineBasicBlock::iterator MBBI,
90                                  const DebugLoc &DL, MCRegister DstReg,
91                                  MCRegister SrcReg, bool KillSrc) const {
92   if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
93     BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
94         .addReg(SrcReg, getKillRegState(KillSrc))
// Select the FP sign-injection opcode by register class width.
101   if (RISCV::FPR32RegClass.contains(DstReg, SrcReg))
102     Opc = RISCV::FSGNJ_S;
103   else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg))
104     Opc = RISCV::FSGNJ_D;
// Any src/dst pairing not covered above is a copy this target cannot do.
106     llvm_unreachable("Impossible reg-to-reg copy");
// FSGNJ rd, rs, rs == move rs to rd (sign of rs injected onto rs is a no-op).
108   BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
109       .addReg(SrcReg, getKillRegState(KillSrc))
110       .addReg(SrcReg, getKillRegState(KillSrc));
// Spill SrcReg to stack slot FI, inserting the store before iterator I.
// For GPRs the opcode is chosen by XLEN: SW on RV32, SD on RV64. The FPR
// opcode assignments (FSW/FSD by convention — elided in this view) follow the
// same pattern for the FP register classes.
113 void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
114                                          MachineBasicBlock::iterator I,
115                                          Register SrcReg, bool IsKill, int FI,
116                                          const TargetRegisterClass *RC,
117                                          const TargetRegisterInfo *TRI) const {
// Use the debug location of the instruction we are inserting before.
120     DL = I->getDebugLoc();
124   if (RISCV::GPRRegClass.hasSubClassEq(RC))
125     Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
126              RISCV::SW : RISCV::SD;
127   else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
129   else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
132     llvm_unreachable("Can't store this register to stack slot");
// Store addresses the slot directly; the frame index is resolved later by
// frame lowering (the immediate offset operand is elided in this view).
134   BuildMI(MBB, I, DL, get(Opcode))
135       .addReg(SrcReg, getKillRegState(IsKill))
// Reload DstReg from stack slot FI, inserting the load before iterator I.
// Mirrors storeRegToStackSlot: GPR opcode chosen by XLEN (LW on RV32, LD on
// RV64); FPR opcode assignments are elided in this view.
140 void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
141                                           MachineBasicBlock::iterator I,
142                                           Register DstReg, int FI,
143                                           const TargetRegisterClass *RC,
144                                           const TargetRegisterInfo *TRI) const {
// Use the debug location of the instruction we are inserting before.
147     DL = I->getDebugLoc();
151   if (RISCV::GPRRegClass.hasSubClassEq(RC))
152     Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
153              RISCV::LW : RISCV::LD;
154   else if (RISCV::FPR32RegClass.hasSubClassEq(RC))
156   else if (RISCV::FPR64RegClass.hasSubClassEq(RC))
159     llvm_unreachable("Can't load this register from stack slot");
// Load from the slot base with zero offset; FI is materialized later.
161   BuildMI(MBB, I, DL, get(Opcode), DstReg).addFrameIndex(FI).addImm(0);
// Materialize the 64-bit immediate Val into DstReg before MBBI, expanding it
// into a sequence of instructions computed by RISCVMatInt::generateInstSeq.
// Intermediate results flow through a fresh virtual GPR; the final instruction
// of the sequence writes DstReg (selection logic partially elided in this
// view).
164 void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
165                             MachineBasicBlock::iterator MBBI,
166                             const DebugLoc &DL, Register DstReg, uint64_t Val,
167                             MachineInstr::MIFlag Flag) const {
168   MachineFunction *MF = MBB.getParent();
169   MachineRegisterInfo &MRI = MF->getRegInfo();
170   bool IsRV64 = MF->getSubtarget<RISCVSubtarget>().is64Bit();
// The sequence starts from X0 (zero register); each step consumes the
// previous step's result register.
171   Register SrcReg = RISCV::X0;
172   Register Result = MRI.createVirtualRegister(&RISCV::GPRRegClass);
// On RV32, only constants representable in 32 bits make sense.
175   if (!IsRV64 && !isInt<32>(Val))
176     report_fatal_error("Should only materialize 32-bit constants for RV32");
178   RISCVMatInt::InstSeq Seq;
179   RISCVMatInt::generateInstSeq(Val, IsRV64, Seq);
// generateInstSeq always produces at least one instruction.
180   assert(Seq.size() > 0);
182   for (RISCVMatInt::Inst &Inst : Seq) {
183     // Write the final result to DstReg if it's the last instruction in the Seq.
184     // Otherwise, write the result to the temp register.
185     if (++Num == Seq.size())
// LUI takes only an immediate; every other opcode in the sequence takes the
// previous result register plus an immediate.
188     if (Inst.Opc == RISCV::LUI) {
189       BuildMI(MBB, MBBI, DL, get(RISCV::LUI), Result)
193       BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
194           .addReg(SrcReg, RegState::Kill)
198     // Only the first instruction has X0 as its source.
203 // The contents of values added to Cond are not examined outside of
204 // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
205 // push BranchOpcode, Reg1, Reg2.
// Decompose a RISC-V conditional branch (BEQ/BNE/... shape: reg, reg, target)
// into the analyzeBranch Target/Cond representation described above.
206 static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
207                             SmallVectorImpl<MachineOperand> &Cond) {
208   // Block ends with fall-through condbranch.
209   assert(LastInst.getDesc().isConditionalBranch() &&
210          "Unknown conditional branch");
// Operand layout: 0 and 1 are the compared registers, 2 is the target MBB.
211   Target = LastInst.getOperand(2).getMBB();
212   Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
213   Cond.push_back(LastInst.getOperand(0));
214   Cond.push_back(LastInst.getOperand(1));
// Return the branch opcode with the inverted condition (used by
// reverseBranchCondition). The per-opcode mapping cases are elided in this
// view; unrecognized opcodes are fatal.
217 static unsigned getOppositeBranchOpcode(int Opc) {
220     llvm_unreachable("Unrecognized conditional branch");
// Implements the TargetInstrInfo::analyzeBranch contract: classify the
// terminators of MBB into TBB/FBB/Cond, optionally (AllowModify) deleting dead
// terminators after the first unconditional/indirect branch. Returns the
// "cannot analyze" result for indirect branches and for blocks with more than
// two terminators (return statements elided in this view).
236 bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
237                                    MachineBasicBlock *&TBB,
238                                    MachineBasicBlock *&FBB,
239                                    SmallVectorImpl<MachineOperand> &Cond,
240                                    bool AllowModify) const {
244   // If the block has no terminators, it just falls into the block after it.
245   MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
246   if (I == MBB.end() || !isUnpredicatedTerminator(*I))
249   // Count the number of terminators and find the first unconditional or
// indirect branch, scanning backwards from the end of the block.
251   MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
252   int NumTerminators = 0;
253   for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
256     if (J->getDesc().isUnconditionalBranch() ||
257         J->getDesc().isIndirectBranch()) {
258       FirstUncondOrIndirectBr = J.getReverse();
262   // If AllowModify is true, we can erase any terminators after
263   // FirstUncondOrIndirectBR.
264   if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
265     while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
266       std::next(FirstUncondOrIndirectBr)->eraseFromParent();
// Continue the analysis from the surviving branch.
269     I = FirstUncondOrIndirectBr;
272   // We can't handle blocks that end in an indirect branch.
273   if (I->getDesc().isIndirectBranch())
276   // We can't handle blocks with more than 2 terminators.
277   if (NumTerminators > 2)
280   // Handle a single unconditional branch.
281   if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
282     TBB = I->getOperand(0).getMBB();
286   // Handle a single conditional branch.
287   if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
288     parseCondBranch(*I, TBB, Cond);
292   // Handle a conditional branch followed by an unconditional branch.
293   if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
294       I->getDesc().isUnconditionalBranch()) {
295     parseCondBranch(*std::prev(I), TBB, Cond);
296     FBB = I->getOperand(0).getMBB();
300   // Otherwise, we can't handle this.
// Implements TargetInstrInfo::removeBranch: delete up to two branch
// terminators from the end of MBB (an optional unconditional branch, then an
// optional conditional branch before it), accumulating their encoded sizes
// into *BytesRemoved when requested. The return statements counting removed
// branches are elided in this view.
304 unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
305                                       int *BytesRemoved) const {
308   MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
// Nothing to do if the last instruction is not a branch at all.
312   if (!I->getDesc().isUnconditionalBranch() &&
313       !I->getDesc().isConditionalBranch())
316   // Remove the branch.
318     *BytesRemoved += getInstSizeInBytes(*I);
319   I->eraseFromParent();
// After removing the last branch, check whether a conditional branch precedes
// it; if so it is removed as well.
323   if (I == MBB.begin())
326   if (!I->getDesc().isConditionalBranch())
329   // Remove the branch.
331     *BytesRemoved += getInstSizeInBytes(*I);
332   I->eraseFromParent();
336 // Inserts a branch into the end of the specific MachineBasicBlock, returning
337 // the number of instructions inserted.
// Cond is either empty (unconditional) or the 3-element {opcode-imm, reg, reg}
// form produced by parseCondBranch. BytesAdded, when non-null, accumulates the
// encoded size of each inserted instruction.
338 unsigned RISCVInstrInfo::insertBranch(
339     MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
340     ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
344   // Shouldn't be a fall through.
345   assert(TBB && "insertBranch must not be told to insert a fallthrough");
346   assert((Cond.size() == 3 || Cond.size() == 0) &&
347          "RISCV branch conditions have two components!");
349   // Unconditional branch.
351     MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
353       *BytesAdded += getInstSizeInBytes(MI);
357   // Either a one or two-way conditional branch.
// Cond[0] holds the branch opcode as an immediate; Cond[1]/Cond[2] are the
// compared register operands.
358   unsigned Opc = Cond[0].getImm();
359   MachineInstr &CondMI =
360       *BuildMI(&MBB, DL, get(Opc)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
362     *BytesAdded += getInstSizeInBytes(CondMI);
364   // One-way conditional branch.
368   // Two-way conditional branch.
369   MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
371     *BytesAdded += getInstSizeInBytes(MI);
// Expand an out-of-range unconditional branch into a long branch:
//   LUI scratch, %hi(DestBB) ; PseudoBRIND scratch, %lo(DestBB)
// MBB must be a freshly inserted, empty block with a single predecessor. A
// virtual scratch register is created first and then rewritten to a physical
// register found by the register scavenger.
375 unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
376                                               MachineBasicBlock &DestBB,
379                                               RegScavenger *RS) const {
380   assert(RS && "RegScavenger required for long branching");
381   assert(MBB.empty() &&
382          "new block should be inserted for expanding unconditional branch");
383   assert(MBB.pred_size() == 1);
385   MachineFunction *MF = MBB.getParent();
386   MachineRegisterInfo &MRI = MF->getRegInfo();
387   const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
// The LUI+BRIND pair computes an absolute address, which is invalid for PIC.
389   if (TM.isPositionIndependent())
390     report_fatal_error("Unable to insert indirect branch");
// LUI/%lo can only reach a signed 32-bit absolute range.
392   if (!isInt<32>(BrOffset))
394         "Branch offsets outside of the signed 32-bit range not supported");
396   // FIXME: A virtual register must be used initially, as the register
397   // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
398   // uses the same workaround).
399   Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
402   MachineInstr &LuiMI = *BuildMI(MBB, II, DL, get(RISCV::LUI), ScratchReg)
403                              .addMBB(&DestBB, RISCVII::MO_HI);
404   BuildMI(MBB, II, DL, get(RISCV::PseudoBRIND))
405       .addReg(ScratchReg, RegState::Kill)
406       .addMBB(&DestBB, RISCVII::MO_LO);
// Scavenge a free GPR at the LUI and replace the virtual scratch with it.
408   RS->enterBasicBlockEnd(MBB);
409   unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
410                                                 LuiMI.getIterator(), false, 0);
411   MRI.replaceRegWith(ScratchReg, Scav);
// Mark the scavenged register used so it is not handed out again here.
413   RS->setRegUsed(Scav);
// Invert the condition produced by parseCondBranch by swapping the branch
// opcode (Cond[0]) for its opposite; the register operands are unchanged.
417 bool RISCVInstrInfo::reverseBranchCondition(
418     SmallVectorImpl<MachineOperand> &Cond) const {
419   assert((Cond.size() == 3) && "Invalid branch condition!");
420   Cond[0].setImm(getOppositeBranchOpcode(Cond[0].getImm()));
// Return the destination basic block of a branch instruction.
425 RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
426   assert(MI.getDesc().isBranch() && "Unexpected opcode!");
427   // The branch target is always the last operand.
428   int NumOp = MI.getNumExplicitOperands();
429   return MI.getOperand(NumOp - 1).getMBB();
// Return true if a branch of the given opcode can encode BrOffset.
// Conditional branches carry a 13-bit signed, 2-byte-aligned offset;
// PseudoBR (JAL-based) carries a 21-bit signed offset.
432 bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
433                                            int64_t BrOffset) const {
434   // Ideally we could determine the supported branch offset from the
435   // RISCVII::FormMask, but this can't be used for Pseudo instructions like
// PseudoBR, so the ranges are listed explicitly per opcode instead (the
// conditional-branch case labels are elided in this view).
439     llvm_unreachable("Unexpected opcode!");
446     return isIntN(13, BrOffset);
448   case RISCV::PseudoBR:
449     return isIntN(21, BrOffset);
// Return the encoded size of MI in bytes. Real instructions use the TableGen
// size, shrunk when the instruction is compressible under the C extension;
// meta opcodes are size 0 by convention; pseudo call/load-address and atomic
// pseudos use sizes matching their eventual expansions; inline asm is
// estimated from its text. (The individual size returns are elided in this
// view.)
453 unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
454   unsigned Opcode = MI.getOpcode();
// Only query compression when MI is attached to a function (needed for the
// subtarget); standalone instructions fall through to the TableGen size.
458     if (MI.getParent() && MI.getParent()->getParent()) {
459       const auto MF = MI.getMF();
460       const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
461       const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
462       const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
463       const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
464       if (isCompressibleInst(MI, &ST, MRI, STI))
467     return get(Opcode).getSize();
// Meta/marker opcodes occupy no bytes in the output.
469   case TargetOpcode::EH_LABEL:
470   case TargetOpcode::IMPLICIT_DEF:
471   case TargetOpcode::KILL:
472   case TargetOpcode::DBG_VALUE:
474   // These values are determined based on RISCVExpandAtomicPseudoInsts,
475   // RISCVExpandPseudoInsts and RISCVMCCodeEmitter, depending on where the
476   // pseudos are expanded.
477   case RISCV::PseudoCALLReg:
478   case RISCV::PseudoCALL:
479   case RISCV::PseudoJump:
480   case RISCV::PseudoTAIL:
481   case RISCV::PseudoLLA:
482   case RISCV::PseudoLA:
483   case RISCV::PseudoLA_TLS_IE:
484   case RISCV::PseudoLA_TLS_GD:
486   case RISCV::PseudoAtomicLoadNand32:
487   case RISCV::PseudoAtomicLoadNand64:
489   case RISCV::PseudoMaskedAtomicSwap32:
490   case RISCV::PseudoMaskedAtomicLoadAdd32:
491   case RISCV::PseudoMaskedAtomicLoadSub32:
493   case RISCV::PseudoMaskedAtomicLoadNand32:
495   case RISCV::PseudoMaskedAtomicLoadMax32:
496   case RISCV::PseudoMaskedAtomicLoadMin32:
498   case RISCV::PseudoMaskedAtomicLoadUMax32:
499   case RISCV::PseudoMaskedAtomicLoadUMin32:
501   case RISCV::PseudoCmpXchg32:
502   case RISCV::PseudoCmpXchg64:
504   case RISCV::PseudoMaskedCmpXchg32:
// Inline asm size is estimated by scanning the asm string.
506   case TargetOpcode::INLINEASM:
507   case TargetOpcode::INLINEASM_BR: {
508     const MachineFunction &MF = *MI.getParent()->getParent();
509     const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
510     return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
// Return true if MI is effectively a register move for rematerialization
// purposes. The visible special case treats an instruction whose operand 1 is
// the X0 zero register as move-cheap (the opcode cases selecting this path are
// elided in this view); everything else defers to the MI flag.
516 bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
517   const unsigned Opcode = MI.getOpcode();
524     return (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0);
526   return MI.isAsCheapAsAMove();
// Machine verifier hook: check that every RISC-V immediate operand of MI lies
// within the range its operand type declares, setting ErrInfo on failure.
529 bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
530                                        StringRef &ErrInfo) const {
531   const MCInstrInfo *MCII = STI.getInstrInfo();
532   MCInstrDesc const &Desc = MCII->get(MI.getOpcode());
// Walk the operand descriptors, validating only RISC-V immediate operand
// types (the OPERAND_FIRST/LAST range from RISCVOp).
534   for (auto &OI : enumerate(Desc.operands())) {
535     unsigned OpType = OI.value().OperandType;
536     if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
537         OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
538       const MachineOperand &MO = MI.getOperand(OI.index());
540         int64_t Imm = MO.getImm();
544           llvm_unreachable("Unexpected operand type");
545         case RISCVOp::OPERAND_UIMM4:
548         case RISCVOp::OPERAND_UIMM5:
551         case RISCVOp::OPERAND_UIMM12:
552           Ok = isUInt<12>(Imm);
554         case RISCVOp::OPERAND_SIMM12:
// LSB0 types are shifted: a signed 13-bit value with bit 0 forced to zero.
557         case RISCVOp::OPERAND_SIMM13_LSB0:
558           Ok = isShiftedInt<12, 1>(Imm);
560         case RISCVOp::OPERAND_UIMM20:
561           Ok = isUInt<20>(Imm);
563         case RISCVOp::OPERAND_SIMM21_LSB0:
564           Ok = isShiftedInt<20, 1>(Imm);
// Shift amounts are bounded by XLEN, so the check differs for RV64.
566         case RISCVOp::OPERAND_UIMMLOG2XLEN:
567           if (STI.getTargetTriple().isArch64Bit())
574           ErrInfo = "Invalid immediate";
584 // Return true if get the base operand, byte offset of an instruction and the
585 // memory width. Width is the size of memory that is being loaded/stored.
586 bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
587     const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
588     unsigned &Width, const TargetRegisterInfo *TRI) const {
589   if (!LdSt.mayLoadOrStore())
592   // Here we assume the standard RISC-V ISA, which uses a base+offset
593   // addressing mode. You'll need to relax these conditions to support custom
594   // load/stores instructions.
// Expect exactly the canonical 3-operand shape: value, base register,
// immediate offset.
595   if (LdSt.getNumExplicitOperands() != 3)
597   if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
// A single memory operand is required so Width can be read unambiguously.
600   if (!LdSt.hasOneMemOperand())
603   Width = (*LdSt.memoperands_begin())->getSize();
604   BaseReg = &LdSt.getOperand(1);
605   Offset = LdSt.getOperand(2).getImm();
// Return true when two memory accesses provably cannot alias: same base
// register, and the lower access ends at or before the higher one begins.
// Conservatively bails out for side-effecting or ordered (atomic/volatile)
// accesses.
609 bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
610     const MachineInstr &MIa, const MachineInstr &MIb) const {
611   assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
612   assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
614   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
615       MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
618   // Retrieve the base register, offset from the base register and width. Width
619   // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
620   // base registers are identical, and the offset of a lower memory access +
621   // the width doesn't overlap the offset of a higher memory access,
622   // then the memory accesses are different.
623   const TargetRegisterInfo *TRI = STI.getRegisterInfo();
624   const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
625   int64_t OffsetA = 0, OffsetB = 0;
626   unsigned int WidthA = 0, WidthB = 0;
627   if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
628       getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
629     if (BaseOpA->isIdenticalTo(*BaseOpB)) {
630       int LowOffset = std::min(OffsetA, OffsetB);
631       int HighOffset = std::max(OffsetA, OffsetB);
632       int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
// [LowOffset, LowOffset+LowWidth) ending before HighOffset means no overlap.
633       if (LowOffset + LowWidth <= HighOffset)
// Split a target-flags word into (direct flags, bitmask flags) using the
// RISC-V direct-flag mask.
640 std::pair<unsigned, unsigned>
641 RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
642   const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
643   return std::make_pair(TF & Mask, TF & ~Mask);
// Map each direct operand target flag to its MIR serialization name, used when
// printing/parsing machine IR.
646 ArrayRef<std::pair<unsigned, const char *>>
647 RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
648   using namespace RISCVII;
649   static const std::pair<unsigned, const char *> TargetFlags[] = {
650       {MO_CALL, "riscv-call"},
651       {MO_PLT, "riscv-plt"},
654       {MO_PCREL_LO, "riscv-pcrel-lo"},
655       {MO_PCREL_HI, "riscv-pcrel-hi"},
656       {MO_GOT_HI, "riscv-got-hi"},
657       {MO_TPREL_LO, "riscv-tprel-lo"},
658       {MO_TPREL_HI, "riscv-tprel-hi"},
659       {MO_TPREL_ADD, "riscv-tprel-add"},
660       {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
661       {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
662   return makeArrayRef(TargetFlags);
// Machine outliner hook: a function is safe to outline from unless the linker
// could deduplicate it (link-once-ODR, when not explicitly allowed) or it has
// a section attribute (section check elided in this view).
664 bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
665     MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
666   const Function &F = MF.getFunction();
668   // Can F be deduplicated by the linker? If it can, don't outline from it.
669   if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
672   // Don't outline from functions with section markings; the program could
673   // expect that all the code is in the named section.
677   // It's safe to outline from MF.
// Machine outliner hook: per-block safety check. This implementation accepts
// all blocks and defers the real filtering to getOutliningCandidateInfo.
681 bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
682                                             unsigned &Flags) const {
683   // More accurate safety checking is done in getOutliningCandidateInfo.
687 // Enum values indicating how an outlined call should be constructed.
688 enum MachineOutlinerConstructionID {
// Single construction strategy: call via t0 (see getOutliningCandidateInfo).
689   MachineOutlinerDefault
// Machine outliner hook: filter candidates where X5 (t0) is not free to carry
// the return address, then compute the outlined function's cost model: 8-byte
// "call t0" overhead per call site and a 4-byte (2 with the C extension)
// "jr t0" frame overhead.
692 outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
693     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
695   // First we need to filter out candidates where the X5 register (IE t0) can't
696   // be used to setup the function call.
697   auto CannotInsertCall = [](outliner::Candidate &C) {
698     const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
701     LiveRegUnits LRU = C.LRU;
702     return !LRU.available(RISCV::X5);
705   RepeatedSequenceLocs.erase(std::remove_if(RepeatedSequenceLocs.begin(),
706                                             RepeatedSequenceLocs.end(),
708                              RepeatedSequenceLocs.end());
710   // If the sequence doesn't have enough candidates left, then we're done.
711   if (RepeatedSequenceLocs.size() < 2)
712     return outliner::OutlinedFunction();
// Sum the encoded size of the candidate sequence's instructions.
714   unsigned SequenceSize = 0;
716   auto I = RepeatedSequenceLocs[0].front();
717   auto E = std::next(RepeatedSequenceLocs[0].back());
719     SequenceSize += getInstSizeInBytes(*I);
721   // call t0, function = 8 bytes.
722   unsigned CallOverhead = 8;
723   for (auto &C : RepeatedSequenceLocs)
724     C.setCallInfo(MachineOutlinerDefault, CallOverhead);
726   // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
727   unsigned FrameOverhead = 4;
728   if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
729           .getFeatureBits()[RISCV::FeatureStdExtC])
732   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
733                                     FrameOverhead, MachineOutlinerDefault);
// Machine outliner hook: classify a single instruction as Legal (outlinable),
// Illegal (blocks outlining), or Invisible (ignored, e.g. CFI and meta
// instructions).
737 RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
738                                  unsigned Flags) const {
739   MachineInstr &MI = *MBBI;
740   MachineBasicBlock *MBB = MI.getParent();
741   const TargetRegisterInfo *TRI =
742       MBB->getParent()->getSubtarget().getRegisterInfo();
744   // Positions generally can't safely be outlined.
745   if (MI.isPosition()) {
746     // We can manually strip out CFI instructions later.
747     if (MI.isCFIInstruction())
748       return outliner::InstrType::Invisible;
750     return outliner::InstrType::Illegal;
753   // Don't trust the user to write safe inline assembly.
754   if (MI.isInlineAsm())
755     return outliner::InstrType::Illegal;
757   // We can't outline branches to other basic blocks.
758   if (MI.isTerminator() && !MBB->succ_empty())
759     return outliner::InstrType::Illegal;
761   // We need support for tail calls to outlined functions before return
762   // statements can be allowed.
764     return outliner::InstrType::Illegal;
766   // Don't allow modifying the X5 register which we use for return addresses for
767   // these outlined functions.
768   if (MI.modifiesRegister(RISCV::X5, TRI) ||
769       MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
770     return outliner::InstrType::Illegal;
772   // Make sure the operands don't reference something unsafe.
// MBB/block-address/constant-pool references would not be valid once the code
// moves to a different function.
773   for (const auto &MO : MI.operands())
774     if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI())
775       return outliner::InstrType::Illegal;
777   // Don't allow instructions which won't be materialized to impact outlining
// analysis (they contribute no bytes to the sequence).
779   if (MI.isMetaInstruction())
780     return outliner::InstrType::Invisible;
782   return outliner::InstrType::Legal;
// Machine outliner hook: finish the body of an outlined function — strip CFI
// instructions, mark X5 (the return-address register for outlined calls) as
// live-in, and append the return ("jr t0", encoded as JALR x0, x5, 0 — the
// trailing operands are elided in this view).
785 void RISCVInstrInfo::buildOutlinedFrame(
786     MachineBasicBlock &MBB, MachineFunction &MF,
787     const outliner::OutlinedFunction &OF) const {
789   // Strip out any CFI instructions
793   auto I = MBB.begin();
795   for (; I != E; ++I) {
796     if (I->isCFIInstruction()) {
797       I->removeFromParent();
// X5 holds the return address written by the outlined call site.
804   MBB.addLiveIn(RISCV::X5);
806   // Add in a return instruction to the end of the outlined frame.
807   MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
808                             .addReg(RISCV::X0, RegState::Define)
813 MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
814 Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
815 MachineFunction &MF, const outliner::Candidate &C) const {
817 // Add in a call instruction to the outlined function at the given location.
819 BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
820 .addGlobalAddress(M.getNamedValue(MF.getName()), 0,