1 //===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the AArch64 implementation of the TargetInstrInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "AArch64InstrInfo.h"
15 #include "AArch64MachineFunctionInfo.h"
16 #include "AArch64Subtarget.h"
17 #include "MCTargetDesc/AArch64AddressingModes.h"
18 #include "Utils/AArch64BaseInfo.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/CodeGen/MachineBasicBlock.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineMemOperand.h"
28 #include "llvm/CodeGen/MachineOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/StackMaps.h"
31 #include "llvm/IR/DebugLoc.h"
32 #include "llvm/IR/GlobalValue.h"
33 #include "llvm/MC/MCInst.h"
34 #include "llvm/MC/MCInstrDesc.h"
35 #include "llvm/Support/Casting.h"
36 #include "llvm/Support/CodeGen.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Compiler.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Target/TargetMachine.h"
42 #include "llvm/Target/TargetOptions.h"
43 #include "llvm/Target/TargetRegisterInfo.h"
44 #include "llvm/Target/TargetSubtargetInfo.h"
52 #define GET_INSTRINFO_CTOR_DTOR
53 #include "AArch64GenInstrInfo.inc"
55 static cl::opt<unsigned>
56 TBZDisplacementBits("aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
57 cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));
59 static cl::opt<unsigned>
60 CBZDisplacementBits("aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
61 cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));
63 static cl::opt<unsigned>
64 BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
65 cl::desc("Restrict range of Bcc instructions (DEBUG)"));
67 AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
68 : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
69 RI(STI.getTargetTriple()), Subtarget(STI) {}
71 /// getInstSizeInBytes - Return the number of bytes of code the specified
72 /// instruction may occupy. This returns the maximum number of bytes.
73 unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
74 const MachineBasicBlock &MBB = *MI.getParent();
75 const MachineFunction *MF = MBB.getParent();
76 const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
78 if (MI.getOpcode() == AArch64::INLINEASM)
79 return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
81 // FIXME: We currently only handle pseudoinstructions that don't get expanded
82 // before the assembly printer.
83 unsigned NumBytes = 0;
84 const MCInstrDesc &Desc = MI.getDesc();
85 switch (Desc.getOpcode()) {
87 // Anything not explicitly designated otherwise is a normal 4-byte insn.
90 case TargetOpcode::DBG_VALUE:
91 case TargetOpcode::EH_LABEL:
92 case TargetOpcode::IMPLICIT_DEF:
93 case TargetOpcode::KILL:
96 case TargetOpcode::STACKMAP:
97 // The upper bound for a stackmap intrinsic is the full length of its shadow
98 NumBytes = StackMapOpers(&MI).getNumPatchBytes();
99 assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
101 case TargetOpcode::PATCHPOINT:
102 // The size of the patchpoint intrinsic is the number of bytes requested
103 NumBytes = PatchPointOpers(&MI).getNumPatchBytes();
104 assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
106 case AArch64::TLSDESC_CALLSEQ:
107 // This gets lowered to an instruction sequence which takes 16 bytes
115 static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
116 SmallVectorImpl<MachineOperand> &Cond) {
117 // Block ends with fall-through condbranch.
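// The Cond vector produced here encodes the branch kind and is consumed by
// reverseBranchCondition() and insertSelect() below:
//   Bcc:           Cond = { condition code }
//   CB(N)Z[W|X]:   Cond = { -1, opcode, source register }
//   TB(N)Z[W|X]:   Cond = { -1, opcode, source register, bit number }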
118 switch (LastInst->getOpcode()) {
120 llvm_unreachable("Unknown branch instruction?");
122 Target = LastInst->getOperand(1).getMBB();
123 Cond.push_back(LastInst->getOperand(0));
129 Target = LastInst->getOperand(1).getMBB();
130 Cond.push_back(MachineOperand::CreateImm(-1));
131 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
132 Cond.push_back(LastInst->getOperand(0));
138 Target = LastInst->getOperand(2).getMBB();
139 Cond.push_back(MachineOperand::CreateImm(-1));
140 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
141 Cond.push_back(LastInst->getOperand(0));
142 Cond.push_back(LastInst->getOperand(1));
146 static unsigned getBranchDisplacementBits(unsigned Opc) {
149 llvm_unreachable("unexpected opcode!");
156 return TBZDisplacementBits;
161 return CBZDisplacementBits;
163 return BCCDisplacementBits;
167 bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
168 int64_t BrOffset) const {
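// For example, TB(N)Z has a 14-bit signed, word-scaled displacement, so with
// the default of 14 bits the reachable byte offsets are [-2^15, 2^15 - 4]
// (roughly +/-32KiB); CB(N)Z and Bcc get 19 bits, i.e. roughly +/-1MiB.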
169 unsigned Bits = getBranchDisplacementBits(BranchOp);
170 assert(Bits >= 3 && "max branch displacement must be enough to jump "
171                     "over conditional branch expansion");
172 return isIntN(Bits, BrOffset / 4);
175 MachineBasicBlock *AArch64InstrInfo::getBranchDestBlock(
176 const MachineInstr &MI) const {
177 switch (MI.getOpcode()) {
179 llvm_unreachable("unexpected opcode!");
181 return MI.getOperand(0).getMBB();
186 return MI.getOperand(2).getMBB();
192 return MI.getOperand(1).getMBB();
197 bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
198 MachineBasicBlock *&TBB,
199 MachineBasicBlock *&FBB,
200 SmallVectorImpl<MachineOperand> &Cond,
201 bool AllowModify) const {
202 // If the block has no terminators, it just falls into the block after it.
203 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
207 if (!isUnpredicatedTerminator(*I))
210 // Get the last instruction in the block.
211 MachineInstr *LastInst = &*I;
213 // If there is only one terminator instruction, process it.
214 unsigned LastOpc = LastInst->getOpcode();
215 if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
216 if (isUncondBranchOpcode(LastOpc)) {
217 TBB = LastInst->getOperand(0).getMBB();
220 if (isCondBranchOpcode(LastOpc)) {
221 // Block ends with fall-through condbranch.
222 parseCondBranch(LastInst, TBB, Cond);
225 return true; // Can't handle indirect branch.
228 // Get the instruction before it if it is a terminator.
229 MachineInstr *SecondLastInst = &*I;
230 unsigned SecondLastOpc = SecondLastInst->getOpcode();
232 // If AllowModify is true and the block ends with two or more unconditional
233 // branches, delete all but the first unconditional branch.
234 if (AllowModify && isUncondBranchOpcode(LastOpc)) {
235 while (isUncondBranchOpcode(SecondLastOpc)) {
236 LastInst->eraseFromParent();
237 LastInst = SecondLastInst;
238 LastOpc = LastInst->getOpcode();
239 if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
240 // Return now; the only terminator is an unconditional branch.
241 TBB = LastInst->getOperand(0).getMBB();
244 SecondLastInst = &*I;
245 SecondLastOpc = SecondLastInst->getOpcode();
250 // If there are three terminators, we don't know what sort of block this is.
251 if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
254 // If the block ends with a B and a Bcc, handle it.
255 if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
256 parseCondBranch(SecondLastInst, TBB, Cond);
257 FBB = LastInst->getOperand(0).getMBB();
261 // If the block ends with two unconditional branches, handle it. The second
262 // one is not executed, so remove it.
263 if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
264 TBB = SecondLastInst->getOperand(0).getMBB();
267 I->eraseFromParent();
271 // ...likewise if it ends with an indirect branch followed by an unconditional branch.
273 if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
276 I->eraseFromParent();
280 // Otherwise, can't handle this.
284 bool AArch64InstrInfo::reverseBranchCondition(
285 SmallVectorImpl<MachineOperand> &Cond) const {
286 if (Cond[0].getImm() != -1) {
288 AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
289 Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
291 // Folded compare-and-branch
292 switch (Cond[1].getImm()) {
294 llvm_unreachable("Unknown conditional branch!");
296 Cond[1].setImm(AArch64::CBNZW);
299 Cond[1].setImm(AArch64::CBZW);
302 Cond[1].setImm(AArch64::CBNZX);
305 Cond[1].setImm(AArch64::CBZX);
308 Cond[1].setImm(AArch64::TBNZW);
311 Cond[1].setImm(AArch64::TBZW);
314 Cond[1].setImm(AArch64::TBNZX);
317 Cond[1].setImm(AArch64::TBZX);
325 unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
326 int *BytesRemoved) const {
327 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
331 if (!isUncondBranchOpcode(I->getOpcode()) &&
332 !isCondBranchOpcode(I->getOpcode()))
335 // Remove the branch.
336 I->eraseFromParent();
340 if (I == MBB.begin()) {
346 if (!isCondBranchOpcode(I->getOpcode())) {
352 // Remove the branch.
353 I->eraseFromParent();
360 void AArch64InstrInfo::instantiateCondBranch(
361 MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
362 ArrayRef<MachineOperand> Cond) const {
363 if (Cond[0].getImm() != -1) {
365 BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
367 // Folded compare-and-branch
368 // Note that we use addOperand instead of addReg to keep the flags.
369 const MachineInstrBuilder MIB =
370 BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]);
372 MIB.addImm(Cond[3].getImm());
377 unsigned AArch64InstrInfo::insertBranch(MachineBasicBlock &MBB,
378 MachineBasicBlock *TBB,
379 MachineBasicBlock *FBB,
380 ArrayRef<MachineOperand> Cond,
382 int *BytesAdded) const {
383 // Shouldn't be a fall through.
384 assert(TBB && "insertBranch must not be told to insert a fallthrough");
387 if (Cond.empty()) // Unconditional branch?
388 BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
390 instantiateCondBranch(MBB, DL, TBB, Cond);
398 // Two-way conditional branch.
399 instantiateCondBranch(MBB, DL, TBB, Cond);
400 BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
408 // Find the original register that VReg is copied from.
409 static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
410 while (TargetRegisterInfo::isVirtualRegister(VReg)) {
411 const MachineInstr *DefMI = MRI.getVRegDef(VReg);
412 if (!DefMI->isFullCopy())
414 VReg = DefMI->getOperand(1).getReg();
419 // Determine if VReg is defined by an instruction that can be folded into a
420 // csel instruction. If so, return the folded opcode and the replacement source register (via NewVReg).
422 static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
423 unsigned *NewVReg = nullptr) {
424 VReg = removeCopies(MRI, VReg);
425 if (!TargetRegisterInfo::isVirtualRegister(VReg))
428 bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
429 const MachineInstr *DefMI = MRI.getVRegDef(VReg);
431 unsigned SrcOpNum = 0;
432 switch (DefMI->getOpcode()) {
433 case AArch64::ADDSXri:
434 case AArch64::ADDSWri:
435 // if NZCV is used, do not fold.
436 if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
438 // fall-through to ADDXri and ADDWri.
440 case AArch64::ADDXri:
441 case AArch64::ADDWri:
442 // add x, 1 -> csinc.
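// E.g. if VReg is defined by 'ADDWri %src, 1, 0' (no shift), the caller can
// emit 'CSINCWr dst, tval, %src, cc' instead, since CSINC yields its second
// source plus one when the condition is false.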
443 if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
444 DefMI->getOperand(3).getImm() != 0)
447 Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
450 case AArch64::ORNXrr:
451 case AArch64::ORNWrr: {
452 // not x -> csinv, represented as orn dst, xzr, src.
453 unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
454 if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
457 Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
461 case AArch64::SUBSXrr:
462 case AArch64::SUBSWrr:
463 // if NZCV is used, do not fold.
464 if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
466 // fall-through to SUBXrr and SUBWrr.
468 case AArch64::SUBXrr:
469 case AArch64::SUBWrr: {
470 // neg x -> csneg, represented as sub dst, xzr, src.
471 unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
472 if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
475 Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
481 assert(Opc && SrcOpNum && "Missing parameters");
484 *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
488 bool AArch64InstrInfo::canInsertSelect(
489 const MachineBasicBlock &MBB, ArrayRef<MachineOperand> Cond,
490 unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
491 int &FalseCycles) const {
492 // Check register classes.
493 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
494 const TargetRegisterClass *RC =
495 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
499 // Expanding cbz/tbz requires an extra cycle of latency on the condition.
500 unsigned ExtraCondLat = Cond.size() != 1;
502 // GPRs are handled by csel.
503 // FIXME: Fold in x+1, -x, and ~x when applicable.
504 if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
505 AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
506 // Single-cycle csel, csinc, csinv, and csneg.
507 CondCycles = 1 + ExtraCondLat;
508 TrueCycles = FalseCycles = 1;
509 if (canFoldIntoCSel(MRI, TrueReg))
511 else if (canFoldIntoCSel(MRI, FalseReg))
516 // Scalar floating point is handled by fcsel.
517 // FIXME: Form fabs, fmin, and fmax when applicable.
518 if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
519 AArch64::FPR32RegClass.hasSubClassEq(RC)) {
520 CondCycles = 5 + ExtraCondLat;
521 TrueCycles = FalseCycles = 2;
529 void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
530 MachineBasicBlock::iterator I,
531 const DebugLoc &DL, unsigned DstReg,
532 ArrayRef<MachineOperand> Cond,
533 unsigned TrueReg, unsigned FalseReg) const {
534 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
536 // Parse the condition code, see parseCondBranch() above.
537 AArch64CC::CondCode CC;
538 switch (Cond.size()) {
540 llvm_unreachable("Unknown condition opcode in Cond");
542 CC = AArch64CC::CondCode(Cond[0].getImm());
544 case 3: { // cbz/cbnz
545 // We must insert a compare against 0.
547 switch (Cond[1].getImm()) {
549 llvm_unreachable("Unknown branch opcode in Cond");
567 unsigned SrcReg = Cond[2].getReg();
569 // cmp reg, #0 is actually subs xzr, reg, #0.
570 MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
571 BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
576 MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
577 BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
584 case 4: { // tbz/tbnz
585 // We must insert a tst instruction.
586 switch (Cond[1].getImm()) {
588 llvm_unreachable("Unknown branch opcode in Cond");
598 // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
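// E.g. for TBZW with bit number 3 this emits 'ANDS wzr, src, #0x8', so the
// Z flag reflects whether the tested bit is clear for the CSEL built below.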
599 if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
600 BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
601 .addReg(Cond[2].getReg())
603 AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
605 BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
606 .addReg(Cond[2].getReg())
608 AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
614 const TargetRegisterClass *RC = nullptr;
615 bool TryFold = false;
616 if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
617 RC = &AArch64::GPR64RegClass;
618 Opc = AArch64::CSELXr;
620 } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
621 RC = &AArch64::GPR32RegClass;
622 Opc = AArch64::CSELWr;
624 } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
625 RC = &AArch64::FPR64RegClass;
626 Opc = AArch64::FCSELDrrr;
627 } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
628 RC = &AArch64::FPR32RegClass;
629 Opc = AArch64::FCSELSrrr;
631 assert(RC && "Unsupported regclass");
633 // Try folding simple instructions into the csel.
635 unsigned NewVReg = 0;
636 unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
638 // The folded opcodes csinc, csinv and csneg apply the operation to
639 // FalseReg, so we need to invert the condition.
640 CC = AArch64CC::getInvertedCondCode(CC);
643 FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);
645 // Fold the operation. Leave any dead instructions for DCE to clean up.
649 // This extends the live range of NewVReg.
650 MRI.clearKillFlags(NewVReg);
654 // Pull all virtual registers into the appropriate class.
655 MRI.constrainRegClass(TrueReg, RC);
656 MRI.constrainRegClass(FalseReg, RC);
659 BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
663 /// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
664 static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
665 uint64_t Imm = MI.getOperand(1).getImm();
666 uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
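// The shift pair clears any bits above BitSize; e.g. for BitSize == 32 an
// input of 0xffffffff00000001 is reduced to 0x1 before the logical-immediate
// encoding check.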
668 return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
671 // FIXME: this implementation should be micro-architecture dependent, so a
672 // micro-architecture target hook should be introduced here in the future.
673 bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
674 if (!Subtarget.hasCustomCheapAsMoveHandling())
675 return MI.isAsCheapAsAMove();
679 switch (MI.getOpcode()) {
683 // add/sub on register without shift
684 case AArch64::ADDWri:
685 case AArch64::ADDXri:
686 case AArch64::SUBWri:
687 case AArch64::SUBXri:
688 return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 ||
689 MI.getOperand(3).getImm() == 0);
691 // add/sub on register with shift
692 case AArch64::ADDWrs:
693 case AArch64::ADDXrs:
694 case AArch64::SUBWrs:
695 case AArch64::SUBXrs:
696 Imm = MI.getOperand(3).getImm();
697 return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
698 AArch64_AM::getArithShiftValue(Imm) < 4);
700 // logical ops on immediate
701 case AArch64::ANDWri:
702 case AArch64::ANDXri:
703 case AArch64::EORWri:
704 case AArch64::EORXri:
705 case AArch64::ORRWri:
706 case AArch64::ORRXri:
709 // logical ops on register without shift
710 case AArch64::ANDWrr:
711 case AArch64::ANDXrr:
712 case AArch64::BICWrr:
713 case AArch64::BICXrr:
714 case AArch64::EONWrr:
715 case AArch64::EONXrr:
716 case AArch64::EORWrr:
717 case AArch64::EORXrr:
718 case AArch64::ORNWrr:
719 case AArch64::ORNXrr:
720 case AArch64::ORRWrr:
721 case AArch64::ORRXrr:
724 // logical ops on register with shift
725 case AArch64::ANDWrs:
726 case AArch64::ANDXrs:
727 case AArch64::BICWrs:
728 case AArch64::BICXrs:
729 case AArch64::EONWrs:
730 case AArch64::EONXrs:
731 case AArch64::EORWrs:
732 case AArch64::EORXrs:
733 case AArch64::ORNWrs:
734 case AArch64::ORNXrs:
735 case AArch64::ORRWrs:
736 case AArch64::ORRXrs:
737 Imm = MI.getOperand(3).getImm();
738 return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
739 AArch64_AM::getShiftValue(Imm) < 4 &&
740 AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL);
742 // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
743 // ORRXri, it is as cheap as a MOV.
744 case AArch64::MOVi32imm:
745 return canBeExpandedToORR(MI, 32);
746 case AArch64::MOVi64imm:
747 return canBeExpandedToORR(MI, 64);
749 // It is cheap to zero out registers if the subtarget has ZeroCycleZeroing
751 case AArch64::FMOVS0:
752 case AArch64::FMOVD0:
753 return Subtarget.hasZeroCycleZeroing();
754 case TargetOpcode::COPY:
755 return (Subtarget.hasZeroCycleZeroing() &&
756 (MI.getOperand(1).getReg() == AArch64::WZR ||
757 MI.getOperand(1).getReg() == AArch64::XZR));
760 llvm_unreachable("Unknown opcode to check as cheap as a move!");
763 bool AArch64InstrInfo::isFalkorShiftExtFast(const MachineInstr &MI) const {
764 switch (MI.getOpcode()) {
768 case AArch64::ADDWrs:
769 case AArch64::ADDXrs:
770 case AArch64::ADDSWrs:
771 case AArch64::ADDSXrs: {
772 unsigned Imm = MI.getOperand(3).getImm();
773 unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
776 return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5;
779 case AArch64::ADDWrx:
780 case AArch64::ADDXrx:
781 case AArch64::ADDXrx64:
782 case AArch64::ADDSWrx:
783 case AArch64::ADDSXrx:
784 case AArch64::ADDSXrx64: {
785 unsigned Imm = MI.getOperand(3).getImm();
786 switch (AArch64_AM::getArithExtendType(Imm)) {
789 case AArch64_AM::UXTB:
790 case AArch64_AM::UXTH:
791 case AArch64_AM::UXTW:
792 case AArch64_AM::UXTX:
793 return AArch64_AM::getArithShiftValue(Imm) <= 4;
797 case AArch64::SUBWrs:
798 case AArch64::SUBSWrs: {
799 unsigned Imm = MI.getOperand(3).getImm();
800 unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
801 return ShiftVal == 0 ||
802 (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 31);
805 case AArch64::SUBXrs:
806 case AArch64::SUBSXrs: {
807 unsigned Imm = MI.getOperand(3).getImm();
808 unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
809 return ShiftVal == 0 ||
810 (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 63);
813 case AArch64::SUBWrx:
814 case AArch64::SUBXrx:
815 case AArch64::SUBXrx64:
816 case AArch64::SUBSWrx:
817 case AArch64::SUBSXrx:
818 case AArch64::SUBSXrx64: {
819 unsigned Imm = MI.getOperand(3).getImm();
820 switch (AArch64_AM::getArithExtendType(Imm)) {
823 case AArch64_AM::UXTB:
824 case AArch64_AM::UXTH:
825 case AArch64_AM::UXTW:
826 case AArch64_AM::UXTX:
827 return AArch64_AM::getArithShiftValue(Imm) == 0;
831 case AArch64::LDRBBroW:
832 case AArch64::LDRBBroX:
833 case AArch64::LDRBroW:
834 case AArch64::LDRBroX:
835 case AArch64::LDRDroW:
836 case AArch64::LDRDroX:
837 case AArch64::LDRHHroW:
838 case AArch64::LDRHHroX:
839 case AArch64::LDRHroW:
840 case AArch64::LDRHroX:
841 case AArch64::LDRQroW:
842 case AArch64::LDRQroX:
843 case AArch64::LDRSBWroW:
844 case AArch64::LDRSBWroX:
845 case AArch64::LDRSBXroW:
846 case AArch64::LDRSBXroX:
847 case AArch64::LDRSHWroW:
848 case AArch64::LDRSHWroX:
849 case AArch64::LDRSHXroW:
850 case AArch64::LDRSHXroX:
851 case AArch64::LDRSWroW:
852 case AArch64::LDRSWroX:
853 case AArch64::LDRSroW:
854 case AArch64::LDRSroX:
855 case AArch64::LDRWroW:
856 case AArch64::LDRWroX:
857 case AArch64::LDRXroW:
858 case AArch64::LDRXroX:
859 case AArch64::PRFMroW:
860 case AArch64::PRFMroX:
861 case AArch64::STRBBroW:
862 case AArch64::STRBBroX:
863 case AArch64::STRBroW:
864 case AArch64::STRBroX:
865 case AArch64::STRDroW:
866 case AArch64::STRDroX:
867 case AArch64::STRHHroW:
868 case AArch64::STRHHroX:
869 case AArch64::STRHroW:
870 case AArch64::STRHroX:
871 case AArch64::STRQroW:
872 case AArch64::STRQroX:
873 case AArch64::STRSroW:
874 case AArch64::STRSroX:
875 case AArch64::STRWroW:
876 case AArch64::STRWroX:
877 case AArch64::STRXroW:
878 case AArch64::STRXroX: {
879 unsigned IsSigned = MI.getOperand(3).getImm();
885 bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
886 unsigned &SrcReg, unsigned &DstReg,
887 unsigned &SubIdx) const {
888 switch (MI.getOpcode()) {
891 case AArch64::SBFMXri: // aka sxtw
892 case AArch64::UBFMXri: // aka uxtw
893 // Check for the 32 -> 64 bit extension case; these instructions can do much more.
895 if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
897 // This is a signed or unsigned 32 -> 64 bit extension.
898 SrcReg = MI.getOperand(1).getReg();
899 DstReg = MI.getOperand(0).getReg();
900 SubIdx = AArch64::sub_32;
905 bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
906 MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
907 const TargetRegisterInfo *TRI = &getRegisterInfo();
908 unsigned BaseRegA = 0, BaseRegB = 0;
909 int64_t OffsetA = 0, OffsetB = 0;
910 unsigned WidthA = 0, WidthB = 0;
912 assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
913 assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
915 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
916 MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
919 // Retrieve the base register, offset from the base register and width. Width
920 // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
921 // base registers are identical, and the offset of a lower memory access +
922 // the width doesn't overlap the offset of a higher memory access,
923 // then the memory accesses are different.
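// E.g. 'ldr x1, [x0]' (offset 0, width 8) and 'ldr x2, [x0, #8]' are disjoint
// because 0 + 8 <= 8.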
924 if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
925 getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
926 if (BaseRegA == BaseRegB) {
927 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
928 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
929 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
930 if (LowOffset + LowWidth <= HighOffset)
937 /// analyzeCompare - For a comparison instruction, return the source registers
938 /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
939 /// Return true if the comparison instruction can be analyzed.
940 bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
941 unsigned &SrcReg2, int &CmpMask,
942 int &CmpValue) const {
943 switch (MI.getOpcode()) {
946 case AArch64::SUBSWrr:
947 case AArch64::SUBSWrs:
948 case AArch64::SUBSWrx:
949 case AArch64::SUBSXrr:
950 case AArch64::SUBSXrs:
951 case AArch64::SUBSXrx:
952 case AArch64::ADDSWrr:
953 case AArch64::ADDSWrs:
954 case AArch64::ADDSWrx:
955 case AArch64::ADDSXrr:
956 case AArch64::ADDSXrs:
957 case AArch64::ADDSXrx:
958 // Replace SUBSWrr with SUBWrr if NZCV is not used.
959 SrcReg = MI.getOperand(1).getReg();
960 SrcReg2 = MI.getOperand(2).getReg();
964 case AArch64::SUBSWri:
965 case AArch64::ADDSWri:
966 case AArch64::SUBSXri:
967 case AArch64::ADDSXri:
968 SrcReg = MI.getOperand(1).getReg();
971 // FIXME: In order to convert CmpValue to 0 or 1
972 CmpValue = MI.getOperand(2).getImm() != 0;
974 case AArch64::ANDSWri:
975 case AArch64::ANDSXri:
976 // ANDS does not use the same encoding scheme as the other xxxS instructions.
978 SrcReg = MI.getOperand(1).getReg();
981 // FIXME: The return type of decodeLogicalImmediate is uint64_t, while the
982 // type of CmpValue is int. When converting uint64_t to int, the high 32 bits
983 // of the uint64_t will be lost.
984 // In fact it caused a bug in spec2006-483.xalancbmk.
985 // CmpValue is only used to compare with zero in optimizeCompareInstr.
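// E.g. for 'ANDS w0, w1, #0xff' the decoded immediate is 255, so CmpValue
// below becomes 1 (non-zero); only the zero/non-zero distinction matters.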
986 CmpValue = AArch64_AM::decodeLogicalImmediate(
987 MI.getOperand(2).getImm(),
988 MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0;
995 static bool UpdateOperandRegClass(MachineInstr &Instr) {
996 MachineBasicBlock *MBB = Instr.getParent();
997 assert(MBB && "Can't get MachineBasicBlock here");
998 MachineFunction *MF = MBB->getParent();
999 assert(MF && "Can't get MachineFunction here");
1000 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1001 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1002 MachineRegisterInfo *MRI = &MF->getRegInfo();
1004 for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
1006 MachineOperand &MO = Instr.getOperand(OpIdx);
1007 const TargetRegisterClass *OpRegCstraints =
1008 Instr.getRegClassConstraint(OpIdx, TII, TRI);
1010 // If there's no constraint, there's nothing to do.
1011 if (!OpRegCstraints)
1013 // If the operand is a frame index, there's nothing to do here.
1014 // A frame index operand will resolve correctly during PEI.
1018 assert(MO.isReg() &&
1019 "Operand has register constraints without being a register!");
1021 unsigned Reg = MO.getReg();
1022 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1023 if (!OpRegCstraints->contains(Reg))
1025 } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
1026 !MRI->constrainRegClass(Reg, OpRegCstraints))
1033 /// \brief Return the opcode that does not set flags when possible - otherwise
1034 /// return the original opcode. The caller is responsible for doing the actual
1035 /// substitution and legality checking.
1036 static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) {
1037 // Don't convert all compare instructions, because for some the zero register
1038 // encoding becomes the sp register.
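// For example, a plain compare 'cmp w0, #5' is 'SUBSWri wzr, w0, 5'; dropping
// the S would reinterpret the destination slot as WSP rather than WZR, so the
// flag-setting form is kept whenever the zero register is defined.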
1039 bool MIDefinesZeroReg = false;
1040 if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
1041 MIDefinesZeroReg = true;
1043 switch (MI.getOpcode()) {
1045 return MI.getOpcode();
1046 case AArch64::ADDSWrr:
1047 return AArch64::ADDWrr;
1048 case AArch64::ADDSWri:
1049 return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
1050 case AArch64::ADDSWrs:
1051 return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
1052 case AArch64::ADDSWrx:
1053 return AArch64::ADDWrx;
1054 case AArch64::ADDSXrr:
1055 return AArch64::ADDXrr;
1056 case AArch64::ADDSXri:
1057 return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
1058 case AArch64::ADDSXrs:
1059 return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
1060 case AArch64::ADDSXrx:
1061 return AArch64::ADDXrx;
1062 case AArch64::SUBSWrr:
1063 return AArch64::SUBWrr;
1064 case AArch64::SUBSWri:
1065 return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
1066 case AArch64::SUBSWrs:
1067 return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
1068 case AArch64::SUBSWrx:
1069 return AArch64::SUBWrx;
1070 case AArch64::SUBSXrr:
1071 return AArch64::SUBXrr;
1072 case AArch64::SUBSXri:
1073 return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
1074 case AArch64::SUBSXrs:
1075 return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
1076 case AArch64::SUBSXrx:
1077 return AArch64::SUBXrx;
1087 /// True when condition flags are accessed (either by writing or reading)
1088 /// on the instruction trace starting at From and ending at To.
1090 /// Note: If From and To are from different blocks it's assumed the condition flags are accessed on the path.
1092 static bool areCFlagsAccessedBetweenInstrs(
1093 MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
1094 const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
1095 // Early exit if To is at the beginning of the BB.
1096 if (To == To->getParent()->begin())
1099 // Check whether the instructions are in the same basic block
1100 // If not, assume the condition flags might get modified somewhere.
1101 if (To->getParent() != From->getParent())
1104 // From must be above To.
1105 assert(std::find_if(++To.getReverse(), To->getParent()->rend(),
1106 [From](MachineInstr &MI) {
1107 return MI.getIterator() == From;
1108 }) != To->getParent()->rend());
1110 // We iterate backwards, starting from \p To, until we hit \p From.
1111 for (--To; To != From; --To) {
1112 const MachineInstr &Instr = *To;
1114 if ( ((AccessToCheck & AK_Write) && Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
1115 ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
1121 /// Try to optimize a compare instruction. A compare instruction is an
1122 /// instruction which produces AArch64::NZCV. It is a true compare instruction
1123 /// only when there are no uses of its destination register.
1125 /// The following steps are tried in order:
1126 /// 1. Convert CmpInstr into an unconditional version.
1127 /// 2. Remove CmpInstr if an earlier instruction produces the needed condition
1128 ///    code, or can be converted into an instruction that does.
1129 /// Only comparison with zero is supported.
1130 bool AArch64InstrInfo::optimizeCompareInstr(
1131 MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
1132 int CmpValue, const MachineRegisterInfo *MRI) const {
1133 assert(CmpInstr.getParent());
1136 // Replace SUBSWrr with SUBWrr if NZCV is not used.
1137 int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
1138 if (DeadNZCVIdx != -1) {
1139 if (CmpInstr.definesRegister(AArch64::WZR) ||
1140 CmpInstr.definesRegister(AArch64::XZR)) {
1141 CmpInstr.eraseFromParent();
1144 unsigned Opc = CmpInstr.getOpcode();
1145 unsigned NewOpc = convertToNonFlagSettingOpc(CmpInstr);
1148 const MCInstrDesc &MCID = get(NewOpc);
1149 CmpInstr.setDesc(MCID);
1150 CmpInstr.RemoveOperand(DeadNZCVIdx);
1151 bool succeeded = UpdateOperandRegClass(CmpInstr);
1153 assert(succeeded && "Some operands reg class are incompatible!");
1157 // Continue only if we have an "ri" compare whose immediate is zero.
1158 // FIXME: CmpValue has already been converted to 0 or 1 in analyzeCompare.
1160 assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
1161 if (CmpValue != 0 || SrcReg2 != 0)
1164 // CmpInstr is a pure compare instruction if its destination register is not used.
1165 if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
1168 return substituteCmpToZero(CmpInstr, SrcReg, MRI);
1171 /// Get the opcode of the S (flag-setting) version of Instr.
1172 /// If Instr is already an S version, its opcode is returned.
1173 /// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have an S
1174 /// version or we are not interested in it.
1175 static unsigned sForm(MachineInstr &Instr) {
1176 switch (Instr.getOpcode()) {
1178 return AArch64::INSTRUCTION_LIST_END;
1180 case AArch64::ADDSWrr:
1181 case AArch64::ADDSWri:
1182 case AArch64::ADDSXrr:
1183 case AArch64::ADDSXri:
1184 case AArch64::SUBSWrr:
1185 case AArch64::SUBSWri:
1186 case AArch64::SUBSXrr:
1187 case AArch64::SUBSXri:
1188 return Instr.getOpcode();
1190 case AArch64::ADDWrr: return AArch64::ADDSWrr;
1191 case AArch64::ADDWri: return AArch64::ADDSWri;
1192 case AArch64::ADDXrr: return AArch64::ADDSXrr;
1193 case AArch64::ADDXri: return AArch64::ADDSXri;
1194 case AArch64::ADCWr: return AArch64::ADCSWr;
1195 case AArch64::ADCXr: return AArch64::ADCSXr;
1196 case AArch64::SUBWrr: return AArch64::SUBSWrr;
1197 case AArch64::SUBWri: return AArch64::SUBSWri;
1198 case AArch64::SUBXrr: return AArch64::SUBSXrr;
1199 case AArch64::SUBXri: return AArch64::SUBSXri;
1200 case AArch64::SBCWr: return AArch64::SBCSWr;
1201 case AArch64::SBCXr: return AArch64::SBCSXr;
1202 case AArch64::ANDWri: return AArch64::ANDSWri;
1203 case AArch64::ANDXri: return AArch64::ANDSXri;
1207 /// Check if AArch64::NZCV should be alive in successors of MBB.
1208 static bool areCFlagsAliveInSuccessors(MachineBasicBlock *MBB) {
1209 for (auto *BB : MBB->successors())
1210 if (BB->isLiveIn(AArch64::NZCV))
1223 UsedNZCV() = default;
1225 UsedNZCV& operator |=(const UsedNZCV& UsedFlags) {
1226 this->N |= UsedFlags.N;
1227 this->Z |= UsedFlags.Z;
1228 this->C |= UsedFlags.C;
1229 this->V |= UsedFlags.V;
1234 } // end anonymous namespace
1236 /// Find a condition code used by the instruction.
1237 /// Returns AArch64CC::Invalid if either the instruction does not use condition
1238 /// codes or we don't optimize CmpInstr in the presence of such instructions.
1239 static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {
1240 switch (Instr.getOpcode()) {
1242 return AArch64CC::Invalid;
1244 case AArch64::Bcc: {
1245 int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
1247 return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 2).getImm());
1250 case AArch64::CSINVWr:
1251 case AArch64::CSINVXr:
1252 case AArch64::CSINCWr:
1253 case AArch64::CSINCXr:
1254 case AArch64::CSELWr:
1255 case AArch64::CSELXr:
1256 case AArch64::CSNEGWr:
1257 case AArch64::CSNEGXr:
1258 case AArch64::FCSELSrrr:
1259 case AArch64::FCSELDrrr: {
1260 int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
1262 return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 1).getImm());
1267 static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
1268 assert(CC != AArch64CC::Invalid);
1274 case AArch64CC::EQ: // Z set
1275 case AArch64CC::NE: // Z clear
1279 case AArch64CC::HI: // Z clear and C set
1280 case AArch64CC::LS: // Z set or C clear
1283 case AArch64CC::HS: // C set
1284 case AArch64CC::LO: // C clear
1288 case AArch64CC::MI: // N set
1289 case AArch64CC::PL: // N clear
1293 case AArch64CC::VS: // V set
1294 case AArch64CC::VC: // V clear
1298 case AArch64CC::GT: // Z clear, N and V the same
1299 case AArch64CC::LE: // Z set, N and V differ
1302 case AArch64CC::GE: // N and V the same
1303 case AArch64CC::LT: // N and V differ
1311 static bool isADDSRegImm(unsigned Opcode) {
1312 return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
1315 static bool isSUBSRegImm(unsigned Opcode) {
1316 return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
1319 /// Check if CmpInstr can be substituted by MI.
1321 /// CmpInstr can be substituted:
1322 /// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
1323 /// - and, MI and CmpInstr are from the same MachineBB
1324 /// - and, condition flags are not alive in successors of the CmpInstr parent
1325 /// - and, if MI opcode is the S form, there must be no defs of flags between MI and CmpInstr;
1327 /// or if MI opcode is not the S form there must be neither defs of flags
1328 /// nor uses of flags between MI and CmpInstr.
1329 /// - and C/V flags are not used after CmpInstr
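///
/// For example, 'SUBS %vreg, 0' followed only by a 'Bcc EQ/NE' user can be
/// removed once the defining ADD/SUB of %vreg is switched to its S form,
/// because EQ/NE read only the Z flag, which that S form sets identically.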
1330 static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr,
1331 const TargetRegisterInfo *TRI) {
1333 assert(sForm(*MI) != AArch64::INSTRUCTION_LIST_END);
1336 const unsigned CmpOpcode = CmpInstr->getOpcode();
1337 if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
1340 if (MI->getParent() != CmpInstr->getParent())
1343 if (areCFlagsAliveInSuccessors(CmpInstr->getParent()))
1346 AccessKind AccessToCheck = AK_Write;
1347 if (sForm(*MI) != MI->getOpcode())
1348 AccessToCheck = AK_All;
1349 if (areCFlagsAccessedBetweenInstrs(MI, CmpInstr, TRI, AccessToCheck))
1352 UsedNZCV NZCVUsedAfterCmp;
1353 for (auto I = std::next(CmpInstr->getIterator()), E = CmpInstr->getParent()->instr_end();
1355 const MachineInstr &Instr = *I;
1356 if (Instr.readsRegister(AArch64::NZCV, TRI)) {
1357 AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr);
1358 if (CC == AArch64CC::Invalid) // Unsupported conditional instruction
1360 NZCVUsedAfterCmp |= getUsedNZCV(CC);
1363 if (Instr.modifiesRegister(AArch64::NZCV, TRI))
1367 return !NZCVUsedAfterCmp.C && !NZCVUsedAfterCmp.V;
1370 /// Substitute an instruction comparing to zero with another instruction
1371 /// which produces the needed condition flags.
1373 /// Return true on success.
1374 bool AArch64InstrInfo::substituteCmpToZero(
1375 MachineInstr &CmpInstr, unsigned SrcReg,
1376 const MachineRegisterInfo *MRI) const {
1378 // Get the unique definition of SrcReg.
1379 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
1383 const TargetRegisterInfo *TRI = &getRegisterInfo();
1385 unsigned NewOpc = sForm(*MI);
1386 if (NewOpc == AArch64::INSTRUCTION_LIST_END)
1389 if (!canInstrSubstituteCmpInstr(MI, &CmpInstr, TRI))
1392 // Update the instruction to set NZCV.
1393 MI->setDesc(get(NewOpc));
1394 CmpInstr.eraseFromParent();
1395 bool succeeded = UpdateOperandRegClass(*MI);
1397 assert(succeeded && "Some operands reg class are incompatible!");
1398 MI->addRegisterDefined(AArch64::NZCV, TRI);
1402 bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1403 if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
1406 MachineBasicBlock &MBB = *MI.getParent();
1407 DebugLoc DL = MI.getDebugLoc();
1408 unsigned Reg = MI.getOperand(0).getReg();
1409 const GlobalValue *GV =
1410 cast<GlobalValue>((*MI.memoperands_begin())->getValue());
1411 const TargetMachine &TM = MBB.getParent()->getTarget();
1412 unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
1413 const unsigned char MO_NC = AArch64II::MO_NC;
1415 if ((OpFlags & AArch64II::MO_GOT) != 0) {
1416 BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
1417 .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
1418 BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1419 .addReg(Reg, RegState::Kill)
1421 .addMemOperand(*MI.memoperands_begin());
1422 } else if (TM.getCodeModel() == CodeModel::Large) {
1423 BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
1424 .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
1425 BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1426 .addReg(Reg, RegState::Kill)
1427 .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
1428 BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1429 .addReg(Reg, RegState::Kill)
1430 .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
1431 BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1432 .addReg(Reg, RegState::Kill)
1433 .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
1434 BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1435 .addReg(Reg, RegState::Kill)
1437 .addMemOperand(*MI.memoperands_begin());
1439 BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
1440 .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
1441 unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
1442 BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1443 .addReg(Reg, RegState::Kill)
1444 .addGlobalAddress(GV, 0, LoFlags)
1445 .addMemOperand(*MI.memoperands_begin());
1453 /// Return true if this instruction has a shifted-register operand with a non-zero shift amount.
1454 bool AArch64InstrInfo::hasShiftedReg(const MachineInstr &MI) const {
1455 switch (MI.getOpcode()) {
1458 case AArch64::ADDSWrs:
1459 case AArch64::ADDSXrs:
1460 case AArch64::ADDWrs:
1461 case AArch64::ADDXrs:
1462 case AArch64::ANDSWrs:
1463 case AArch64::ANDSXrs:
1464 case AArch64::ANDWrs:
1465 case AArch64::ANDXrs:
1466 case AArch64::BICSWrs:
1467 case AArch64::BICSXrs:
1468 case AArch64::BICWrs:
1469 case AArch64::BICXrs:
1470 case AArch64::EONWrs:
1471 case AArch64::EONXrs:
1472 case AArch64::EORWrs:
1473 case AArch64::EORXrs:
1474 case AArch64::ORNWrs:
1475 case AArch64::ORNXrs:
1476 case AArch64::ORRWrs:
1477 case AArch64::ORRXrs:
1478 case AArch64::SUBSWrs:
1479 case AArch64::SUBSXrs:
1480 case AArch64::SUBWrs:
1481 case AArch64::SUBXrs:
1482 if (MI.getOperand(3).isImm()) {
1483 unsigned val = MI.getOperand(3).getImm();
1491 /// Return true if this instruction has an extended-register operand with a non-zero extend/shift immediate.
1492 bool AArch64InstrInfo::hasExtendedReg(const MachineInstr &MI) const {
1493 switch (MI.getOpcode()) {
1496 case AArch64::ADDSWrx:
1497 case AArch64::ADDSXrx:
1498 case AArch64::ADDSXrx64:
1499 case AArch64::ADDWrx:
1500 case AArch64::ADDXrx:
1501 case AArch64::ADDXrx64:
1502 case AArch64::SUBSWrx:
1503 case AArch64::SUBSXrx:
1504 case AArch64::SUBSXrx64:
1505 case AArch64::SUBWrx:
1506 case AArch64::SUBXrx:
1507 case AArch64::SUBXrx64:
1508 if (MI.getOperand(3).isImm()) {
1509 unsigned val = MI.getOperand(3).getImm();
1518 // Return true if this instruction simply sets its single destination register
1519 // to zero. This is equivalent to a register rename of the zero-register.
1520 bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) const {
1521 switch (MI.getOpcode()) {
1524 case AArch64::MOVZWi:
1525 case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
1526 if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
1527 assert(MI.getDesc().getNumOperands() == 3 &&
1528 MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
1532 case AArch64::ANDWri: // and Rd, Rzr, #imm
1533 return MI.getOperand(1).getReg() == AArch64::WZR;
1534 case AArch64::ANDXri:
1535 return MI.getOperand(1).getReg() == AArch64::XZR;
1536 case TargetOpcode::COPY:
1537 return MI.getOperand(1).getReg() == AArch64::WZR;
1542 // Return true if this instruction simply renames a general register without modifying bits.
1544 bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) const {
1545 switch (MI.getOpcode()) {
1548 case TargetOpcode::COPY: {
1549 // GPR32 copies will be lowered to ORRXrs
1550 unsigned DstReg = MI.getOperand(0).getReg();
1551 return (AArch64::GPR32RegClass.contains(DstReg) ||
1552 AArch64::GPR64RegClass.contains(DstReg));
1554 case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
1555 if (MI.getOperand(1).getReg() == AArch64::XZR) {
1556 assert(MI.getDesc().getNumOperands() == 4 &&
1557 MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
1561 case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
1562 if (MI.getOperand(2).getImm() == 0) {
1563 assert(MI.getDesc().getNumOperands() == 4 &&
1564 MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
1572 // Return true if this instruction simply renames a floating-point register without modifying bits.
1574 bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) const {
1575 switch (MI.getOpcode()) {
1578 case TargetOpcode::COPY: {
1579 // FPR64 copies will be lowered to ORR.16b
1580 unsigned DstReg = MI.getOperand(0).getReg();
1581 return (AArch64::FPR64RegClass.contains(DstReg) ||
1582 AArch64::FPR128RegClass.contains(DstReg));
1584 case AArch64::ORRv16i8:
1585 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
1586 assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
1587 "invalid ORRv16i8 operands");
1595 unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
1596 int &FrameIndex) const {
1597 switch (MI.getOpcode()) {
1600 case AArch64::LDRWui:
1601 case AArch64::LDRXui:
1602 case AArch64::LDRBui:
1603 case AArch64::LDRHui:
1604 case AArch64::LDRSui:
1605 case AArch64::LDRDui:
1606 case AArch64::LDRQui:
1607 if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
1608 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
1609 FrameIndex = MI.getOperand(1).getIndex();
1610 return MI.getOperand(0).getReg();
1618 unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
1619 int &FrameIndex) const {
1620 switch (MI.getOpcode()) {
1623 case AArch64::STRWui:
1624 case AArch64::STRXui:
1625 case AArch64::STRBui:
1626 case AArch64::STRHui:
1627 case AArch64::STRSui:
1628 case AArch64::STRDui:
1629 case AArch64::STRQui:
1630 if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
1631 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
1632 FrameIndex = MI.getOperand(1).getIndex();
1633 return MI.getOperand(0).getReg();
1640 /// Return true if this load/store scales or extends its register offset.
1641 /// This refers to scaling a dynamic index as opposed to scaled immediates.
1642 /// MI should be a memory op that allows scaled addressing.
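/// E.g. 'ldr x0, [x1, x2]' (UXTX, no shift) is not scaled or extended, while
/// 'ldr x0, [x1, x2, lsl #3]' and 'ldr x0, [x1, w2, sxtw]' both are.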
1643 bool AArch64InstrInfo::isScaledAddr(const MachineInstr &MI) const {
1644 switch (MI.getOpcode()) {
1647 case AArch64::LDRBBroW:
1648 case AArch64::LDRBroW:
1649 case AArch64::LDRDroW:
1650 case AArch64::LDRHHroW:
1651 case AArch64::LDRHroW:
1652 case AArch64::LDRQroW:
1653 case AArch64::LDRSBWroW:
1654 case AArch64::LDRSBXroW:
1655 case AArch64::LDRSHWroW:
1656 case AArch64::LDRSHXroW:
1657 case AArch64::LDRSWroW:
1658 case AArch64::LDRSroW:
1659 case AArch64::LDRWroW:
1660 case AArch64::LDRXroW:
1661 case AArch64::STRBBroW:
1662 case AArch64::STRBroW:
1663 case AArch64::STRDroW:
1664 case AArch64::STRHHroW:
1665 case AArch64::STRHroW:
1666 case AArch64::STRQroW:
1667 case AArch64::STRSroW:
1668 case AArch64::STRWroW:
1669 case AArch64::STRXroW:
1670 case AArch64::LDRBBroX:
1671 case AArch64::LDRBroX:
1672 case AArch64::LDRDroX:
1673 case AArch64::LDRHHroX:
1674 case AArch64::LDRHroX:
1675 case AArch64::LDRQroX:
1676 case AArch64::LDRSBWroX:
1677 case AArch64::LDRSBXroX:
1678 case AArch64::LDRSHWroX:
1679 case AArch64::LDRSHXroX:
1680 case AArch64::LDRSWroX:
1681 case AArch64::LDRSroX:
1682 case AArch64::LDRWroX:
1683 case AArch64::LDRXroX:
1684 case AArch64::STRBBroX:
1685 case AArch64::STRBroX:
1686 case AArch64::STRDroX:
1687 case AArch64::STRHHroX:
1688 case AArch64::STRHroX:
1689 case AArch64::STRQroX:
1690 case AArch64::STRSroX:
1691 case AArch64::STRWroX:
1692 case AArch64::STRXroX:
1694 unsigned Val = MI.getOperand(3).getImm();
1695 AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
1696 return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
1701 /// Check all MachineMemOperands for a hint to suppress pairing.
1702 bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) const {
1703 return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
1704 return MMO->getFlags() & MOSuppressPair;
1708 /// Set a flag on the first MachineMemOperand to suppress pairing.
1709 void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) const {
1710 if (MI.memoperands_empty())
1712 (*MI.memoperands_begin())->setFlags(MOSuppressPair);
1715 /// Check all MachineMemOperands for a hint that the load/store is strided.
1716 bool AArch64InstrInfo::isStridedAccess(const MachineInstr &MI) const {
1717 return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
1718 return MMO->getFlags() & MOStridedAccess;
1722 bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) const {
1726 case AArch64::STURSi:
1727 case AArch64::STURDi:
1728 case AArch64::STURQi:
1729 case AArch64::STURBBi:
1730 case AArch64::STURHHi:
1731 case AArch64::STURWi:
1732 case AArch64::STURXi:
1733 case AArch64::LDURSi:
1734 case AArch64::LDURDi:
1735 case AArch64::LDURQi:
1736 case AArch64::LDURWi:
1737 case AArch64::LDURXi:
1738 case AArch64::LDURSWi:
1739 case AArch64::LDURHHi:
1740 case AArch64::LDURBBi:
1741 case AArch64::LDURSBWi:
1742 case AArch64::LDURSHWi:
1747 bool AArch64InstrInfo::isUnscaledLdSt(MachineInstr &MI) const {
1748 return isUnscaledLdSt(MI.getOpcode());
1751 // Is this a candidate for ld/st merging or pairing? For example, we don't
1752 // touch volatiles or load/stores that have a hint to avoid pair formation.
1753 bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
1754 // If this is a volatile load/store, don't mess with it.
1755 if (MI.hasOrderedMemoryRef())
1758 // Make sure this is a reg+imm (as opposed to an address reloc).
1759 assert(MI.getOperand(1).isReg() && "Expected a reg operand.");
1760 if (!MI.getOperand(2).isImm())
1763 // Can't merge/pair if the instruction modifies the base register.
1764 // e.g., ldr x0, [x0]
1765 unsigned BaseReg = MI.getOperand(1).getReg();
1766 const TargetRegisterInfo *TRI = &getRegisterInfo();
1767 if (MI.modifiesRegister(BaseReg, TRI))
1770 // Check if this load/store has a hint to avoid pair formation.
1771 // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
1772 if (isLdStPairSuppressed(MI))
1775 // On some CPUs quad load/store pairs are slower than two single load/stores.
1776 if (Subtarget.isPaired128Slow()) {
1777 switch (MI.getOpcode()) {
1780 case AArch64::LDURQi:
1781 case AArch64::STURQi:
1782 case AArch64::LDRQui:
1783 case AArch64::STRQui:
1791 bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
1792 MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,
1793 const TargetRegisterInfo *TRI) const {
1795 return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);
1798 bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
1799 MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
1800 const TargetRegisterInfo *TRI) const {
1801 assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
1802 // Handle only loads/stores with base register followed by immediate offset.
1803 if (LdSt.getNumExplicitOperands() == 3) {
1804 // Non-paired instruction (e.g., ldr x1, [x0, #8]).
1805 if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
1807 } else if (LdSt.getNumExplicitOperands() == 4) {
1808 // Paired instruction (e.g., ldp x1, x2, [x0, #8]).
1809 if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isReg() ||
1810 !LdSt.getOperand(3).isImm())
1815 // Get the scaling factor for the instruction and set the width for the instruction.
1818 int64_t Dummy1, Dummy2;
1820 // If this returns false, then it's an instruction we don't want to handle.
1821 if (!getMemOpInfo(LdSt.getOpcode(), Scale, Width, Dummy1, Dummy2))
1824 // Compute the offset. Offset is calculated as the immediate operand
1825 // multiplied by the scaling factor. Unscaled instructions have a scaling factor of 1.
1827 if (LdSt.getNumExplicitOperands() == 3) {
1828 BaseReg = LdSt.getOperand(1).getReg();
1829 Offset = LdSt.getOperand(2).getImm() * Scale;
1831 assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
1832 BaseReg = LdSt.getOperand(2).getReg();
1833 Offset = LdSt.getOperand(3).getImm() * Scale;
1839 AArch64InstrInfo::getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const {
1840 assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
1841 MachineOperand &OfsOp = LdSt.getOperand(LdSt.getNumExplicitOperands()-1);
1842 assert(OfsOp.isImm() && "Offset operand wasn't immediate.");
1846 bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, unsigned &Scale,
1847 unsigned &Width, int64_t &MinOffset,
1848 int64_t &MaxOffset) const {
1850 // Not a memory operation or something we want to handle.
1853 MinOffset = MaxOffset = 0;
1855 case AArch64::STRWpost:
1856 case AArch64::LDRWpost:
1862 case AArch64::LDURQi:
1863 case AArch64::STURQi:
1869 case AArch64::LDURXi:
1870 case AArch64::LDURDi:
1871 case AArch64::STURXi:
1872 case AArch64::STURDi:
1878 case AArch64::LDURWi:
1879 case AArch64::LDURSi:
1880 case AArch64::LDURSWi:
1881 case AArch64::STURWi:
1882 case AArch64::STURSi:
1888 case AArch64::LDURHi:
1889 case AArch64::LDURHHi:
1890 case AArch64::LDURSHXi:
1891 case AArch64::LDURSHWi:
1892 case AArch64::STURHi:
1893 case AArch64::STURHHi:
1899 case AArch64::LDURBi:
1900 case AArch64::LDURBBi:
1901 case AArch64::LDURSBXi:
1902 case AArch64::LDURSBWi:
1903 case AArch64::STURBi:
1904 case AArch64::STURBBi:
1910 case AArch64::LDPQi:
1911 case AArch64::LDNPQi:
1912 case AArch64::STPQi:
1913 case AArch64::STNPQi:
1919 case AArch64::LDRQui:
1920 case AArch64::STRQui:
1925 case AArch64::LDPXi:
1926 case AArch64::LDPDi:
1927 case AArch64::LDNPXi:
1928 case AArch64::LDNPDi:
1929 case AArch64::STPXi:
1930 case AArch64::STPDi:
1931 case AArch64::STNPXi:
1932 case AArch64::STNPDi:
1938 case AArch64::LDRXui:
1939 case AArch64::LDRDui:
1940 case AArch64::STRXui:
1941 case AArch64::STRDui:
1946 case AArch64::LDPWi:
1947 case AArch64::LDPSi:
1948 case AArch64::LDNPWi:
1949 case AArch64::LDNPSi:
1950 case AArch64::STPWi:
1951 case AArch64::STPSi:
1952 case AArch64::STNPWi:
1953 case AArch64::STNPSi:
1959 case AArch64::LDRWui:
1960 case AArch64::LDRSui:
1961 case AArch64::LDRSWui:
1962 case AArch64::STRWui:
1963 case AArch64::STRSui:
1968 case AArch64::LDRHui:
1969 case AArch64::LDRHHui:
1970 case AArch64::STRHui:
1971 case AArch64::STRHHui:
1976 case AArch64::LDRBui:
1977 case AArch64::LDRBBui:
1978 case AArch64::STRBui:
1979 case AArch64::STRBBui:
1989 // Scale the unscaled offsets. Returns false if the unscaled offset can't be scaled.
1991 static bool scaleOffset(unsigned Opc, int64_t &Offset) {
1992 unsigned OffsetStride = 1;
1996 case AArch64::LDURQi:
1997 case AArch64::STURQi:
2000 case AArch64::LDURXi:
2001 case AArch64::LDURDi:
2002 case AArch64::STURXi:
2003 case AArch64::STURDi:
2006 case AArch64::LDURWi:
2007 case AArch64::LDURSi:
2008 case AArch64::LDURSWi:
2009 case AArch64::STURWi:
2010 case AArch64::STURSi:
2014 // If the byte-offset isn't a multiple of the stride, we can't scale this offset.
2016 if (Offset % OffsetStride != 0)
2019 // Convert the byte-offset used by unscaled instructions into an "element" offset used
2020 // by the scaled pair load/store instructions.
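// E.g. an LDURXi byte offset of 16 becomes the element offset 2 expected by
// LDPXi (stride 8).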
2021 Offset /= OffsetStride;
2025 static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
2026 if (FirstOpc == SecondOpc)
2028 // We can also pair sign-ext and zero-ext instructions.
2032 case AArch64::LDRWui:
2033 case AArch64::LDURWi:
2034 return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
2035 case AArch64::LDRSWui:
2036 case AArch64::LDURSWi:
2037 return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
2039 // These instructions can't be paired based on their opcodes.
2043 /// Detect opportunities for ldp/stp formation.
2045 /// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
2046 bool AArch64InstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
2047 MachineInstr &SecondLdSt,
2048 unsigned NumLoads) const {
2049 // Only cluster up to a single pair.
2053 if (!isPairableLdStInst(FirstLdSt) || !isPairableLdStInst(SecondLdSt))
2056 // Can we pair these instructions based on their opcodes?
2057 unsigned FirstOpc = FirstLdSt.getOpcode();
2058 unsigned SecondOpc = SecondLdSt.getOpcode();
2059 if (!canPairLdStOpc(FirstOpc, SecondOpc))
2062 // Can't merge volatiles or load/stores that have a hint to avoid pair
2063 // formation, for example.
2064 if (!isCandidateToMergeOrPair(FirstLdSt) ||
2065 !isCandidateToMergeOrPair(SecondLdSt))
2068 // isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
2069 int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
2070 if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
2073 int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
2074 if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
2077 // Pairwise instructions have a 7-bit signed offset field.
2078 if (Offset1 > 63 || Offset1 < -64)
2081 // The caller should already have ordered First/SecondLdSt by offset.
2082 assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
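// E.g. 'ldr x1, [x0, #8]' and 'ldr x2, [x0, #16]' have scaled offsets 1 and 2
// and can later be rewritten as 'ldp x1, x2, [x0, #8]'.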
2083 return Offset1 + 1 == Offset2;
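// Example: two LDRXui loads from the same base with immediates 4 and 5
// (byte offsets 32 and 40) are adjacent, fit the signed 7-bit pair offset,
// and are therefore reported as a candidate for LDP formation.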
2086 MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
2087 MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
2088 const MDNode *Expr, const DebugLoc &DL) const {
2089 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
2090 .addFrameIndex(FrameIx)
2098 static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
2099 unsigned Reg, unsigned SubIdx,
2101 const TargetRegisterInfo *TRI) {
2103 return MIB.addReg(Reg, State);
2105 if (TargetRegisterInfo::isPhysicalRegister(Reg))
2106 return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
2107 return MIB.addReg(Reg, State, SubIdx);
2110 static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
2112 // We really want the positive remainder mod 32 here, which happens to be
2113 // easily obtainable with a mask.
2114 return ((DestReg - SrcReg) & 0x1f) < NumRegs;
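// Worked example for NumRegs == 3: copying D1_D2_D3 into D0_D1_D2 gives
// (0 - 1) & 0x1f == 31, so a forward sub-register copy is safe; copying
// D0_D1_D2 into D1_D2_D3 gives (1 - 0) & 0x1f == 1 < 3, so the tuple copy
// below is emitted in reverse order to avoid reading clobbered registers.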
2117 void AArch64InstrInfo::copyPhysRegTuple(
2118 MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL,
2119 unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
2120 ArrayRef<unsigned> Indices) const {
2121 assert(Subtarget.hasNEON() &&
2122 "Unexpected register copy without NEON");
2123 const TargetRegisterInfo *TRI = &getRegisterInfo();
2124 uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
2125 uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
2126 unsigned NumRegs = Indices.size();
2128 int SubReg = 0, End = NumRegs, Incr = 1;
2129 if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
2130 SubReg = NumRegs - 1;
2135 for (; SubReg != End; SubReg += Incr) {
2136 const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
2137 AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
2138 AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
2139 AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
2143 void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
2144 MachineBasicBlock::iterator I,
2145 const DebugLoc &DL, unsigned DestReg,
2146 unsigned SrcReg, bool KillSrc) const {
2147 if (AArch64::GPR32spRegClass.contains(DestReg) &&
2148 (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
2149 const TargetRegisterInfo *TRI = &getRegisterInfo();
2151 if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
2152 // If either operand is WSP, expand to ADD #0.
2153 if (Subtarget.hasZeroCycleRegMove()) {
2154 // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
2155 unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
2156 &AArch64::GPR64spRegClass);
2157 unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
2158 &AArch64::GPR64spRegClass);
2159 // This instruction is reading and writing X registers. This may upset
2160 // the register scavenger and machine verifier, so we need to indicate
2161 // that we are reading an undefined value from SrcRegX, but a proper
2162 // value from SrcReg.
2163 BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
2164 .addReg(SrcRegX, RegState::Undef)
2166 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
2167 .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
2169 BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
2170 .addReg(SrcReg, getKillRegState(KillSrc))
2172 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2174 } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
2175 BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
2176 AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2178 if (Subtarget.hasZeroCycleRegMove()) {
2179 // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
2180 unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
2181 &AArch64::GPR64spRegClass);
2182 unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
2183 &AArch64::GPR64spRegClass);
2184 // This instruction is reading and writing X registers. This may upset
2185 // the register scavenger and machine verifier, so we need to indicate
2186 // that we are reading an undefined value from SrcRegX, but a proper
2187 // value from SrcReg.
2188 BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
2189 .addReg(AArch64::XZR)
2190 .addReg(SrcRegX, RegState::Undef)
2191 .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
2193 // Otherwise, expand to ORR WZR.
2194 BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
2195 .addReg(AArch64::WZR)
2196 .addReg(SrcReg, getKillRegState(KillSrc));
2202 if (AArch64::GPR64spRegClass.contains(DestReg) &&
2203 (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
2204 if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
2205 // If either operand is SP, expand to ADD #0.
2206 BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
2207 .addReg(SrcReg, getKillRegState(KillSrc))
2209 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2210 } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
2211 BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
2212 AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2214 // Otherwise, expand to ORR XZR.
2215 BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
2216 .addReg(AArch64::XZR)
2217 .addReg(SrcReg, getKillRegState(KillSrc));
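// For example, a plain X-register copy becomes "ORR X0, XZR, X1", while a
// copy involving SP must use "ADD X0, SP, #0" because SP cannot be encoded
// as an ORR shifted-register operand.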
2222 // Copy a DDDD register quad by copying the individual sub-registers.
2223 if (AArch64::DDDDRegClass.contains(DestReg) &&
2224 AArch64::DDDDRegClass.contains(SrcReg)) {
2225 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
2226 AArch64::dsub2, AArch64::dsub3 };
2227 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
2232 // Copy a DDD register triple by copying the individual sub-registers.
2233 if (AArch64::DDDRegClass.contains(DestReg) &&
2234 AArch64::DDDRegClass.contains(SrcReg)) {
2235 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
2237 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
2242 // Copy a DD register pair by copying the individual sub-registers.
2243 if (AArch64::DDRegClass.contains(DestReg) &&
2244 AArch64::DDRegClass.contains(SrcReg)) {
2245 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
2246 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
2251 // Copy a QQQQ register quad by copying the individual sub-registers.
2252 if (AArch64::QQQQRegClass.contains(DestReg) &&
2253 AArch64::QQQQRegClass.contains(SrcReg)) {
2254 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
2255 AArch64::qsub2, AArch64::qsub3 };
2256 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
2261 // Copy a QQQ register triple by copying the individual sub-registers.
2262 if (AArch64::QQQRegClass.contains(DestReg) &&
2263 AArch64::QQQRegClass.contains(SrcReg)) {
2264 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
2266 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
2271 // Copy a QQ register pair by copying the individual sub-registers.
2272 if (AArch64::QQRegClass.contains(DestReg) &&
2273 AArch64::QQRegClass.contains(SrcReg)) {
2274 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
2275 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
2280 if (AArch64::FPR128RegClass.contains(DestReg) &&
2281 AArch64::FPR128RegClass.contains(SrcReg)) {
2282 if (Subtarget.hasNEON()) {
2283 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2285 .addReg(SrcReg, getKillRegState(KillSrc));
2287 BuildMI(MBB, I, DL, get(AArch64::STRQpre))
2288 .addReg(AArch64::SP, RegState::Define)
2289 .addReg(SrcReg, getKillRegState(KillSrc))
2290 .addReg(AArch64::SP)
2292 BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
2293 .addReg(AArch64::SP, RegState::Define)
2294 .addReg(DestReg, RegState::Define)
2295 .addReg(AArch64::SP)
2301 if (AArch64::FPR64RegClass.contains(DestReg) &&
2302 AArch64::FPR64RegClass.contains(SrcReg)) {
2303 if (Subtarget.hasNEON()) {
2304 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
2305 &AArch64::FPR128RegClass);
2306 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
2307 &AArch64::FPR128RegClass);
2308 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2310 .addReg(SrcReg, getKillRegState(KillSrc));
2312 BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
2313 .addReg(SrcReg, getKillRegState(KillSrc));
2318 if (AArch64::FPR32RegClass.contains(DestReg) &&
2319 AArch64::FPR32RegClass.contains(SrcReg)) {
2320 if (Subtarget.hasNEON()) {
2321 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
2322 &AArch64::FPR128RegClass);
2323 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
2324 &AArch64::FPR128RegClass);
2325 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2327 .addReg(SrcReg, getKillRegState(KillSrc));
2329 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2330 .addReg(SrcReg, getKillRegState(KillSrc));
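// A scalar FPR32 copy is either a single "FMOV S0, S1", or, when NEON is
// available, an "ORR V0.16B, V1.16B, V1.16B" on the containing Q registers,
// which is the canonical vector register move.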
2335 if (AArch64::FPR16RegClass.contains(DestReg) &&
2336 AArch64::FPR16RegClass.contains(SrcReg)) {
2337 if (Subtarget.hasNEON()) {
2338 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
2339 &AArch64::FPR128RegClass);
2340 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
2341 &AArch64::FPR128RegClass);
2342 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2344 .addReg(SrcReg, getKillRegState(KillSrc));
2346 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
2347 &AArch64::FPR32RegClass);
2348 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
2349 &AArch64::FPR32RegClass);
2350 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2351 .addReg(SrcReg, getKillRegState(KillSrc));
2356 if (AArch64::FPR8RegClass.contains(DestReg) &&
2357 AArch64::FPR8RegClass.contains(SrcReg)) {
2358 if (Subtarget.hasNEON()) {
2359 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
2360 &AArch64::FPR128RegClass);
2361 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
2362 &AArch64::FPR128RegClass);
2363 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2365 .addReg(SrcReg, getKillRegState(KillSrc));
2367 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
2368 &AArch64::FPR32RegClass);
2369 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
2370 &AArch64::FPR32RegClass);
2371 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2372 .addReg(SrcReg, getKillRegState(KillSrc));
2377 // Copies between GPR64 and FPR64.
2378 if (AArch64::FPR64RegClass.contains(DestReg) &&
2379 AArch64::GPR64RegClass.contains(SrcReg)) {
2380 BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
2381 .addReg(SrcReg, getKillRegState(KillSrc));
2384 if (AArch64::GPR64RegClass.contains(DestReg) &&
2385 AArch64::FPR64RegClass.contains(SrcReg)) {
2386 BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
2387 .addReg(SrcReg, getKillRegState(KillSrc));
2390 // Copies between GPR32 and FPR32.
2391 if (AArch64::FPR32RegClass.contains(DestReg) &&
2392 AArch64::GPR32RegClass.contains(SrcReg)) {
2393 BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
2394 .addReg(SrcReg, getKillRegState(KillSrc));
2397 if (AArch64::GPR32RegClass.contains(DestReg) &&
2398 AArch64::FPR32RegClass.contains(SrcReg)) {
2399 BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
2400 .addReg(SrcReg, getKillRegState(KillSrc));
2404 if (DestReg == AArch64::NZCV) {
2405 assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
2406 BuildMI(MBB, I, DL, get(AArch64::MSR))
2407 .addImm(AArch64SysReg::NZCV)
2408 .addReg(SrcReg, getKillRegState(KillSrc))
2409 .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
2413 if (SrcReg == AArch64::NZCV) {
2414 assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
2415 BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
2416 .addImm(AArch64SysReg::NZCV)
2417 .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
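// NZCV copies are modelled as system-register accesses: writing the flags
// from a GPR becomes "MSR NZCV, Xn" and reading them back becomes
// "MRS Xd, NZCV".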
2421 llvm_unreachable("unimplemented reg-to-reg copy");
2424 void AArch64InstrInfo::storeRegToStackSlot(
2425 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
2426 bool isKill, int FI, const TargetRegisterClass *RC,
2427 const TargetRegisterInfo *TRI) const {
2429 if (MBBI != MBB.end())
2430 DL = MBBI->getDebugLoc();
2431 MachineFunction &MF = *MBB.getParent();
2432 MachineFrameInfo &MFI = MF.getFrameInfo();
2433 unsigned Align = MFI.getObjectAlignment(FI);
2435 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
2436 MachineMemOperand *MMO = MF.getMachineMemOperand(
2437 PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
2440 switch (TRI->getSpillSize(*RC)) {
2442 if (AArch64::FPR8RegClass.hasSubClassEq(RC))
2443 Opc = AArch64::STRBui;
2446 if (AArch64::FPR16RegClass.hasSubClassEq(RC))
2447 Opc = AArch64::STRHui;
2450 if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
2451 Opc = AArch64::STRWui;
2452 if (TargetRegisterInfo::isVirtualRegister(SrcReg))
2453 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
2455 assert(SrcReg != AArch64::WSP);
2456 } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
2457 Opc = AArch64::STRSui;
2460 if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
2461 Opc = AArch64::STRXui;
2462 if (TargetRegisterInfo::isVirtualRegister(SrcReg))
2463 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
2465 assert(SrcReg != AArch64::SP);
2466 } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
2467 Opc = AArch64::STRDui;
2470 if (AArch64::FPR128RegClass.hasSubClassEq(RC))
2471 Opc = AArch64::STRQui;
2472 else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
2473 assert(Subtarget.hasNEON() &&
2474 "Unexpected register store without NEON");
2475 Opc = AArch64::ST1Twov1d;
2480 if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
2481 assert(Subtarget.hasNEON() &&
2482 "Unexpected register store without NEON");
2483 Opc = AArch64::ST1Threev1d;
2488 if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
2489 assert(Subtarget.hasNEON() &&
2490 "Unexpected register store without NEON");
2491 Opc = AArch64::ST1Fourv1d;
2493 } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
2494 assert(Subtarget.hasNEON() &&
2495 "Unexpected register store without NEON");
2496 Opc = AArch64::ST1Twov2d;
2501 if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
2502 assert(Subtarget.hasNEON() &&
2503 "Unexpected register store without NEON");
2504 Opc = AArch64::ST1Threev2d;
2509 if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
2510 assert(Subtarget.hasNEON() &&
2511 "Unexpected register store without NEON");
2512 Opc = AArch64::ST1Fourv2d;
2517 assert(Opc && "Unknown register class");
2519 const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
2520 .addReg(SrcReg, getKillRegState(isKill))
2525 MI.addMemOperand(MMO);
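// Illustrative result: spilling a killed GPR64 value to frame index 0 emits
// roughly "STRXui %reg<kill>, <fi#0>, 0" with a MachineMemOperand describing
// the 8-byte store and the slot's alignment.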
2528 void AArch64InstrInfo::loadRegFromStackSlot(
2529 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
2530 int FI, const TargetRegisterClass *RC,
2531 const TargetRegisterInfo *TRI) const {
2533 if (MBBI != MBB.end())
2534 DL = MBBI->getDebugLoc();
2535 MachineFunction &MF = *MBB.getParent();
2536 MachineFrameInfo &MFI = MF.getFrameInfo();
2537 unsigned Align = MFI.getObjectAlignment(FI);
2538 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
2539 MachineMemOperand *MMO = MF.getMachineMemOperand(
2540 PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);
2544 switch (TRI->getSpillSize(*RC)) {
2546 if (AArch64::FPR8RegClass.hasSubClassEq(RC))
2547 Opc = AArch64::LDRBui;
2550 if (AArch64::FPR16RegClass.hasSubClassEq(RC))
2551 Opc = AArch64::LDRHui;
2554 if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
2555 Opc = AArch64::LDRWui;
2556 if (TargetRegisterInfo::isVirtualRegister(DestReg))
2557 MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
2559 assert(DestReg != AArch64::WSP);
2560 } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
2561 Opc = AArch64::LDRSui;
2564 if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
2565 Opc = AArch64::LDRXui;
2566 if (TargetRegisterInfo::isVirtualRegister(DestReg))
2567 MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
2569 assert(DestReg != AArch64::SP);
2570 } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
2571 Opc = AArch64::LDRDui;
2574 if (AArch64::FPR128RegClass.hasSubClassEq(RC))
2575 Opc = AArch64::LDRQui;
2576 else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
2577 assert(Subtarget.hasNEON() &&
2578 "Unexpected register load without NEON");
2579 Opc = AArch64::LD1Twov1d;
2584 if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
2585 assert(Subtarget.hasNEON() &&
2586 "Unexpected register load without NEON");
2587 Opc = AArch64::LD1Threev1d;
2592 if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
2593 assert(Subtarget.hasNEON() &&
2594 "Unexpected register load without NEON");
2595 Opc = AArch64::LD1Fourv1d;
2597 } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
2598 assert(Subtarget.hasNEON() &&
2599 "Unexpected register load without NEON");
2600 Opc = AArch64::LD1Twov2d;
2605 if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
2606 assert(Subtarget.hasNEON() &&
2607 "Unexpected register load without NEON");
2608 Opc = AArch64::LD1Threev2d;
2613 if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
2614 assert(Subtarget.hasNEON() &&
2615 "Unexpected register load without NEON");
2616 Opc = AArch64::LD1Fourv2d;
2621 assert(Opc && "Unknown register class");
2623 const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
2624 .addReg(DestReg, getDefRegState(true))
2628 MI.addMemOperand(MMO);
2631 void llvm::emitFrameOffset(MachineBasicBlock &MBB,
2632 MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
2633 unsigned DestReg, unsigned SrcReg, int Offset,
2634 const TargetInstrInfo *TII,
2635 MachineInstr::MIFlag Flag, bool SetNZCV) {
2636 if (DestReg == SrcReg && Offset == 0)
2639 assert((DestReg != AArch64::SP || Offset % 16 == 0) &&
2640 "SP increment/decrement not 16-byte aligned");
2642 bool isSub = Offset < 0;
2646 // FIXME: If the offset won't fit in 24-bits, compute the offset into a
2647 // scratch register. If DestReg is a virtual register, use it as the
2648 // scratch register; otherwise, create a new virtual register (to be
2649 // replaced by the scavenger at the end of PEI). That case can be optimized
2650 // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
2651 // register can be loaded with offset%8 and the add/sub can use an extending
2652 // instruction with LSL#3.
2653 // Currently the function handles any offsets but generates a poor sequence of code.
2655 // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");
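// Rough example of the chunked expansion below: adding 0x123450 to a
// register becomes "ADD Xd, Xn, #0x123, lsl #12" followed by
// "ADD Xd, Xd, #0x450", since each ADD/SUB immediate encodes only 12 bits
// plus an optional 12-bit left shift.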
2659 Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
2661 Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
2662 const unsigned MaxEncoding = 0xfff;
2663 const unsigned ShiftSize = 12;
2664 const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
2665 while (((unsigned)Offset) >= (1 << ShiftSize)) {
2667 if (((unsigned)Offset) > MaxEncodableValue) {
2668 ThisVal = MaxEncodableValue;
2670 ThisVal = Offset & MaxEncodableValue;
2672 assert((ThisVal >> ShiftSize) <= MaxEncoding &&
2673 "Encoding cannot handle value that big");
2674 BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
2676 .addImm(ThisVal >> ShiftSize)
2677 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
2685 BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
2688 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
2692 MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
2693 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
2694 MachineBasicBlock::iterator InsertPt, int FrameIndex,
2695 LiveIntervals *LIS) const {
2696 // This is a bit of a hack. Consider this instruction:
2698 // %vreg0<def> = COPY %SP; GPR64all:%vreg0
2700 // We explicitly chose GPR64all for the virtual register so such a copy might
2701 // be eliminated by RegisterCoalescer. However, that may not be possible, and
2702 // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
2703 // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
2705 // To prevent that, we are going to constrain the %vreg0 register class here.
2707 // <rdar://problem/11522048>
2709 if (MI.isFullCopy()) {
2710 unsigned DstReg = MI.getOperand(0).getReg();
2711 unsigned SrcReg = MI.getOperand(1).getReg();
2712 if (SrcReg == AArch64::SP &&
2713 TargetRegisterInfo::isVirtualRegister(DstReg)) {
2714 MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
2717 if (DstReg == AArch64::SP &&
2718 TargetRegisterInfo::isVirtualRegister(SrcReg)) {
2719 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
2724 // Handle the case where a copy is being spilled or filled but the source
2725 // and destination register class don't match. For example:
2727 // %vreg0<def> = COPY %XZR; GPR64common:%vreg0
2729 // In this case we can still safely fold away the COPY and generate the
2730 // following spill code:
2732 // STRXui %XZR, <fi#0>
2734 // This also eliminates spilled cross register class COPYs (e.g. between x and
2735 // d regs) of the same size. For example:
2737 // %vreg0<def> = COPY %vreg1; GPR64:%vreg0, FPR64:%vreg1
2739 // will be filled as
2741 // LDRDui %vreg0, fi<#0>
2743 // instead of
2745 // LDRXui %vregTemp, fi<#0>
2746 // %vreg0 = FMOV %vregTemp
2748 if (MI.isCopy() && Ops.size() == 1 &&
2749 // Make sure we're only folding the explicit COPY defs/uses.
2750 (Ops[0] == 0 || Ops[0] == 1)) {
2751 bool IsSpill = Ops[0] == 0;
2752 bool IsFill = !IsSpill;
2753 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
2754 const MachineRegisterInfo &MRI = MF.getRegInfo();
2755 MachineBasicBlock &MBB = *MI.getParent();
2756 const MachineOperand &DstMO = MI.getOperand(0);
2757 const MachineOperand &SrcMO = MI.getOperand(1);
2758 unsigned DstReg = DstMO.getReg();
2759 unsigned SrcReg = SrcMO.getReg();
2760 // This is slightly expensive to compute for physical regs since
2761 // getMinimalPhysRegClass is slow.
2762 auto getRegClass = [&](unsigned Reg) {
2763 return TargetRegisterInfo::isVirtualRegister(Reg)
2764 ? MRI.getRegClass(Reg)
2765 : TRI.getMinimalPhysRegClass(Reg);
2768 if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
2769 assert(TRI.getRegSizeInBits(*getRegClass(DstReg)) ==
2770 TRI.getRegSizeInBits(*getRegClass(SrcReg)) &&
2771 "Mismatched register size in non subreg COPY");
2773 storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
2774 getRegClass(SrcReg), &TRI);
2776 loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex,
2777 getRegClass(DstReg), &TRI);
2778 return &*--InsertPt;
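// For instance, spilling "%vreg0 = COPY %XZR" where both sides are 64 bits
// wide folds the COPY away and stores the zero register directly as
// "STRXui %XZR, <fi#0>".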
2781 // Handle cases like spilling def of:
2783 // %vreg0:sub_32<def,read-undef> = COPY %WZR; GPR64common:%vreg0
2785 // where the physical register source can be widened and stored to the full
2786 // virtual reg destination stack slot, in this case producing:
2788 // STRXui %XZR, <fi#0>
2790 if (IsSpill && DstMO.isUndef() &&
2791 TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
2792 assert(SrcMO.getSubReg() == 0 &&
2793 "Unexpected subreg on physical register");
2794 const TargetRegisterClass *SpillRC;
2795 unsigned SpillSubreg;
2796 switch (DstMO.getSubReg()) {
2800 case AArch64::sub_32:
2802 if (AArch64::GPR32RegClass.contains(SrcReg)) {
2803 SpillRC = &AArch64::GPR64RegClass;
2804 SpillSubreg = AArch64::sub_32;
2805 } else if (AArch64::FPR32RegClass.contains(SrcReg)) {
2806 SpillRC = &AArch64::FPR64RegClass;
2807 SpillSubreg = AArch64::ssub;
2812 if (AArch64::FPR64RegClass.contains(SrcReg)) {
2813 SpillRC = &AArch64::FPR128RegClass;
2814 SpillSubreg = AArch64::dsub;
2821 if (unsigned WidenedSrcReg =
2822 TRI.getMatchingSuperReg(SrcReg, SpillSubreg, SpillRC)) {
2823 storeRegToStackSlot(MBB, InsertPt, WidenedSrcReg, SrcMO.isKill(),
2824 FrameIndex, SpillRC, &TRI);
2825 return &*--InsertPt;
2829 // Handle cases like filling use of:
2831 // %vreg0:sub_32<def,read-undef> = COPY %vreg1; GPR64:%vreg0, GPR32:%vreg1
2833 // where we can load the full virtual reg source stack slot into the subreg
2834 // destination, in this case producing:
2836 // LDRWui %vreg0:sub_32<def,read-undef>, <fi#0>
2838 if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
2839 const TargetRegisterClass *FillRC;
2840 switch (DstMO.getSubReg()) {
2844 case AArch64::sub_32:
2845 FillRC = &AArch64::GPR32RegClass;
2848 FillRC = &AArch64::FPR32RegClass;
2851 FillRC = &AArch64::FPR64RegClass;
2856 assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) ==
2857 TRI.getRegSizeInBits(*FillRC) &&
2858 "Mismatched regclass size on folded subreg COPY");
2859 loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI);
2860 MachineInstr &LoadMI = *--InsertPt;
2861 MachineOperand &LoadDst = LoadMI.getOperand(0);
2862 assert(LoadDst.getSubReg() == 0 && "unexpected subreg on fill load");
2863 LoadDst.setSubReg(DstMO.getSubReg());
2864 LoadDst.setIsUndef();
2874 int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
2875 bool *OutUseUnscaledOp,
2876 unsigned *OutUnscaledOp,
2877 int *EmittableOffset) {
2879 bool IsSigned = false;
2880 // The ImmIdx should be changed case by case if it is not 2.
2881 unsigned ImmIdx = 2;
2882 unsigned UnscaledOp = 0;
2883 // Set output values in case of early exit.
2884 if (EmittableOffset)
2885 *EmittableOffset = 0;
2886 if (OutUseUnscaledOp)
2887 *OutUseUnscaledOp = false;
2890 switch (MI.getOpcode()) {
2892 llvm_unreachable("unhandled opcode in rewriteAArch64FrameIndex");
2893 // Vector spills/fills can't take an immediate offset.
2894 case AArch64::LD1Twov2d:
2895 case AArch64::LD1Threev2d:
2896 case AArch64::LD1Fourv2d:
2897 case AArch64::LD1Twov1d:
2898 case AArch64::LD1Threev1d:
2899 case AArch64::LD1Fourv1d:
2900 case AArch64::ST1Twov2d:
2901 case AArch64::ST1Threev2d:
2902 case AArch64::ST1Fourv2d:
2903 case AArch64::ST1Twov1d:
2904 case AArch64::ST1Threev1d:
2905 case AArch64::ST1Fourv1d:
2906 return AArch64FrameOffsetCannotUpdate;
2907 case AArch64::PRFMui:
2909 UnscaledOp = AArch64::PRFUMi;
2911 case AArch64::LDRXui:
2913 UnscaledOp = AArch64::LDURXi;
2915 case AArch64::LDRWui:
2917 UnscaledOp = AArch64::LDURWi;
2919 case AArch64::LDRBui:
2921 UnscaledOp = AArch64::LDURBi;
2923 case AArch64::LDRHui:
2925 UnscaledOp = AArch64::LDURHi;
2927 case AArch64::LDRSui:
2929 UnscaledOp = AArch64::LDURSi;
2931 case AArch64::LDRDui:
2933 UnscaledOp = AArch64::LDURDi;
2935 case AArch64::LDRQui:
2937 UnscaledOp = AArch64::LDURQi;
2939 case AArch64::LDRBBui:
2941 UnscaledOp = AArch64::LDURBBi;
2943 case AArch64::LDRHHui:
2945 UnscaledOp = AArch64::LDURHHi;
2947 case AArch64::LDRSBXui:
2949 UnscaledOp = AArch64::LDURSBXi;
2951 case AArch64::LDRSBWui:
2953 UnscaledOp = AArch64::LDURSBWi;
2955 case AArch64::LDRSHXui:
2957 UnscaledOp = AArch64::LDURSHXi;
2959 case AArch64::LDRSHWui:
2961 UnscaledOp = AArch64::LDURSHWi;
2963 case AArch64::LDRSWui:
2965 UnscaledOp = AArch64::LDURSWi;
2968 case AArch64::STRXui:
2970 UnscaledOp = AArch64::STURXi;
2972 case AArch64::STRWui:
2974 UnscaledOp = AArch64::STURWi;
2976 case AArch64::STRBui:
2978 UnscaledOp = AArch64::STURBi;
2980 case AArch64::STRHui:
2982 UnscaledOp = AArch64::STURHi;
2984 case AArch64::STRSui:
2986 UnscaledOp = AArch64::STURSi;
2988 case AArch64::STRDui:
2990 UnscaledOp = AArch64::STURDi;
2992 case AArch64::STRQui:
2994 UnscaledOp = AArch64::STURQi;
2996 case AArch64::STRBBui:
2998 UnscaledOp = AArch64::STURBBi;
3000 case AArch64::STRHHui:
3002 UnscaledOp = AArch64::STURHHi;
3005 case AArch64::LDPXi:
3006 case AArch64::LDPDi:
3007 case AArch64::STPXi:
3008 case AArch64::STPDi:
3009 case AArch64::LDNPXi:
3010 case AArch64::LDNPDi:
3011 case AArch64::STNPXi:
3012 case AArch64::STNPDi:
3017 case AArch64::LDPQi:
3018 case AArch64::STPQi:
3019 case AArch64::LDNPQi:
3020 case AArch64::STNPQi:
3025 case AArch64::LDPWi:
3026 case AArch64::LDPSi:
3027 case AArch64::STPWi:
3028 case AArch64::STPSi:
3029 case AArch64::LDNPWi:
3030 case AArch64::LDNPSi:
3031 case AArch64::STNPWi:
3032 case AArch64::STNPSi:
3038 case AArch64::LDURXi:
3039 case AArch64::LDURWi:
3040 case AArch64::LDURBi:
3041 case AArch64::LDURHi:
3042 case AArch64::LDURSi:
3043 case AArch64::LDURDi:
3044 case AArch64::LDURQi:
3045 case AArch64::LDURHHi:
3046 case AArch64::LDURBBi:
3047 case AArch64::LDURSBXi:
3048 case AArch64::LDURSBWi:
3049 case AArch64::LDURSHXi:
3050 case AArch64::LDURSHWi:
3051 case AArch64::LDURSWi:
3052 case AArch64::STURXi:
3053 case AArch64::STURWi:
3054 case AArch64::STURBi:
3055 case AArch64::STURHi:
3056 case AArch64::STURSi:
3057 case AArch64::STURDi:
3058 case AArch64::STURQi:
3059 case AArch64::STURBBi:
3060 case AArch64::STURHHi:
3065 Offset += MI.getOperand(ImmIdx).getImm() * Scale;
3067 bool useUnscaledOp = false;
3068 // If the offset doesn't match the scale, we rewrite the instruction to
3069 // use the unscaled instruction instead. Likewise, if we have a negative
3070 // offset (and have an unscaled op to use).
3071 if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
3072 useUnscaledOp = true;
3074 // Use an unscaled addressing mode if the instruction has a negative offset
3075 // (or if the instruction is already using an unscaled addressing mode).
3078 // ldp/stp instructions.
3081 } else if (UnscaledOp == 0 || useUnscaledOp) {
3091 // Attempt to fold address computation.
3092 int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
3093 int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
3094 if (Offset >= MinOff && Offset <= MaxOff) {
3095 if (EmittableOffset)
3096 *EmittableOffset = Offset;
3099 int NewOff = Offset < 0 ? MinOff : MaxOff;
3100 if (EmittableOffset)
3101 *EmittableOffset = NewOff;
3102 Offset = (Offset - NewOff) * Scale;
3104 if (OutUseUnscaledOp)
3105 *OutUseUnscaledOp = useUnscaledOp;
3107 *OutUnscaledOp = UnscaledOp;
3108 return AArch64FrameOffsetCanUpdate |
3109 (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
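// Example (assuming the usual 8-byte scale for LDRXui): a frame offset of 24
// is legal as LDRXui with immediate 3, whereas an offset of 20 or -8 forces
// a rewrite to the unscaled LDURXi, whose signed 9-bit immediate takes the
// byte offset directly.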
3112 bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
3113 unsigned FrameReg, int &Offset,
3114 const AArch64InstrInfo *TII) {
3115 unsigned Opcode = MI.getOpcode();
3116 unsigned ImmIdx = FrameRegIdx + 1;
3118 if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
3119 Offset += MI.getOperand(ImmIdx).getImm();
3120 emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
3121 MI.getOperand(0).getReg(), FrameReg, Offset, TII,
3122 MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
3123 MI.eraseFromParent();
3129 unsigned UnscaledOp;
3131 int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
3132 &UnscaledOp, &NewOffset);
3133 if (Status & AArch64FrameOffsetCanUpdate) {
3134 if (Status & AArch64FrameOffsetIsLegal)
3135 // Replace the FrameIndex with FrameReg.
3136 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
3138 MI.setDesc(TII->get(UnscaledOp));
3140 MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
3147 void AArch64InstrInfo::getNoop(MCInst &NopInst) const {
3148 NopInst.setOpcode(AArch64::HINT);
3149 NopInst.addOperand(MCOperand::createImm(0));
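// HINT #0 is the architectural encoding of NOP, so this emits a plain "nop".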
3152 // AArch64 supports MachineCombiner.
3153 bool AArch64InstrInfo::useMachineCombiner() const {
3158 // True when Opc sets flags
3159 static bool isCombineInstrSettingFlag(unsigned Opc) {
3161 case AArch64::ADDSWrr:
3162 case AArch64::ADDSWri:
3163 case AArch64::ADDSXrr:
3164 case AArch64::ADDSXri:
3165 case AArch64::SUBSWrr:
3166 case AArch64::SUBSXrr:
3167 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3168 case AArch64::SUBSWri:
3169 case AArch64::SUBSXri:
3177 // 32b Opcodes that can be combined with a MUL
3178 static bool isCombineInstrCandidate32(unsigned Opc) {
3180 case AArch64::ADDWrr:
3181 case AArch64::ADDWri:
3182 case AArch64::SUBWrr:
3183 case AArch64::ADDSWrr:
3184 case AArch64::ADDSWri:
3185 case AArch64::SUBSWrr:
3186 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3187 case AArch64::SUBWri:
3188 case AArch64::SUBSWri:
3196 // 64b Opcodes that can be combined with a MUL
3197 static bool isCombineInstrCandidate64(unsigned Opc) {
3199 case AArch64::ADDXrr:
3200 case AArch64::ADDXri:
3201 case AArch64::SUBXrr:
3202 case AArch64::ADDSXrr:
3203 case AArch64::ADDSXri:
3204 case AArch64::SUBSXrr:
3205 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3206 case AArch64::SUBXri:
3207 case AArch64::SUBSXri:
3215 // FP Opcodes that can be combined with a FMUL
3216 static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {
3217 switch (Inst.getOpcode()) {
3220 case AArch64::FADDSrr:
3221 case AArch64::FADDDrr:
3222 case AArch64::FADDv2f32:
3223 case AArch64::FADDv2f64:
3224 case AArch64::FADDv4f32:
3225 case AArch64::FSUBSrr:
3226 case AArch64::FSUBDrr:
3227 case AArch64::FSUBv2f32:
3228 case AArch64::FSUBv2f64:
3229 case AArch64::FSUBv4f32:
3230 TargetOptions Options = Inst.getParent()->getParent()->getTarget().Options;
3231 return (Options.UnsafeFPMath ||
3232 Options.AllowFPOpFusion == FPOpFusion::Fast);
3237 // Opcodes that can be combined with a MUL
3238 static bool isCombineInstrCandidate(unsigned Opc) {
3239 return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
3243 // Utility routine that checks if \p MO is defined by a \p CombineOpc
3244 // instruction in the basic block \p MBB
3245 static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
3246 unsigned CombineOpc, unsigned ZeroReg = 0,
3247 bool CheckZeroReg = false) {
3248 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3249 MachineInstr *MI = nullptr;
3251 if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
3252 MI = MRI.getUniqueVRegDef(MO.getReg());
3253 // And it needs to be in the trace (otherwise, it won't have a depth).
3254 if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
3256 // Must only be used by the user we combine with.
3257 if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
3261 assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
3262 MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
3263 MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
3264 // The third input reg must be zero.
3265 if (MI->getOperand(3).getReg() != ZeroReg)
3273 // Is \p MO defined by an integer multiply and can be combined?
3274 static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
3275 unsigned MulOpc, unsigned ZeroReg) {
3276 return canCombine(MBB, MO, MulOpc, ZeroReg, true);
3280 // Is \p MO defined by a floating-point multiply and can be combined?
3281 static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
3283 return canCombine(MBB, MO, MulOpc);
3286 // TODO: There are many more machine instruction opcodes to match:
3287 // 1. Other data types (integer, vectors)
3288 // 2. Other math / logic operations (xor, or)
3289 // 3. Other forms of the same operation (intrinsics and other variants)
3290 bool AArch64InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
3291 switch (Inst.getOpcode()) {
3292 case AArch64::FADDDrr:
3293 case AArch64::FADDSrr:
3294 case AArch64::FADDv2f32:
3295 case AArch64::FADDv2f64:
3296 case AArch64::FADDv4f32:
3297 case AArch64::FMULDrr:
3298 case AArch64::FMULSrr:
3299 case AArch64::FMULX32:
3300 case AArch64::FMULX64:
3301 case AArch64::FMULXv2f32:
3302 case AArch64::FMULXv2f64:
3303 case AArch64::FMULXv4f32:
3304 case AArch64::FMULv2f32:
3305 case AArch64::FMULv2f64:
3306 case AArch64::FMULv4f32:
3307 return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
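// Reassociation is gated on unsafe-FP-math because FP addition and
// multiplication are not associative in general; e.g.
// (1e20 + -1e20) + 1.0 == 1.0 but 1e20 + (-1e20 + 1.0) == 0.0.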
3313 /// Find instructions that can be turned into madd.
3314 static bool getMaddPatterns(MachineInstr &Root,
3315 SmallVectorImpl<MachineCombinerPattern> &Patterns) {
3316 unsigned Opc = Root.getOpcode();
3317 MachineBasicBlock &MBB = *Root.getParent();
3320 if (!isCombineInstrCandidate(Opc))
3322 if (isCombineInstrSettingFlag(Opc)) {
3323 int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
3324 // When NZCV is live, bail out.
3327 unsigned NewOpc = convertToNonFlagSettingOpc(Root);
3328 // When the opcode can't change, bail out.
3329 // CHECKME: do we miss any cases for opcode conversion?
3338 case AArch64::ADDWrr:
3339 assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
3340 "ADDWrr does not have register operands");
3341 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3343 Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
3346 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
3348 Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
3352 case AArch64::ADDXrr:
3353 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3355 Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
3358 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
3360 Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
3364 case AArch64::SUBWrr:
3365 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3367 Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
3370 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
3372 Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
3376 case AArch64::SUBXrr:
3377 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3379 Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
3382 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
3384 Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
3388 case AArch64::ADDWri:
3389 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3391 Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
3395 case AArch64::ADDXri:
3396 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3398 Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
3402 case AArch64::SUBWri:
3403 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3405 Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
3409 case AArch64::SUBXri:
3410 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3412 Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
3419 /// Floating-Point Support
3421 /// Find instructions that can be turned into a fused multiply-add (fmadd).
3422 static bool getFMAPatterns(MachineInstr &Root,
3423 SmallVectorImpl<MachineCombinerPattern> &Patterns) {
3425 if (!isCombineInstrCandidateFP(Root))
3428 MachineBasicBlock &MBB = *Root.getParent();
3431 switch (Root.getOpcode()) {
3433 assert(false && "Unsupported FP instruction in combiner\n");
3435 case AArch64::FADDSrr:
3436 assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
3437 "FADDWrr does not have register operands");
3438 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
3439 Patterns.push_back(MachineCombinerPattern::FMULADDS_OP1);
3441 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3442 AArch64::FMULv1i32_indexed)) {
3443 Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP1);
3446 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
3447 Patterns.push_back(MachineCombinerPattern::FMULADDS_OP2);
3449 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3450 AArch64::FMULv1i32_indexed)) {
3451 Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP2);
3455 case AArch64::FADDDrr:
3456 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
3457 Patterns.push_back(MachineCombinerPattern::FMULADDD_OP1);
3459 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3460 AArch64::FMULv1i64_indexed)) {
3461 Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP1);
3464 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
3465 Patterns.push_back(MachineCombinerPattern::FMULADDD_OP2);
3467 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3468 AArch64::FMULv1i64_indexed)) {
3469 Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP2);
3473 case AArch64::FADDv2f32:
3474 if (canCombineWithFMUL(MBB, Root.getOperand(1),
3475 AArch64::FMULv2i32_indexed)) {
3476 Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP1);
3478 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3479 AArch64::FMULv2f32)) {
3480 Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP1);
3483 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3484 AArch64::FMULv2i32_indexed)) {
3485 Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP2);
3487 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3488 AArch64::FMULv2f32)) {
3489 Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP2);
3493 case AArch64::FADDv2f64:
3494 if (canCombineWithFMUL(MBB, Root.getOperand(1),
3495 AArch64::FMULv2i64_indexed)) {
3496 Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP1);
3498 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3499 AArch64::FMULv2f64)) {
3500 Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP1);
3503 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3504 AArch64::FMULv2i64_indexed)) {
3505 Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP2);
3507 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3508 AArch64::FMULv2f64)) {
3509 Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP2);
3513 case AArch64::FADDv4f32:
3514 if (canCombineWithFMUL(MBB, Root.getOperand(1),
3515 AArch64::FMULv4i32_indexed)) {
3516 Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP1);
3518 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3519 AArch64::FMULv4f32)) {
3520 Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP1);
3523 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3524 AArch64::FMULv4i32_indexed)) {
3525 Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP2);
3527 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3528 AArch64::FMULv4f32)) {
3529 Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP2);
3534 case AArch64::FSUBSrr:
3535 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
3536 Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP1);
3539 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
3540 Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP2);
3542 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3543 AArch64::FMULv1i32_indexed)) {
3544 Patterns.push_back(MachineCombinerPattern::FMLSv1i32_indexed_OP2);
3547 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FNMULSrr)) {
3548 Patterns.push_back(MachineCombinerPattern::FNMULSUBS_OP1);
3552 case AArch64::FSUBDrr:
3553 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
3554 Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP1);
3557 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
3558 Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP2);
3560 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3561 AArch64::FMULv1i64_indexed)) {
3562 Patterns.push_back(MachineCombinerPattern::FMLSv1i64_indexed_OP2);
3565 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FNMULDrr)) {
3566 Patterns.push_back(MachineCombinerPattern::FNMULSUBD_OP1);
3570 case AArch64::FSUBv2f32:
3571 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3572 AArch64::FMULv2i32_indexed)) {
3573 Patterns.push_back(MachineCombinerPattern::FMLSv2i32_indexed_OP2);
3575 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3576 AArch64::FMULv2f32)) {
3577 Patterns.push_back(MachineCombinerPattern::FMLSv2f32_OP2);
3581 case AArch64::FSUBv2f64:
3582 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3583 AArch64::FMULv2i64_indexed)) {
3584 Patterns.push_back(MachineCombinerPattern::FMLSv2i64_indexed_OP2);
3586 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3587 AArch64::FMULv2f64)) {
3588 Patterns.push_back(MachineCombinerPattern::FMLSv2f64_OP2);
3592 case AArch64::FSUBv4f32:
3593 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3594 AArch64::FMULv4i32_indexed)) {
3595 Patterns.push_back(MachineCombinerPattern::FMLSv4i32_indexed_OP2);
3597 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3598 AArch64::FMULv4f32)) {
3599 Patterns.push_back(MachineCombinerPattern::FMLSv4f32_OP2);
3607 /// Return true when a code sequence can improve throughput. It
3608 /// should be called only for instructions in loops.
3609 /// \param Pattern - combiner pattern
3611 AArch64InstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
3615 case MachineCombinerPattern::FMULADDS_OP1:
3616 case MachineCombinerPattern::FMULADDS_OP2:
3617 case MachineCombinerPattern::FMULSUBS_OP1:
3618 case MachineCombinerPattern::FMULSUBS_OP2:
3619 case MachineCombinerPattern::FMULADDD_OP1:
3620 case MachineCombinerPattern::FMULADDD_OP2:
3621 case MachineCombinerPattern::FMULSUBD_OP1:
3622 case MachineCombinerPattern::FMULSUBD_OP2:
3623 case MachineCombinerPattern::FNMULSUBS_OP1:
3624 case MachineCombinerPattern::FNMULSUBD_OP1:
3625 case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
3626 case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
3627 case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
3628 case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
3629 case MachineCombinerPattern::FMLAv2f32_OP2:
3630 case MachineCombinerPattern::FMLAv2f32_OP1:
3631 case MachineCombinerPattern::FMLAv2f64_OP1:
3632 case MachineCombinerPattern::FMLAv2f64_OP2:
3633 case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
3634 case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
3635 case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
3636 case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
3637 case MachineCombinerPattern::FMLAv4f32_OP1:
3638 case MachineCombinerPattern::FMLAv4f32_OP2:
3639 case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
3640 case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
3641 case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
3642 case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
3643 case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
3644 case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
3645 case MachineCombinerPattern::FMLSv2f32_OP2:
3646 case MachineCombinerPattern::FMLSv2f64_OP2:
3647 case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
3648 case MachineCombinerPattern::FMLSv4f32_OP2:
3650 } // end switch (Pattern)
3653 /// Return true when there is potentially a faster code sequence for an
3654 /// instruction chain ending in \p Root. All potential patterns are listed in
3655 /// the \p Pattern vector. Pattern should be sorted in priority order since the
3656 /// pattern evaluator stops checking as soon as it finds a faster sequence.
3658 bool AArch64InstrInfo::getMachineCombinerPatterns(
3660 SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
3662 if (getMaddPatterns(Root, Patterns))
3664 // Floating point patterns
3665 if (getFMAPatterns(Root, Patterns))
3668 return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
3671 enum class FMAInstKind { Default, Indexed, Accumulator };
3672 /// genFusedMultiply - Generate fused multiply instructions.
3673 /// This function supports both integer and floating point instructions.
3674 /// A typical example:
3675 ///  F|MUL I=A,B,0
3676 ///  F|ADD R,I,C
3677 /// ==> F|MADD R,A,B,C
3678 /// \param MF Containing MachineFunction
3679 /// \param MRI Register information
3680 /// \param TII Target information
3681 /// \param Root is the F|ADD instruction
3682 /// \param [out] InsInstrs is a vector of machine instructions and will
3683 /// contain the generated madd instruction
3684 /// \param IdxMulOpd is index of operand in Root that is the result of
3685 /// the F|MUL. In the example above IdxMulOpd is 1.
3686 /// \param MaddOpc the opcode of the f|madd instruction
3687 /// \param RC Register class of operands
3688 /// \param kind The kind of FMA instruction (addressing mode) to be generated
3689 static MachineInstr *
3690 genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
3691 const TargetInstrInfo *TII, MachineInstr &Root,
3692 SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
3693 unsigned MaddOpc, const TargetRegisterClass *RC,
3694 FMAInstKind kind = FMAInstKind::Default) {
3695 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3697 unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
3698 MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
3699 unsigned ResultReg = Root.getOperand(0).getReg();
3700 unsigned SrcReg0 = MUL->getOperand(1).getReg();
3701 bool Src0IsKill = MUL->getOperand(1).isKill();
3702 unsigned SrcReg1 = MUL->getOperand(2).getReg();
3703 bool Src1IsKill = MUL->getOperand(2).isKill();
3704 unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
3705 bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
3707 if (TargetRegisterInfo::isVirtualRegister(ResultReg))
3708 MRI.constrainRegClass(ResultReg, RC);
3709 if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
3710 MRI.constrainRegClass(SrcReg0, RC);
3711 if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
3712 MRI.constrainRegClass(SrcReg1, RC);
3713 if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
3714 MRI.constrainRegClass(SrcReg2, RC);
3716 MachineInstrBuilder MIB;
3717 if (kind == FMAInstKind::Default)
3718 MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3719 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3720 .addReg(SrcReg1, getKillRegState(Src1IsKill))
3721 .addReg(SrcReg2, getKillRegState(Src2IsKill));
3722 else if (kind == FMAInstKind::Indexed)
3723 MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3724 .addReg(SrcReg2, getKillRegState(Src2IsKill))
3725 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3726 .addReg(SrcReg1, getKillRegState(Src1IsKill))
3727 .addImm(MUL->getOperand(3).getImm());
3728 else if (kind == FMAInstKind::Accumulator)
3729 MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3730 .addReg(SrcReg2, getKillRegState(Src2IsKill))
3731 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3732 .addReg(SrcReg1, getKillRegState(Src1IsKill));
3734 assert(false && "Invalid FMA instruction kind\n");
3735 // Insert the MADD (MADD, FMA, FMS, FMLA, FMLS)
3736 InsInstrs.push_back(MIB);
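// E.g. for the MULADDW_OP1 pattern this emits "MADDWrrr Wd, Wa, Wb, Wc",
// i.e. Wd = Wa * Wb + Wc, with Wa/Wb taken from the MUL feeding operand
// IdxMulOpd of Root and Wc from Root's other operand.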
3740 /// genMaddR - Generate madd instruction and combine mul and add using
3741 /// an extra virtual register
3742 /// Example - an ADD intermediate needs to be stored in a register:
3743 ///   MUL I=A,B,0
3744 ///   ADD R,I,Imm
3745 /// ==> ORR V, ZR, Imm
3746 /// ==> MADD R,A,B,V
3747 /// \param MF Containing MachineFunction
3748 /// \param MRI Register information
3749 /// \param TII Target information
3750 /// \param Root is the ADD instruction
3751 /// \param [out] InsInstrs is a vector of machine instructions and will
3752 /// contain the generated madd instruction
3753 /// \param IdxMulOpd is index of operand in Root that is the result of
3754 /// the MUL. In the example above IdxMulOpd is 1.
3755 /// \param MaddOpc the opcode of the madd instruction
3756 /// \param VR is a virtual register that holds the value of an ADD operand
3757 /// (V in the example above).
3758 /// \param RC Register class of operands
3759 static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
3760 const TargetInstrInfo *TII, MachineInstr &Root,
3761 SmallVectorImpl<MachineInstr *> &InsInstrs,
3762 unsigned IdxMulOpd, unsigned MaddOpc,
3763 unsigned VR, const TargetRegisterClass *RC) {
3764 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3766 MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
3767 unsigned ResultReg = Root.getOperand(0).getReg();
3768 unsigned SrcReg0 = MUL->getOperand(1).getReg();
3769 bool Src0IsKill = MUL->getOperand(1).isKill();
3770 unsigned SrcReg1 = MUL->getOperand(2).getReg();
3771 bool Src1IsKill = MUL->getOperand(2).isKill();
3773 if (TargetRegisterInfo::isVirtualRegister(ResultReg))
3774 MRI.constrainRegClass(ResultReg, RC);
3775 if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
3776 MRI.constrainRegClass(SrcReg0, RC);
3777 if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
3778 MRI.constrainRegClass(SrcReg1, RC);
3779 if (TargetRegisterInfo::isVirtualRegister(VR))
3780 MRI.constrainRegClass(VR, RC);
3782 MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
3784 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3785 .addReg(SrcReg1, getKillRegState(Src1IsKill))
3788 InsInstrs.push_back(MIB);
3792 /// When getMachineCombinerPatterns() finds potential patterns,
3793 /// this function generates the instructions that could replace the
3794 /// original code sequence
3795 void AArch64InstrInfo::genAlternativeCodeSequence(
3796 MachineInstr &Root, MachineCombinerPattern Pattern,
3797 SmallVectorImpl<MachineInstr *> &InsInstrs,
3798 SmallVectorImpl<MachineInstr *> &DelInstrs,
3799 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
3800 MachineBasicBlock &MBB = *Root.getParent();
3801 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3802 MachineFunction &MF = *MBB.getParent();
3803 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
3806 const TargetRegisterClass *RC;
3810 // Reassociate instructions.
3811 TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
3812 DelInstrs, InstrIdxForVirtReg);
3814 case MachineCombinerPattern::MULADDW_OP1:
3815 case MachineCombinerPattern::MULADDX_OP1:
3819 // --- Create(MADD);
3820 if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
3821 Opc = AArch64::MADDWrrr;
3822 RC = &AArch64::GPR32RegClass;
3824 Opc = AArch64::MADDXrrr;
3825 RC = &AArch64::GPR64RegClass;
3827 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
3829 case MachineCombinerPattern::MULADDW_OP2:
3830 case MachineCombinerPattern::MULADDX_OP2:
3834 // --- Create(MADD);
3835 if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
3836 Opc = AArch64::MADDWrrr;
3837 RC = &AArch64::GPR32RegClass;
3839 Opc = AArch64::MADDXrrr;
3840 RC = &AArch64::GPR64RegClass;
3842 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
3844 case MachineCombinerPattern::MULADDWI_OP1:
3845 case MachineCombinerPattern::MULADDXI_OP1: {
3848 // ==> ORR V, ZR, Imm
3850 // --- Create(MADD);
3851 const TargetRegisterClass *OrrRC;
3852 unsigned BitSize, OrrOpc, ZeroReg;
3853 if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
3854 OrrOpc = AArch64::ORRWri;
3855 OrrRC = &AArch64::GPR32spRegClass;
3857 ZeroReg = AArch64::WZR;
3858 Opc = AArch64::MADDWrrr;
3859 RC = &AArch64::GPR32RegClass;
3861 OrrOpc = AArch64::ORRXri;
3862 OrrRC = &AArch64::GPR64spRegClass;
3864 ZeroReg = AArch64::XZR;
3865 Opc = AArch64::MADDXrrr;
3866 RC = &AArch64::GPR64RegClass;
3868 unsigned NewVR = MRI.createVirtualRegister(OrrRC);
3869 uint64_t Imm = Root.getOperand(2).getImm();
3871 if (Root.getOperand(3).isImm()) {
3872 unsigned Val = Root.getOperand(3).getImm();
3875 uint64_t UImm = SignExtend64(Imm, BitSize);
3877 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
3878 MachineInstrBuilder MIB1 =
3879 BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
3882 InsInstrs.push_back(MIB1);
3883 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
3884 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
3888 case MachineCombinerPattern::MULSUBW_OP1:
3889 case MachineCombinerPattern::MULSUBX_OP1: {
3893 // ==> MADD R,A,B,V // = -C + A*B
3894 // --- Create(MADD);
3895 const TargetRegisterClass *SubRC;
3896 unsigned SubOpc, ZeroReg;
3897 if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
3898 SubOpc = AArch64::SUBWrr;
3899 SubRC = &AArch64::GPR32spRegClass;
3900 ZeroReg = AArch64::WZR;
3901 Opc = AArch64::MADDWrrr;
3902 RC = &AArch64::GPR32RegClass;
3904 SubOpc = AArch64::SUBXrr;
3905 SubRC = &AArch64::GPR64spRegClass;
3906 ZeroReg = AArch64::XZR;
3907 Opc = AArch64::MADDXrrr;
3908 RC = &AArch64::GPR64RegClass;
3910 unsigned NewVR = MRI.createVirtualRegister(SubRC);
3912 MachineInstrBuilder MIB1 =
3913 BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
3915 .add(Root.getOperand(2));
3916 InsInstrs.push_back(MIB1);
3917 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
3918 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
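// The emitted sequence is "SUB NewVR, WZR/XZR, C" followed by
// "MADD R, A, B, NewVR", which computes A*B - C as described in the
// comment above.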
3921 case MachineCombinerPattern::MULSUBW_OP2:
3922 case MachineCombinerPattern::MULSUBX_OP2:
3925 // ==> MSUB R,A,B,C (computes C - A*B)
3926 // --- Create(MSUB);
3927 if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
3928 Opc = AArch64::MSUBWrrr;
3929 RC = &AArch64::GPR32RegClass;
3931 Opc = AArch64::MSUBXrrr;
3932 RC = &AArch64::GPR64RegClass;
3934 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
3936 case MachineCombinerPattern::MULSUBWI_OP1:
3937 case MachineCombinerPattern::MULSUBXI_OP1: {
3940 // ==> ORR V, ZR, -Imm
3941 // ==> MADD R,A,B,V // = -Imm + A*B
3942 // --- Create(MADD);
3943 const TargetRegisterClass *OrrRC;
3944 unsigned BitSize, OrrOpc, ZeroReg;
3945 if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
3946 OrrOpc = AArch64::ORRWri;
3947 OrrRC = &AArch64::GPR32spRegClass;
3949 ZeroReg = AArch64::WZR;
3950 Opc = AArch64::MADDWrrr;
3951 RC = &AArch64::GPR32RegClass;
3953 OrrOpc = AArch64::ORRXri;
3954 OrrRC = &AArch64::GPR64spRegClass;
3956 ZeroReg = AArch64::XZR;
3957 Opc = AArch64::MADDXrrr;
3958 RC = &AArch64::GPR64RegClass;
3960 unsigned NewVR = MRI.createVirtualRegister(OrrRC);
3961 uint64_t Imm = Root.getOperand(2).getImm();
3962 if (Root.getOperand(3).isImm()) {
3963 unsigned Val = Root.getOperand(3).getImm();
3964 Imm = Imm << Val;
3965 }
3966 uint64_t UImm = SignExtend64(-Imm, BitSize);
3967 uint64_t Encoding;
3968 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
3969 MachineInstrBuilder MIB1 =
3970 BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
3971 .addReg(ZeroReg)
3972 .addImm(Encoding);
3973 InsInstrs.push_back(MIB1);
3974 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
3975 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
3976 }
3977 break;
3978 }
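// Illustrative example (not from the source): for "mul w8, w0, w1" followed by
// "sub w0, w8, #5", the negated immediate -5 (0xfffffffb) is a valid logical
// immediate, so the combiner can emit "orr w9, wzr, #0xfffffffb" and then
// "madd w0, w0, w1, w9", i.e. w0 = w0*w1 - 5. If -Imm is not encodable as a
// logical immediate, processLogicalImmediate fails and no rewrite is made.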
3979 // Floating Point Support
3980 case MachineCombinerPattern::FMULADDS_OP1:
3981 case MachineCombinerPattern::FMULADDD_OP1:
3982 // FMUL I=A,B,0
3983 // FADD R,I,C
3984 // ==> FMADD R,A,B,C
3985 // --- Create(FMADD);
3986 if (Pattern == MachineCombinerPattern::FMULADDS_OP1) {
3987 Opc = AArch64::FMADDSrrr;
3988 RC = &AArch64::FPR32RegClass;
3989 } else {
3990 Opc = AArch64::FMADDDrrr;
3991 RC = &AArch64::FPR64RegClass;
3992 }
3993 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
3994 break;
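// Illustrative example (not from the source): "fmul s8, s0, s1; fadd s0, s8, s2"
// becomes the single fused "fmadd s0, s0, s1, s2" when the machine combiner
// decides the fused form is profitable on the target.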
3995 case MachineCombinerPattern::FMULADDS_OP2:
3996 case MachineCombinerPattern::FMULADDD_OP2:
3997 // FMUL I=A,B,0
3998 // FADD R,C,I
3999 // ==> FMADD R,A,B,C
4000 // --- Create(FMADD);
4001 if (Pattern == MachineCombinerPattern::FMULADDS_OP2) {
4002 Opc = AArch64::FMADDSrrr;
4003 RC = &AArch64::FPR32RegClass;
4004 } else {
4005 Opc = AArch64::FMADDDrrr;
4006 RC = &AArch64::FPR64RegClass;
4007 }
4008 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
4009 break;
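// For the vector FMLA/FMLS patterns below, FMAInstKind selects the assembly
// form that is emitted: Indexed produces the by-element variant (e.g.
// "fmla v0.2s, v1.2s, v2.s[0]") while Accumulator produces the plain vector
// variant (e.g. "fmla v0.2s, v1.2s, v2.2s"). Examples are illustrative only.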
4011 case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
4012 Opc = AArch64::FMLAv1i32_indexed;
4013 RC = &AArch64::FPR32RegClass;
4014 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4015 FMAInstKind::Indexed);
4016 break;
4017 case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
4018 Opc = AArch64::FMLAv1i32_indexed;
4019 RC = &AArch64::FPR32RegClass;
4020 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4021 FMAInstKind::Indexed);
4022 break;
4024 case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
4025 Opc = AArch64::FMLAv1i64_indexed;
4026 RC = &AArch64::FPR64RegClass;
4027 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4028 FMAInstKind::Indexed);
4029 break;
4030 case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
4031 Opc = AArch64::FMLAv1i64_indexed;
4032 RC = &AArch64::FPR64RegClass;
4033 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4034 FMAInstKind::Indexed);
4035 break;
4037 case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
4038 case MachineCombinerPattern::FMLAv2f32_OP1:
4039 RC = &AArch64::FPR64RegClass;
4040 if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP1) {
4041 Opc = AArch64::FMLAv2i32_indexed;
4042 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4043 FMAInstKind::Indexed);
4044 } else {
4045 Opc = AArch64::FMLAv2f32;
4046 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4047 FMAInstKind::Accumulator);
4048 }
4049 break;
4050 case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
4051 case MachineCombinerPattern::FMLAv2f32_OP2:
4052 RC = &AArch64::FPR64RegClass;
4053 if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP2) {
4054 Opc = AArch64::FMLAv2i32_indexed;
4055 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4056 FMAInstKind::Indexed);
4057 } else {
4058 Opc = AArch64::FMLAv2f32;
4059 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4060 FMAInstKind::Accumulator);
4061 }
4062 break;
4064 case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
4065 case MachineCombinerPattern::FMLAv2f64_OP1:
4066 RC = &AArch64::FPR128RegClass;
4067 if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP1) {
4068 Opc = AArch64::FMLAv2i64_indexed;
4069 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4070 FMAInstKind::Indexed);
4071 } else {
4072 Opc = AArch64::FMLAv2f64;
4073 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4074 FMAInstKind::Accumulator);
4075 }
4076 break;
4077 case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
4078 case MachineCombinerPattern::FMLAv2f64_OP2:
4079 RC = &AArch64::FPR128RegClass;
4080 if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP2) {
4081 Opc = AArch64::FMLAv2i64_indexed;
4082 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4083 FMAInstKind::Indexed);
4084 } else {
4085 Opc = AArch64::FMLAv2f64;
4086 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4087 FMAInstKind::Accumulator);
4088 }
4089 break;
4091 case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
4092 case MachineCombinerPattern::FMLAv4f32_OP1:
4093 RC = &AArch64::FPR128RegClass;
4094 if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP1) {
4095 Opc = AArch64::FMLAv4i32_indexed;
4096 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4097 FMAInstKind::Indexed);
4098 } else {
4099 Opc = AArch64::FMLAv4f32;
4100 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4101 FMAInstKind::Accumulator);
4102 }
4103 break;
4105 case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
4106 case MachineCombinerPattern::FMLAv4f32_OP2:
4107 RC = &AArch64::FPR128RegClass;
4108 if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP2) {
4109 Opc = AArch64::FMLAv4i32_indexed;
4110 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4111 FMAInstKind::Indexed);
4112 } else {
4113 Opc = AArch64::FMLAv4f32;
4114 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4115 FMAInstKind::Accumulator);
4116 }
4117 break;
4119 case MachineCombinerPattern::FMULSUBS_OP1:
4120 case MachineCombinerPattern::FMULSUBD_OP1: {
4121 // FMUL I=A,B,0
4122 // FSUB R,I,C
4123 // ==> FNMSUB R,A,B,C // = -C + A*B
4124 // --- Create(FNMSUB);
4125 if (Pattern == MachineCombinerPattern::FMULSUBS_OP1) {
4126 Opc = AArch64::FNMSUBSrrr;
4127 RC = &AArch64::FPR32RegClass;
4128 } else {
4129 Opc = AArch64::FNMSUBDrrr;
4130 RC = &AArch64::FPR64RegClass;
4131 }
4132 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
4133 break;
4134 }
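// Illustrative example (not from the source): "fmul d8, d0, d1; fsub d0, d8, d2"
// becomes "fnmsub d0, d0, d1, d2", which computes d0*d1 - d2 (i.e. -d2 + d0*d1),
// matching the comment above.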
4136 case MachineCombinerPattern::FNMULSUBS_OP1:
4137 case MachineCombinerPattern::FNMULSUBD_OP1: {
4138 // FNMUL I=A,B,0
4139 // FSUB R,I,C
4140 // ==> FNMADD R,A,B,C // = -A*B - C
4141 // --- Create(FNMADD);
4142 if (Pattern == MachineCombinerPattern::FNMULSUBS_OP1) {
4143 Opc = AArch64::FNMADDSrrr;
4144 RC = &AArch64::FPR32RegClass;
4145 } else {
4146 Opc = AArch64::FNMADDDrrr;
4147 RC = &AArch64::FPR64RegClass;
4148 }
4149 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
4150 break;
4151 }
4153 case MachineCombinerPattern::FMULSUBS_OP2:
4154 case MachineCombinerPattern::FMULSUBD_OP2: {
4155 // FMUL I=A,B,0
4156 // FSUB R,C,I
4157 // ==> FMSUB R,A,B,C (computes C - A*B)
4158 // --- Create(FMSUB);
4159 if (Pattern == MachineCombinerPattern::FMULSUBS_OP2) {
4160 Opc = AArch64::FMSUBSrrr;
4161 RC = &AArch64::FPR32RegClass;
4162 } else {
4163 Opc = AArch64::FMSUBDrrr;
4164 RC = &AArch64::FPR64RegClass;
4165 }
4166 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
4167 break;
4168 }
4170 case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
4171 Opc = AArch64::FMLSv1i32_indexed;
4172 RC = &AArch64::FPR32RegClass;
4173 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4174 FMAInstKind::Indexed);
4175 break;
4177 case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
4178 Opc = AArch64::FMLSv1i64_indexed;
4179 RC = &AArch64::FPR64RegClass;
4180 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4181 FMAInstKind::Indexed);
4182 break;
4184 case MachineCombinerPattern::FMLSv2f32_OP2:
4185 case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
4186 RC = &AArch64::FPR64RegClass;
4187 if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP2) {
4188 Opc = AArch64::FMLSv2i32_indexed;
4189 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4190 FMAInstKind::Indexed);
4191 } else {
4192 Opc = AArch64::FMLSv2f32;
4193 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4194 FMAInstKind::Accumulator);
4195 }
4196 break;
4198 case MachineCombinerPattern::FMLSv2f64_OP2:
4199 case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
4200 RC = &AArch64::FPR128RegClass;
4201 if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP2) {
4202 Opc = AArch64::FMLSv2i64_indexed;
4203 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4204 FMAInstKind::Indexed);
4205 } else {
4206 Opc = AArch64::FMLSv2f64;
4207 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4208 FMAInstKind::Accumulator);
4209 }
4210 break;
4212 case MachineCombinerPattern::FMLSv4f32_OP2:
4213 case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
4214 RC = &AArch64::FPR128RegClass;
4215 if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP2) {
4216 Opc = AArch64::FMLSv4i32_indexed;
4217 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4218 FMAInstKind::Indexed);
4219 } else {
4220 Opc = AArch64::FMLSv4f32;
4221 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4222 FMAInstKind::Accumulator);
4223 }
4224 break;
4225 } // end switch (Pattern)
4226 // Record MUL and ADD/SUB for deletion
4227 DelInstrs.push_back(MUL);
4228 DelInstrs.push_back(&Root);
4229 }
4231 /// \brief Replace csinc-branch sequence by simple conditional branch
4232 ///
4233 /// Examples:
4234 /// 1. \code
4235 ///   csinc w9, wzr, wzr, <condition code>
4236 ///   tbnz  w9, #0, 0x44
4237 ///    \endcode
4238 /// to
4239 ///    \code
4240 ///   b.<inverted condition code>
4241 ///    \endcode
4242 ///
4243 /// 2. \code
4244 ///   csinc w9, wzr, wzr, <condition code>
4245 ///   tbz w9, #0, 0x44
4246 ///    \endcode
4247 /// to
4248 ///    \code
4249 ///   b.<condition code>
4250 ///    \endcode
4251 ///
4252 /// Replace compare and branch sequence by TBZ/TBNZ instruction when the
4253 /// compare's constant operand is power of 2.
4254 ///
4255 /// Examples:
4256 ///    \code
4257 ///   and  w8, w8, #0x400
4258 ///   cbnz w8, L1
4259 ///    \endcode
4260 /// to
4261 ///    \code
4262 ///   tbnz w8, #10, L1
4263 ///    \endcode
4264 ///
4265 /// \param  MI Conditional Branch
4266 /// \return True when the simple conditional branch is generated
4268 bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
4269 bool IsNegativeBranch = false;
4270 bool IsTestAndBranch = false;
4271 unsigned TargetBBInMI = 0;
4272 switch (MI.getOpcode()) {
4273 default:
4274 llvm_unreachable("Unknown branch instruction?");
4275 case AArch64::Bcc:
4276 return false;
4277 case AArch64::CBZW:
4278 case AArch64::CBZX:
4279 TargetBBInMI = 1;
4280 break;
4281 case AArch64::CBNZW:
4282 case AArch64::CBNZX:
4283 TargetBBInMI = 1;
4284 IsNegativeBranch = true;
4285 break;
4286 case AArch64::TBZW:
4287 case AArch64::TBZX:
4288 TargetBBInMI = 2;
4289 IsTestAndBranch = true;
4290 break;
4291 case AArch64::TBNZW:
4292 case AArch64::TBNZX:
4293 TargetBBInMI = 2;
4294 IsNegativeBranch = true;
4295 IsTestAndBranch = true;
4296 break;
4297 }
4298 // So we increment a zero register and test for bits other
4299 // than bit 0? Conservatively bail out in case the verifier
4300 // missed this case.
4301 if (IsTestAndBranch && MI.getOperand(1).getImm())
4302 return false;
4304 // Find Definition.
4305 assert(MI.getParent() && "Incomplete machine instruction\n");
4306 MachineBasicBlock *MBB = MI.getParent();
4307 MachineFunction *MF = MBB->getParent();
4308 MachineRegisterInfo *MRI = &MF->getRegInfo();
4309 unsigned VReg = MI.getOperand(0).getReg();
4310 if (!TargetRegisterInfo::isVirtualRegister(VReg))
4311 return false;
4313 MachineInstr *DefMI = MRI->getVRegDef(VReg);
4315 // Look through COPY instructions to find definition.
4316 while (DefMI->isCopy()) {
4317 unsigned CopyVReg = DefMI->getOperand(1).getReg();
4318 if (!MRI->hasOneNonDBGUse(CopyVReg))
4319 return false;
4320 if (!MRI->hasOneDef(CopyVReg))
4321 return false;
4322 DefMI = MRI->getVRegDef(CopyVReg);
4323 }
4325 switch (DefMI->getOpcode()) {
4326 default:
4327 return false;
4328 // Fold AND into a TBZ/TBNZ if constant operand is power of 2.
4329 case AArch64::ANDWri:
4330 case AArch64::ANDXri: {
4331 if (IsTestAndBranch)
4332 return false;
4333 if (DefMI->getParent() != MBB)
4334 return false;
4335 if (!MRI->hasOneNonDBGUse(VReg))
4336 return false;
4338 bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
4339 uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
4340 DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64);
4341 if (!isPowerOf2_64(Mask))
4342 return false;
4344 MachineOperand &MO = DefMI->getOperand(1);
4345 unsigned NewReg = MO.getReg();
4346 if (!TargetRegisterInfo::isVirtualRegister(NewReg))
4347 return false;
4349 assert(!MRI->def_empty(NewReg) && "Register must be defined.");
4351 MachineBasicBlock &RefToMBB = *MBB;
4352 MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
4353 DebugLoc DL = MI.getDebugLoc();
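// The power-of-2 mask directly yields the bit to test; e.g. (illustrative)
// a mask of 0x400 gives bit index 10, matching the "tbnz w8, #10, L1"
// example in the header comment above.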
4354 unsigned Imm = Log2_64(Mask);
4355 unsigned Opc = (Imm < 32)
4356 ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
4357 : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
4358 MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc))
4359 .addReg(NewReg)
4360 .addImm(Imm)
4361 .addMBB(TBB);
4362 // Register lives on to the TBZ/TBNZ now.
4363 MO.setIsKill(false);
4365 // For immediates smaller than 32, we need to use the 32-bit
4366 // variant (W) in all cases, because the 64-bit variant cannot
4367 // encode them.
4368 // Therefore, if the input register is 64-bit, we need to take the
4369 // 32-bit sub-register.
4370 if (!Is32Bit && Imm < 32)
4371 NewMI->getOperand(0).setSubReg(AArch64::sub_32);
4372 MI.eraseFromParent();
4373 return true;
4374 }
4376 case AArch64::CSINCWr:
4377 case AArch64::CSINCXr: {
4378 if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
4379 DefMI->getOperand(2).getReg() == AArch64::WZR) &&
4380 !(DefMI->getOperand(1).getReg() == AArch64::XZR &&
4381 DefMI->getOperand(2).getReg() == AArch64::XZR))
4382 return false;
4384 if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
4385 return false;
4387 AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
4388 // Convert only when the condition code is not modified between
4389 // the CSINC and the branch. The CC may be used by other
4390 // instructions in between.
4391 if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write))
4392 return false;
4393 MachineBasicBlock &RefToMBB = *MBB;
4394 MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
4395 DebugLoc DL = MI.getDebugLoc();
4396 if (IsNegativeBranch)
4397 CC = AArch64CC::getInvertedCondCode(CC);
4398 BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
4399 MI.eraseFromParent();
4400 return true;
4401 }
4402 }
4403 }
4405 std::pair<unsigned, unsigned>
4406 AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
4407 const unsigned Mask = AArch64II::MO_FRAGMENT;
4408 return std::make_pair(TF & Mask, TF & ~Mask);
4409 }
4411 ArrayRef<std::pair<unsigned, const char *>>
4412 AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
4413 using namespace AArch64II;
4415 static const std::pair<unsigned, const char *> TargetFlags[] = {
4416 {MO_PAGE, "aarch64-page"},
4417 {MO_PAGEOFF, "aarch64-pageoff"},
4418 {MO_G3, "aarch64-g3"},
4419 {MO_G2, "aarch64-g2"},
4420 {MO_G1, "aarch64-g1"},
4421 {MO_G0, "aarch64-g0"},
4422 {MO_HI12, "aarch64-hi12"}};
4423 return makeArrayRef(TargetFlags);
4424 }
4426 ArrayRef<std::pair<unsigned, const char *>>
4427 AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
4428 using namespace AArch64II;
4430 static const std::pair<unsigned, const char *> TargetFlags[] = {
4431 {MO_GOT, "aarch64-got"},
4432 {MO_NC, "aarch64-nc"},
4433 {MO_TLS, "aarch64-tls"}};
4434 return makeArrayRef(TargetFlags);
4435 }
4437 ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
4438 AArch64InstrInfo::getSerializableMachineMemOperandTargetFlags() const {
4439 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
4440 {{MOSuppressPair, "aarch64-suppress-pair"},
4441 {MOStridedAccess, "aarch64-strided-access"}};
4442 return makeArrayRef(TargetFlags);
4443 }
4445 unsigned AArch64InstrInfo::getOutliningBenefit(size_t SequenceSize,
4446 size_t Occurrences,
4447 bool CanBeTailCall) const {
4448 unsigned NotOutlinedSize = SequenceSize * Occurrences;
4449 unsigned OutlinedSize;
4451 // Is this candidate something we can outline as a tail call?
4452 if (CanBeTailCall) {
4453 // If yes, then we just outline the sequence and replace each of its
4454 // occurrences with a branch instruction.
4455 OutlinedSize = SequenceSize + Occurrences;
4456 } else {
4457 // If no, then we outline the sequence (SequenceSize), add a return (+1),
4458 // and replace each occurrence with a save/restore to LR and a call
4459 // (3 * Occurrences)
4460 OutlinedSize = (SequenceSize + 1) + (3 * Occurrences);
4461 }
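// Worked example (illustrative): outlining a 10-instruction sequence that
// appears 3 times and cannot be tail-called costs (10 + 1) + 3*3 = 20
// instructions versus 30 if left inline, so the benefit reported is 10.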
4463 // Return the number of instructions saved by outlining this sequence.
4464 return NotOutlinedSize > OutlinedSize ? NotOutlinedSize - OutlinedSize : 0;
4465 }
4467 bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF) const {
4468 return MF.getFunction()->hasFnAttribute(Attribute::NoRedZone);
4469 }
4471 AArch64GenInstrInfo::MachineOutlinerInstrType
4472 AArch64InstrInfo::getOutliningType(MachineInstr &MI) const {
4474 MachineFunction *MF = MI.getParent()->getParent();
4475 AArch64FunctionInfo *FuncInfo = MF->getInfo<AArch64FunctionInfo>();
4477 // Don't outline LOHs.
4478 if (FuncInfo->getLOHRelated().count(&MI))
4479 return MachineOutlinerInstrType::Illegal;
4481 // Don't allow debug values to impact outlining type.
4482 if (MI.isDebugValue() || MI.isIndirectDebugValue())
4483 return MachineOutlinerInstrType::Invisible;
4485 // Is this a terminator for a basic block?
4486 if (MI.isTerminator()) {
4488 // Is this the end of a function?
4489 if (MI.getParent()->succ_empty())
4490 return MachineOutlinerInstrType::Legal;
4492 // It's not, so don't outline it.
4493 return MachineOutlinerInstrType::Illegal;
4494 }
4496 // Don't outline positions.
4497 if (MI.isPosition())
4498 return MachineOutlinerInstrType::Illegal;
4500 // Make sure none of the operands are un-outlinable.
4501 for (const MachineOperand &MOP : MI.operands())
4502 if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
4503 MOP.isTargetIndex())
4504 return MachineOutlinerInstrType::Illegal;
4506 // Don't outline anything that uses the link register.
4507 if (MI.modifiesRegister(AArch64::LR, &RI) ||
4508 MI.readsRegister(AArch64::LR, &RI))
4509 return MachineOutlinerInstrType::Illegal;
4511 // Does this use the stack?
4512 if (MI.modifiesRegister(AArch64::SP, &RI) ||
4513 MI.readsRegister(AArch64::SP, &RI)) {
4515 // Is it a memory operation?
4516 if (MI.mayLoadOrStore()) {
4517 unsigned Base; // Filled with the base register of MI.
4518 int64_t Offset; // Filled with the offset of MI.
4519 unsigned DummyWidth;
4521 // Does it allow us to offset the base register and is the base SP?
4522 if (!getMemOpBaseRegImmOfsWidth(MI, Base, Offset, DummyWidth, &RI) ||
4523 Base != AArch64::SP)
4524 return MachineOutlinerInstrType::Illegal;
4526 // Find the minimum/maximum offset for this instruction and check if
4527 // fixing it up would be in range.
4528 int64_t MinOffset, MaxOffset;
4529 unsigned DummyScale;
4530 getMemOpInfo(MI.getOpcode(), DummyScale, DummyWidth, MinOffset,
4531 MaxOffset);
4533 // TODO: We should really test what happens if an instruction overflows.
4534 // This is tricky to test with IR tests, but when the outliner is moved
4535 // to a MIR test, it really ought to be checked.
4536 if (Offset + 16 < MinOffset || Offset + 16 > MaxOffset)
4537 return MachineOutlinerInstrType::Illegal;
4539 // It's in range, so we can outline it.
4540 return MachineOutlinerInstrType::Legal;
4541 }
4543 // We can't fix it up, so don't outline it.
4544 return MachineOutlinerInstrType::Illegal;
4545 }
4547 return MachineOutlinerInstrType::Legal;
4548 }
4550 void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
4551 for (MachineInstr &MI : MBB) {
4552 unsigned Base, Width;
4553 int64_t Offset;
4555 // Is this a load or store with an immediate offset with SP as the base?
4556 if (!MI.mayLoadOrStore() ||
4557 !getMemOpBaseRegImmOfsWidth(MI, Base, Offset, Width, &RI) ||
4558 Base != AArch64::SP)
4559 continue;
4561 // It is, so we have to fix it up.
4562 unsigned Scale;
4563 int64_t Dummy1, Dummy2;
4565 MachineOperand &StackOffsetOperand = getMemOpBaseRegImmOfsOffsetOperand(MI);
4566 assert(StackOffsetOperand.isImm() && "Stack offset wasn't immediate!");
4567 getMemOpInfo(MI.getOpcode(), Scale, Width, Dummy1, Dummy2);
4568 assert(Scale != 0 && "Unexpected opcode!");
4570 // We've pushed the return address to the stack, so add 16 to the offset.
4571 // This is safe, since we already checked if it would overflow when we
4572 // checked if this instruction was legal to outline.
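// Worked example (illustrative): an 8-byte LDR at [sp, #32] has Scale == 8;
// with the extra 16 bytes for the saved LR the byte offset becomes 48, so the
// encoded immediate is updated to 48 / 8 == 6.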
4573 int64_t NewImm = (Offset + 16)/Scale;
4574 StackOffsetOperand.setImm(NewImm);
4575 }
4576 }
4578 void AArch64InstrInfo::insertOutlinerEpilogue(MachineBasicBlock &MBB,
4579 MachineFunction &MF,
4580 bool IsTailCall) const {
4582 // If this is a tail call outlined function, then there's already a return.
4583 if (IsTailCall)
4584 return;
4586 // It's not a tail call, so we have to insert the return ourselves.
4587 MachineInstr *ret = BuildMI(MF, DebugLoc(), get(AArch64::RET))
4588 .addReg(AArch64::LR, RegState::Undef);
4589 MBB.insert(MBB.end(), ret);
4591 // Walk over the basic block and fix up all the stack accesses.
4592 fixupPostOutline(MBB);
4593 }
4595 void AArch64InstrInfo::insertOutlinerPrologue(MachineBasicBlock &MBB,
4596 MachineFunction &MF,
4597 bool IsTailCall) const {}
4599 MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall(
4600 Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
4601 MachineFunction &MF, bool IsTailCall) const {
4603 // Are we tail calling?
4604 if (IsTailCall) {
4605 // If yes, then we can just branch to the label.
4606 It = MBB.insert(It,
4607 BuildMI(MF, DebugLoc(), get(AArch64::B))
4608 .addGlobalAddress(M.getNamedValue(MF.getName())));
4609 return It;
4610 }
4612 // We're not tail calling, so we have to save LR before the call and restore
4613 // it after.
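// The emitted frame sequence is equivalent to (illustrative):
//   str x30, [sp, #-16]!   ; spill LR and allocate 16 bytes
//   bl  <outlined function>
//   ldr x30, [sp], #16     ; reload LR and deallocate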
4614 MachineInstr *STRXpre = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
4615 .addReg(AArch64::SP, RegState::Define)
4616 .addReg(AArch64::LR)
4617 .addReg(AArch64::SP)
4618 .addImm(-16);
4619 It = MBB.insert(It, STRXpre);
4620 It++;
4622 // Insert the call.
4623 It = MBB.insert(It,
4624 BuildMI(MF, DebugLoc(), get(AArch64::BL))
4625 .addGlobalAddress(M.getNamedValue(MF.getName())));
4627 It++;
4629 // Restore the link register.
4630 MachineInstr *LDRXpost = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
4631 .addReg(AArch64::SP, RegState::Define)
4632 .addReg(AArch64::LR)
4633 .addReg(AArch64::SP)
4634 .addImm(16);
4635 It = MBB.insert(It, LDRXpost);
4637 return It;
4638 }