//===- AArch64InstrInfo.cpp - AArch64 Instruction Information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

14 #include "AArch64InstrInfo.h"
15 #include "AArch64MachineFunctionInfo.h"
16 #include "AArch64Subtarget.h"
17 #include "MCTargetDesc/AArch64AddressingModes.h"
18 #include "Utils/AArch64BaseInfo.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/CodeGen/MachineBasicBlock.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineMemOperand.h"
28 #include "llvm/CodeGen/MachineOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/StackMaps.h"
31 #include "llvm/IR/DebugLoc.h"
32 #include "llvm/IR/GlobalValue.h"
33 #include "llvm/MC/MCInst.h"
34 #include "llvm/MC/MCInstrDesc.h"
35 #include "llvm/Support/Casting.h"
36 #include "llvm/Support/CodeGen.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Compiler.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Target/TargetMachine.h"
42 #include "llvm/Target/TargetOptions.h"
43 #include "llvm/Target/TargetRegisterInfo.h"
44 #include "llvm/Target/TargetSubtargetInfo.h"
52 #define GET_INSTRINFO_CTOR_DTOR
53 #include "AArch64GenInstrInfo.inc"
static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;

static cl::opt<unsigned>
TBZDisplacementBits("aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
                    cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
CBZDisplacementBits("aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
                    cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
                    cl::desc("Restrict range of Bcc instructions (DEBUG)"));

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(STI.getTargetTriple()), Subtarget(STI) {}

/// getInstSizeInBytes - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI.getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);

  // FIXME: We currently only handle pseudoinstructions that don't get expanded
  // before the assembly printer.
  unsigned NumBytes = 0;
  const MCInstrDesc &Desc = MI.getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    NumBytes = 4;
    break;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    NumBytes = 0;
    break;
  case TargetOpcode::STACKMAP:
    // The upper bound for a stackmap intrinsic is the full length of its shadow
    NumBytes = StackMapOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case TargetOpcode::PATCHPOINT:
    // The size of the patchpoint intrinsic is the number of bytes requested
    NumBytes = PatchPointOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case AArch64::TLSDESC_CALLSEQ:
    // This gets lowered to an instruction sequence which takes 16 bytes
    NumBytes = 16;
    break;
  }

  return NumBytes;
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
    break;
  }
}

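// A sketch of the Cond encodings produced above (derived from the code, for
// reference):
//   Bcc:         Cond = { <cc immediate> }
//   CB[N]Z[WX]:  Cond = { -1, <opcode>, <reg> }
//   TB[N]Z[WX]:  Cond = { -1, <opcode>, <reg>, <bit number> }
// The leading -1 marks a folded compare-and-branch; see
// instantiateCondBranch() and insertSelect() for the consumers.
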
static unsigned getBranchDisplacementBits(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return 64;
  case AArch64::TBNZW:
  case AArch64::TBZW:
  case AArch64::TBNZX:
  case AArch64::TBZX:
    return TBZDisplacementBits;
  case AArch64::CBNZW:
  case AArch64::CBZW:
  case AArch64::CBNZX:
  case AArch64::CBZX:
    return CBZDisplacementBits;
  case AArch64::Bcc:
    return BCCDisplacementBits;
  }
}

bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                             int64_t BrOffset) const {
  unsigned Bits = getBranchDisplacementBits(BranchOp);
  assert(Bits >= 3 && "max branch displacement must be enough to jump "
                      "over conditional branch expansion");
  return isIntN(Bits, BrOffset / 4);
}

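// Worked example (illustrative): TB[N]Z has a 14-bit displacement counted in
// 4-byte units, so isIntN(14, BrOffset / 4) accepts byte offsets in
// [-8192 * 4, 8191 * 4] = [-32768, 32764]; anything further away must be
// rewritten by the branch relaxation pass.
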
MachineBasicBlock *AArch64InstrInfo::getBranchDestBlock(
    const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return MI.getOperand(0).getMBB();
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    return MI.getOperand(2).getMBB();
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
  case AArch64::Bcc:
    return MI.getOperand(1).getMBB();
  }
}

bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

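// Illustrative outcomes (derived from the cases above): for a block ending in
//   b.eq %bb.1
//   b    %bb.2
// analyzeBranch sets TBB = %bb.1, FBB = %bb.2 and Cond = { <eq> }; for a lone
// "cbz w0, %bb.1" it sets TBB = %bb.1 with Cond = { -1, CBZW, w0 } and leaves
// FBB null (fall-through).
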
bool AArch64InstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}

unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }
  --I;
  if (!isCondBranchOpcode(I->getOpcode())) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }

  // Remove the branch.
  I->eraseFromParent();
  if (BytesRemoved)
    *BytesRemoved = 8;

  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    // Note that we use MachineInstrBuilder::add instead of addReg to keep the
    // register operand flags.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);

    if (BytesAdded)
      *BytesAdded = 4;

    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);

  if (BytesAdded)
    *BytesAdded = 8;

  return 2;
}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to ADDXri and ADDWri.
    LLVM_FALLTHROUGH;
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to SUBXrr and SUBWrr.
    LLVM_FALLTHROUGH;
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}

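// Folding sketch (derived from the cases above): given
//   %1 = ADDWri %0, 1, 0      ; x + 1
//   %2 = CSELWr %1, %3, cc
// canFoldIntoCSel returns CSINCWr with *NewVReg = %0, so the caller can emit
//   %2 = CSINCWr %3, %0, !cc
// after inverting the condition; ORN from [WX]ZR folds to CSINV and SUB from
// [WX]ZR folds to CSNEG the same way.
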
bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, ArrayRef<MachineOperand> Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, unsigned DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = false;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = true;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = false;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = true;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg)
      .addReg(TrueReg)
      .addReg(FalseReg)
      .addImm(CC);
}

/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
  uint64_t Imm = MI.getOperand(1).getImm();
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
}

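// Worked example (illustrative): MOVi32imm #0x0000ffff is a valid logical
// immediate (a contiguous run of ones), so it can be emitted as
//   orr wDst, wzr, #0xffff
// whereas an arbitrary constant such as 0x12345678 is not encodable and keeps
// the MOVZ/MOVK expansion.
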
// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in the future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  if (!Subtarget.hasCustomCheapAsMoveHandling())
    return MI.isAsCheapAsAMove();

  unsigned Imm = 0;

  switch (MI.getOpcode()) {
  default:
    return false;

  // add/sub on register without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 ||
            MI.getOperand(3).getImm() == 0);

  // add/sub on register with shift
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    Imm = MI.getOperand(3).getImm();
    return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
            AArch64_AM::getArithShiftValue(Imm) < 4);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;

  // logical ops on register with shift
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
    Imm = MI.getOperand(3).getImm();
    return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
            AArch64_AM::getShiftValue(Imm) < 4 &&
            AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL);

  // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
  // ORRXri, it is as cheap as MOV
  case AArch64::MOVi32imm:
    return canBeExpandedToORR(MI, 32);
  case AArch64::MOVi64imm:
    return canBeExpandedToORR(MI, 64);

  // It is cheap to zero out registers if the subtarget has ZeroCycleZeroing
  // feature.
  case AArch64::FMOVS0:
  case AArch64::FMOVD0:
    return Subtarget.hasZeroCycleZeroing();
  case TargetOpcode::COPY:
    return (Subtarget.hasZeroCycleZeroing() &&
            (MI.getOperand(1).getReg() == AArch64::WZR ||
             MI.getOperand(1).getReg() == AArch64::XZR));
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}

bool AArch64InstrInfo::isFalkorShiftExtFast(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    return false;

  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    if (ShiftVal == 0)
      return true;
    return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5;
  }

  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64: {
    unsigned Imm = MI.getOperand(3).getImm();
    switch (AArch64_AM::getArithExtendType(Imm)) {
    default:
      return false;
    case AArch64_AM::UXTB:
    case AArch64_AM::UXTH:
    case AArch64_AM::UXTW:
    case AArch64_AM::UXTX:
      return AArch64_AM::getArithShiftValue(Imm) <= 4;
    }
  }

  case AArch64::SUBWrs:
  case AArch64::SUBSWrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    return ShiftVal == 0 ||
           (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 31);
  }

  case AArch64::SUBXrs:
  case AArch64::SUBSXrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    return ShiftVal == 0 ||
           (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 63);
  }

  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64: {
    unsigned Imm = MI.getOperand(3).getImm();
    switch (AArch64_AM::getArithExtendType(Imm)) {
    default:
      return false;
    case AArch64_AM::UXTB:
    case AArch64_AM::UXTH:
    case AArch64_AM::UXTW:
    case AArch64_AM::UXTX:
      return AArch64_AM::getArithShiftValue(Imm) == 0;
    }
  }

  case AArch64::LDRBBroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroW:
  case AArch64::LDRBroX:
  case AArch64::LDRDroW:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroW:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroW:
  case AArch64::LDRHroX:
  case AArch64::LDRQroW:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroW:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroW:
  case AArch64::LDRSroX:
  case AArch64::LDRWroW:
  case AArch64::LDRWroX:
  case AArch64::LDRXroW:
  case AArch64::LDRXroX:
  case AArch64::PRFMroW:
  case AArch64::PRFMroX:
  case AArch64::STRBBroW:
  case AArch64::STRBBroX:
  case AArch64::STRBroW:
  case AArch64::STRBroX:
  case AArch64::STRDroW:
  case AArch64::STRDroX:
  case AArch64::STRHHroW:
  case AArch64::STRHHroX:
  case AArch64::STRHroW:
  case AArch64::STRHroX:
  case AArch64::STRQroW:
  case AArch64::STRQroX:
  case AArch64::STRSroW:
  case AArch64::STRSroX:
  case AArch64::STRWroW:
  case AArch64::STRWroX:
  case AArch64::STRXroW:
  case AArch64::STRXroX: {
    unsigned IsSigned = MI.getOperand(3).getImm();
    return !IsSigned;
  }
  }
}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
    MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned BaseRegA = 0, BaseRegB = 0;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned WidthA = 0, WidthB = 0;

  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
  // base registers are identical, and the offset of a lower memory access +
  // the width doesn't overlap the offset of a higher memory access,
  // then the memory accesses are different.
  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
    if (BaseRegA == BaseRegB) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}

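// Worked example (illustrative): for
//   ldr x1, [x0, #8]     ; BaseReg = x0, Offset = 8,  Width = 8
//   str x2, [x0, #16]    ; BaseReg = x0, Offset = 16, Width = 8
// the lower access ends at 8 + 8 = 16 <= 16, so the two accesses are disjoint
// and the function returns true.
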
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = MI.getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: In order to convert CmpValue to 0 or 1
    CmpValue = MI.getOperand(2).getImm() != 0;
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: The return value type of decodeLogicalImmediate is uint64_t,
    // while the type of CmpValue is int. When converting uint64_t to int,
    // the high 32 bits of uint64_t will be lost.
    // In fact it causes a bug in spec2006-483.xalancbmk.
    // CmpValue is only used to compare with zero in OptimizeCompareInstr.
    CmpValue = AArch64_AM::decodeLogicalImmediate(
                   MI.getOperand(2).getImm(),
                   MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0;
    return true;
  }

  return false;
}

static bool UpdateOperandRegClass(MachineInstr &Instr) {
  MachineBasicBlock *MBB = Instr.getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr.getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr.getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// \brief Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible for doing the actual
/// substitution and legality checking.
static unsigned convertFlagSettingOpcode(const MachineInstr &MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
  bool MIDefinesZeroReg = false;
  if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI.getOpcode()) {
  default:
    return MI.getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}

enum AccessKind {
  AK_Write = 0x01,
  AK_Read = 0x10,
  AK_All = 0x11
};

/// True when condition flags are accessed (either by writing or reading)
/// on the instruction trace starting at From and ending at To.
///
/// Note: If From and To are from different blocks it's assumed CC are accessed
/// on the path.
static bool areCFlagsAccessedBetweenInstrs(
    MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
    const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
  // Early exit if To is at the beginning of the BB.
  if (To == To->getParent()->begin())
    return true;

  // Check whether the instructions are in the same basic block.
  // If not, assume the condition flags might get modified somewhere.
  if (To->getParent() != From->getParent())
    return true;

  // From must be above To.
  assert(std::find_if(++To.getReverse(), To->getParent()->rend(),
                      [From](MachineInstr &MI) {
                        return MI.getIterator() == From;
                      }) != To->getParent()->rend());

  // We iterate backward starting at \p To until we hit \p From.
  for (--To; To != From; --To) {
    const MachineInstr &Instr = *To;

    if (((AccessToCheck & AK_Write) &&
         Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
        ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
      return true;
  }
  return false;
}

/// Try to optimize a compare instruction. A compare instruction is an
/// instruction which produces AArch64::NZCV. It is only a true compare
/// instruction when there are no uses of its destination register.
///
/// The following steps are tried in order:
/// 1. Convert CmpInstr into an unconditional version.
/// 2. Remove CmpInstr if above there is an instruction producing a needed
///    condition code or an instruction which can be converted into such an
///    instruction. Only comparison with zero is supported.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {
  assert(CmpInstr.getParent());
  assert(MRI);

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (DeadNZCVIdx != -1) {
    if (CmpInstr.definesRegister(AArch64::WZR) ||
        CmpInstr.definesRegister(AArch64::XZR)) {
      CmpInstr.eraseFromParent();
      return true;
    }
    unsigned Opc = CmpInstr.getOpcode();
    unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr.setDesc(MCID);
    CmpInstr.RemoveOperand(DeadNZCVIdx);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where the immediate is zero.
  // FIXME: CmpValue has already been converted to 0 or 1 in analyzeCompare
  // function.
  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
    return false;

  return substituteCmpToZero(CmpInstr, SrcReg, MRI);
}

/// Get the opcode of the S (flag-setting) version of Instr.
/// If Instr is already the S version, its opcode is returned.
/// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have an S
/// version or we are not interested in it.
static unsigned sForm(MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64::INSTRUCTION_LIST_END;

  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    return Instr.getOpcode();

  case AArch64::ADDWrr: return AArch64::ADDSWrr;
  case AArch64::ADDWri: return AArch64::ADDSWri;
  case AArch64::ADDXrr: return AArch64::ADDSXrr;
  case AArch64::ADDXri: return AArch64::ADDSXri;
  case AArch64::ADCWr:  return AArch64::ADCSWr;
  case AArch64::ADCXr:  return AArch64::ADCSXr;
  case AArch64::SUBWrr: return AArch64::SUBSWrr;
  case AArch64::SUBWri: return AArch64::SUBSWri;
  case AArch64::SUBXrr: return AArch64::SUBSXrr;
  case AArch64::SUBXri: return AArch64::SUBSXri;
  case AArch64::SBCWr:  return AArch64::SBCSWr;
  case AArch64::SBCXr:  return AArch64::SBCSXr;
  case AArch64::ANDWri: return AArch64::ANDSWri;
  case AArch64::ANDXri: return AArch64::ANDSXri;
  }
}

/// Check if AArch64::NZCV should be alive in successors of MBB.
static bool areCFlagsAliveInSuccessors(MachineBasicBlock *MBB) {
  for (auto *BB : MBB->successors())
    if (BB->isLiveIn(AArch64::NZCV))
      return true;

  return false;
}

namespace {

struct UsedNZCV {
  bool N = false;
  bool Z = false;
  bool C = false;
  bool V = false;

  UsedNZCV() = default;

  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    this->N |= UsedFlags.N;
    this->Z |= UsedFlags.Z;
    this->C |= UsedFlags.C;
    this->V |= UsedFlags.V;
    return *this;
  }
};

} // end anonymous namespace

/// Find a condition code used by the instruction.
/// Returns AArch64CC::Invalid if either the instruction does not use condition
/// codes or we don't optimize CmpInstr in the presence of such instructions.
static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64CC::Invalid;

  case AArch64::Bcc: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 2);
    return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 2).getImm());
  }

  case AArch64::CSINVWr:
  case AArch64::CSINVXr:
  case AArch64::CSINCWr:
  case AArch64::CSINCXr:
  case AArch64::CSELWr:
  case AArch64::CSELXr:
  case AArch64::CSNEGWr:
  case AArch64::CSNEGXr:
  case AArch64::FCSELSrrr:
  case AArch64::FCSELDrrr: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 1);
    return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 1).getImm());
  }
  }
}

static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
  assert(CC != AArch64CC::Invalid);
  UsedNZCV UsedFlags;
  switch (CC) {
  default:
    break;

  case AArch64CC::EQ: // Z set
  case AArch64CC::NE: // Z clear
    UsedFlags.Z = true;
    break;

  case AArch64CC::HI: // Z clear and C set
  case AArch64CC::LS: // Z set   or  C clear
    UsedFlags.Z = true;
    LLVM_FALLTHROUGH;
  case AArch64CC::HS: // C set
  case AArch64CC::LO: // C clear
    UsedFlags.C = true;
    break;

  case AArch64CC::MI: // N set
  case AArch64CC::PL: // N clear
    UsedFlags.N = true;
    break;

  case AArch64CC::VS: // V set
  case AArch64CC::VC: // V clear
    UsedFlags.V = true;
    break;

  case AArch64CC::GT: // Z clear, N and V the same
  case AArch64CC::LE: // Z set,   N and V differ
    UsedFlags.Z = true;
    LLVM_FALLTHROUGH;
  case AArch64CC::GE: // N and V the same
  case AArch64CC::LT: // N and V differ
    UsedFlags.N = true;
    UsedFlags.V = true;
    break;
  }
  return UsedFlags;
}

static bool isADDSRegImm(unsigned Opcode) {
  return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
}

static bool isSUBSRegImm(unsigned Opcode) {
  return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
}

/// Check if CmpInstr can be substituted by MI.
///
/// CmpInstr can be substituted when:
/// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
/// - and, MI and CmpInstr are from the same MachineBB
/// - and, condition flags are not alive in successors of the CmpInstr parent
/// - and, if MI opcode is the S form there must be no defs of flags between
///        MI and CmpInstr,
///        or if MI opcode is not the S form there must be neither defs of flags
///        nor uses of flags between MI and CmpInstr
/// - and, C/V flags are not used after CmpInstr
static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr,
                                       const TargetRegisterInfo *TRI) {
  assert(MI);
  assert(sForm(*MI) != AArch64::INSTRUCTION_LIST_END);
  assert(CmpInstr);

  const unsigned CmpOpcode = CmpInstr->getOpcode();
  if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
    return false;

  if (MI->getParent() != CmpInstr->getParent())
    return false;

  if (areCFlagsAliveInSuccessors(CmpInstr->getParent()))
    return false;

  AccessKind AccessToCheck = AK_Write;
  if (sForm(*MI) != MI->getOpcode())
    AccessToCheck = AK_All;
  if (areCFlagsAccessedBetweenInstrs(MI, CmpInstr, TRI, AccessToCheck))
    return false;

  UsedNZCV NZCVUsedAfterCmp;
  for (auto I = std::next(CmpInstr->getIterator()),
            E = CmpInstr->getParent()->instr_end();
       I != E; ++I) {
    const MachineInstr &Instr = *I;
    if (Instr.readsRegister(AArch64::NZCV, TRI)) {
      AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr);
      if (CC == AArch64CC::Invalid) // Unsupported conditional instruction
        return false;
      NZCVUsedAfterCmp |= getUsedNZCV(CC);
    }

    if (Instr.modifiesRegister(AArch64::NZCV, TRI))
      break;
  }

  return !NZCVUsedAfterCmp.C && !NZCVUsedAfterCmp.V;
}

/// Substitute an instruction comparing to zero with another instruction
/// which produces needed condition flags.
///
/// Return true on success.
bool AArch64InstrInfo::substituteCmpToZero(
    MachineInstr &CmpInstr, unsigned SrcReg,
    const MachineRegisterInfo *MRI) const {
  assert(MRI);
  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();

  unsigned NewOpc = sForm(*MI);
  if (NewOpc == AArch64::INSTRUCTION_LIST_END)
    return false;

  if (!canInstrSubstituteCmpInstr(MI, &CmpInstr, TRI))
    return false;

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr.eraseFromParent();
  bool succeeded = UpdateOperandRegClass(*MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}

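// End-to-end sketch (illustrative): given
//   %1 = SUBWrr %2, %3
//   ...                      ; no NZCV defs/uses in between
//   SUBSWri %1, 0, 0         ; cmp w1, #0, flags used only by b.eq/b.ne
// the compare is deleted and the definition becomes
//   %1 = SUBSWrr %2, %3
// which sets the Z flag exactly as the removed compare would have.
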
bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI.memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI.memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC)
        .addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC)
        .addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC)
        .addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3)
        .addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI.memoperands_begin());
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI.memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}

/// Return true if this instruction has a shifted-register operand with a
/// non-zero shift amount.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI.getOperand(3).isImm()) {
      unsigned val = MI.getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

/// Return true if this instruction has an extended-register operand with a
/// non-zero extend/shift encoding.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI.getOperand(3).isImm()) {
      unsigned val = MI.getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
      assert(MI.getDesc().getNumOperands() == 3 &&
             MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI.getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI.getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI.getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI.getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI.getOperand(1).getReg() == AArch64::XZR) {
      assert(MI.getDesc().getNumOperands() == 4 &&
             MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI.getOperand(2).getImm() == 0) {
      assert(MI.getDesc().getNumOperands() == 4 &&
             MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI.getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
        MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
        MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX: {
    unsigned Val = MI.getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  }
  return false;
}

/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) const {
  return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
    return MMO->getFlags() & MOSuppressPair;
  });
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) const {
  if (MI.memoperands_empty())
    return;
  (*MI.memoperands_begin())->setFlags(MOSuppressPair);
}

bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) const {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

bool AArch64InstrInfo::isUnscaledLdSt(MachineInstr &MI) const {
  return isUnscaledLdSt(MI.getOpcode());
}

// Is this a candidate for ld/st merging or pairing? For example, we don't
// touch volatiles or load/stores that have a hint to avoid pair formation.
bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
  // If this is a volatile load/store, don't mess with it.
  if (MI.hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm (as opposed to an address reloc).
  assert(MI.getOperand(1).isReg() && "Expected a reg operand.");
  if (!MI.getOperand(2).isImm())
    return false;

  // Can't merge/pair if the instruction modifies the base register.
  // e.g., ldr x0, [x0]
  unsigned BaseReg = MI.getOperand(1).getReg();
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (MI.modifiesRegister(BaseReg, TRI))
    return false;

  // Check if this load/store has a hint to avoid pair formation.
  // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
  if (isLdStPairSuppressed(MI))
    return false;

  // On some CPUs quad load/store pairs are slower than two single load/stores.
  if (Subtarget.isPaired128Slow()) {
    switch (MI.getOpcode()) {
    default:
      break;
    case AArch64::LDURQi:
    case AArch64::STURQi:
    case AArch64::LDRQui:
    case AArch64::STRQui:
      return false;
    }
  }

  return true;
}

bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
    MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,
    const TargetRegisterInfo *TRI) const {
  unsigned Width;
  return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);
}

bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
    MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
    const TargetRegisterInfo *TRI) const {
  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
  // Handle only loads/stores with base register followed by immediate offset.
  if (LdSt.getNumExplicitOperands() == 3) {
    // Non-paired instruction (e.g., ldr x1, [x0, #8]).
    if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
      return false;
  } else if (LdSt.getNumExplicitOperands() == 4) {
    // Paired instruction (e.g., ldp x1, x2, [x0, #8]).
    if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isReg() ||
        !LdSt.getOperand(3).isImm())
      return false;
  } else
    return false;

  // Get the scaling factor for the instruction and set the width for the
  // instruction.
  unsigned Scale = 0;
  int64_t Dummy1, Dummy2;

  // If this returns false, then it's an instruction we don't want to handle.
  if (!getMemOpInfo(LdSt.getOpcode(), Scale, Width, Dummy1, Dummy2))
    return false;

  // Compute the offset. Offset is calculated as the immediate operand
  // multiplied by the scaling factor. Unscaled instructions have scaling
  // factor set to 1.
  if (LdSt.getNumExplicitOperands() == 3) {
    BaseReg = LdSt.getOperand(1).getReg();
    Offset = LdSt.getOperand(2).getImm() * Scale;
  } else {
    assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
    BaseReg = LdSt.getOperand(2).getReg();
    Offset = LdSt.getOperand(3).getImm() * Scale;
  }
  return true;
}

MachineOperand &
AArch64InstrInfo::getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const {
  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
  MachineOperand &OfsOp = LdSt.getOperand(LdSt.getNumExplicitOperands() - 1);
  assert(OfsOp.isImm() && "Offset operand wasn't immediate.");
  return OfsOp;
}

bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, unsigned &Scale,
                                    unsigned &Width, int64_t &MinOffset,
                                    int64_t &MaxOffset) const {
  switch (Opcode) {
  // Not a memory operation or something we want to handle.
  default:
    Scale = Width = 0;
    MinOffset = MaxOffset = 0;
    return false;
  case AArch64::STRWpost:
  case AArch64::LDRWpost:
    Width = 32;
    Scale = 4;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    Width = 16;
    Scale = 1;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    Width = 8;
    Scale = 1;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    Width = 4;
    Scale = 1;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::LDURHi:
  case AArch64::LDURHHi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::STURHi:
  case AArch64::STURHHi:
    Width = 2;
    Scale = 1;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::LDURBi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::STURBi:
  case AArch64::STURBBi:
    Width = 1;
    Scale = 1;
    MinOffset = -256;
    MaxOffset = 255;
    break;
  case AArch64::LDPQi:
  case AArch64::LDNPQi:
  case AArch64::STPQi:
  case AArch64::STNPQi:
    Scale = 16;
    Width = 32;
    MinOffset = -64;
    MaxOffset = 63;
    break;
  case AArch64::LDRQui:
  case AArch64::STRQui:
    Scale = Width = 16;
    MinOffset = 0;
    MaxOffset = 4095;
    break;
  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::LDNPXi:
  case AArch64::LDNPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
  case AArch64::STNPXi:
  case AArch64::STNPDi:
    Scale = 8;
    Width = 16;
    MinOffset = -64;
    MaxOffset = 63;
    break;
  case AArch64::LDRXui:
  case AArch64::LDRDui:
  case AArch64::STRXui:
  case AArch64::STRDui:
    Scale = Width = 8;
    MinOffset = 0;
    MaxOffset = 4095;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::LDNPWi:
  case AArch64::LDNPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
  case AArch64::STNPWi:
  case AArch64::STNPSi:
    Scale = 4;
    Width = 8;
    MinOffset = -64;
    MaxOffset = 63;
    break;
  case AArch64::LDRWui:
  case AArch64::LDRSui:
  case AArch64::LDRSWui:
  case AArch64::STRWui:
  case AArch64::STRSui:
    Scale = Width = 4;
    MinOffset = 0;
    MaxOffset = 4095;
    break;
  case AArch64::LDRHui:
  case AArch64::LDRHHui:
  case AArch64::STRHui:
  case AArch64::STRHHui:
    Scale = Width = 2;
    MinOffset = 0;
    MaxOffset = 4095;
    break;
  case AArch64::LDRBui:
  case AArch64::LDRBBui:
  case AArch64::STRBui:
  case AArch64::STRBBui:
    Scale = Width = 1;
    MinOffset = 0;
    MaxOffset = 4095;
    break;
  }

  return true;
}

// Scale the unscaled offsets. Returns false if the unscaled offset can't be
// scaled.
static bool scaleOffset(unsigned Opc, int64_t &Offset) {
  unsigned OffsetStride = 1;
  switch (Opc) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    OffsetStride = 16;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    OffsetStride = 8;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    OffsetStride = 4;
    break;
  }
  // If the byte-offset isn't a multiple of the stride, we can't scale this
  // offset.
  if (Offset % OffsetStride != 0)
    return false;

  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  Offset /= OffsetStride;
  return true;
}

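// Worked example (illustrative): for LDURXi the stride is 8, so a byte offset
// of 16 scales to element offset 2, while a byte offset of 12 is rejected
// because 12 % 8 != 0 and no LDP element offset can express it.
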
static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
  if (FirstOpc == SecondOpc)
    return true;
  // We can also pair sign-ext and zero-ext instructions.
  switch (FirstOpc) {
  default:
    return false;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
  }
  // These instructions can't be paired based on their opcodes.
  return false;
}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
                                           MachineInstr &SecondLdSt,
                                           unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;

  if (!isPairableLdStInst(FirstLdSt) || !isPairableLdStInst(SecondLdSt))
    return false;

  // Can we pair these instructions based on their opcodes?
  unsigned FirstOpc = FirstLdSt.getOpcode();
  unsigned SecondOpc = SecondLdSt.getOpcode();
  if (!canPairLdStOpc(FirstOpc, SecondOpc))
    return false;

  // Can't merge volatiles or load/stores that have a hint to avoid pair
  // formation, for example.
  if (!isCandidateToMergeOrPair(FirstLdSt) ||
      !isCandidateToMergeOrPair(SecondLdSt))
    return false;

  // isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
  int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
  if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
    return false;

  int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
  if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
    return false;

  // Pairwise instructions have a 7-bit signed offset field.
  if (Offset1 > 63 || Offset1 < -64)
    return false;

  // The caller should already have ordered First/SecondLdSt by offset.
  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
  return Offset1 + 1 == Offset2;
}

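// Pairing sketch (illustrative): ldr x1, [x0, #8] and ldr x2, [x0, #16] carry
// scaled immediates 1 and 2, so Offset1 + 1 == Offset2 holds and the two
// loads are clustered for later ldp formation; offsets outside [-64, 63] are
// rejected because LDP/STP only has a 7-bit signed scaled offset.
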
2080 MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
2081 MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
2082 const MDNode *Expr, const DebugLoc &DL) const {
2083 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
2084 .addFrameIndex(FrameIx)
2085 .addImm(0)
2086 .addImm(Offset)
2087 .addMetadata(Var)
2088 .addMetadata(Expr);
2089 return &*MIB;
2090 }
2092 static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
2093 unsigned Reg, unsigned SubIdx,
2094 unsigned State,
2095 const TargetRegisterInfo *TRI) {
2096 if (!SubIdx)
2097 return MIB.addReg(Reg, State);
2099 if (TargetRegisterInfo::isPhysicalRegister(Reg))
2100 return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
2101 return MIB.addReg(Reg, State, SubIdx);
2102 }
2104 static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
2105 unsigned NumRegs) {
2106 // We really want the positive remainder mod 32 here; that happens to be
2107 // easily obtainable with a mask.
2108 return ((DestReg - SrcReg) & 0x1f) < NumRegs;
2109 }
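// Worked example (illustrative, not part of the original source): copying
// the triple D1_D2_D3 into D2_D3_D4 gives (2 - 1) & 0x1f == 1 < 3, so a
// forward sub-register copy would overwrite D2/D3 before reading them and
// copyPhysRegTuple below must iterate in reverse; copying D4_D5_D6 into
// D1_D2_D3 gives 29 >= 3, so the forward order is safe.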
2111 void AArch64InstrInfo::copyPhysRegTuple(
2112 MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL,
2113 unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
2114 ArrayRef<unsigned> Indices) const {
2115 assert(Subtarget.hasNEON() &&
2116 "Unexpected register copy without NEON");
2117 const TargetRegisterInfo *TRI = &getRegisterInfo();
2118 uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
2119 uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
2120 unsigned NumRegs = Indices.size();
2122 int SubReg = 0, End = NumRegs, Incr = 1;
2123 if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
2124 SubReg = NumRegs - 1;
2125 End = -1;
2126 Incr = -1;
2127 }
2129 for (; SubReg != End; SubReg += Incr) {
2130 const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
2131 AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
2132 AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
2133 AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
2134 }
2135 }
2137 void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
2138 MachineBasicBlock::iterator I,
2139 const DebugLoc &DL, unsigned DestReg,
2140 unsigned SrcReg, bool KillSrc) const {
2141 if (AArch64::GPR32spRegClass.contains(DestReg) &&
2142 (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
2143 const TargetRegisterInfo *TRI = &getRegisterInfo();
2145 if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
2146 // If either operand is WSP, expand to ADD #0.
2147 if (Subtarget.hasZeroCycleRegMove()) {
2148 // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
2149 unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
2150 &AArch64::GPR64spRegClass);
2151 unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
2152 &AArch64::GPR64spRegClass);
2153 // This instruction is reading and writing X registers. This may upset
2154 // the register scavenger and machine verifier, so we need to indicate
2155 // that we are reading an undefined value from SrcRegX, but a proper
2156 // value from SrcReg.
2157 BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
2158 .addReg(SrcRegX, RegState::Undef)
2159 .addImm(0)
2160 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
2161 .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
2162 } else {
2163 BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
2164 .addReg(SrcReg, getKillRegState(KillSrc))
2165 .addImm(0)
2166 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2167 }
2168 } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
2169 BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
2170 AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2171 } else {
2172 if (Subtarget.hasZeroCycleRegMove()) {
2173 // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
2174 unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
2175 &AArch64::GPR64spRegClass);
2176 unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
2177 &AArch64::GPR64spRegClass);
2178 // This instruction is reading and writing X registers. This may upset
2179 // the register scavenger and machine verifier, so we need to indicate
2180 // that we are reading an undefined value from SrcRegX, but a proper
2181 // value from SrcReg.
2182 BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
2183 .addReg(AArch64::XZR)
2184 .addReg(SrcRegX, RegState::Undef)
2185 .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
2186 } else {
2187 // Otherwise, expand to ORR WZR.
2188 BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
2189 .addReg(AArch64::WZR)
2190 .addReg(SrcReg, getKillRegState(KillSrc));
2191 }
2192 }
2193 return;
2194 }
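// Summary of the expansions above (illustrative, not part of the original
// source): with zero-cycle register moves, "w1 := w2" is emitted as
// "orr x1, xzr, x2" so the move-elimination hardware can absorb it; with
// zero-cycle zeroing, "w1 := wzr" becomes "movz w1, #0"; copies touching
// WSP use an ADD with immediate 0; the generic fallback is
// "orr w1, wzr, w2".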
2196 if (AArch64::GPR64spRegClass.contains(DestReg) &&
2197 (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
2198 if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
2199 // If either operand is SP, expand to ADD #0.
2200 BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
2201 .addReg(SrcReg, getKillRegState(KillSrc))
2202 .addImm(0)
2203 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2204 } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
2205 BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
2206 AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2207 } else {
2208 // Otherwise, expand to ORR XZR.
2209 BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
2210 .addReg(AArch64::XZR)
2211 .addReg(SrcReg, getKillRegState(KillSrc));
2212 }
2213 return;
2214 }
2216 // Copy a DDDD register quad by copying the individual sub-registers.
2217 if (AArch64::DDDDRegClass.contains(DestReg) &&
2218 AArch64::DDDDRegClass.contains(SrcReg)) {
2219 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
2220 AArch64::dsub2, AArch64::dsub3 };
2221 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
2222 Indices);
2223 return;
2224 }
2226 // Copy a DDD register triple by copying the individual sub-registers.
2227 if (AArch64::DDDRegClass.contains(DestReg) &&
2228 AArch64::DDDRegClass.contains(SrcReg)) {
2229 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
2230 AArch64::dsub2 };
2231 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
2232 Indices);
2233 return;
2234 }
2236 // Copy a DD register pair by copying the individual sub-registers.
2237 if (AArch64::DDRegClass.contains(DestReg) &&
2238 AArch64::DDRegClass.contains(SrcReg)) {
2239 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
2240 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
2241 Indices);
2242 return;
2243 }
2245 // Copy a QQQQ register quad by copying the individual sub-registers.
2246 if (AArch64::QQQQRegClass.contains(DestReg) &&
2247 AArch64::QQQQRegClass.contains(SrcReg)) {
2248 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
2249 AArch64::qsub2, AArch64::qsub3 };
2250 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
2251 Indices);
2252 return;
2253 }
2255 // Copy a QQQ register triple by copying the individual sub-registers.
2256 if (AArch64::QQQRegClass.contains(DestReg) &&
2257 AArch64::QQQRegClass.contains(SrcReg)) {
2258 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
2259 AArch64::qsub2 };
2260 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
2261 Indices);
2262 return;
2263 }
2265 // Copy a QQ register pair by copying the individual sub-registers.
2266 if (AArch64::QQRegClass.contains(DestReg) &&
2267 AArch64::QQRegClass.contains(SrcReg)) {
2268 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
2269 copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
2270 Indices);
2271 return;
2272 }
2274 if (AArch64::FPR128RegClass.contains(DestReg) &&
2275 AArch64::FPR128RegClass.contains(SrcReg)) {
2276 if (Subtarget.hasNEON()) {
2277 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2278 .addReg(SrcReg)
2279 .addReg(SrcReg, getKillRegState(KillSrc));
2280 } else {
2281 BuildMI(MBB, I, DL, get(AArch64::STRQpre))
2282 .addReg(AArch64::SP, RegState::Define)
2283 .addReg(SrcReg, getKillRegState(KillSrc))
2284 .addReg(AArch64::SP)
2285 .addImm(-16);
2286 BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
2287 .addReg(AArch64::SP, RegState::Define)
2288 .addReg(DestReg, RegState::Define)
2289 .addReg(AArch64::SP)
2290 .addImm(16);
2291 }
2292 return;
2293 }
2295 if (AArch64::FPR64RegClass.contains(DestReg) &&
2296 AArch64::FPR64RegClass.contains(SrcReg)) {
2297 if (Subtarget.hasNEON()) {
2298 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
2299 &AArch64::FPR128RegClass);
2300 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
2301 &AArch64::FPR128RegClass);
2302 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2303 .addReg(SrcReg)
2304 .addReg(SrcReg, getKillRegState(KillSrc));
2305 } else {
2306 BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
2307 .addReg(SrcReg, getKillRegState(KillSrc));
2308 }
2309 return;
2310 }
2312 if (AArch64::FPR32RegClass.contains(DestReg) &&
2313 AArch64::FPR32RegClass.contains(SrcReg)) {
2314 if (Subtarget.hasNEON()) {
2315 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
2316 &AArch64::FPR128RegClass);
2317 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
2318 &AArch64::FPR128RegClass);
2319 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2320 .addReg(SrcReg)
2321 .addReg(SrcReg, getKillRegState(KillSrc));
2322 } else {
2323 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2324 .addReg(SrcReg, getKillRegState(KillSrc));
2325 }
2326 return;
2327 }
2329 if (AArch64::FPR16RegClass.contains(DestReg) &&
2330 AArch64::FPR16RegClass.contains(SrcReg)) {
2331 if (Subtarget.hasNEON()) {
2332 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
2333 &AArch64::FPR128RegClass);
2334 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
2335 &AArch64::FPR128RegClass);
2336 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2337 .addReg(SrcReg)
2338 .addReg(SrcReg, getKillRegState(KillSrc));
2339 } else {
2340 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
2341 &AArch64::FPR32RegClass);
2342 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
2343 &AArch64::FPR32RegClass);
2344 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2345 .addReg(SrcReg, getKillRegState(KillSrc));
2346 }
2347 return;
2348 }
2350 if (AArch64::FPR8RegClass.contains(DestReg) &&
2351 AArch64::FPR8RegClass.contains(SrcReg)) {
2352 if (Subtarget.hasNEON()) {
2353 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
2354 &AArch64::FPR128RegClass);
2355 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
2356 &AArch64::FPR128RegClass);
2357 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2358 .addReg(SrcReg)
2359 .addReg(SrcReg, getKillRegState(KillSrc));
2360 } else {
2361 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
2362 &AArch64::FPR32RegClass);
2363 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
2364 &AArch64::FPR32RegClass);
2365 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2366 .addReg(SrcReg, getKillRegState(KillSrc));
2367 }
2368 return;
2369 }
2371 // Copies between GPR64 and FPR64.
2372 if (AArch64::FPR64RegClass.contains(DestReg) &&
2373 AArch64::GPR64RegClass.contains(SrcReg)) {
2374 BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
2375 .addReg(SrcReg, getKillRegState(KillSrc));
2376 return;
2377 }
2378 if (AArch64::GPR64RegClass.contains(DestReg) &&
2379 AArch64::FPR64RegClass.contains(SrcReg)) {
2380 BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
2381 .addReg(SrcReg, getKillRegState(KillSrc));
2382 return;
2383 }
2384 // Copies between GPR32 and FPR32.
2385 if (AArch64::FPR32RegClass.contains(DestReg) &&
2386 AArch64::GPR32RegClass.contains(SrcReg)) {
2387 BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
2388 .addReg(SrcReg, getKillRegState(KillSrc));
2389 return;
2390 }
2391 if (AArch64::GPR32RegClass.contains(DestReg) &&
2392 AArch64::FPR32RegClass.contains(SrcReg)) {
2393 BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
2394 .addReg(SrcReg, getKillRegState(KillSrc));
2395 return;
2396 }
2398 if (DestReg == AArch64::NZCV) {
2399 assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
2400 BuildMI(MBB, I, DL, get(AArch64::MSR))
2401 .addImm(AArch64SysReg::NZCV)
2402 .addReg(SrcReg, getKillRegState(KillSrc))
2403 .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
2404 return;
2405 }
2407 if (SrcReg == AArch64::NZCV) {
2408 assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
2409 BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
2410 .addImm(AArch64SysReg::NZCV)
2411 .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
2412 return;
2413 }
2415 llvm_unreachable("unimplemented reg-to-reg copy");
2416 }
2418 void AArch64InstrInfo::storeRegToStackSlot(
2419 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
2420 bool isKill, int FI, const TargetRegisterClass *RC,
2421 const TargetRegisterInfo *TRI) const {
2422 DebugLoc DL;
2423 if (MBBI != MBB.end())
2424 DL = MBBI->getDebugLoc();
2425 MachineFunction &MF = *MBB.getParent();
2426 MachineFrameInfo &MFI = MF.getFrameInfo();
2427 unsigned Align = MFI.getObjectAlignment(FI);
2429 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
2430 MachineMemOperand *MMO = MF.getMachineMemOperand(
2431 PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
2432 unsigned Opc = 0;
2433 bool Offset = true;
2434 switch (TRI->getSpillSize(*RC)) {
2435 case 1:
2436 if (AArch64::FPR8RegClass.hasSubClassEq(RC))
2437 Opc = AArch64::STRBui;
2438 break;
2439 case 2:
2440 if (AArch64::FPR16RegClass.hasSubClassEq(RC))
2441 Opc = AArch64::STRHui;
2442 break;
2443 case 4:
2444 if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
2445 Opc = AArch64::STRWui;
2446 if (TargetRegisterInfo::isVirtualRegister(SrcReg))
2447 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
2448 else
2449 assert(SrcReg != AArch64::WSP);
2450 } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
2451 Opc = AArch64::STRSui;
2452 break;
2453 case 8:
2454 if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
2455 Opc = AArch64::STRXui;
2456 if (TargetRegisterInfo::isVirtualRegister(SrcReg))
2457 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
2458 else
2459 assert(SrcReg != AArch64::SP);
2460 } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
2461 Opc = AArch64::STRDui;
2462 break;
2463 case 16:
2464 if (AArch64::FPR128RegClass.hasSubClassEq(RC))
2465 Opc = AArch64::STRQui;
2466 else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
2467 assert(Subtarget.hasNEON() &&
2468 "Unexpected register store without NEON");
2469 Opc = AArch64::ST1Twov1d;
2470 Offset = false;
2471 }
2472 break;
2473 case 24:
2474 if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
2475 assert(Subtarget.hasNEON() &&
2476 "Unexpected register store without NEON");
2477 Opc = AArch64::ST1Threev1d;
2478 Offset = false;
2479 }
2480 break;
2481 case 32:
2482 if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
2483 assert(Subtarget.hasNEON() &&
2484 "Unexpected register store without NEON");
2485 Opc = AArch64::ST1Fourv1d;
2486 Offset = false;
2487 } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
2488 assert(Subtarget.hasNEON() &&
2489 "Unexpected register store without NEON");
2490 Opc = AArch64::ST1Twov2d;
2491 Offset = false;
2492 }
2493 break;
2494 case 48:
2495 if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
2496 assert(Subtarget.hasNEON() &&
2497 "Unexpected register store without NEON");
2498 Opc = AArch64::ST1Threev2d;
2499 Offset = false;
2500 }
2501 break;
2502 case 64:
2503 if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
2504 assert(Subtarget.hasNEON() &&
2505 "Unexpected register store without NEON");
2506 Opc = AArch64::ST1Fourv2d;
2507 Offset = false;
2508 }
2509 break;
2510 }
2511 assert(Opc && "Unknown register class");
2513 const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
2514 .addReg(SrcReg, getKillRegState(isKill))
2515 .addFrameIndex(FI);
2517 if (Offset)
2518 MI.addImm(0);
2519 MI.addMemOperand(MMO);
2520 }
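// Example mapping (illustrative, not part of the original source): a
// 32-byte QQ spill selects ST1Twov2d, which has no immediate operand, so
// Offset is false and only the frame index is appended; an 8-byte GPR64
// spill selects STRXui and gets an explicit immediate offset of 0.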
2522 void AArch64InstrInfo::loadRegFromStackSlot(
2523 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
2524 int FI, const TargetRegisterClass *RC,
2525 const TargetRegisterInfo *TRI) const {
2526 DebugLoc DL;
2527 if (MBBI != MBB.end())
2528 DL = MBBI->getDebugLoc();
2529 MachineFunction &MF = *MBB.getParent();
2530 MachineFrameInfo &MFI = MF.getFrameInfo();
2531 unsigned Align = MFI.getObjectAlignment(FI);
2532 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
2533 MachineMemOperand *MMO = MF.getMachineMemOperand(
2534 PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);
2536 unsigned Opc = 0;
2537 bool Offset = true;
2538 switch (TRI->getSpillSize(*RC)) {
2539 case 1:
2540 if (AArch64::FPR8RegClass.hasSubClassEq(RC))
2541 Opc = AArch64::LDRBui;
2542 break;
2543 case 2:
2544 if (AArch64::FPR16RegClass.hasSubClassEq(RC))
2545 Opc = AArch64::LDRHui;
2546 break;
2547 case 4:
2548 if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
2549 Opc = AArch64::LDRWui;
2550 if (TargetRegisterInfo::isVirtualRegister(DestReg))
2551 MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
2552 else
2553 assert(DestReg != AArch64::WSP);
2554 } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
2555 Opc = AArch64::LDRSui;
2556 break;
2557 case 8:
2558 if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
2559 Opc = AArch64::LDRXui;
2560 if (TargetRegisterInfo::isVirtualRegister(DestReg))
2561 MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
2562 else
2563 assert(DestReg != AArch64::SP);
2564 } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
2565 Opc = AArch64::LDRDui;
2566 break;
2567 case 16:
2568 if (AArch64::FPR128RegClass.hasSubClassEq(RC))
2569 Opc = AArch64::LDRQui;
2570 else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
2571 assert(Subtarget.hasNEON() &&
2572 "Unexpected register load without NEON");
2573 Opc = AArch64::LD1Twov1d;
2574 Offset = false;
2575 }
2576 break;
2577 case 24:
2578 if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
2579 assert(Subtarget.hasNEON() &&
2580 "Unexpected register load without NEON");
2581 Opc = AArch64::LD1Threev1d;
2582 Offset = false;
2583 }
2584 break;
2585 case 32:
2586 if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
2587 assert(Subtarget.hasNEON() &&
2588 "Unexpected register load without NEON");
2589 Opc = AArch64::LD1Fourv1d;
2590 Offset = false;
2591 } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
2592 assert(Subtarget.hasNEON() &&
2593 "Unexpected register load without NEON");
2594 Opc = AArch64::LD1Twov2d;
2595 Offset = false;
2596 }
2597 break;
2598 case 48:
2599 if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
2600 assert(Subtarget.hasNEON() &&
2601 "Unexpected register load without NEON");
2602 Opc = AArch64::LD1Threev2d;
2603 Offset = false;
2604 }
2605 break;
2606 case 64:
2607 if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
2608 assert(Subtarget.hasNEON() &&
2609 "Unexpected register load without NEON");
2610 Opc = AArch64::LD1Fourv2d;
2611 Offset = false;
2612 }
2613 break;
2614 }
2615 assert(Opc && "Unknown register class");
2617 const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
2618 .addReg(DestReg, getDefRegState(true))
2619 .addFrameIndex(FI);
2620 if (Offset)
2621 MI.addImm(0);
2622 MI.addMemOperand(MMO);
2623 }
2625 void llvm::emitFrameOffset(MachineBasicBlock &MBB,
2626 MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
2627 unsigned DestReg, unsigned SrcReg, int Offset,
2628 const TargetInstrInfo *TII,
2629 MachineInstr::MIFlag Flag, bool SetNZCV) {
2630 if (DestReg == SrcReg && Offset == 0)
2631 return;
2633 assert((DestReg != AArch64::SP || Offset % 16 == 0) &&
2634 "SP increment/decrement not 16-byte aligned");
2636 bool isSub = Offset < 0;
2637 if (isSub)
2638 Offset = -Offset;
2640 // FIXME: If the offset won't fit in 24-bits, compute the offset into a
2641 // scratch register. If DestReg is a virtual register, use it as the
2642 // scratch register; otherwise, create a new virtual register (to be
2643 // replaced by the scavenger at the end of PEI). That case can be optimized
2644 // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
2645 // register can be loaded with offset%8 and the add/sub can use an extending
2646 // instruction with LSL#3.
2647 // Currently the function handles any offsets but generates a poor sequence
2648 // of code.
2649 // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");
2651 unsigned Opc;
2652 if (SetNZCV)
2653 Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
2654 else
2655 Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
2656 const unsigned MaxEncoding = 0xfff;
2657 const unsigned ShiftSize = 12;
2658 const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
2659 while (((unsigned)Offset) >= (1 << ShiftSize)) {
2660 unsigned ThisVal;
2661 if (((unsigned)Offset) > MaxEncodableValue) {
2662 ThisVal = MaxEncodableValue;
2663 } else {
2664 ThisVal = Offset & MaxEncodableValue;
2665 }
2666 assert((ThisVal >> ShiftSize) <= MaxEncoding &&
2667 "Encoding cannot handle value that big");
2668 BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
2669 .addReg(SrcReg)
2670 .addImm(ThisVal >> ShiftSize)
2671 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
2672 .setMIFlag(Flag);
2673 SrcReg = DestReg;
2674 Offset -= ThisVal;
2675 if (Offset == 0)
2676 return;
2677 }
2679 BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
2680 .addReg(SrcReg)
2681 .addImm(Offset)
2682 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
2683 .setMIFlag(Flag);
2684 }
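// Worked example (illustrative, not part of the original source): a call
// such as
//   emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, 0x12345, TII,
//                   MachineInstr::FrameSetup, false);
// first emits "add sp, sp, #0x12, lsl #12" (covering 0x12000 bytes) in the
// loop above, then "add sp, sp, #0x345" for the remainder; a small offset
// like -48 fits the 12-bit field and produces a single SUBXri.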
2686 MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
2687 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
2688 MachineBasicBlock::iterator InsertPt, int FrameIndex,
2689 LiveIntervals *LIS) const {
2690 // This is a bit of a hack. Consider this instruction:
2692 // %vreg0<def> = COPY %SP; GPR64all:%vreg0
2694 // We explicitly chose GPR64all for the virtual register so such a copy might
2695 // be eliminated by RegisterCoalescer. However, that may not be possible, and
2696 // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
2697 // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
2699 // To prevent that, we are going to constrain the %vreg0 register class here.
2701 // <rdar://problem/11522048>
2703 if (MI.isFullCopy()) {
2704 unsigned DstReg = MI.getOperand(0).getReg();
2705 unsigned SrcReg = MI.getOperand(1).getReg();
2706 if (SrcReg == AArch64::SP &&
2707 TargetRegisterInfo::isVirtualRegister(DstReg)) {
2708 MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
2709 return nullptr;
2710 }
2711 if (DstReg == AArch64::SP &&
2712 TargetRegisterInfo::isVirtualRegister(SrcReg)) {
2713 MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
2714 return nullptr;
2715 }
2716 }
2718 // Handle the case where a copy is being spilled or filled but the source
2719 // and destination register classes don't match. For example:
2721 // %vreg0<def> = COPY %XZR; GPR64common:%vreg0
2723 // In this case we can still safely fold away the COPY and generate the
2724 // following spill code:
2726 // STRXui %XZR, <fi#0>
2728 // This also eliminates spilled cross register class COPYs (e.g. between x and
2729 // d regs) of the same size. For example:
2731 // %vreg0<def> = COPY %vreg1; GPR64:%vreg0, FPR64:%vreg1
2733 // will be filled as
2735 // LDRDui %vreg0, fi<#0>
2737 // instead of:
2739 // LDRXui %vregTemp, fi<#0>
2740 // %vreg0 = FMOV %vregTemp
2742 if (MI.isCopy() && Ops.size() == 1 &&
2743 // Make sure we're only folding the explicit COPY defs/uses.
2744 (Ops[0] == 0 || Ops[0] == 1)) {
2745 bool IsSpill = Ops[0] == 0;
2746 bool IsFill = !IsSpill;
2747 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
2748 const MachineRegisterInfo &MRI = MF.getRegInfo();
2749 MachineBasicBlock &MBB = *MI.getParent();
2750 const MachineOperand &DstMO = MI.getOperand(0);
2751 const MachineOperand &SrcMO = MI.getOperand(1);
2752 unsigned DstReg = DstMO.getReg();
2753 unsigned SrcReg = SrcMO.getReg();
2754 // This is slightly expensive to compute for physical regs since
2755 // getMinimalPhysRegClass is slow.
2756 auto getRegClass = [&](unsigned Reg) {
2757 return TargetRegisterInfo::isVirtualRegister(Reg)
2758 ? MRI.getRegClass(Reg)
2759 : TRI.getMinimalPhysRegClass(Reg);
2760 };
2762 if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
2763 assert(TRI.getRegSizeInBits(*getRegClass(DstReg)) ==
2764 TRI.getRegSizeInBits(*getRegClass(SrcReg)) &&
2765 "Mismatched register size in non subreg COPY");
2766 if (IsSpill)
2767 storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
2768 getRegClass(SrcReg), &TRI);
2769 else
2770 loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex,
2771 getRegClass(DstReg), &TRI);
2772 return &*--InsertPt;
2773 }
2775 // Handle cases like spilling def of:
2777 // %vreg0:sub_32<def,read-undef> = COPY %WZR; GPR64common:%vreg0
2779 // where the physical register source can be widened and stored to the full
2780 // virtual reg destination stack slot, in this case producing:
2782 // STRXui %XZR, <fi#0>
2784 if (IsSpill && DstMO.isUndef() &&
2785 TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
2786 assert(SrcMO.getSubReg() == 0 &&
2787 "Unexpected subreg on physical register");
2788 const TargetRegisterClass *SpillRC;
2789 unsigned SpillSubreg;
2790 switch (DstMO.getSubReg()) {
2791 default:
2792 SpillRC = nullptr;
2793 break;
2794 case AArch64::sub_32:
2795 case AArch64::ssub:
2796 if (AArch64::GPR32RegClass.contains(SrcReg)) {
2797 SpillRC = &AArch64::GPR64RegClass;
2798 SpillSubreg = AArch64::sub_32;
2799 } else if (AArch64::FPR32RegClass.contains(SrcReg)) {
2800 SpillRC = &AArch64::FPR64RegClass;
2801 SpillSubreg = AArch64::ssub;
2802 } else
2803 SpillRC = nullptr;
2804 break;
2805 case AArch64::dsub:
2806 if (AArch64::FPR64RegClass.contains(SrcReg)) {
2807 SpillRC = &AArch64::FPR128RegClass;
2808 SpillSubreg = AArch64::dsub;
2809 } else
2810 SpillRC = nullptr;
2811 break;
2812 }
2814 if (SpillRC)
2815 if (unsigned WidenedSrcReg =
2816 TRI.getMatchingSuperReg(SrcReg, SpillSubreg, SpillRC)) {
2817 storeRegToStackSlot(MBB, InsertPt, WidenedSrcReg, SrcMO.isKill(),
2818 FrameIndex, SpillRC, &TRI);
2819 return &*--InsertPt;
2820 }
2821 }
2823 // Handle cases like filling use of:
2825 // %vreg0:sub_32<def,read-undef> = COPY %vreg1; GPR64:%vreg0, GPR32:%vreg1
2827 // where we can load the full virtual reg source stack slot, into the subreg
2828 // destination, in this case producing:
2830 // LDRWui %vreg0:sub_32<def,read-undef>, <fi#0>
2832 if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
2833 const TargetRegisterClass *FillRC;
2834 switch (DstMO.getSubReg()) {
2835 default:
2836 FillRC = nullptr;
2837 break;
2838 case AArch64::sub_32:
2839 FillRC = &AArch64::GPR32RegClass;
2840 break;
2841 case AArch64::ssub:
2842 FillRC = &AArch64::FPR32RegClass;
2843 break;
2844 case AArch64::dsub:
2845 FillRC = &AArch64::FPR64RegClass;
2846 break;
2847 }
2849 if (FillRC) {
2850 assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) ==
2851 TRI.getRegSizeInBits(*FillRC) &&
2852 "Mismatched regclass size on folded subreg COPY");
2853 loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI);
2854 MachineInstr &LoadMI = *--InsertPt;
2855 MachineOperand &LoadDst = LoadMI.getOperand(0);
2856 assert(LoadDst.getSubReg() == 0 && "unexpected subreg on fill load");
2857 LoadDst.setSubReg(DstMO.getSubReg());
2858 LoadDst.setIsUndef();
2859 return &LoadMI;
2860 }
2861 }
2862 }
2864 // Cannot fold.
2865 return nullptr;
2866 }
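// Putting the two subreg cases together (illustrative, not part of the
// original source): spilling "%vreg0:sub_32<def,read-undef> = COPY %WZR"
// widens WZR to XZR and stores it with STRXui, while filling
// "%vreg0:sub_32<def,read-undef> = COPY %vreg1" loads directly into the
// sub_32 lane with LDRWui; in both cases the COPY itself is folded away.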
2868 int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
2869 bool *OutUseUnscaledOp,
2870 unsigned *OutUnscaledOp,
2871 int *EmittableOffset) {
2872 int Scale = 1;
2873 bool IsSigned = false;
2874 // The ImmIdx should be changed case by case if it is not 2.
2875 unsigned ImmIdx = 2;
2876 unsigned UnscaledOp = 0;
2877 // Set output values in case of early exit.
2878 if (EmittableOffset)
2879 *EmittableOffset = 0;
2880 if (OutUseUnscaledOp)
2881 *OutUseUnscaledOp = false;
2882 if (OutUnscaledOp)
2883 *OutUnscaledOp = 0;
2884 switch (MI.getOpcode()) {
2885 default:
2886 llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
2887 // Vector spills/fills can't take an immediate offset.
2888 case AArch64::LD1Twov2d:
2889 case AArch64::LD1Threev2d:
2890 case AArch64::LD1Fourv2d:
2891 case AArch64::LD1Twov1d:
2892 case AArch64::LD1Threev1d:
2893 case AArch64::LD1Fourv1d:
2894 case AArch64::ST1Twov2d:
2895 case AArch64::ST1Threev2d:
2896 case AArch64::ST1Fourv2d:
2897 case AArch64::ST1Twov1d:
2898 case AArch64::ST1Threev1d:
2899 case AArch64::ST1Fourv1d:
2900 return AArch64FrameOffsetCannotUpdate;
2901 case AArch64::PRFMui:
2902 Scale = 8;
2903 UnscaledOp = AArch64::PRFUMi;
2904 break;
2905 case AArch64::LDRXui:
2906 Scale = 8;
2907 UnscaledOp = AArch64::LDURXi;
2908 break;
2909 case AArch64::LDRWui:
2910 Scale = 4;
2911 UnscaledOp = AArch64::LDURWi;
2912 break;
2913 case AArch64::LDRBui:
2914 Scale = 1;
2915 UnscaledOp = AArch64::LDURBi;
2916 break;
2917 case AArch64::LDRHui:
2918 Scale = 2;
2919 UnscaledOp = AArch64::LDURHi;
2920 break;
2921 case AArch64::LDRSui:
2922 Scale = 4;
2923 UnscaledOp = AArch64::LDURSi;
2924 break;
2925 case AArch64::LDRDui:
2926 Scale = 8;
2927 UnscaledOp = AArch64::LDURDi;
2928 break;
2929 case AArch64::LDRQui:
2930 Scale = 16;
2931 UnscaledOp = AArch64::LDURQi;
2932 break;
2933 case AArch64::LDRBBui:
2934 Scale = 1;
2935 UnscaledOp = AArch64::LDURBBi;
2936 break;
2937 case AArch64::LDRHHui:
2938 Scale = 2;
2939 UnscaledOp = AArch64::LDURHHi;
2940 break;
2941 case AArch64::LDRSBXui:
2942 Scale = 1;
2943 UnscaledOp = AArch64::LDURSBXi;
2944 break;
2945 case AArch64::LDRSBWui:
2946 Scale = 1;
2947 UnscaledOp = AArch64::LDURSBWi;
2948 break;
2949 case AArch64::LDRSHXui:
2950 Scale = 2;
2951 UnscaledOp = AArch64::LDURSHXi;
2952 break;
2953 case AArch64::LDRSHWui:
2954 Scale = 2;
2955 UnscaledOp = AArch64::LDURSHWi;
2956 break;
2957 case AArch64::LDRSWui:
2958 Scale = 4;
2959 UnscaledOp = AArch64::LDURSWi;
2960 break;
2962 case AArch64::STRXui:
2963 Scale = 8;
2964 UnscaledOp = AArch64::STURXi;
2965 break;
2966 case AArch64::STRWui:
2967 Scale = 4;
2968 UnscaledOp = AArch64::STURWi;
2969 break;
2970 case AArch64::STRBui:
2971 Scale = 1;
2972 UnscaledOp = AArch64::STURBi;
2973 break;
2974 case AArch64::STRHui:
2975 Scale = 2;
2976 UnscaledOp = AArch64::STURHi;
2977 break;
2978 case AArch64::STRSui:
2979 Scale = 4;
2980 UnscaledOp = AArch64::STURSi;
2981 break;
2982 case AArch64::STRDui:
2983 Scale = 8;
2984 UnscaledOp = AArch64::STURDi;
2985 break;
2986 case AArch64::STRQui:
2987 Scale = 16;
2988 UnscaledOp = AArch64::STURQi;
2989 break;
2990 case AArch64::STRBBui:
2991 Scale = 1;
2992 UnscaledOp = AArch64::STURBBi;
2993 break;
2994 case AArch64::STRHHui:
2995 Scale = 2;
2996 UnscaledOp = AArch64::STURHHi;
2997 break;
2999 case AArch64::LDPXi:
3000 case AArch64::LDPDi:
3001 case AArch64::STPXi:
3002 case AArch64::STPDi:
3003 case AArch64::LDNPXi:
3004 case AArch64::LDNPDi:
3005 case AArch64::STNPXi:
3006 case AArch64::STNPDi:
3007 IsSigned = true;
3008 Scale = 8;
3009 break;
3011 case AArch64::LDPQi:
3012 case AArch64::STPQi:
3013 case AArch64::LDNPQi:
3014 case AArch64::STNPQi:
3015 IsSigned = true;
3016 Scale = 16;
3017 break;
3019 case AArch64::LDPWi:
3020 case AArch64::LDPSi:
3021 case AArch64::STPWi:
3022 case AArch64::STPSi:
3023 case AArch64::LDNPWi:
3024 case AArch64::LDNPSi:
3025 case AArch64::STNPWi:
3026 case AArch64::STNPSi:
3027 IsSigned = true;
3028 Scale = 4;
3029 break;
3032 case AArch64::LDURXi:
3033 case AArch64::LDURWi:
3034 case AArch64::LDURBi:
3035 case AArch64::LDURHi:
3036 case AArch64::LDURSi:
3037 case AArch64::LDURDi:
3038 case AArch64::LDURQi:
3039 case AArch64::LDURHHi:
3040 case AArch64::LDURBBi:
3041 case AArch64::LDURSBXi:
3042 case AArch64::LDURSBWi:
3043 case AArch64::LDURSHXi:
3044 case AArch64::LDURSHWi:
3045 case AArch64::LDURSWi:
3046 case AArch64::STURXi:
3047 case AArch64::STURWi:
3048 case AArch64::STURBi:
3049 case AArch64::STURHi:
3050 case AArch64::STURSi:
3051 case AArch64::STURDi:
3052 case AArch64::STURQi:
3053 case AArch64::STURBBi:
3054 case AArch64::STURHHi:
3055 Scale = 1;
3056 break;
3057 }
3059 Offset += MI.getOperand(ImmIdx).getImm() * Scale;
3061 bool useUnscaledOp = false;
3062 // If the offset doesn't match the scale, we rewrite the instruction to
3063 // use the unscaled instruction instead. Likewise, if we have a negative
3064 // offset (and have an unscaled op to use).
3065 if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
3066 useUnscaledOp = true;
3068 // Use an unscaled addressing mode if the instruction has a negative offset
3069 // (or if the instruction is already using an unscaled addressing mode).
3070 unsigned MaskBits;
3071 if (IsSigned) {
3072 // ldp/stp instructions.
3073 MaskBits = 7;
3074 Offset /= Scale;
3075 } else if (UnscaledOp == 0 || useUnscaledOp) {
3076 MaskBits = 9;
3077 IsSigned = true;
3078 Scale = 1;
3079 } else {
3080 MaskBits = 12;
3081 IsSigned = false;
3082 Offset /= Scale;
3083 }
3085 // Attempt to fold address computation.
3086 int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
3087 int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
3088 if (Offset >= MinOff && Offset <= MaxOff) {
3089 if (EmittableOffset)
3090 *EmittableOffset = Offset;
3091 Offset = 0;
3092 } else {
3093 int NewOff = Offset < 0 ? MinOff : MaxOff;
3094 if (EmittableOffset)
3095 *EmittableOffset = NewOff;
3096 Offset = (Offset - NewOff) * Scale;
3097 }
3098 if (OutUseUnscaledOp)
3099 *OutUseUnscaledOp = useUnscaledOp;
3100 if (OutUnscaledOp)
3101 *OutUnscaledOp = UnscaledOp;
3102 return AArch64FrameOffsetCanUpdate |
3103 (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
3104 }
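// Worked example (illustrative, not part of the original source): rewriting
// an LDRXui with immediate 4 against a frame offset of 32760 bytes gives a
// byte offset of 4*8 + 32760 = 32792, i.e. element offset 4099, which
// exceeds the 4095 maximum of the unsigned 12-bit scaled field. The
// function then reports 4095 as the emittable offset and leaves
// (4099 - 4095) * 8 = 32 bytes in Offset for the caller to materialize
// separately.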
3106 bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
3107 unsigned FrameReg, int &Offset,
3108 const AArch64InstrInfo *TII) {
3109 unsigned Opcode = MI.getOpcode();
3110 unsigned ImmIdx = FrameRegIdx + 1;
3112 if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
3113 Offset += MI.getOperand(ImmIdx).getImm();
3114 emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
3115 MI.getOperand(0).getReg(), FrameReg, Offset, TII,
3116 MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
3117 MI.eraseFromParent();
3118 Offset = 0;
3119 return true;
3120 }
3122 int NewOffset;
3123 unsigned UnscaledOp;
3124 bool UseUnscaledOp;
3125 int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
3126 &UnscaledOp, &NewOffset);
3127 if (Status & AArch64FrameOffsetCanUpdate) {
3128 if (Status & AArch64FrameOffsetIsLegal)
3129 // Replace the FrameIndex with FrameReg.
3130 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
3131 if (UseUnscaledOp)
3132 MI.setDesc(TII->get(UnscaledOp));
3134 MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
3135 return Offset == 0;
3136 }
3138 return false;
3139 }
3141 void AArch64InstrInfo::getNoop(MCInst &NopInst) const {
3142 NopInst.setOpcode(AArch64::HINT);
3143 NopInst.addOperand(MCOperand::createImm(0));
3144 }
3146 // AArch64 supports MachineCombiner.
3147 bool AArch64InstrInfo::useMachineCombiner() const {
3148 return true;
3149 }
3152 // True when Opc sets flags
3153 static bool isCombineInstrSettingFlag(unsigned Opc) {
3154 switch (Opc) {
3155 case AArch64::ADDSWrr:
3156 case AArch64::ADDSWri:
3157 case AArch64::ADDSXrr:
3158 case AArch64::ADDSXri:
3159 case AArch64::SUBSWrr:
3160 case AArch64::SUBSXrr:
3161 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3162 case AArch64::SUBSWri:
3163 case AArch64::SUBSXri:
3164 return true;
3165 default:
3166 break;
3167 }
3168 return false;
3169 }
3171 // 32b Opcodes that can be combined with a MUL
3172 static bool isCombineInstrCandidate32(unsigned Opc) {
3173 switch (Opc) {
3174 case AArch64::ADDWrr:
3175 case AArch64::ADDWri:
3176 case AArch64::SUBWrr:
3177 case AArch64::ADDSWrr:
3178 case AArch64::ADDSWri:
3179 case AArch64::SUBSWrr:
3180 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3181 case AArch64::SUBWri:
3182 case AArch64::SUBSWri:
3183 return true;
3184 default:
3185 break;
3186 }
3187 return false;
3188 }
3190 // 64b Opcodes that can be combined with a MUL
3191 static bool isCombineInstrCandidate64(unsigned Opc) {
3192 switch (Opc) {
3193 case AArch64::ADDXrr:
3194 case AArch64::ADDXri:
3195 case AArch64::SUBXrr:
3196 case AArch64::ADDSXrr:
3197 case AArch64::ADDSXri:
3198 case AArch64::SUBSXrr:
3199 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3200 case AArch64::SUBXri:
3201 case AArch64::SUBSXri:
3202 return true;
3203 default:
3204 break;
3205 }
3206 return false;
3207 }
3209 // FP Opcodes that can be combined with a FMUL
3210 static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {
3211 switch (Inst.getOpcode()) {
3212 default:
3213 break;
3214 case AArch64::FADDSrr:
3215 case AArch64::FADDDrr:
3216 case AArch64::FADDv2f32:
3217 case AArch64::FADDv2f64:
3218 case AArch64::FADDv4f32:
3219 case AArch64::FSUBSrr:
3220 case AArch64::FSUBDrr:
3221 case AArch64::FSUBv2f32:
3222 case AArch64::FSUBv2f64:
3223 case AArch64::FSUBv4f32:
3224 TargetOptions Options = Inst.getParent()->getParent()->getTarget().Options;
3225 return (Options.UnsafeFPMath ||
3226 Options.AllowFPOpFusion == FPOpFusion::Fast);
3227 }
3228 return false;
3229 }
3231 // Opcodes that can be combined with a MUL
3232 static bool isCombineInstrCandidate(unsigned Opc) {
3233 return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
3234 }
3237 // Utility routine that checks if \param MO is defined by an
3238 // \param CombineOpc instruction in the basic block \param MBB
3239 static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
3240 unsigned CombineOpc, unsigned ZeroReg = 0,
3241 bool CheckZeroReg = false) {
3242 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3243 MachineInstr *MI = nullptr;
3245 if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
3246 MI = MRI.getUniqueVRegDef(MO.getReg());
3247 // And it needs to be in the trace (otherwise, it won't have a depth).
3248 if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
3249 return false;
3250 // Must only be used by the user we combine with.
3251 if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
3252 return false;
3254 if (CheckZeroReg) {
3255 assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
3256 MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
3257 MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
3258 // The third input reg must be zero.
3259 if (MI->getOperand(3).getReg() != ZeroReg)
3260 return false;
3261 }
3263 return true;
3264 }
3267 // Is \param MO defined by an integer multiply and can be combined?
3268 static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
3269 unsigned MulOpc, unsigned ZeroReg) {
3270 return canCombine(MBB, MO, MulOpc, ZeroReg, true);
3271 }
3274 // Is \param MO defined by a floating-point multiply and can be combined?
3275 static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
3276 unsigned MulOpc) {
3277 return canCombine(MBB, MO, MulOpc);
3278 }
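// Worked example (illustrative, not part of the original source): given
//   %vreg2 = MADDWrrr %vreg0, %vreg1, %WZR   ; mul w2, w0, w1
//   %vreg3 = ADDWrr %vreg2, %vreg4
// canCombineWithMUL succeeds for the ADD's first operand: the multiply is
// in the same block, %vreg2 has a single non-debug use, and operand 3 is
// the zero register, so the combiner may later rewrite the pair as
//   %vreg3 = MADDWrrr %vreg0, %vreg1, %vreg4 ; madd w3, w0, w1, w4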
3280 // TODO: There are many more machine instruction opcodes to match:
3281 // 1. Other data types (integer, vectors)
3282 // 2. Other math / logic operations (xor, or)
3283 // 3. Other forms of the same operation (intrinsics and other variants)
3284 bool AArch64InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
3285 switch (Inst.getOpcode()) {
3286 case AArch64::FADDDrr:
3287 case AArch64::FADDSrr:
3288 case AArch64::FADDv2f32:
3289 case AArch64::FADDv2f64:
3290 case AArch64::FADDv4f32:
3291 case AArch64::FMULDrr:
3292 case AArch64::FMULSrr:
3293 case AArch64::FMULX32:
3294 case AArch64::FMULX64:
3295 case AArch64::FMULXv2f32:
3296 case AArch64::FMULXv2f64:
3297 case AArch64::FMULXv4f32:
3298 case AArch64::FMULv2f32:
3299 case AArch64::FMULv2f64:
3300 case AArch64::FMULv4f32:
3301 return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
3302 default:
3303 return false;
3304 }
3305 }
3307 /// Find instructions that can be turned into madd.
3308 static bool getMaddPatterns(MachineInstr &Root,
3309 SmallVectorImpl<MachineCombinerPattern> &Patterns) {
3310 unsigned Opc = Root.getOpcode();
3311 MachineBasicBlock &MBB = *Root.getParent();
3312 bool Found = false;
3314 if (!isCombineInstrCandidate(Opc))
3315 return false;
3316 if (isCombineInstrSettingFlag(Opc)) {
3317 int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
3318 // When NZCV is live bail out.
3319 if (Cmp_NZCV == -1)
3320 return false;
3321 unsigned NewOpc = convertFlagSettingOpcode(Root);
3322 // When opcode can't change bail out.
3323 // CHECKME: do we miss any cases for opcode conversion?
3324 if (NewOpc == Opc)
3325 return false;
3326 Opc = NewOpc;
3327 }
3329 switch (Opc) {
3330 default:
3331 break;
3332 case AArch64::ADDWrr:
3333 assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
3334 "ADDWrr does not have register operands");
3335 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3337 Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
3340 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
3342 Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
3346 case AArch64::ADDXrr:
3347 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3349 Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
3352 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
3354 Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
3358 case AArch64::SUBWrr:
3359 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3361 Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
3364 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
3366 Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
3370 case AArch64::SUBXrr:
3371 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3373 Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
3376 if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
3378 Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
3382 case AArch64::ADDWri:
3383 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3385 Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
3389 case AArch64::ADDXri:
3390 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3392 Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
3396 case AArch64::SUBWri:
3397 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3399 Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
3403 case AArch64::SUBXri:
3404 if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3406 Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
3413 /// Floating-Point Support
3415 /// Find instructions that can be turned into fmadd.
3416 static bool getFMAPatterns(MachineInstr &Root,
3417 SmallVectorImpl<MachineCombinerPattern> &Patterns) {
3419 if (!isCombineInstrCandidateFP(Root))
3420 return false;
3422 MachineBasicBlock &MBB = *Root.getParent();
3423 bool Found = false;
3425 switch (Root.getOpcode()) {
3426 default:
3427 assert(false && "Unsupported FP instruction in combiner\n");
3428 break;
3429 case AArch64::FADDSrr:
3430 assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
3431 "FADDSrr does not have register operands");
3432 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
3433 Patterns.push_back(MachineCombinerPattern::FMULADDS_OP1);
3435 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3436 AArch64::FMULv1i32_indexed)) {
3437 Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP1);
3440 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
3441 Patterns.push_back(MachineCombinerPattern::FMULADDS_OP2);
3443 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3444 AArch64::FMULv1i32_indexed)) {
3445 Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP2);
3449 case AArch64::FADDDrr:
3450 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
3451 Patterns.push_back(MachineCombinerPattern::FMULADDD_OP1);
3453 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3454 AArch64::FMULv1i64_indexed)) {
3455 Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP1);
3458 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
3459 Patterns.push_back(MachineCombinerPattern::FMULADDD_OP2);
3461 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3462 AArch64::FMULv1i64_indexed)) {
3463 Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP2);
3467 case AArch64::FADDv2f32:
3468 if (canCombineWithFMUL(MBB, Root.getOperand(1),
3469 AArch64::FMULv2i32_indexed)) {
3470 Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP1);
3472 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3473 AArch64::FMULv2f32)) {
3474 Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP1);
3477 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3478 AArch64::FMULv2i32_indexed)) {
3479 Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP2);
3481 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3482 AArch64::FMULv2f32)) {
3483 Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP2);
3487 case AArch64::FADDv2f64:
3488 if (canCombineWithFMUL(MBB, Root.getOperand(1),
3489 AArch64::FMULv2i64_indexed)) {
3490 Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP1);
3492 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3493 AArch64::FMULv2f64)) {
3494 Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP1);
3497 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3498 AArch64::FMULv2i64_indexed)) {
3499 Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP2);
3501 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3502 AArch64::FMULv2f64)) {
3503 Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP2);
3507 case AArch64::FADDv4f32:
3508 if (canCombineWithFMUL(MBB, Root.getOperand(1),
3509 AArch64::FMULv4i32_indexed)) {
3510 Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP1);
3512 } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3513 AArch64::FMULv4f32)) {
3514 Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP1);
3517 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3518 AArch64::FMULv4i32_indexed)) {
3519 Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP2);
3521 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3522 AArch64::FMULv4f32)) {
3523 Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP2);
3528 case AArch64::FSUBSrr:
3529 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
3530 Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP1);
3533 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
3534 Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP2);
3536 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3537 AArch64::FMULv1i32_indexed)) {
3538 Patterns.push_back(MachineCombinerPattern::FMLSv1i32_indexed_OP2);
3541 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FNMULSrr)) {
3542 Patterns.push_back(MachineCombinerPattern::FNMULSUBS_OP1);
3546 case AArch64::FSUBDrr:
3547 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
3548 Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP1);
3551 if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
3552 Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP2);
3554 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3555 AArch64::FMULv1i64_indexed)) {
3556 Patterns.push_back(MachineCombinerPattern::FMLSv1i64_indexed_OP2);
3559 if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FNMULDrr)) {
3560 Patterns.push_back(MachineCombinerPattern::FNMULSUBD_OP1);
3564 case AArch64::FSUBv2f32:
3565 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3566 AArch64::FMULv2i32_indexed)) {
3567 Patterns.push_back(MachineCombinerPattern::FMLSv2i32_indexed_OP2);
3569 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3570 AArch64::FMULv2f32)) {
3571 Patterns.push_back(MachineCombinerPattern::FMLSv2f32_OP2);
3575 case AArch64::FSUBv2f64:
3576 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3577 AArch64::FMULv2i64_indexed)) {
3578 Patterns.push_back(MachineCombinerPattern::FMLSv2i64_indexed_OP2);
3580 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3581 AArch64::FMULv2f64)) {
3582 Patterns.push_back(MachineCombinerPattern::FMLSv2f64_OP2);
3586 case AArch64::FSUBv4f32:
3587 if (canCombineWithFMUL(MBB, Root.getOperand(2),
3588 AArch64::FMULv4i32_indexed)) {
3589 Patterns.push_back(MachineCombinerPattern::FMLSv4i32_indexed_OP2);
3591 } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3592 AArch64::FMULv4f32)) {
3593 Patterns.push_back(MachineCombinerPattern::FMLSv4f32_OP2);
3601 /// Return true when a code sequence can improve throughput. It
3602 /// should be called only for instructions in loops.
3603 /// \param Pattern - combiner pattern
3604 bool
3605 AArch64InstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
3606 switch (Pattern) {
3607 default:
3608 break;
3609 case MachineCombinerPattern::FMULADDS_OP1:
3610 case MachineCombinerPattern::FMULADDS_OP2:
3611 case MachineCombinerPattern::FMULSUBS_OP1:
3612 case MachineCombinerPattern::FMULSUBS_OP2:
3613 case MachineCombinerPattern::FMULADDD_OP1:
3614 case MachineCombinerPattern::FMULADDD_OP2:
3615 case MachineCombinerPattern::FMULSUBD_OP1:
3616 case MachineCombinerPattern::FMULSUBD_OP2:
3617 case MachineCombinerPattern::FNMULSUBS_OP1:
3618 case MachineCombinerPattern::FNMULSUBD_OP1:
3619 case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
3620 case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
3621 case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
3622 case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
3623 case MachineCombinerPattern::FMLAv2f32_OP2:
3624 case MachineCombinerPattern::FMLAv2f32_OP1:
3625 case MachineCombinerPattern::FMLAv2f64_OP1:
3626 case MachineCombinerPattern::FMLAv2f64_OP2:
3627 case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
3628 case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
3629 case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
3630 case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
3631 case MachineCombinerPattern::FMLAv4f32_OP1:
3632 case MachineCombinerPattern::FMLAv4f32_OP2:
3633 case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
3634 case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
3635 case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
3636 case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
3637 case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
3638 case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
3639 case MachineCombinerPattern::FMLSv2f32_OP2:
3640 case MachineCombinerPattern::FMLSv2f64_OP2:
3641 case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
3642 case MachineCombinerPattern::FMLSv4f32_OP2:
3643 return true;
3644 } // end switch (Pattern)
3645 return false;
3646 }
3647 /// Return true when there is potentially a faster code sequence for an
3648 /// instruction chain ending in \p Root. All potential patterns are listed in
3649 /// the \p Patterns vector. Patterns should be sorted in priority order since
3650 /// pattern evaluator stops checking as soon as it finds a faster sequence.
3652 bool AArch64InstrInfo::getMachineCombinerPatterns(
3653 MachineInstr &Root,
3654 SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
3655 // Integer patterns
3656 if (getMaddPatterns(Root, Patterns))
3657 return true;
3658 // Floating point patterns
3659 if (getFMAPatterns(Root, Patterns))
3660 return true;
3662 return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
3663 }
3665 enum class FMAInstKind { Default, Indexed, Accumulator };
3666 /// genFusedMultiply - Generate fused multiply instructions.
3667 /// This function supports both integer and floating point instructions.
3668 /// A typical example:
3669 ///  F|MUL I=A,B,0
3670 ///  F|ADD R,I,C
3671 /// ==> F|MADD R,A,B,C
3672 /// \param Root is the F|ADD instruction
3673 /// \param [out] InsInstrs is a vector of machine instructions and will
3674 /// contain the generated madd instruction
3675 /// \param IdxMulOpd is index of operand in Root that is the result of
3676 /// the F|MUL. In the example above IdxMulOpd is 1.
3677 /// \param MaddOpc the opcode of the f|madd instruction
3678 static MachineInstr *
3679 genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
3680 const TargetInstrInfo *TII, MachineInstr &Root,
3681 SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
3682 unsigned MaddOpc, const TargetRegisterClass *RC,
3683 FMAInstKind kind = FMAInstKind::Default) {
3684 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3686 unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
3687 MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
3688 unsigned ResultReg = Root.getOperand(0).getReg();
3689 unsigned SrcReg0 = MUL->getOperand(1).getReg();
3690 bool Src0IsKill = MUL->getOperand(1).isKill();
3691 unsigned SrcReg1 = MUL->getOperand(2).getReg();
3692 bool Src1IsKill = MUL->getOperand(2).isKill();
3693 unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
3694 bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
3696 if (TargetRegisterInfo::isVirtualRegister(ResultReg))
3697 MRI.constrainRegClass(ResultReg, RC);
3698 if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
3699 MRI.constrainRegClass(SrcReg0, RC);
3700 if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
3701 MRI.constrainRegClass(SrcReg1, RC);
3702 if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
3703 MRI.constrainRegClass(SrcReg2, RC);
3705 MachineInstrBuilder MIB;
3706 if (kind == FMAInstKind::Default)
3707 MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3708 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3709 .addReg(SrcReg1, getKillRegState(Src1IsKill))
3710 .addReg(SrcReg2, getKillRegState(Src2IsKill));
3711 else if (kind == FMAInstKind::Indexed)
3712 MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3713 .addReg(SrcReg2, getKillRegState(Src2IsKill))
3714 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3715 .addReg(SrcReg1, getKillRegState(Src1IsKill))
3716 .addImm(MUL->getOperand(3).getImm());
3717 else if (kind == FMAInstKind::Accumulator)
3718 MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3719 .addReg(SrcReg2, getKillRegState(Src2IsKill))
3720 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3721 .addReg(SrcReg1, getKillRegState(Src1IsKill));
3722 else
3723 assert(false && "Invalid FMA instruction kind \n");
3724 // Insert the MADD (MADD, FMA, FMS, FMLA, FMLS)
3725 InsInstrs.push_back(MIB);
3726 return MIB;
3727 }
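// Shape of the emitted instruction (illustrative, not part of the original
// source): for FMAInstKind::Indexed with Root = FADDv2f32 fed by an
// FMULv2i32_indexed, the builder above produces
//   %vregR = FMLAv2i32_indexed %vregAcc, %vregA, %vregB, <lane>
// i.e. the accumulator first, then the two multiply operands, with the lane
// immediate copied from operand 3 of the original FMUL.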
3729 /// genMaddR - Generate madd instruction and combine mul and add using
3730 /// an extra virtual register
3731 /// Example - an ADD intermediate needs to be stored in a register:
3732 ///   MUL I=A,B,0
3733 ///   ADD R,I,Imm
3734 /// ==> ORR V, ZR, Imm
3735 /// ==> MADD R,A,B,V
3736 /// \param Root is the ADD instruction
3737 /// \param [out] InsInstrs is a vector of machine instructions and will
3738 /// contain the generated madd instruction
3739 /// \param IdxMulOpd is index of operand in Root that is the result of
3740 /// the MUL. In the example above IdxMulOpd is 1.
3741 /// \param MaddOpc the opcode of the madd instruction
3742 /// \param VR is a virtual register that holds the value of an ADD operand
3743 /// (V in the example above).
3744 static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
3745 const TargetInstrInfo *TII, MachineInstr &Root,
3746 SmallVectorImpl<MachineInstr *> &InsInstrs,
3747 unsigned IdxMulOpd, unsigned MaddOpc,
3748 unsigned VR, const TargetRegisterClass *RC) {
3749 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3751 MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
3752 unsigned ResultReg = Root.getOperand(0).getReg();
3753 unsigned SrcReg0 = MUL->getOperand(1).getReg();
3754 bool Src0IsKill = MUL->getOperand(1).isKill();
3755 unsigned SrcReg1 = MUL->getOperand(2).getReg();
3756 bool Src1IsKill = MUL->getOperand(2).isKill();
3758 if (TargetRegisterInfo::isVirtualRegister(ResultReg))
3759 MRI.constrainRegClass(ResultReg, RC);
3760 if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
3761 MRI.constrainRegClass(SrcReg0, RC);
3762 if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
3763 MRI.constrainRegClass(SrcReg1, RC);
3764 if (TargetRegisterInfo::isVirtualRegister(VR))
3765 MRI.constrainRegClass(VR, RC);
3767 MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
3768 ResultReg)
3769 .addReg(SrcReg0, getKillRegState(Src0IsKill))
3770 .addReg(SrcReg1, getKillRegState(Src1IsKill))
3771 .addReg(VR);
3773 InsInstrs.push_back(MIB);
3774 return MIB;
3775 }
3777 /// When getMachineCombinerPatterns() finds potential patterns,
3778 /// this function generates the instructions that could replace the
3779 /// original code sequence
3780 void AArch64InstrInfo::genAlternativeCodeSequence(
3781 MachineInstr &Root, MachineCombinerPattern Pattern,
3782 SmallVectorImpl<MachineInstr *> &InsInstrs,
3783 SmallVectorImpl<MachineInstr *> &DelInstrs,
3784 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
3785 MachineBasicBlock &MBB = *Root.getParent();
3786 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3787 MachineFunction &MF = *MBB.getParent();
3788 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
3790 MachineInstr *MUL;
3791 const TargetRegisterClass *RC;
3792 unsigned Opc;
3793 switch (Pattern) {
3794 default:
3795 // Reassociate instructions.
3796 TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
3797 DelInstrs, InstrIdxForVirtReg);
3798 return;
3799 case MachineCombinerPattern::MULADDW_OP1:
3800 case MachineCombinerPattern::MULADDX_OP1:
3804 // --- Create(MADD);
3805 if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
3806 Opc = AArch64::MADDWrrr;
3807 RC = &AArch64::GPR32RegClass;
3809 Opc = AArch64::MADDXrrr;
3810 RC = &AArch64::GPR64RegClass;
3812 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
3814 case MachineCombinerPattern::MULADDW_OP2:
3815 case MachineCombinerPattern::MULADDX_OP2:
3819 // --- Create(MADD);
3820 if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
3821 Opc = AArch64::MADDWrrr;
3822 RC = &AArch64::GPR32RegClass;
3824 Opc = AArch64::MADDXrrr;
3825 RC = &AArch64::GPR64RegClass;
3827 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
3829 case MachineCombinerPattern::MULADDWI_OP1:
3830 case MachineCombinerPattern::MULADDXI_OP1: {
3833 // ==> ORR V, ZR, Imm
3835 // --- Create(MADD);
3836 const TargetRegisterClass *OrrRC;
3837 unsigned BitSize, OrrOpc, ZeroReg;
3838 if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
3839 OrrOpc = AArch64::ORRWri;
3840 OrrRC = &AArch64::GPR32spRegClass;
3841 BitSize = 32;
3842 ZeroReg = AArch64::WZR;
3843 Opc = AArch64::MADDWrrr;
3844 RC = &AArch64::GPR32RegClass;
3846 OrrOpc = AArch64::ORRXri;
3847 OrrRC = &AArch64::GPR64spRegClass;
3848 BitSize = 64;
3849 ZeroReg = AArch64::XZR;
3850 Opc = AArch64::MADDXrrr;
3851 RC = &AArch64::GPR64RegClass;
3853 unsigned NewVR = MRI.createVirtualRegister(OrrRC);
3854 uint64_t Imm = Root.getOperand(2).getImm();
3856 if (Root.getOperand(3).isImm()) {
3857 unsigned Val = Root.getOperand(3).getImm();
3858 Imm = Imm << Val;
3859 }
3860 uint64_t UImm = SignExtend64(Imm, BitSize);
3861 uint64_t Encoding;
3862 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
3863 MachineInstrBuilder MIB1 =
3864 BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
3865 .addReg(ZeroReg)
3866 .addImm(Encoding);
3867 InsInstrs.push_back(MIB1);
3868 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
3869 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
3873 case MachineCombinerPattern::MULSUBW_OP1:
3874 case MachineCombinerPattern::MULSUBX_OP1: {
3878 // ==> MADD R,A,B,V // = -C + A*B
3879 // --- Create(MADD);
3880 const TargetRegisterClass *SubRC;
3881 unsigned SubOpc, ZeroReg;
3882 if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
3883 SubOpc = AArch64::SUBWrr;
3884 SubRC = &AArch64::GPR32spRegClass;
3885 ZeroReg = AArch64::WZR;
3886 Opc = AArch64::MADDWrrr;
3887 RC = &AArch64::GPR32RegClass;
3889 SubOpc = AArch64::SUBXrr;
3890 SubRC = &AArch64::GPR64spRegClass;
3891 ZeroReg = AArch64::XZR;
3892 Opc = AArch64::MADDXrrr;
3893 RC = &AArch64::GPR64RegClass;
3895 unsigned NewVR = MRI.createVirtualRegister(SubRC);
3897 MachineInstrBuilder MIB1 =
3898 BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
3899 .addReg(ZeroReg)
3900 .add(Root.getOperand(2));
3901 InsInstrs.push_back(MIB1);
3902 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
3903 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
3906 case MachineCombinerPattern::MULSUBW_OP2:
3907 case MachineCombinerPattern::MULSUBX_OP2:
3910 // ==> MSUB R,A,B,C (computes C - A*B)
3911 // --- Create(MSUB);
3912 if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
3913 Opc = AArch64::MSUBWrrr;
3914 RC = &AArch64::GPR32RegClass;
3916 Opc = AArch64::MSUBXrrr;
3917 RC = &AArch64::GPR64RegClass;
3919 MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
3921 case MachineCombinerPattern::MULSUBWI_OP1:
3922 case MachineCombinerPattern::MULSUBXI_OP1: {
3925 // ==> ORR V, ZR, -Imm
3926 // ==> MADD R,A,B,V // = -Imm + A*B
3927 // --- Create(MADD);
3928 const TargetRegisterClass *OrrRC;
3929 unsigned BitSize, OrrOpc, ZeroReg;
3930 if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
3931 OrrOpc = AArch64::ORRWri;
3932 OrrRC = &AArch64::GPR32spRegClass;
3933 BitSize = 32;
3934 ZeroReg = AArch64::WZR;
3935 Opc = AArch64::MADDWrrr;
3936 RC = &AArch64::GPR32RegClass;
3938 OrrOpc = AArch64::ORRXri;
3939 OrrRC = &AArch64::GPR64spRegClass;
3940 BitSize = 64;
3941 ZeroReg = AArch64::XZR;
3942 Opc = AArch64::MADDXrrr;
3943 RC = &AArch64::GPR64RegClass;
3945 unsigned NewVR = MRI.createVirtualRegister(OrrRC);
3946 uint64_t Imm = Root.getOperand(2).getImm();
3947 if (Root.getOperand(3).isImm()) {
3948 unsigned Val = Root.getOperand(3).getImm();
3949 Imm = Imm << Val;
3950 }
3951 uint64_t UImm = SignExtend64(-Imm, BitSize);
3952 uint64_t Encoding;
3953 if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
3954 MachineInstrBuilder MIB1 =
3955 BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
3956 .addReg(ZeroReg)
3957 .addImm(Encoding);
3958 InsInstrs.push_back(MIB1);
3959 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
3960 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
  // Floating Point Support
  case MachineCombinerPattern::FMULADDS_OP1:
  case MachineCombinerPattern::FMULADDD_OP1:
    // FMUL I=A,B,0
    // FADD R,I,C
    // ==> FMADD R,A,B,C
    // --- Create(FMADD);
    if (Pattern == MachineCombinerPattern::FMULADDS_OP1) {
      Opc = AArch64::FMADDSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMADDDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::FMULADDS_OP2:
  case MachineCombinerPattern::FMULADDD_OP2:
    // FMUL I=A,B,0
    // FADD R,C,I
    // ==> FMADD R,A,B,C
    // --- Create(FMADD);
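    // For example (illustrative registers):
    //   fmul s8, s0, s1
    //   fadd s2, s3, s8     ==>    fmadd s2, s0, s1, s3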
    if (Pattern == MachineCombinerPattern::FMULADDS_OP2) {
      Opc = AArch64::FMADDSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMADDDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
    Opc = AArch64::FMLAv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
    Opc = AArch64::FMLAv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
    Opc = AArch64::FMLAv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
    Opc = AArch64::FMLAv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv2f32_OP1:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP1) {
      Opc = AArch64::FMLAv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f32_OP2:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP2) {
      Opc = AArch64::FMLAv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
  case MachineCombinerPattern::FMLAv2f64_OP1:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP1) {
      Opc = AArch64::FMLAv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f64_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP2) {
      Opc = AArch64::FMLAv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv4f32_OP1:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP1) {
      Opc = AArch64::FMLAv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv4f32_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP2) {
      Opc = AArch64::FMLAv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMULSUBS_OP1:
  case MachineCombinerPattern::FMULSUBD_OP1: {
    // FMUL I=A,B,0
    // FSUB R,I,C
    // ==> FNMSUB R,A,B,C // = -C + A*B
    // --- Create(FNMSUB);
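    // For example (illustrative registers):
    //   fmul s8, s0, s1
    //   fsub s2, s8, s3     ==>    fnmsub s2, s0, s1, s3   // = s0*s1 - s3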
    if (Pattern == MachineCombinerPattern::FMULSUBS_OP1) {
      Opc = AArch64::FNMSUBSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FNMSUBDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  }
  case MachineCombinerPattern::FNMULSUBS_OP1:
  case MachineCombinerPattern::FNMULSUBD_OP1: {
    // FNMUL I=A,B,0
    // FSUB R,I,C
    // ==> FNMADD R,A,B,C // = -A*B - C
    // --- Create(FNMADD);
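    // For example (illustrative registers):
    //   fnmul s8, s0, s1
    //   fsub  s2, s8, s3    ==>    fnmadd s2, s0, s1, s3   // = -s0*s1 - s3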
    if (Pattern == MachineCombinerPattern::FNMULSUBS_OP1) {
      Opc = AArch64::FNMADDSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FNMADDDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  }
  case MachineCombinerPattern::FMULSUBS_OP2:
  case MachineCombinerPattern::FMULSUBD_OP2: {
    // FMUL I=A,B,0
    // FSUB R,C,I
    // ==> FMSUB R,A,B,C (computes C - A*B)
    // --- Create(FMSUB);
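    // For example (illustrative registers):
    //   fmul s8, s0, s1
    //   fsub s2, s3, s8     ==>    fmsub s2, s0, s1, s3   // = s3 - s0*s1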
    if (Pattern == MachineCombinerPattern::FMULSUBS_OP2) {
      Opc = AArch64::FMSUBSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMSUBDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  }
  case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
    Opc = AArch64::FMLSv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
    Opc = AArch64::FMLSv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;
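  // The vector cases below fold an FSUB from the accumulator into a single
  // fmls, e.g. (illustrative):
  //   fmul v8.2s, v0.2s, v1.2s
  //   fsub v2.2s, v2.2s, v8.2s    ==>    fmls v2.2s, v0.2s, v1.2s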
  case MachineCombinerPattern::FMLSv2f32_OP2:
  case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP2) {
      Opc = AArch64::FMLSv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLSv2f64_OP2:
  case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP2) {
      Opc = AArch64::FMLSv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLSv4f32_OP2:
  case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP2) {
      Opc = AArch64::FMLSv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  } // end switch (Pattern)
  // Record MUL and ADD/SUB for deletion.
  DelInstrs.push_back(MUL);
  DelInstrs.push_back(&Root);
}
/// \brief Replace a csinc-branch sequence by a simple conditional branch.
///
/// Examples:
/// 1. \code
///      csinc w9, wzr, wzr, <condition code>
///      tbnz  w9, #0, 0x44
///    \endcode
///    becomes \code b.<inverted condition code> \endcode
///
/// 2. \code
///      csinc w9, wzr, wzr, <condition code>
///      tbz   w9, #0, 0x44
///    \endcode
///    becomes \code b.<condition code> \endcode
///
/// Also replace a compare-and-branch sequence by a TBZ/TBNZ instruction when
/// the compare's constant operand is a power of 2:
/// \code
///   and  w8, w8, #0x400
///   cbnz w8, L1
/// \endcode
/// becomes \code tbnz w8, #10, L1 \endcode
///
/// \param  MI Conditional Branch
/// \return True when the simple conditional branch is generated.
///
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
  bool IsNegativeBranch = false;
  bool IsTestAndBranch = false;
  unsigned TargetBBInMI = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    return false;
  case AArch64::CBZW:
  case AArch64::CBZX:
    TargetBBInMI = 1;
    break;
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    TargetBBInMI = 1;
    IsNegativeBranch = true;
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
    TargetBBInMI = 2;
    IsTestAndBranch = true;
    break;
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    TargetBBInMI = 2;
    IsNegativeBranch = true;
    IsTestAndBranch = true;
    break;
  }
  // A CSINC of a zero register only ever sets bit 0, so a TB(N)Z testing
  // any other bit cannot be folded. Conservatively bail out in case the
  // verifier missed this case.
  if (IsTestAndBranch && MI.getOperand(1).getImm())
    return false;
  assert(MI.getParent() && "Incomplete machine instruction\n");
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  unsigned VReg = MI.getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(VReg);
  // Look through COPY instructions to find the definition.
  while (DefMI->isCopy()) {
    unsigned CopyVReg = DefMI->getOperand(1).getReg();
    if (!MRI->hasOneNonDBGUse(CopyVReg))
      return false;
    if (!MRI->hasOneDef(CopyVReg))
      return false;
    DefMI = MRI->getVRegDef(CopyVReg);
  }
  switch (DefMI->getOpcode()) {
  default:
    return false;
  // Fold AND into a TBZ/TBNZ if the constant operand is a power of 2.
  case AArch64::ANDWri:
  case AArch64::ANDXri: {
    if (IsTestAndBranch)
      return false;
    if (DefMI->getParent() != MBB)
      return false;
    if (!MRI->hasOneNonDBGUse(VReg))
      return false;

    bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
    uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
        DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64);
    if (!isPowerOf2_64(Mask))
      return false;
    MachineOperand &MO = DefMI->getOperand(1);
    unsigned NewReg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(NewReg))
      return false;

    assert(!MRI->def_empty(NewReg) && "Register must be defined.");
    MachineBasicBlock &RefToMBB = *MBB;
    MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
    DebugLoc DL = MI.getDebugLoc();
    unsigned Imm = Log2_64(Mask);
    unsigned Opc = (Imm < 32)
                       ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
                       : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
    MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc))
                              .addReg(NewReg)
                              .addImm(Imm)
                              .addMBB(TBB);
    // The register now lives on into the new TBZ/TBNZ.
    MO.setIsKill(false);
    // For a bit index smaller than 32 we must use the 32-bit (W) variant,
    // since the 64-bit variant cannot encode such indices. Therefore, if the
    // input register is 64-bit, take its 32-bit sub-register.
    if (!Is32Bit && Imm < 32)
      NewMI->getOperand(0).setSubReg(AArch64::sub_32);
    MI.eraseFromParent();
    return true;
  }
  // Look for CSINC.
  case AArch64::CSINCWr:
  case AArch64::CSINCXr: {
    if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
          DefMI->getOperand(2).getReg() == AArch64::WZR) &&
        !(DefMI->getOperand(1).getReg() == AArch64::XZR &&
          DefMI->getOperand(2).getReg() == AArch64::XZR))
      return false;

    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
      return false;
    AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
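    // Note: csinc Rd, zr, zr, CC sets Rd to 0 when CC holds and to 1
    // otherwise, i.e. it is cset Rd, invert(CC). For example (illustrative):
    //   csinc w9, wzr, wzr, ne
    //   tbnz  w9, #0, L       ==>    b.eq L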
    // Convert only when the condition code is not modified between
    // the CSINC and the branch. The CC may be used by other
    // instructions in between.
    if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write))
      return false;
    MachineBasicBlock &RefToMBB = *MBB;
    MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
    DebugLoc DL = MI.getDebugLoc();
    if (IsNegativeBranch)
      CC = AArch64CC::getInvertedCondCode(CC);
    BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
    MI.eraseFromParent();
    return true;
  }
  }
}
std::pair<unsigned, unsigned>
AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = AArch64II::MO_FRAGMENT;
  return std::make_pair(TF & Mask, TF & ~Mask);
}
ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace AArch64II;

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PAGE, "aarch64-page"},
      {MO_PAGEOFF, "aarch64-pageoff"},
      {MO_G3, "aarch64-g3"},
      {MO_G2, "aarch64-g2"},
      {MO_G1, "aarch64-g1"},
      {MO_G0, "aarch64-g0"},
      {MO_HI12, "aarch64-hi12"}};
  return makeArrayRef(TargetFlags);
}
ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace AArch64II;

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_GOT, "aarch64-got"},
      {MO_NC, "aarch64-nc"},
      {MO_TLS, "aarch64-tls"}};
  return makeArrayRef(TargetFlags);
}
unsigned AArch64InstrInfo::getOutliningBenefit(size_t SequenceSize,
                                               size_t Occurrences,
                                               bool CanBeTailCall) const {
  unsigned NotOutlinedSize = SequenceSize * Occurrences;
  unsigned OutlinedSize;

  // Is this candidate something we can outline as a tail call?
  if (CanBeTailCall) {
    // If yes, then we just outline the sequence and replace each of its
    // occurrences with a branch instruction.
    OutlinedSize = SequenceSize + Occurrences;
  } else {
    // If no, then we outline the sequence (SequenceSize), add a return (+1),
    // and replace each occurrence with a save/restore of LR and a call
    // (3 * Occurrences).
    OutlinedSize = (SequenceSize + 1) + (3 * Occurrences);
  }
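  // Worked example (illustrative numbers): a 10-instruction sequence that
  // occurs 3 times costs 30 instructions unoutlined. Outlined as a tail call
  // it costs 10 + 3 = 13 (benefit 17); as a normal call it costs
  // (10 + 1) + (3 * 3) = 20 (benefit 10).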
  // Return the number of instructions saved by outlining this sequence.
  return NotOutlinedSize > OutlinedSize ? NotOutlinedSize - OutlinedSize : 0;
}
bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF) const {
  return MF.getFunction()->hasFnAttribute(Attribute::NoRedZone);
}
AArch64GenInstrInfo::MachineOutlinerInstrType
AArch64InstrInfo::getOutliningType(MachineInstr &MI) const {
  MachineFunction *MF = MI.getParent()->getParent();
  AArch64FunctionInfo *FuncInfo = MF->getInfo<AArch64FunctionInfo>();

  // Don't outline LOHs.
  if (FuncInfo->getLOHRelated().count(&MI))
    return MachineOutlinerInstrType::Illegal;

  // Don't allow debug values to impact outlining type.
  if (MI.isDebugValue() || MI.isIndirectDebugValue())
    return MachineOutlinerInstrType::Invisible;

  // Is this a terminator for a basic block?
  if (MI.isTerminator()) {
    // Is this the end of a function?
    if (MI.getParent()->succ_empty())
      return MachineOutlinerInstrType::Legal;

    // It's not, so don't outline it.
    return MachineOutlinerInstrType::Illegal;
  }
  // Don't outline positions.
  if (MI.isPosition())
    return MachineOutlinerInstrType::Illegal;

  // Make sure none of the operands are un-outlinable.
  for (const MachineOperand &MOP : MI.operands())
    if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
        MOP.isTargetIndex())
      return MachineOutlinerInstrType::Illegal;

  // Don't outline anything that uses the link register.
  if (MI.modifiesRegister(AArch64::LR, &RI) ||
      MI.readsRegister(AArch64::LR, &RI))
    return MachineOutlinerInstrType::Illegal;
  // Does this use the stack?
  if (MI.modifiesRegister(AArch64::SP, &RI) ||
      MI.readsRegister(AArch64::SP, &RI)) {
    // Is it a memory operation?
    if (MI.mayLoadOrStore()) {
      unsigned Base;  // Filled with the base register of MI.
      int64_t Offset; // Filled with the offset of MI.
      unsigned DummyWidth;

      // Does it allow us to offset the base register, and is the base SP?
      if (!getMemOpBaseRegImmOfsWidth(MI, Base, Offset, DummyWidth, &RI) ||
          Base != AArch64::SP)
        return MachineOutlinerInstrType::Illegal;

      // Find the minimum/maximum offset for this instruction and check if
      // fixing it up would be in range.
      int64_t MinOffset, MaxOffset;
      unsigned DummyScale;
      getMemOpInfo(MI.getOpcode(), DummyScale, DummyWidth, MinOffset,
                   MaxOffset);

      // TODO: We should really test what happens if an instruction overflows.
      // This is tricky to test with IR tests, but when the outliner is moved
      // to a MIR test, it really ought to be checked.
      if (Offset + 16 < MinOffset || Offset + 16 > MaxOffset)
        return MachineOutlinerInstrType::Illegal;

      // It's in range, so we can outline it.
      return MachineOutlinerInstrType::Legal;
    }

    // We can't fix it up, so don't outline it.
    return MachineOutlinerInstrType::Illegal;
  }

  return MachineOutlinerInstrType::Legal;
}
void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
  for (MachineInstr &MI : MBB) {
    unsigned Base, Width;
    int64_t Offset;

    // Is this a load or store with an immediate offset and SP as the base?
    if (!MI.mayLoadOrStore() ||
        !getMemOpBaseRegImmOfsWidth(MI, Base, Offset, Width, &RI) ||
        Base != AArch64::SP)
      continue;

    // It is, so we have to fix it up.
    unsigned Scale;
    int64_t Dummy1, Dummy2;

    MachineOperand &StackOffsetOperand = getMemOpBaseRegImmOfsOffsetOperand(MI);
    assert(StackOffsetOperand.isImm() && "Stack offset wasn't immediate!");
    getMemOpInfo(MI.getOpcode(), Scale, Width, Dummy1, Dummy2);
    assert(Scale != 0 && "Unexpected opcode!");

    // We've pushed the return address onto the stack, so add 16 to the
    // offset. This is safe: we already verified the adjusted offset stays in
    // range when we checked whether this instruction was legal to outline.
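    // For example (illustrative): an `ldr x0, [sp, #8]` in the outlined
    // sequence has Offset = 8 and Scale = 8, so its scaled immediate becomes
    // (8 + 16) / 8 = 3, i.e. `ldr x0, [sp, #24]`.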
    int64_t NewImm = (Offset + 16) / Scale;
    StackOffsetOperand.setImm(NewImm);
  }
}
void AArch64InstrInfo::insertOutlinerEpilogue(MachineBasicBlock &MBB,
                                              MachineFunction &MF,
                                              bool IsTailCall) const {
  // If this is a tail call outlined function, then there's already a return.
  if (IsTailCall)
    return;

  // It's not a tail call, so we have to insert the return ourselves.
  MachineInstr *ret = BuildMI(MF, DebugLoc(), get(AArch64::RET))
                          .addReg(AArch64::LR, RegState::Undef);
  MBB.insert(MBB.end(), ret);

  // Walk over the basic block and fix up all the stack accesses.
  fixupPostOutline(MBB);
}
void AArch64InstrInfo::insertOutlinerPrologue(MachineBasicBlock &MBB,
                                              MachineFunction &MF,
                                              bool IsTailCall) const {}
MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, bool IsTailCall) const {
  // Are we tail calling?
  if (IsTailCall) {
    // If yes, then we can just branch to the label.
    It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::B))
                            .addGlobalAddress(M.getNamedValue(MF.getName())));
    return It;
  }
  // We're not tail calling, so we have to save LR before the call and
  // restore it afterwards.
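  // The pair built below is equivalent to bracketing the call with
  //   str x30, [sp, #-16]!   ...   ldr x30, [sp], #16
  // (16 bytes rather than 8 keeps SP 16-byte aligned).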
  MachineInstr *STRXpre = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
                              .addReg(AArch64::SP, RegState::Define)
                              .addReg(AArch64::LR)
                              .addReg(AArch64::SP)
                              .addImm(-16);
  It = MBB.insert(It, STRXpre);
  It++;

  // Insert the call.
  It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL))
                          .addGlobalAddress(M.getNamedValue(MF.getName())));
  It++;
  // Restore the link register.
  MachineInstr *LDRXpost = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
                               .addReg(AArch64::SP, RegState::Define)
                               .addReg(AArch64::LR)
                               .addReg(AArch64::SP)
                               .addImm(16);
  It = MBB.insert(It, LDRXpost);

  return It;
}