//===- AArch64InstrInfo.cpp - AArch64 Instruction Information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;

static cl::opt<unsigned>
TBZDisplacementBits("aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
                    cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
CBZDisplacementBits("aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
                    cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
                    cl::desc("Restrict range of Bcc instructions (DEBUG)"));

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(STI.getTargetTriple()), Subtarget(STI) {}

/// Return the number of bytes of code the specified instruction may be.
/// This returns the maximum number of bytes.
unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI.getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);

  // FIXME: We currently only handle pseudoinstructions that don't get expanded
  //        before the assembly printer.
  unsigned NumBytes = 0;
  const MCInstrDesc &Desc = MI.getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    NumBytes = 4;
    break;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    NumBytes = 0;
    break;
  case TargetOpcode::STACKMAP:
    // The upper bound for a stackmap intrinsic is the full length of its shadow
    NumBytes = StackMapOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case TargetOpcode::PATCHPOINT:
    // The size of the patchpoint intrinsic is the number of bytes requested
    NumBytes = PatchPointOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case AArch64::TLSDESC_CALLSEQ:
    // This gets lowered to an instruction sequence which takes 16 bytes
    NumBytes = 16;
    break;
  }

  return NumBytes;
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
    break;
  }
}
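
// For reference, parseCondBranch above encodes the branch condition as:
//   Bcc:        Cond = { CC }
//   CBZ/CBNZ:   Cond = { -1, Opcode, Reg }
//   TBZ/TBNZ:   Cond = { -1, Opcode, Reg, BitNumber }
// e.g. 'tbz w0, #3, target' becomes { -1, AArch64::TBZW, w0, 3 }. The same
// encoding is consumed by reverseBranchCondition and insertSelect below.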

static unsigned getBranchDisplacementBits(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return 64;
  case AArch64::TBNZW:
  case AArch64::TBZW:
  case AArch64::TBNZX:
  case AArch64::TBZX:
    return TBZDisplacementBits;
  case AArch64::CBNZW:
  case AArch64::CBZW:
  case AArch64::CBNZX:
  case AArch64::CBZX:
    return CBZDisplacementBits;
  case AArch64::Bcc:
    return BCCDisplacementBits;
  }
}

bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                             int64_t BrOffset) const {
  unsigned Bits = getBranchDisplacementBits(BranchOp);
  assert(Bits >= 3 && "max branch displacement must be enough to jump "
                      "over conditional branch expansion");
  return isIntN(Bits, BrOffset / 4);
}
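
// For example, with the default 14-bit limit for TB[N]Z, the offset must
// satisfy isIntN(14, BrOffset / 4): since offsets are scaled by the 4-byte
// instruction size, that is a byte range of [-32768, 32764] from the branch.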

MachineBasicBlock *AArch64InstrInfo::getBranchDestBlock(
    const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return MI.getOperand(0).getMBB();
  case AArch64::TBZW:
  case AArch64::TBNZW:
  case AArch64::TBZX:
  case AArch64::TBNZX:
    return MI.getOperand(2).getMBB();
  case AArch64::CBZW:
  case AArch64::CBNZW:
  case AArch64::CBZX:
  case AArch64::CBNZX:
  case AArch64::Bcc:
    return MI.getOperand(1).getMBB();
  }
}

bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now that the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = &*I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

bool AArch64InstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}
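
// For example, reversing { NE } yields { EQ }, and reversing the folded
// compare-and-branch { -1, CBZW, w0 } yields { -1, CBNZW, w0 }.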

unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }
  --I;
  if (!isCondBranchOpcode(I->getOpcode())) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }

  // Remove the branch.
  I->eraseFromParent();
  if (BytesRemoved)
    *BytesRemoved = 8;

  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    // Note that we use addOperand instead of addReg to keep the flags.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addOperand(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);

    if (BytesAdded)
      *BytesAdded = 4;

    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);

  if (BytesAdded)
    *BytesAdded = 8;

  return 2;
}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}
400 // Determine if VReg is defined by an instruction that can be folded into a
401 // csel instruction. If so, return the folded opcode, and the replacement
403 static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
404 unsigned *NewVReg = nullptr) {
405 VReg = removeCopies(MRI, VReg);
406 if (!TargetRegisterInfo::isVirtualRegister(VReg))
409 bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
410 const MachineInstr *DefMI = MRI.getVRegDef(VReg);
412 unsigned SrcOpNum = 0;
413 switch (DefMI->getOpcode()) {
414 case AArch64::ADDSXri:
415 case AArch64::ADDSWri:
416 // if NZCV is used, do not fold.
417 if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
419 // fall-through to ADDXri and ADDWri.
421 case AArch64::ADDXri:
422 case AArch64::ADDWri:
423 // add x, 1 -> csinc.
424 if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
425 DefMI->getOperand(3).getImm() != 0)
428 Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
431 case AArch64::ORNXrr:
432 case AArch64::ORNWrr: {
433 // not x -> csinv, represented as orn dst, xzr, src.
434 unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
435 if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
438 Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
442 case AArch64::SUBSXrr:
443 case AArch64::SUBSWrr:
444 // if NZCV is used, do not fold.
445 if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
447 // fall-through to SUBXrr and SUBWrr.
449 case AArch64::SUBXrr:
450 case AArch64::SUBWrr: {
451 // neg x -> csneg, represented as sub dst, xzr, src.
452 unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
453 if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
456 Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
462 assert(Opc && SrcOpNum && "Missing parameters");
465 *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
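
// Illustrative example: given
//   %vreg1 = ADDWri %vreg0, 1, 0    ; %vreg1 = %vreg0 + 1
// canFoldIntoCSel returns CSINCWr and sets *NewVReg to %vreg0, since
// 'csinc Wd, Wn, Wm, cc' computes Wm + 1 when the condition is false.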

bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, ArrayRef<MachineOperand> Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, unsigned DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = false;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = true;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = false;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = true;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg)
      .addReg(TrueReg)
      .addReg(FalseReg)
      .addImm(CC);
}
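
// Putting it together (illustrative): selecting between %t and %f on the
// condition of 'tbz w0, #3' emits
//   ands wzr, w0, #0x8            ; test bit 3
//   csel Wd, %t, %f, eq           ; bit clear (Z set) picks %t
// with the #0x8 mask produced by encodeLogicalImmediate(1ull << 3, 32).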

/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
  uint64_t Imm = MI.getOperand(1).getImm();
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
}
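
// For example, 0x00ff00ff00ff00ff is a valid 64-bit logical immediate (a
// repeating pattern of runs of ones), so MOVi64imm of that value can become
// 'orr x0, xzr, #0xff00ff00ff00ff'; an arbitrary constant like
// 0x123456789abcdef1 has no such encoding and must stay a MOVZ/MOVK sequence.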

// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  if (!Subtarget.hasCustomCheapAsMoveHandling())
    return MI.isAsCheapAsAMove();

  unsigned Imm = 0;

  switch (MI.getOpcode()) {
  default:
    return false;

  // add/sub on register without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 ||
            MI.getOperand(3).getImm() == 0);

  // add/sub on register with shift
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    Imm = MI.getOperand(3).getImm();
    return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
            AArch64_AM::getArithShiftValue(Imm) < 4);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;

  // logical ops on register with shift
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
    Imm = MI.getOperand(3).getImm();
    return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
            AArch64_AM::getShiftValue(Imm) < 4 &&
            AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL);

  // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
  // ORRXri, it is as cheap as MOV
  case AArch64::MOVi32imm:
    return canBeExpandedToORR(MI, 32);
  case AArch64::MOVi64imm:
    return canBeExpandedToORR(MI, 64);

  // It is cheap to zero out registers if the subtarget has ZeroCycleZeroing
  // feature.
  case AArch64::FMOVS0:
  case AArch64::FMOVD0:
    return Subtarget.hasZeroCycleZeroing();
  case TargetOpcode::COPY:
    return (Subtarget.hasZeroCycleZeroing() &&
            (MI.getOperand(1).getReg() == AArch64::WZR ||
             MI.getOperand(1).getReg() == AArch64::XZR));
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
    MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned BaseRegA = 0, BaseRegB = 0;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned WidthA = 0, WidthB = 0;

  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
  // base registers are identical, and the offset of a lower memory access +
  // the width doesn't overlap the offset of a higher memory access,
  // then the memory accesses are different.
  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
    if (BaseRegA == BaseRegB) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}
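
// For example, 'ldr x1, [x0]' (offset 0, width 8) and 'str x2, [x0, #8]'
// share a base register and satisfy 0 + 8 <= 8, so they cannot overlap; with
// offsets 0 and 4 instead, 0 + 8 > 4 and we conservatively return false.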

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = MI.getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: In order to convert CmpValue to 0 or 1
    CmpValue = MI.getOperand(2).getImm() != 0;
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the others xxxS
    // instructions.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME:The return val type of decodeLogicalImmediate is uint64_t,
    // while the type of CmpValue is int. When converting uint64_t to int,
    // the high 32 bits of uint64_t will be lost.
    // In fact it causes a bug in spec2006-483.xalancbmk
    // CmpValue is only used to compare with zero in OptimizeCompareInstr
    CmpValue = AArch64_AM::decodeLogicalImmediate(
                   MI.getOperand(2).getImm(),
                   MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0;
    return true;
  }

  return false;
}
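
// For example, 'subs w0, w1, #4' is analyzed as SrcReg = w1, SrcReg2 = 0,
// CmpMask = ~0, CmpValue = 1: any non-zero immediate is flattened to 1
// because optimizeCompareInstr only distinguishes comparisons against zero.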

static bool UpdateOperandRegClass(MachineInstr &Instr) {
  MachineBasicBlock *MBB = Instr.getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr.getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr.getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// \brief Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible to do the actual
/// substitution and legality checking.
static unsigned convertFlagSettingOpcode(const MachineInstr &MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
  bool MIDefinesZeroReg = false;
  if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI.getOpcode()) {
  default:
    return MI.getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}

enum AccessKind {
  AK_Write = 0x01,
  AK_Read = 0x10,
  AK_All = 0x11
};

/// True when condition flags are accessed (either by writing or reading)
/// on the instruction trace starting at From and ending at To.
///
/// Note: If From and To are from different blocks it's assumed CC are accessed
///       on the path.
static bool areCFlagsAccessedBetweenInstrs(
    MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
    const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
  // Early exit if To is at the beginning of the BB.
  if (To == To->getParent()->begin())
    return true;

  // Check whether the instructions are in the same basic block
  // If not, assume the condition flags might get modified somewhere.
  if (To->getParent() != From->getParent())
    return true;

  // From must be above To.
  assert(std::find_if(++To.getReverse(), To->getParent()->rend(),
                      [From](MachineInstr &MI) {
                        return MI.getIterator() == From;
                      }) != To->getParent()->rend());

  // We iterate backward starting \p To until we hit \p From.
  for (--To; To != From; --To) {
    const MachineInstr &Instr = *To;

    if (((AccessToCheck & AK_Write) &&
         Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
        ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
      return true;
  }
  return false;
}

/// Try to optimize a compare instruction. A compare instruction is an
/// instruction which produces AArch64::NZCV. It is only a true compare
/// instruction when there are no uses of its destination register.
///
/// The following steps are tried in order:
/// 1. Convert CmpInstr into an unconditional version.
/// 2. Remove CmpInstr if above there is an instruction producing a needed
///    condition code or an instruction which can be converted into such an
///    instruction. Only comparison with zero is supported.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {
  assert(CmpInstr.getParent());
  assert(MRI);

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (DeadNZCVIdx != -1) {
    if (CmpInstr.definesRegister(AArch64::WZR) ||
        CmpInstr.definesRegister(AArch64::XZR)) {
      CmpInstr.eraseFromParent();
      return true;
    }
    unsigned Opc = CmpInstr.getOpcode();
    unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr.setDesc(MCID);
    CmpInstr.RemoveOperand(DeadNZCVIdx);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where immediate is zero.
  // FIXME: CmpValue has already been converted to 0 or 1 in analyzeCompare
  // function.
  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
    return false;

  return substituteCmpToZero(CmpInstr, SrcReg, MRI);
}

/// Get opcode of S version of Instr.
/// If Instr is S version its opcode is returned.
/// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have S version
/// or we are not interested in it.
static unsigned sForm(MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64::INSTRUCTION_LIST_END;

  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    return Instr.getOpcode();

  case AArch64::ADDWrr:    return AArch64::ADDSWrr;
  case AArch64::ADDWri:    return AArch64::ADDSWri;
  case AArch64::ADDXrr:    return AArch64::ADDSXrr;
  case AArch64::ADDXri:    return AArch64::ADDSXri;
  case AArch64::ADCWr:     return AArch64::ADCSWr;
  case AArch64::ADCXr:     return AArch64::ADCSXr;
  case AArch64::SUBWrr:    return AArch64::SUBSWrr;
  case AArch64::SUBWri:    return AArch64::SUBSWri;
  case AArch64::SUBXrr:    return AArch64::SUBSXrr;
  case AArch64::SUBXri:    return AArch64::SUBSXri;
  case AArch64::SBCWr:     return AArch64::SBCSWr;
  case AArch64::SBCXr:     return AArch64::SBCSXr;
  case AArch64::ANDWri:    return AArch64::ANDSWri;
  case AArch64::ANDXri:    return AArch64::ANDSXri;
  }
}

/// Check if AArch64::NZCV should be alive in successors of MBB.
static bool areCFlagsAliveInSuccessors(MachineBasicBlock *MBB) {
  for (auto *BB : MBB->successors())
    if (BB->isLiveIn(AArch64::NZCV))
      return true;
  return false;
}

namespace {
struct UsedNZCV {
  bool N;
  bool Z;
  bool C;
  bool V;
  UsedNZCV() : N(false), Z(false), C(false), V(false) {}
  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    this->N |= UsedFlags.N;
    this->Z |= UsedFlags.Z;
    this->C |= UsedFlags.C;
    this->V |= UsedFlags.V;
    return *this;
  }
};
} // end anonymous namespace

/// Find a condition code used by the instruction.
/// Returns AArch64CC::Invalid if either the instruction does not use condition
/// codes or we don't optimize CmpInstr in the presence of such instructions.
static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {
  switch (Instr.getOpcode()) {
  default:
    return AArch64CC::Invalid;

  case AArch64::Bcc: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 2);
    return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 2).getImm());
  }

  case AArch64::CSINVWr:
  case AArch64::CSINVXr:
  case AArch64::CSINCWr:
  case AArch64::CSINCXr:
  case AArch64::CSELWr:
  case AArch64::CSELXr:
  case AArch64::CSNEGWr:
  case AArch64::CSNEGXr:
  case AArch64::FCSELSrrr:
  case AArch64::FCSELDrrr: {
    int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
    assert(Idx >= 1);
    return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 1).getImm());
  }
  }
}

static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
  assert(CC != AArch64CC::Invalid);
  UsedNZCV UsedFlags;
  switch (CC) {
  default:
    break;

  case AArch64CC::EQ: // Z set
  case AArch64CC::NE: // Z clear
    UsedFlags.Z = true;
    break;

  case AArch64CC::HI: // Z clear and C set
  case AArch64CC::LS: // Z set   or C clear
    UsedFlags.Z = true;
  case AArch64CC::HS: // C set
  case AArch64CC::LO: // C clear
    UsedFlags.C = true;
    break;

  case AArch64CC::MI: // N set
  case AArch64CC::PL: // N clear
    UsedFlags.N = true;
    break;

  case AArch64CC::VS: // V set
  case AArch64CC::VC: // V clear
    UsedFlags.V = true;
    break;

  case AArch64CC::GT: // Z clear, N and V the same
  case AArch64CC::LE: // Z set,   N and V differ
    UsedFlags.Z = true;
  case AArch64CC::GE: // N and V the same
  case AArch64CC::LT: // N and V differ
    UsedFlags.N = true;
    UsedFlags.V = true;
    break;
  }
  return UsedFlags;
}

static bool isADDSRegImm(unsigned Opcode) {
  return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
}

static bool isSUBSRegImm(unsigned Opcode) {
  return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
}

/// Check if CmpInstr can be substituted by MI.
///
/// CmpInstr can be substituted:
/// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
/// - and, MI and CmpInstr are from the same MachineBB
/// - and, condition flags are not alive in successors of the CmpInstr parent
/// - and, if MI opcode is the S form there must be no defs of flags between
///        MI and CmpInstr
///        or if MI opcode is not the S form there must be neither defs of flags
///        nor uses of flags between MI and CmpInstr.
/// - and  C/V flags are not used after CmpInstr
static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr,
                                       const TargetRegisterInfo *TRI) {
  assert(MI);
  assert(sForm(*MI) != AArch64::INSTRUCTION_LIST_END);
  assert(CmpInstr);

  const unsigned CmpOpcode = CmpInstr->getOpcode();
  if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
    return false;

  if (MI->getParent() != CmpInstr->getParent())
    return false;

  if (areCFlagsAliveInSuccessors(CmpInstr->getParent()))
    return false;

  AccessKind AccessToCheck = AK_Write;
  if (sForm(*MI) != MI->getOpcode())
    AccessToCheck = AK_All;
  if (areCFlagsAccessedBetweenInstrs(MI, CmpInstr, TRI, AccessToCheck))
    return false;

  UsedNZCV NZCVUsedAfterCmp;
  for (auto I = std::next(CmpInstr->getIterator()),
            E = CmpInstr->getParent()->instr_end();
       I != E; ++I) {
    const MachineInstr &Instr = *I;
    if (Instr.readsRegister(AArch64::NZCV, TRI)) {
      AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr);
      if (CC == AArch64CC::Invalid) // Unsupported conditional instruction
        return false;
      NZCVUsedAfterCmp |= getUsedNZCV(CC);
    }

    if (Instr.modifiesRegister(AArch64::NZCV, TRI))
      break;
  }

  return !NZCVUsedAfterCmp.C && !NZCVUsedAfterCmp.V;
}
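
// Illustrative example of a substitutable sequence:
//   %vreg2 = SUBWrr %vreg0, %vreg1
//   ...                            ; no NZCV defs or uses in between
//   SUBSWri %vreg2, 0, 0           ; cmp w2, #0
//   Bcc NE, ...
// The compare can be removed by rewriting SUBWrr as SUBSWrr, because NE only
// reads Z and neither C nor V is consumed after the compare.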

/// Substitute an instruction comparing to zero with another instruction
/// which produces needed condition flags.
///
/// Return true on success.
bool AArch64InstrInfo::substituteCmpToZero(
    MachineInstr &CmpInstr, unsigned SrcReg,
    const MachineRegisterInfo *MRI) const {
  assert(MRI);
  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();

  unsigned NewOpc = sForm(*MI);
  if (NewOpc == AArch64::INSTRUCTION_LIST_END)
    return false;

  if (!canInstrSubstituteCmpInstr(MI, &CmpInstr, TRI))
    return false;

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr.eraseFromParent();
  bool succeeded = UpdateOperandRegClass(*MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}

bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI.memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI.memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(0)
        .addMemOperand(*MI.memoperands_begin());
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI.memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}

/// Return true if this instruction has a shifted register operand with a
/// non-zero shift amount.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::CRC32Brr:
  case AArch64::CRC32CBrr:
  case AArch64::CRC32CHrr:
  case AArch64::CRC32CWrr:
  case AArch64::CRC32CXrr:
  case AArch64::CRC32Hrr:
  case AArch64::CRC32Wrr:
  case AArch64::CRC32Xrr:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI.getOperand(3).isImm()) {
      unsigned val = MI.getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

/// Return true if this instruction has an extended register operand with a
/// non-zero extend/shift amount.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI.getOperand(3).isImm()) {
      unsigned val = MI.getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }

  return false;
}

// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
      assert(MI.getDesc().getNumOperands() == 3 &&
             MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI.getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI.getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI.getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI.getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI.getOperand(1).getReg() == AArch64::XZR) {
      assert(MI.getDesc().getNumOperands() == 4 &&
             MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI.getOperand(2).getImm() == 0) {
      assert(MI.getDesc().getNumOperands() == 4 &&
             MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI.getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
        MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
        MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX: {
    unsigned Val = MI.getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  }
  return false;
}

/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) const {
  return any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
    return MMO->getFlags() & MOSuppressPair;
  });
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) const {
  if (MI.memoperands_empty())
    return;
  (*MI.memoperands_begin())->setFlags(MOSuppressPair);
}

bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) const {
  switch (Opc) {
  default:
    return false;
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  case AArch64::LDURSWi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
    return true;
  }
}

bool AArch64InstrInfo::isUnscaledLdSt(MachineInstr &MI) const {
  return isUnscaledLdSt(MI.getOpcode());
}

// Is this a candidate for ld/st merging or pairing?  For example, we don't
// touch volatiles or load/stores that have a hint to avoid pair formation.
bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
  // If this is a volatile load/store, don't mess with it.
  if (MI.hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm (as opposed to an address reloc).
  assert(MI.getOperand(1).isReg() && "Expected a reg operand.");
  if (!MI.getOperand(2).isImm())
    return false;

  // Can't merge/pair if the instruction modifies the base register.
  // e.g., ldr x0, [x0]
  unsigned BaseReg = MI.getOperand(1).getReg();
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (MI.modifiesRegister(BaseReg, TRI))
    return false;

  // Check if this load/store has a hint to avoid pair formation.
  // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
  if (isLdStPairSuppressed(MI))
    return false;

  // On some CPUs quad load/store pairs are slower than two single load/stores.
  if (Subtarget.avoidQuadLdStPairs()) {
    switch (MI.getOpcode()) {
    default:
      break;
    case AArch64::LDURQi:
    case AArch64::STURQi:
    case AArch64::LDRQui:
    case AArch64::STRQui:
      return false;
    }
  }

  return true;
}

bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
    MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,
    const TargetRegisterInfo *TRI) const {
  unsigned Width;
  return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);
}

bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
    MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
    const TargetRegisterInfo *TRI) const {
  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
  // Handle only loads/stores with base register followed by immediate offset.
  if (LdSt.getNumExplicitOperands() == 3) {
    // Non-paired instruction (e.g., ldr x1, [x0, #8]).
    if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
      return false;
  } else if (LdSt.getNumExplicitOperands() == 4) {
    // Paired instruction (e.g., ldp x1, x2, [x0, #8]).
    if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isReg() ||
        !LdSt.getOperand(3).isImm())
      return false;
  } else
    return false;

  // Offset is calculated as the immediate operand multiplied by the scaling
  // factor. Unscaled instructions have scaling factor set to 1.
  unsigned Scale = 0;
  switch (LdSt.getOpcode()) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    Width = 16;
    Scale = 1;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    Width = 8;
    Scale = 1;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    Width = 4;
    Scale = 1;
    break;
  case AArch64::LDURHi:
  case AArch64::LDURHHi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::STURHi:
  case AArch64::STURHHi:
    Width = 2;
    Scale = 1;
    break;
  case AArch64::LDURBi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::STURBi:
  case AArch64::STURBBi:
    Width = 1;
    Scale = 1;
    break;
  case AArch64::LDPQi:
  case AArch64::LDNPQi:
  case AArch64::STPQi:
  case AArch64::STNPQi:
    Width = 32;
    Scale = 16;
    break;
  case AArch64::LDRQui:
  case AArch64::STRQui:
    Scale = Width = 16;
    break;
  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::LDNPXi:
  case AArch64::LDNPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
  case AArch64::STNPXi:
  case AArch64::STNPDi:
    Width = 16;
    Scale = 8;
    break;
  case AArch64::LDRXui:
  case AArch64::LDRDui:
  case AArch64::STRXui:
  case AArch64::STRDui:
    Scale = Width = 8;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::LDNPWi:
  case AArch64::LDNPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
  case AArch64::STNPWi:
  case AArch64::STNPSi:
    Width = 8;
    Scale = 4;
    break;
  case AArch64::LDRWui:
  case AArch64::LDRSui:
  case AArch64::LDRSWui:
  case AArch64::STRWui:
  case AArch64::STRSui:
    Scale = Width = 4;
    break;
  case AArch64::LDRHui:
  case AArch64::LDRHHui:
  case AArch64::STRHui:
  case AArch64::STRHHui:
    Scale = Width = 2;
    break;
  case AArch64::LDRBui:
  case AArch64::LDRBBui:
  case AArch64::STRBui:
  case AArch64::STRBBui:
    Scale = Width = 1;
    break;
  }

  if (LdSt.getNumExplicitOperands() == 3) {
    BaseReg = LdSt.getOperand(1).getReg();
    Offset = LdSt.getOperand(2).getImm() * Scale;
  } else {
    assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
    BaseReg = LdSt.getOperand(2).getReg();
    Offset = LdSt.getOperand(3).getImm() * Scale;
  }
  return true;
}

// Scale the unscaled offsets.  Returns false if the unscaled offset can't be
// scaled.
static bool scaleOffset(unsigned Opc, int64_t &Offset) {
  unsigned OffsetStride = 1;
  switch (Opc) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    OffsetStride = 16;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    OffsetStride = 8;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    OffsetStride = 4;
    break;
  }
  // If the byte-offset isn't a multiple of the stride, we can't scale this
  // offset.
  if (Offset % OffsetStride != 0)
    return false;

  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  Offset /= OffsetStride;
  return true;
}
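
// For example, 'ldur x1, [x0, #16]' has OffsetStride 8, so the byte offset
// 16 scales to the element offset 2; a byte offset of 12 is not a multiple
// of 8 and so cannot be expressed in the scaled ldp/stp encoding.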

static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
  if (FirstOpc == SecondOpc)
    return true;
  // We can also pair sign-ext and zero-ext instructions.
  switch (FirstOpc) {
  default:
    return false;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
  }
  // These instructions can't be paired based on their opcodes.
  return false;
}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
                                           MachineInstr &SecondLdSt,
                                           unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;

  if (!isPairableLdStInst(FirstLdSt) || !isPairableLdStInst(SecondLdSt))
    return false;

  // Can we pair these instructions based on their opcodes?
  unsigned FirstOpc = FirstLdSt.getOpcode();
  unsigned SecondOpc = SecondLdSt.getOpcode();
  if (!canPairLdStOpc(FirstOpc, SecondOpc))
    return false;

  // Can't merge volatiles or load/stores that have a hint to avoid pair
  // formation, for example.
  if (!isCandidateToMergeOrPair(FirstLdSt) ||
      !isCandidateToMergeOrPair(SecondLdSt))
    return false;

  // isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
  int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
  if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
    return false;

  int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
  if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
    return false;

  // Pairwise instructions have a 7-bit signed offset field.
  if (Offset1 > 63 || Offset1 < -64)
    return false;

  // The caller should already have ordered First/SecondLdSt by offset.
  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
  return Offset1 + 1 == Offset2;
}
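
// For example, 'ldr x1, [x0, #8]' followed by 'ldr x2, [x0, #16]' has scaled
// offsets 1 and 2, which are adjacent and within the signed 7-bit range, so
// the two loads are candidates for clustering into an ldp.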

bool AArch64InstrInfo::shouldScheduleAdjacent(
    const MachineInstr &First, const MachineInstr &Second) const {
  if (Subtarget.hasArithmeticBccFusion()) {
    // Fuse CMN, CMP, TST followed by Bcc.
    unsigned SecondOpcode = Second.getOpcode();
    if (SecondOpcode == AArch64::Bcc) {
      switch (First.getOpcode()) {
      default:
        return false;
      case AArch64::ADDSWri:
      case AArch64::ADDSWrr:
      case AArch64::ADDSXri:
      case AArch64::ADDSXrr:
      case AArch64::ANDSWri:
      case AArch64::ANDSWrr:
      case AArch64::ANDSXri:
      case AArch64::ANDSXrr:
      case AArch64::SUBSWri:
      case AArch64::SUBSWrr:
      case AArch64::SUBSXri:
      case AArch64::SUBSXrr:
      case AArch64::BICSWrr:
      case AArch64::BICSXrr:
        return true;
      case AArch64::ADDSWrs:
      case AArch64::ADDSXrs:
      case AArch64::ANDSWrs:
      case AArch64::ANDSXrs:
      case AArch64::SUBSWrs:
      case AArch64::SUBSXrs:
      case AArch64::BICSWrs:
      case AArch64::BICSXrs:
        // Shift value can be 0 making these behave like the "rr" variant...
        return !hasShiftedReg(First);
      }
    }
  }
  if (Subtarget.hasArithmeticCbzFusion()) {
    // Fuse ALU operations followed by CBZ/CBNZ.
    unsigned SecondOpcode = Second.getOpcode();
    if (SecondOpcode == AArch64::CBNZW || SecondOpcode == AArch64::CBNZX ||
        SecondOpcode == AArch64::CBZW || SecondOpcode == AArch64::CBZX) {
      switch (First.getOpcode()) {
      default:
        return false;
      case AArch64::ADDWri:
      case AArch64::ADDWrr:
      case AArch64::ADDXri:
      case AArch64::ADDXrr:
      case AArch64::ANDWri:
      case AArch64::ANDWrr:
      case AArch64::ANDXri:
      case AArch64::ANDXrr:
      case AArch64::EORWri:
      case AArch64::EORWrr:
      case AArch64::EORXri:
      case AArch64::EORXrr:
      case AArch64::ORRWri:
      case AArch64::ORRWrr:
      case AArch64::ORRXri:
      case AArch64::ORRXrr:
      case AArch64::SUBWri:
      case AArch64::SUBWrr:
      case AArch64::SUBXri:
      case AArch64::SUBXrr:
        return true;
      case AArch64::ADDWrs:
      case AArch64::ADDXrs:
      case AArch64::ANDWrs:
      case AArch64::ANDXrs:
      case AArch64::SUBWrs:
      case AArch64::SUBXrs:
      case AArch64::BICWrs:
      case AArch64::BICXrs:
        // Shift value can be 0 making these behave like the "rr" variant...
        return !hasShiftedReg(First);
      }
    }
  }

  return false;
}

MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
    MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
    const MDNode *Expr, const DebugLoc &DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(Var)
                                .addMetadata(Expr);
  return &*MIB;
}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}
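
// For example, copying the D-register triple starting at d0 into the one
// starting at d1 in ascending sub-register order would overwrite d1 and d2
// before they are read ((1 - 0) & 0x1f == 1 < 3), so copyPhysRegTuple below
// iterates backwards in that case.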

void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    llvm::ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() && "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL, unsigned DestReg,
                                   unsigned SrcReg, bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg)
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }
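
  // Illustrative example (not part of the original source): the GPR32 cases
  // above lower a 32-bit register move as, e.g.
  //   add wsp, w3, #0    // WSP involved (X-form on zero-cycle-move cores)
  //   movz w0, #0        // WZR source with zero-cycle zeroing
  //   orr w0, wzr, w1    // default expansion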
  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg)
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }
  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }
  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpost))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }
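
  // Illustrative example (not part of the original source): without NEON the
  // FPR128 copy round-trips the value through the stack, e.g. for q0 -> q1:
  //   str q0, [sp, #-16]!
  //   ldr q1, [sp], #16
  // The post-indexed reload restores SP to its original value.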
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }
  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }
  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}
void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d;
      Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d;
      Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d;
      Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d;
      Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d;
      Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d;
      Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(SrcReg, getKillRegState(isKill))
                                     .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
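
// Illustrative example (not part of the original source): spilling a GPR64
// virtual register emits "STRXui %vreg, <fi#N>, 0", while a 32-byte QQ tuple
// takes the ST1Twov2d path with no immediate operand, since ST1 has no
// register-plus-scaled-offset addressing form.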
void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d;
      Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d;
      Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d;
      Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d;
      Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d;
      Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d;
      Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(DestReg, getDefRegState(true))
                                     .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
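
// Illustrative example (not part of the original source): a refill mirrors
// the spill above, e.g. "LDRXui %vreg, <fi#N>, 0" for a GPR64 slot and
// LD1Twov2d (again with no immediate) for a QQ tuple.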
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  assert((DestReg != AArch64::SP || Offset % 16 == 0) &&
         "SP increment/decrement not 16-byte aligned");

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24 bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
  // register can be loaded with offset%8 and the add/sub can use an extending
  // instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}
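
// Illustrative example (not part of the original source): for
// emitFrameOffset(..., SP, SP, 0x45670, ...) the loop above peels the
// shifted 12-bit chunk first and the tail instruction adds the remainder:
//   add sp, sp, #0x45, lsl #12    // adds 0x45000
//   add sp, sp, #0x670            // adds the remaining 0x670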
MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex,
    LiveIntervals *LIS) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI.isFullCopy()) {
    unsigned DstReg = MI.getOperand(0).getReg();
    unsigned SrcReg = MI.getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Handle the case where a copy is being spilled or refilled but the source
  // and destination register classes don't match. For example:
  //
  //   %vreg0<def> = COPY %XZR; GPR64common:%vreg0
  //
  // In this case we can still safely fold away the COPY and generate the
  // following spill code:
  //
  //   STRXui %XZR, <fi#0>
  //
  // This also eliminates spilled cross-register-class COPYs (e.g. between x
  // and d regs) of the same size. For example:
  //
  //   %vreg0<def> = COPY %vreg1; GPR64:%vreg0, FPR64:%vreg1
  //
  // will be refilled as
  //
  //   LDRDui %vreg0, fi<#0>
  //
  // instead of:
  //
  //   LDRXui %vregTemp, fi<#0>
  //   %vreg0 = FMOV %vregTemp
  //
  if (MI.isFullCopy() && Ops.size() == 1 &&
      // Make sure we're only folding the explicit COPY defs/uses.
      (Ops[0] == 0 || Ops[0] == 1)) {
    const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
    const MachineRegisterInfo &MRI = MF.getRegInfo();
    MachineBasicBlock &MBB = *MI.getParent();
    const MachineOperand &DstMO = MI.getOperand(0);
    const MachineOperand &SrcMO = MI.getOperand(1);
    unsigned DstReg = DstMO.getReg();
    unsigned SrcReg = SrcMO.getReg();
    auto getRegClass = [&](unsigned Reg) {
      return TargetRegisterInfo::isVirtualRegister(Reg)
                 ? MRI.getRegClass(Reg)
                 : TRI.getMinimalPhysRegClass(Reg);
    };
    const TargetRegisterClass &DstRC = *getRegClass(DstReg);
    const TargetRegisterClass &SrcRC = *getRegClass(SrcReg);
    if (DstRC.getSize() == SrcRC.getSize()) {
      if (Ops[0] == 0)
        storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
                            &SrcRC, &TRI);
      else
        loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, &DstRC, &TRI);
      return &*--InsertPt;
    }
  }

  // Cannot fold.
  return nullptr;
}
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // The ImmIdx should be changed case by case if it is not 2.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unhandled opcode in rewriteAArch64FrameIndex");
  // Vector spills/fills can't take an immediate offset.
  case AArch64::LD1Twov2d:
  case AArch64::LD1Threev2d:
  case AArch64::LD1Fourv2d:
  case AArch64::LD1Twov1d:
  case AArch64::LD1Threev1d:
  case AArch64::LD1Fourv1d:
  case AArch64::ST1Twov2d:
  case AArch64::ST1Threev2d:
  case AArch64::ST1Fourv2d:
  case AArch64::ST1Twov1d:
  case AArch64::ST1Threev1d:
  case AArch64::ST1Fourv1d:
    return AArch64FrameOffsetCannotUpdate;
  case AArch64::PRFMui:
    Scale = 8;
    UnscaledOp = AArch64::PRFUMi;
    break;
  case AArch64::LDRXui:
    Scale = 8;
    UnscaledOp = AArch64::LDURXi;
    break;
  case AArch64::LDRWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURWi;
    break;
  case AArch64::LDRBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBi;
    break;
  case AArch64::LDRHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHi;
    break;
  case AArch64::LDRSui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSi;
    break;
  case AArch64::LDRDui:
    Scale = 8;
    UnscaledOp = AArch64::LDURDi;
    break;
  case AArch64::LDRQui:
    Scale = 16;
    UnscaledOp = AArch64::LDURQi;
    break;
  case AArch64::LDRBBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBBi;
    break;
  case AArch64::LDRHHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHHi;
    break;
  case AArch64::LDRSBXui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBXi;
    break;
  case AArch64::LDRSBWui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBWi;
    break;
  case AArch64::LDRSHXui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHXi;
    break;
  case AArch64::LDRSHWui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHWi;
    break;
  case AArch64::LDRSWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSWi;
    break;

  case AArch64::STRXui:
    Scale = 8;
    UnscaledOp = AArch64::STURXi;
    break;
  case AArch64::STRWui:
    Scale = 4;
    UnscaledOp = AArch64::STURWi;
    break;
  case AArch64::STRBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBi;
    break;
  case AArch64::STRHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHi;
    break;
  case AArch64::STRSui:
    Scale = 4;
    UnscaledOp = AArch64::STURSi;
    break;
  case AArch64::STRDui:
    Scale = 8;
    UnscaledOp = AArch64::STURDi;
    break;
  case AArch64::STRQui:
    Scale = 16;
    UnscaledOp = AArch64::STURQi;
    break;
  case AArch64::STRBBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBBi;
    break;
  case AArch64::STRHHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHHi;
    break;

  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
  case AArch64::LDNPXi:
  case AArch64::LDNPDi:
  case AArch64::STNPXi:
  case AArch64::STNPDi:
    IsSigned = true;
    Scale = 8;
    break;
  case AArch64::LDPQi:
  case AArch64::STPQi:
  case AArch64::LDNPQi:
  case AArch64::STNPQi:
    IsSigned = true;
    Scale = 16;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
  case AArch64::LDNPWi:
  case AArch64::LDNPSi:
  case AArch64::STNPWi:
  case AArch64::STNPSi:
    IsSigned = true;
    Scale = 4;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURWi:
  case AArch64::LDURBi:
  case AArch64::LDURHi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSWi:
  case AArch64::STURXi:
  case AArch64::STURWi:
  case AArch64::STURBi:
  case AArch64::STURHi:
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
    Scale = 1;
    break;
  }

  Offset += MI.getOperand(ImmIdx).getImm() * Scale;

  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset (and have an unscaled op to use).
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Use an unscaled addressing mode if the instruction has a negative offset
  // (or if the instruction is already using an unscaled addressing mode).
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return AArch64FrameOffsetCanUpdate |
         (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}
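
// Illustrative example (not part of the original source): for an LDRXui with
// immediate 3 and an incoming frame offset of 20, Offset becomes
// 20 + 3 * 8 = 44. Since 44 is not a multiple of the scale 8, the access must
// switch to the unscaled LDURXi; 44 fits its signed 9-bit range [-256, 255],
// so the whole offset is emittable and the result is
// AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal.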
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, int &Offset,
                                    const AArch64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
                                         &UnscaledOp, &NewOffset);
  if (Status & AArch64FrameOffsetCanUpdate) {
    if (Status & AArch64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return Offset == 0;
  }

  return false;
}
void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  // HINT #0 is the architectural encoding of NOP.
  NopInst.setOpcode(AArch64::HINT);
  NopInst.addOperand(MCOperand::createImm(0));
}

// AArch64 supports MachineCombiner.
bool AArch64InstrInfo::useMachineCombiner() const {
  return true;
}
// True when Opc sets flag
static bool isCombineInstrSettingFlag(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}

// 32b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate32(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDWrr:
  case AArch64::ADDWri:
  case AArch64::SUBWrr:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::SUBSWrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBWri:
  case AArch64::SUBSWri:
    return true;
  default:
    break;
  }
  return false;
}

// 64b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate64(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDXrr:
  case AArch64::ADDXri:
  case AArch64::SUBXrr:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBXri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}

// FP Opcodes that can be combined with a FMUL
static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {
  switch (Inst.getOpcode()) {
  default:
    break;
  case AArch64::FADDSrr:
  case AArch64::FADDDrr:
  case AArch64::FADDv2f32:
  case AArch64::FADDv2f64:
  case AArch64::FADDv4f32:
  case AArch64::FSUBSrr:
  case AArch64::FSUBDrr:
  case AArch64::FSUBv2f32:
  case AArch64::FSUBv2f64:
  case AArch64::FSUBv4f32:
    TargetOptions Options = Inst.getParent()->getParent()->getTarget().Options;
    return (Options.UnsafeFPMath ||
            Options.AllowFPOpFusion == FPOpFusion::Fast);
  }
  return false;
}

// Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate(unsigned Opc) {
  return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
}
//
// Utility routine that checks if \param MO is defined by an
// \param CombineOpc instruction in the basic block \param MBB
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
                       unsigned CombineOpc, unsigned ZeroReg = 0,
                       bool CheckZeroReg = false) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineInstr *MI = nullptr;

  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    MI = MRI.getUniqueVRegDef(MO.getReg());
  // And it needs to be in the trace (otherwise, it won't have a depth).
  if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
    return false;
  // Must only be used by the user we combine with.
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

  if (CheckZeroReg) {
    assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
           MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
           MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
    // The third input reg must be zero.
    if (MI->getOperand(3).getReg() != ZeroReg)
      return false;
  }

  return true;
}
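
// Illustrative example (not part of the original source): with CheckZeroReg
// set, canCombine accepts
//   %vreg1 = MADDWrrr %vreg2, %vreg3, %WZR   ; i.e. a plain "mul"
// as a combinable multiply, but rejects a MADDWrrr with a real addend
// register, since folding that into another multiply-accumulate would drop
// the add.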
//
// Is \param MO defined by an integer multiply and can be combined?
static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                              unsigned MulOpc, unsigned ZeroReg) {
  return canCombine(MBB, MO, MulOpc, ZeroReg, true);
}

//
// Is \param MO defined by a floating-point multiply and can be combined?
static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                               unsigned MulOpc) {
  return canCombine(MBB, MO, MulOpc);
}
// TODO: There are many more machine instruction opcodes to match:
//       1. Other data types (integer, vectors)
//       2. Other math / logic operations (xor, or)
//       3. Other forms of the same operation (intrinsics and other variants)
bool AArch64InstrInfo::isAssociativeAndCommutative(
    const MachineInstr &Inst) const {
  switch (Inst.getOpcode()) {
  case AArch64::FADDDrr:
  case AArch64::FADDSrr:
  case AArch64::FADDv2f32:
  case AArch64::FADDv2f64:
  case AArch64::FADDv4f32:
  case AArch64::FMULDrr:
  case AArch64::FMULSrr:
  case AArch64::FMULX32:
  case AArch64::FMULX64:
  case AArch64::FMULXv2f32:
  case AArch64::FMULXv2f64:
  case AArch64::FMULXv4f32:
  case AArch64::FMULv2f32:
  case AArch64::FMULv2f64:
  case AArch64::FMULv4f32:
    return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
  default:
    return false;
  }
}
/// Find instructions that can be turned into madd.
static bool getMaddPatterns(MachineInstr &Root,
                            SmallVectorImpl<MachineCombinerPattern> &Patterns) {
  unsigned Opc = Root.getOpcode();
  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  if (!isCombineInstrCandidate(Opc))
    return false;
  if (isCombineInstrSettingFlag(Opc)) {
    int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
    // When NZCV is live, bail out.
    if (Cmp_NZCV == -1)
      return false;
    unsigned NewOpc = convertFlagSettingOpcode(Root);
    // When the opcode can't change, bail out.
    // CHECKME: do we miss any cases for opcode conversion?
    if (NewOpc == Opc)
      return false;
    Opc = NewOpc;
  }
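
  // Illustrative example (not part of the original source): an ADDSWrr whose
  // NZCV def is dead is retried here as a plain ADDWrr, so a "mul + adds"
  // pair whose flags are unused can still be combined into "madd".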
  switch (Opc) {
  default:
    break;
  case AArch64::ADDWrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "ADDWrr does not have register operands");
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBWrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
      Found = true;
    }
    break;
  case AArch64::ADDXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
      Found = true;
    }
    break;
  }
  return Found;
}
/// Floating-Point Support

/// Find instructions that can be turned into madd.
static bool getFMAPatterns(MachineInstr &Root,
                           SmallVectorImpl<MachineCombinerPattern> &Patterns) {

  if (!isCombineInstrCandidateFP(Root))
    return false;

  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  switch (Root.getOpcode()) {
  default:
    assert(false && "Unsupported FP instruction in combiner\n");
    break;
  case AArch64::FADDSrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "FADDSrr does not have register operands");
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULADDS_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv1i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULADDS_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv1i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP2);
      Found = true;
    }
    break;
  case AArch64::FADDDrr:
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULADDD_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv1i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULADDD_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv1i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP2);
      Found = true;
    }
    break;
  case AArch64::FADDv2f32:
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv2i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv2f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv2i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv2f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP2);
      Found = true;
    }
    break;
  case AArch64::FADDv2f64:
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv2i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv2f64)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv2i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv2f64)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP2);
      Found = true;
    }
    break;
  case AArch64::FADDv4f32:
    if (canCombineWithFMUL(MBB, Root.getOperand(1),
                           AArch64::FMULv4i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP1);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
                                  AArch64::FMULv4f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv4i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv4f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP2);
      Found = true;
    }
    break;
  case AArch64::FSUBSrr:
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv1i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv1i32_indexed_OP2);
      Found = true;
    }
    break;
  case AArch64::FSUBDrr:
    if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP1);
      Found = true;
    }
    if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
      Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv1i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv1i64_indexed_OP2);
      Found = true;
    }
    break;
  case AArch64::FSUBv2f32:
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv2i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2i32_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv2f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2f32_OP2);
      Found = true;
    }
    break;
  case AArch64::FSUBv2f64:
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv2i64_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2i64_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv2f64)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv2f64_OP2);
      Found = true;
    }
    break;
  case AArch64::FSUBv4f32:
    if (canCombineWithFMUL(MBB, Root.getOperand(2),
                           AArch64::FMULv4i32_indexed)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv4i32_indexed_OP2);
      Found = true;
    } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
                                  AArch64::FMULv4f32)) {
      Patterns.push_back(MachineCombinerPattern::FMLSv4f32_OP2);
      Found = true;
    }
    break;
  }
  return Found;
}
/// Return true when a code sequence can improve throughput. It
/// should be called only for instructions in loops.
/// \param Pattern - combiner pattern
bool
AArch64InstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  switch (Pattern) {
  default:
    break;
  case MachineCombinerPattern::FMULADDS_OP1:
  case MachineCombinerPattern::FMULADDS_OP2:
  case MachineCombinerPattern::FMULSUBS_OP1:
  case MachineCombinerPattern::FMULSUBS_OP2:
  case MachineCombinerPattern::FMULADDD_OP1:
  case MachineCombinerPattern::FMULADDD_OP2:
  case MachineCombinerPattern::FMULSUBD_OP1:
  case MachineCombinerPattern::FMULSUBD_OP2:
  case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
  case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f32_OP2:
  case MachineCombinerPattern::FMLAv2f32_OP1:
  case MachineCombinerPattern::FMLAv2f64_OP1:
  case MachineCombinerPattern::FMLAv2f64_OP2:
  case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
  case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
  case MachineCombinerPattern::FMLAv4f32_OP1:
  case MachineCombinerPattern::FMLAv4f32_OP2:
  case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
  case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
  case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
  case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
  case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
  case MachineCombinerPattern::FMLSv2f32_OP2:
  case MachineCombinerPattern::FMLSv2f64_OP2:
  case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
  case MachineCombinerPattern::FMLSv4f32_OP2:
    return true;
  } // end switch (Pattern)
  return false;
}
/// Return true when there is potentially a faster code sequence for an
/// instruction chain ending in \p Root. All potential patterns are listed in
/// the \p Pattern vector. Pattern should be sorted in priority order since the
/// pattern evaluator stops checking as soon as it finds a faster sequence.

bool AArch64InstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  // Integer patterns
  if (getMaddPatterns(Root, Patterns))
    return true;
  // Floating point patterns
  if (getFMAPatterns(Root, Patterns))
    return true;

  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
}
enum class FMAInstKind { Default, Indexed, Accumulator };
/// genFusedMultiply - Generate fused multiply instructions.
/// This function supports both integer and floating point instructions.
/// A typical example:
///  F|MUL I=A,B
///  F|ADD R,I,C
///  ==> F|MADD R,A,B,C
/// \param Root is the F|ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the F|MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the f|madd instruction
static MachineInstr *
genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
                 const TargetInstrInfo *TII, MachineInstr &Root,
                 SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
                 unsigned MaddOpc, const TargetRegisterClass *RC,
                 FMAInstKind kind = FMAInstKind::Default) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();
  unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
  bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
    MRI.constrainRegClass(SrcReg2, RC);

  MachineInstrBuilder MIB;
  if (kind == FMAInstKind::Default)
    MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
              .addReg(SrcReg0, getKillRegState(Src0IsKill))
              .addReg(SrcReg1, getKillRegState(Src1IsKill))
              .addReg(SrcReg2, getKillRegState(Src2IsKill));
  else if (kind == FMAInstKind::Indexed)
    MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
              .addReg(SrcReg2, getKillRegState(Src2IsKill))
              .addReg(SrcReg0, getKillRegState(Src0IsKill))
              .addReg(SrcReg1, getKillRegState(Src1IsKill))
              .addImm(MUL->getOperand(3).getImm());
  else if (kind == FMAInstKind::Accumulator)
    MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
              .addReg(SrcReg2, getKillRegState(Src2IsKill))
              .addReg(SrcReg0, getKillRegState(Src0IsKill))
              .addReg(SrcReg1, getKillRegState(Src1IsKill));
  else
    assert(false && "Invalid FMA instruction kind \n");
  // Insert the MADD (MADD, FMA, FMS, FMLA, FMLS)
  InsInstrs.push_back(MIB);
  return MIB;
}
/// genMaddR - Generate madd instruction and combine mul and add using
/// an extra virtual register
/// Example - an ADD intermediate needs to be stored in a register:
///   MUL I=A,B,0
///   ADD R,I,Imm
///   ==> ORR  V, ZR, Imm
///   ==> MADD R,A,B,V
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
                              const TargetInstrInfo *TII, MachineInstr &Root,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              unsigned IdxMulOpd, unsigned MaddOpc,
                              unsigned VR, const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(VR))
    MRI.constrainRegClass(VR, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(VR);
  // Insert the MADD
  InsInstrs.push_back(MIB);
  return MIB;
}
/// When getMachineCombinerPatterns() finds potential patterns,
/// this function generates the instructions that could replace the
/// original code sequence.
void AArch64InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  MachineInstr *MUL;
  const TargetRegisterClass *RC;
  unsigned Opc;
  switch (Pattern) {
  default:
    // Reassociate instructions.
    TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
                                                DelInstrs, InstrIdxForVirtReg);
    return;
  case MachineCombinerPattern::MULADDW_OP1:
  case MachineCombinerPattern::MULADDX_OP1:
    // MUL I=A,B,0
    // ADD R,I,C
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::MULADDW_OP2:
  case MachineCombinerPattern::MULADDX_OP2:
    // MUL I=A,B,0
    // ADD R,C,I
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MULADDWI_OP1:
  case MachineCombinerPattern::MULADDXI_OP1: {
    // MUL I=A,B,0
    // ADD R,I,Imm
    // ==> ORR  V, ZR, Imm
    // ==> MADD R,A,B,V
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    uint64_t Imm = Root.getOperand(2).getImm();

    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = SignExtend64(Imm, BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  case MachineCombinerPattern::MULSUBW_OP1:
  case MachineCombinerPattern::MULSUBX_OP1: {
    // MUL I=A,B,0
    // SUB R,I,C
    // ==> SUB  V, 0, C
    // ==> MADD R,A,B,V // = -C + A*B
    // --- Create(MADD);
    const TargetRegisterClass *SubRC;
    unsigned SubOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
      SubOpc = AArch64::SUBWrr;
      SubRC = &AArch64::GPR32spRegClass;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      SubOpc = AArch64::SUBXrr;
      SubRC = &AArch64::GPR64spRegClass;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(SubRC);
    // SUB NewVR, 0, C
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
            .addReg(ZeroReg)
            .addOperand(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    break;
  }
  case MachineCombinerPattern::MULSUBW_OP2:
  case MachineCombinerPattern::MULSUBX_OP2:
    // MUL I=A,B,0
    // SUB R,C,I
    // ==> MSUB R,A,B,C (computes C - A*B)
    // --- Create(MSUB);
    if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
      Opc = AArch64::MSUBWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MSUBXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MULSUBWI_OP1:
  case MachineCombinerPattern::MULSUBXI_OP1: {
    // MUL I=A,B,0
    // SUB R,I,Imm
    // ==> ORR  V, ZR, -Imm
    // ==> MADD R,A,B,V // = -Imm + A*B
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    uint64_t Imm = Root.getOperand(2).getImm();
    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = SignExtend64(-Imm, BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  // Floating Point Support
  case MachineCombinerPattern::FMULADDS_OP1:
  case MachineCombinerPattern::FMULADDD_OP1:
    // FMUL I=A,B,0
    // FADD R,I,C
    // ==> FMADD R,A,B,C
    // --- Create(FMADD);
    if (Pattern == MachineCombinerPattern::FMULADDS_OP1) {
      Opc = AArch64::FMADDSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMADDDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::FMULADDS_OP2:
  case MachineCombinerPattern::FMULADDD_OP2:
    // FMUL I=A,B,0
    // FADD R,C,I
    // ==> FMADD R,A,B,C
    // --- Create(FMADD);
    if (Pattern == MachineCombinerPattern::FMULADDS_OP2) {
      Opc = AArch64::FMADDSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMADDDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
    Opc = AArch64::FMLAv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
    Opc = AArch64::FMLAv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
    Opc = AArch64::FMLAv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
    Opc = AArch64::FMLAv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;
  case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv2f32_OP1:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP1) {
      Opc = AArch64::FMLAv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f32_OP2:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP2) {
      Opc = AArch64::FMLAv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
  case MachineCombinerPattern::FMLAv2f64_OP1:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP1) {
      Opc = AArch64::FMLAv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
  case MachineCombinerPattern::FMLAv2f64_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP2) {
      Opc = AArch64::FMLAv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
  case MachineCombinerPattern::FMLAv4f32_OP1:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP1) {
      Opc = AArch64::FMLAv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
  case MachineCombinerPattern::FMLAv4f32_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP2) {
      Opc = AArch64::FMLAv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLAv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  case MachineCombinerPattern::FMULSUBS_OP1:
  case MachineCombinerPattern::FMULSUBD_OP1: {
    // FMUL I=A,B,0
    // FSUB R,I,C
    // ==> FNMSUB R,A,B,C // = -C + A*B
    // --- Create(FNMSUB);
    if (Pattern == MachineCombinerPattern::FMULSUBS_OP1) {
      Opc = AArch64::FNMSUBSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FNMSUBDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  }
  case MachineCombinerPattern::FMULSUBS_OP2:
  case MachineCombinerPattern::FMULSUBD_OP2: {
    // FMUL I=A,B,0
    // FSUB R,C,I
    // ==> FMSUB R,A,B,C (computes C - A*B)
    // --- Create(FMSUB);
    if (Pattern == MachineCombinerPattern::FMULSUBS_OP2) {
      Opc = AArch64::FMSUBSrrr;
      RC = &AArch64::FPR32RegClass;
    } else {
      Opc = AArch64::FMSUBDrrr;
      RC = &AArch64::FPR64RegClass;
    }
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  }

  case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
    Opc = AArch64::FMLSv1i32_indexed;
    RC = &AArch64::FPR32RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;

  case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
    Opc = AArch64::FMLSv1i64_indexed;
    RC = &AArch64::FPR64RegClass;
    MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                           FMAInstKind::Indexed);
    break;
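
  // The vector FMLS cases below fold a multiply whose result feeds the
  // subtrahend of an fsub into a single multiply-subtract. Illustrative,
  // with made-up registers:
  //   fmul v0.2s, v1.2s, v2.2s
  //   fsub v3.2s, v3.2s, v0.2s
  // becomes
  //   fmls v3.2s, v1.2s, v2.2s   // lanewise v3 -= v1*v2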
  case MachineCombinerPattern::FMLSv2f32_OP2:
  case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
    RC = &AArch64::FPR64RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP2) {
      Opc = AArch64::FMLSv2i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv2f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLSv2f64_OP2:
  case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP2) {
      Opc = AArch64::FMLSv2i64_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv2f64;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;

  case MachineCombinerPattern::FMLSv4f32_OP2:
  case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
    RC = &AArch64::FPR128RegClass;
    if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP2) {
      Opc = AArch64::FMLSv4i32_indexed;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Indexed);
    } else {
      Opc = AArch64::FMLSv4f32;
      MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
                             FMAInstKind::Accumulator);
    }
    break;
  } // end switch (Pattern)
  // Record MUL and ADD/SUB for deletion.
  DelInstrs.push_back(MUL);
  DelInstrs.push_back(&Root);
}

/// \brief Replace a csinc-branch sequence by a simple conditional branch.
///
/// Examples:
/// 1. \code
///      csinc w9, wzr, wzr, <condition code>
///      tbnz  w9, #0, 0x44
///    \endcode
///    becomes \code b.<inverted condition code> \endcode
/// 2. \code
///      csinc w9, wzr, wzr, <condition code>
///      tbz   w9, #0, 0x44
///    \endcode
///    becomes \code b.<condition code> \endcode
///
/// Also replace a compare-and-branch sequence by a TBZ/TBNZ instruction when
/// the compare's constant operand is a power of 2, e.g. \code
///   and  w8, w8, #0x400
///   cbnz w8, L1
/// \endcode
/// becomes \code tbnz w8, #10, L1 \endcode
///
/// \param MI Conditional Branch
/// \return True when the simple conditional branch is generated.
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
  bool IsNegativeBranch = false;
  bool IsTestAndBranch = false;
  unsigned TargetBBInMI = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    return false;
  case AArch64::CBZW:
  case AArch64::CBZX:
    TargetBBInMI = 1;
    break;
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    TargetBBInMI = 1;
    IsNegativeBranch = true;
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
    TargetBBInMI = 2;
    IsTestAndBranch = true;
    break;
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    TargetBBInMI = 2;
    IsNegativeBranch = true;
    IsTestAndBranch = true;
    break;
  }
  // So we increment a zero register and test for bits other
  // than bit 0? Conservatively bail out in case the verifier
  // missed this case.
  if (IsTestAndBranch && MI.getOperand(1).getImm())
    return false;
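
  // At this point TargetBBInMI indexes the branch-target operand of MI:
  // operand 1 for CBZ/CBNZ, operand 2 for TBZ/TBNZ (whose operand 1 is the
  // tested bit number).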

  // Find Definition.
  assert(MI.getParent() && "Incomplete machine instruction\n");
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  unsigned VReg = MI.getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(VReg);

  // Look through COPY instructions to find definition.
  while (DefMI->isCopy()) {
    unsigned CopyVReg = DefMI->getOperand(1).getReg();
    if (!MRI->hasOneNonDBGUse(CopyVReg))
      return false;
    if (!MRI->hasOneDef(CopyVReg))
      return false;
    DefMI = MRI->getVRegDef(CopyVReg);
  }

  switch (DefMI->getOpcode()) {
  default:
    return false;
  // Fold AND into a TBZ/TBNZ if constant operand is power of 2.
  case AArch64::ANDWri:
  case AArch64::ANDXri: {
    if (IsTestAndBranch)
      return false;
    if (DefMI->getParent() != MBB)
      return false;
    if (!MRI->hasOneNonDBGUse(VReg))
      return false;

    bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
    uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
        DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64);
    if (!isPowerOf2_64(Mask))
      return false;

    MachineOperand &MO = DefMI->getOperand(1);
    unsigned NewReg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(NewReg))
      return false;

    assert(!MRI->def_empty(NewReg) && "Register must be defined.");

    MachineBasicBlock &RefToMBB = *MBB;
    MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
    DebugLoc DL = MI.getDebugLoc();
    unsigned Imm = Log2_64(Mask);
    unsigned Opc = (Imm < 32)
                       ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
                       : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
    MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc))
                              .addReg(NewReg)
                              .addImm(Imm)
                              .addMBB(TBB);
    // Register lives on into the new TBZ/TBNZ now.
    MO.setIsKill(false);

    // For an immediate smaller than 32, we need to use the 32-bit
    // variant (W) in all cases, because the 64-bit variant cannot
    // encode it. Therefore, if the input register is 64-bit, take its
    // 32-bit sub-register.
    if (!Is32Bit && Imm < 32)
      NewMI->getOperand(0).setSubReg(AArch64::sub_32);
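    // Illustrative: "and x8, x9, #0x10 ; cbnz x8, <bb>" becomes
    // "tbnz w9, #4, <bb>"; bit 4 is only encodable by the W form, so the
    // 64-bit input is accessed through its 32-bit sub-register.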
    MI.eraseFromParent();
    return true;
  }
  // Look for CSINC.
  case AArch64::CSINCWr:
  case AArch64::CSINCXr: {
    if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
          DefMI->getOperand(2).getReg() == AArch64::WZR) &&
        !(DefMI->getOperand(1).getReg() == AArch64::XZR &&
          DefMI->getOperand(2).getReg() == AArch64::XZR))
      return false;
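
    // I.e. only a CSINC whose sources are both the zero register (the
    // CSET-style idiom that materializes 0/1 from the flags) can be folded
    // into a plain conditional branch.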

    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
      return false;

    AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
    // Convert only when the condition code is not modified between
    // the CSINC and the branch. The CC may be used by other
    // instructions in between.
    if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write))
      return false;
    MachineBasicBlock &RefToMBB = *MBB;
    MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
    DebugLoc DL = MI.getDebugLoc();
    if (IsNegativeBranch)
      CC = AArch64CC::getInvertedCondCode(CC);
    BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
    MI.eraseFromParent();
    return true;
  }
  }
}

std::pair<unsigned, unsigned>
AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = AArch64II::MO_FRAGMENT;
  return std::make_pair(TF & Mask, TF & ~Mask);
}
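
// Illustrative: a flag word of MO_PAGEOFF | MO_NC decomposes into the direct
// part MO_PAGEOFF (selected by the MO_FRAGMENT mask, whose values are
// mutually exclusive) and the bitmask part MO_NC, matching the two
// serialization tables below.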

ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace AArch64II;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PAGE, "aarch64-page"},
      {MO_PAGEOFF, "aarch64-pageoff"},
      {MO_G3, "aarch64-g3"},
      {MO_G2, "aarch64-g2"},
      {MO_G1, "aarch64-g1"},
      {MO_G0, "aarch64-g0"},
      {MO_HI12, "aarch64-hi12"}};
  return makeArrayRef(TargetFlags);
}
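
// These are the names the MIR serializer uses for the fragment part, e.g.
// (roughly) "target-flags(aarch64-page) @global" on an ADRP operand.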

ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace AArch64II;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_GOT, "aarch64-got"},
      {MO_NC, "aarch64-nc"},
      {MO_TLS, "aarch64-tls"}};
  return makeArrayRef(TargetFlags);
}
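
// Bitmask flags may be combined with a direct flag, e.g. MO_GOT | MO_NC
// would print (roughly) as "target-flags(aarch64-got, aarch64-nc)".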