//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(STI.getTargetTriple()), Subtarget(STI) {}
/// GetInstSizeInBytes - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI->getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);

  const MCInstrDesc &Desc = MI->getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    return 4;

  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    return 0;

  case AArch64::TLSDESC_CALLSEQ:
    // This gets lowered to an instruction sequence which takes 16 bytes.
    return 16;
  }

  llvm_unreachable("GetInstSizeInBytes() - Unable to determine insn size");
}
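// The branch-analysis hooks below share one encoding for the Cond operand
// vector (see parseCondBranch / instantiateCondBranch):
//   - plain Bcc:               Cond = { condition-code immediate }
//   - folded compare/branch:   Cond = { -1, opcode, source register }
//                              plus, for tbz/tbnz, a trailing bit number.
// The leading -1 sentinel is how the other hooks tell the two forms apart.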
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
    break;
  }
}
bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
bool AArch64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc.
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}
unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc.
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch.
    // Note that we use addOperand instead of addReg to keep the flags.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addOperand(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}
unsigned AArch64InstrInfo::InsertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);
    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
  return 2;
}
// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}
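// A typical fold performed with the help of canFoldIntoCSel below
// (illustrative only):
//   %x = ADDWri %y, 1, 0          ; %x = %y + 1
//   %d = CSELWr %x, %f, <cc>
// becomes
//   %d = CSINCWr %f, %y, <!cc>    ; %d = cc ? %y + 1 : %f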
// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // If NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
  // fall-through to ADDXri and ADDWri.
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // If NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
  // fall-through to SUBXrr and SUBWrr.
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}
bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, ArrayRef<MachineOperand> Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}
void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I, DebugLoc DL,
                                    unsigned DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // Bcc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = 0;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = 1;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = 0;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = 1;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
      CC);
}
/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
static bool canBeExpandedToORR(const MachineInstr *MI, unsigned BitSize) {
  uint64_t Imm = MI->getOperand(1).getImm();
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
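  // The shift pair above zero-extends the low BitSize bits of Imm into UImm,
  // discarding any sign-extension in the pseudo's immediate. For example
  // (illustrative), the 32-bit value 0x00ff00ff survives the round-trip and
  // is a valid logical immediate, so MOVi32imm #0x00ff00ff can be emitted as
  // an orr from wzr.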
  uint64_t Encoding;
  return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
}
// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
  if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53())
    return MI->isAsCheapAsAMove();

  switch (MI->getOpcode()) {
  default:
    return false;

  // add/sub on register without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (MI->getOperand(3).getImm() == 0);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;

  // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
  // ORRXri, it is as cheap as MOV.
  case AArch64::MOVi32imm:
    return canBeExpandedToORR(MI, 32);
  case AArch64::MOVi64imm:
    return canBeExpandedToORR(MI, 64);
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}
bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case; these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}
bool
AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
                                                  MachineInstr *MIb,
                                                  AliasAnalysis *AA) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned BaseRegA = 0, BaseRegB = 0;
  int OffsetA = 0, OffsetB = 0;
  int WidthA = 0, WidthB = 0;

  assert(MIa && MIa->mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb && MIb->mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
      MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
  // the base registers are identical, and the offset of the lower memory
  // access plus its width does not reach the offset of the higher memory
  // access, then the memory accesses are disjoint.
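  // For instance (illustrative), "str x1, [x0]" and "ldr x2, [x0, #8]" share
  // the base x0, and 0 + 8 <= 8, so the two accesses cannot overlap.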
  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
    if (BaseRegA == BaseRegB) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: In order to convert CmpValue to 0 or 1
    CmpValue = (MI->getOperand(2).getImm() != 0);
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: The return value type of decodeLogicalImmediate is uint64_t,
    // while the type of CmpValue is int. When converting uint64_t to int,
    // the high 32 bits of uint64_t will be lost.
    // In fact it causes a bug in spec2006-483.xalancbmk.
    // CmpValue is only used to compare with zero in OptimizeCompareInstr.
    CmpValue = (AArch64_AM::decodeLogicalImmediate(
                    MI->getOperand(2).getImm(),
                    MI->getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0);
    return true;
  }

  return false;
}
static bool UpdateOperandRegClass(MachineInstr *Instr) {
  MachineBasicBlock *MBB = Instr->getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr->getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr->getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}
/// \brief Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible to do the actual
/// substitution and legality checking.
static unsigned convertFlagSettingOpcode(const MachineInstr *MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
  bool MIDefinesZeroReg = false;
  if (MI->definesRegister(AArch64::WZR) || MI->definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI->getOpcode()) {
  default:
    return MI->getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}
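// For example (illustrative), "adds w1, w2, #1" whose NZCV result is dead can
// be rewritten as "add w1, w2, #1", but "subs wzr, w2, #1" (i.e. "cmp w2, #1")
// must keep the flag-setting form: in the non-S encoding, register number 31
// in the destination slot means wsp rather than wzr.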
/// True when the condition code could be modified on the instruction
/// trace starting at \p From and ending at \p To.
static bool modifiesConditionCode(MachineInstr *From, MachineInstr *To,
                                  const bool CheckOnlyCCWrites,
                                  const TargetRegisterInfo *TRI) {
  // We iterate backward starting \p To until we hit \p From.
  MachineBasicBlock::iterator I = To, E = From, B = To->getParent()->begin();

  // Early exit if To is at the beginning of the BB.
  if (I == B)
    return true;

  // Check whether the definition of SrcReg is in the same basic block as
  // Compare. If not, assume the condition code gets modified on some path.
  if (To->getParent() != From->getParent())
    return true;

  // Check that NZCV isn't set on the trace.
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
        (!CheckOnlyCCWrites && Instr.readsRegister(AArch64::NZCV, TRI)))
      // This instruction modifies or uses NZCV after the one we want to
      // change.
      return true;
    if (I == B)
      // We currently don't allow the instruction trace to cross basic
      // block boundaries.
      return true;
  }
  return false;
}
/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (Cmp_NZCV != -1) {
    if (CmpInstr->definesRegister(AArch64::WZR) ||
        CmpInstr->definesRegister(AArch64::XZR)) {
      CmpInstr->eraseFromParent();
      return true;
    }
    unsigned Opc = CmpInstr->getOpcode();
    unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr->setDesc(MCID);
    CmpInstr->RemoveOperand(Cmp_NZCV);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where immediate is zero.
  // FIXME: CmpValue has already been converted to 0 or 1 in analyzeCompare
  // function.
  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
    return false;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  bool CheckOnlyCCWrites = false;
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (modifiesConditionCode(MI, CmpInstr, CheckOnlyCCWrites, TRI))
    return false;
  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    break;
  case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
  case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
  case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
  case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
  case AArch64::ADCWr: NewOpc = AArch64::ADCSWr; break;
  case AArch64::ADCXr: NewOpc = AArch64::ADCSXr; break;
  case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
  case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
  case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
  case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
  case AArch64::SBCWr: NewOpc = AArch64::SBCSWr; break;
  case AArch64::SBCXr: NewOpc = AArch64::SBCSXr; break;
  case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
  case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
  }
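  // The rewrite enabled by the table above (illustrative):
  //   sub w1, w2, w3            subs w1, w2, w3
  //   cmp w1, #0         ==>
  //   b.ne <bb>                 b.ne <bb>
  // Note the subsequent scan rejects consumers whose condition reads the V
  // bit (e.g. b.lt), since the adds/subs may set V differently than cmp-#0.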
  // Scan forward for the use of NZCV.
  // When checking against MI: if a consumer's condition code requires
  // checking of the V bit, then it is not safe to do this optimization.
  // It is safe to remove CmpInstr if NZCV is redefined or killed.
  // If we are done with the basic block, we need to check whether NZCV is
  // live-out.
  bool IsSafe = false;
  for (MachineBasicBlock::iterator I = CmpInstr,
                                   E = CmpInstr->getParent()->end();
       !IsSafe && ++I != E;) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
         ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
        IsSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
        continue;
      if (MO.isDef()) {
        IsSafe = true;
        break;
      }

      // Decode the condition code.
      unsigned Opc = Instr.getOpcode();
      AArch64CC::CondCode CC;
      switch (Opc) {
      default:
        return false;
      case AArch64::Bcc:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
        break;
      case AArch64::CSINVWr:
      case AArch64::CSINVXr:
      case AArch64::CSINCWr:
      case AArch64::CSINCXr:
      case AArch64::CSELWr:
      case AArch64::CSELXr:
      case AArch64::CSNEGWr:
      case AArch64::CSNEGXr:
      case AArch64::FCSELSrrr:
      case AArch64::FCSELDrrr:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
        break;
      }

      // It is not safe to remove the Compare instruction if Overflow (V) is
      // used.
      switch (CC) {
      default:
        // NZCV can be used multiple times, we should continue.
        break;
      case AArch64CC::VS:
      case AArch64CC::VC:
      case AArch64CC::GE:
      case AArch64CC::LT:
      case AArch64CC::GT:
      case AArch64CC::LE:
        return false;
      }
    }
  }

  // If NZCV is not killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!IsSafe) {
    MachineBasicBlock *ParentBlock = CmpInstr->getParent();
    for (auto *MBB : ParentBlock->successors())
      if (MBB->isLiveIn(AArch64::NZCV))
        return false;
  }

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr->eraseFromParent();
  bool succeeded = UpdateOperandRegClass(MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}
bool
AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Reg = MI->getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI->memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI->memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}
/// Return true if this instruction has a shifted-register operand with a
/// non-zero shift amount.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::CRC32Brr:
  case AArch64::CRC32CBrr:
  case AArch64::CRC32CHrr:
  case AArch64::CRC32CWrr:
  case AArch64::CRC32CXrr:
  case AArch64::CRC32Hrr:
  case AArch64::CRC32Wrr:
  case AArch64::CRC32Xrr:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}
/// Return true if this instruction has an extended-register operand with a
/// non-zero extend/shift immediate.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }

  return false;
}
// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 3 &&
             MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI->getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI->getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI->getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}
// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs.
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI->getOperand(1).getReg() == AArch64::XZR) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI->getOperand(2).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}
// Return true if this instruction simply renames a floating-point register
// without modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b.
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}
unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}
unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}
/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX: {
    unsigned Val = MI->getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  }
  return false;
}
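// For instance (illustrative), "ldr x0, [x1, x2, lsl #3]" and
// "ldr x0, [x1, w2, sxtw]" are scaled/extended register-offset accesses,
// while "ldr x0, [x1, x2]" is not.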
/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  for (auto *MM : MI->memoperands()) {
    if (MM->getFlags() &
        (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
      return true;
    }
  }
  return false;
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
  if (MI->memoperands_empty())
    return;

  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  (*MI->memoperands_begin())
      ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}

bool
AArch64InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                        unsigned &Offset,
                                        const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui: {
    if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
      return false;
    BaseReg = LdSt->getOperand(1).getReg();
    MachineFunction &MF = *LdSt->getParent()->getParent();
    unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
    Offset = LdSt->getOperand(2).getImm() * Width;
    return true;
  }
  }
}
bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
    MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
    const TargetRegisterInfo *TRI) const {
  // Handle only loads/stores with base register followed by immediate offset.
  if (LdSt->getNumOperands() != 3)
    return false;
  if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
    return false;

  // Offset is calculated as the immediate operand multiplied by the scaling
  // factor. Unscaled instructions have scaling factor set to 1.
  int Scale = 0;
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    Width = 16;
    Scale = 1;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    Width = 8;
    Scale = 1;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    Width = 4;
    Scale = 1;
    break;
  case AArch64::LDURHi:
  case AArch64::LDURHHi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::STURHi:
  case AArch64::STURHHi:
    Width = 2;
    Scale = 1;
    break;
  case AArch64::LDURBi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::STURBi:
  case AArch64::STURBBi:
    Width = 1;
    Scale = 1;
    break;
  case AArch64::LDRQui:
  case AArch64::STRQui:
    Scale = Width = 16;
    break;
  case AArch64::LDRXui:
  case AArch64::LDRDui:
  case AArch64::STRXui:
  case AArch64::STRDui:
    Scale = Width = 8;
    break;
  case AArch64::LDRWui:
  case AArch64::LDRSui:
  case AArch64::STRWui:
  case AArch64::STRSui:
    Scale = Width = 4;
    break;
  case AArch64::LDRHui:
  case AArch64::LDRHHui:
  case AArch64::STRHui:
  case AArch64::STRHHui:
    Scale = Width = 2;
    break;
  case AArch64::LDRBui:
  case AArch64::LDRBBui:
  case AArch64::STRBui:
  case AArch64::STRBBui:
    Scale = Width = 1;
    break;
  }

  BaseReg = LdSt->getOperand(1).getReg();
  Offset = LdSt->getOperand(2).getImm() * Scale;
  return true;
}
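// For a scaled access such as "ldr x0, [x1, #16]" (LDRXui with immediate 2,
// illustrative), the code above reports BaseReg = x1, Width = 8 and
// Offset = 2 * 8 = 16 bytes.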
/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                          MachineInstr *SecondLdSt,
                                          unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;
  if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
    return false;
  // getMemOpBaseRegImmOfs guarantees that operand 2 isImm.
  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
  // Allow 6 bits of positive range.
  if (Ofs1 > 64)
    return false;
  // The caller should already have ordered First/SecondLdSt by offset.
  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
  return Ofs1 + 1 == Ofs2;
}
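// Note that the offsets compared above are in scaled units: two LDRXui loads
// from [x0, #8] and [x0, #16] carry immediates 1 and 2 (illustrative), so
// they count as adjacent and may later be combined into a single ldp.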
bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                              MachineInstr *Second) const {
  if (Subtarget.isCyclone()) {
    // Cyclone can fuse CMN, CMP, TST followed by Bcc.
    unsigned SecondOpcode = Second->getOpcode();
    if (SecondOpcode == AArch64::Bcc) {
      switch (First->getOpcode()) {
      default:
        return false;
      case AArch64::SUBSWri:
      case AArch64::ADDSWri:
      case AArch64::ANDSWri:
      case AArch64::SUBSXri:
      case AArch64::ADDSXri:
      case AArch64::ANDSXri:
        return true;
      }
    }
    // Cyclone B0 also supports ALU operations followed by CBZ/CBNZ.
    if (SecondOpcode == AArch64::CBNZW || SecondOpcode == AArch64::CBNZX ||
        SecondOpcode == AArch64::CBZW || SecondOpcode == AArch64::CBZX) {
      switch (First->getOpcode()) {
      default:
        return false;
      case AArch64::ADDWri:
      case AArch64::ADDXri:
      case AArch64::ANDWri:
      case AArch64::ANDXri:
      case AArch64::EORWri:
      case AArch64::EORXri:
      case AArch64::ORRWri:
      case AArch64::ORRXri:
      case AArch64::SUBWri:
      case AArch64::SUBXri:
        return true;
      }
    }
  }
  return false;
}
MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
    MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
    const MDNode *Expr, DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(Var)
                                .addMetadata(Expr);
  return &*MIB;
}
static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}
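// For example (illustrative), copying the triple D1_D2_D3 from D0_D1_D2
// forwards would overwrite D1 before it is read as a source, so
// copyPhysRegTuple below copies the sub-registers in reverse order instead.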
void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    llvm::ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }
  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }
  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }
  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }
  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS))
        .addReg(DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}
void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(SrcReg, getKillRegState(isKill))
                                     .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(DestReg, getDefRegState(true))
                                     .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
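
// Note: as with the stores above, the LD1/ST1 tuple spills and fills take no
// immediate offset operand; isAArch64FrameOffsetLegal() below therefore
// reports AArch64FrameOffsetCannotUpdate for these opcodes.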
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24 bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
  // register can be loaded with offset%8 and the add/sub can use an extending
  // instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  //  assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}
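
// For illustration: with SetNZCV == false and Offset == 0x123456, the loop
// above peels off the high 12-bit chunk and the final BuildMI emits the
// remainder, producing:
//   add xD, xS, #0x123, lsl #12
//   add xD, xD, #0x456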
MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI->isCopy()) {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Cannot fold.
  return nullptr;
}
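
// Note: returning nullptr simply declines the fold; generic code in
// TargetInstrInfo then falls back to an explicit spill or reload instead of
// folding the frame index into this instruction.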
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // The ImmIdx should be changed case by case if it is not 2.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unhandled opcode in rewriteAArch64FrameIndex");
  // Vector spills/fills can't take an immediate offset.
  case AArch64::LD1Twov2d:
  case AArch64::LD1Threev2d:
  case AArch64::LD1Fourv2d:
  case AArch64::LD1Twov1d:
  case AArch64::LD1Threev1d:
  case AArch64::LD1Fourv1d:
  case AArch64::ST1Twov2d:
  case AArch64::ST1Threev2d:
  case AArch64::ST1Fourv2d:
  case AArch64::ST1Twov1d:
  case AArch64::ST1Threev1d:
  case AArch64::ST1Fourv1d:
    return AArch64FrameOffsetCannotUpdate;
  case AArch64::PRFMui:
    Scale = 8;
    UnscaledOp = AArch64::PRFUMi;
    break;
  case AArch64::LDRXui:
    Scale = 8;
    UnscaledOp = AArch64::LDURXi;
    break;
  case AArch64::LDRWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURWi;
    break;
  case AArch64::LDRBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBi;
    break;
  case AArch64::LDRHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHi;
    break;
  case AArch64::LDRSui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSi;
    break;
  case AArch64::LDRDui:
    Scale = 8;
    UnscaledOp = AArch64::LDURDi;
    break;
  case AArch64::LDRQui:
    Scale = 16;
    UnscaledOp = AArch64::LDURQi;
    break;
  case AArch64::LDRBBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBBi;
    break;
  case AArch64::LDRHHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHHi;
    break;
  case AArch64::LDRSBXui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBXi;
    break;
  case AArch64::LDRSBWui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBWi;
    break;
  case AArch64::LDRSHXui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHXi;
    break;
  case AArch64::LDRSHWui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHWi;
    break;
  case AArch64::LDRSWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSWi;
    break;

  case AArch64::STRXui:
    Scale = 8;
    UnscaledOp = AArch64::STURXi;
    break;
  case AArch64::STRWui:
    Scale = 4;
    UnscaledOp = AArch64::STURWi;
    break;
  case AArch64::STRBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBi;
    break;
  case AArch64::STRHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHi;
    break;
  case AArch64::STRSui:
    Scale = 4;
    UnscaledOp = AArch64::STURSi;
    break;
  case AArch64::STRDui:
    Scale = 8;
    UnscaledOp = AArch64::STURDi;
    break;
  case AArch64::STRQui:
    Scale = 16;
    UnscaledOp = AArch64::STURQi;
    break;
  case AArch64::STRBBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBBi;
    break;
  case AArch64::STRHHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHHi;
    break;
  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
  case AArch64::LDNPXi:
  case AArch64::LDNPDi:
  case AArch64::STNPXi:
  case AArch64::STNPDi:
    ImmIdx = 3;
    IsSigned = true;
    Scale = 8;
    break;
  case AArch64::LDPQi:
  case AArch64::STPQi:
  case AArch64::LDNPQi:
  case AArch64::STNPQi:
    ImmIdx = 3;
    IsSigned = true;
    Scale = 16;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
  case AArch64::LDNPWi:
  case AArch64::LDNPSi:
  case AArch64::STNPWi:
  case AArch64::STNPSi:
    ImmIdx = 3;
    IsSigned = true;
    Scale = 4;
    break;

  case AArch64::LDURXi:
  case AArch64::LDURWi:
  case AArch64::LDURBi:
  case AArch64::LDURHi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSWi:
  case AArch64::STURXi:
  case AArch64::STURWi:
  case AArch64::STURBi:
  case AArch64::STURHi:
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
    Scale = 1;
    break;
  }

  Offset += MI.getOperand(ImmIdx).getImm() * Scale;
  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset (and have an unscaled op to use).
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Use an unscaled addressing mode if the instruction has a negative offset
  // (or if the instruction is already using an unscaled addressing mode).
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    // ldur/stur instructions: signed 9-bit byte offset.
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    // Scaled ldr/str instructions: unsigned 12-bit offset.
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return AArch64FrameOffsetCanUpdate |
         (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}
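
// For illustration: LDRXui scales its unsigned 12-bit immediate by 8, so a
// byte offset of 32760 (4095 * 8) is directly encodable, while a byte offset
// of 4 is misaligned for the scale and gets rewritten to the unscaled
// LDURXi, whose signed 9-bit byte offset covers it.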
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, int &Offset,
                                    const AArch64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
                                         &UnscaledOp, &NewOffset);
  if (Status & AArch64FrameOffsetCanUpdate) {
    if (Status & AArch64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return true;
  }

  return false;
}
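
// Note: Offset is an in/out parameter. When the full offset cannot be
// encoded in the instruction, the unencodable remainder (in bytes) is passed
// back to the caller, which is expected to materialize it separately, e.g.
// with emitFrameOffset() through a scratch register.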
void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(AArch64::HINT);
  NopInst.addOperand(MCOperand::createImm(0));
}
/// useMachineCombiner - return true when a target supports MachineCombiner
bool AArch64InstrInfo::useMachineCombiner() const {
  // AArch64 supports the MachineCombiner.
  return true;
}

// True when Opc sets flag
static bool isCombineInstrSettingFlag(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}
// 32b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate32(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDWrr:
  case AArch64::ADDWri:
  case AArch64::SUBWrr:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::SUBSWrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBWri:
  case AArch64::SUBSWri:
    return true;
  default:
    break;
  }
  return false;
}
// 64b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate64(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDXrr:
  case AArch64::ADDXri:
  case AArch64::SUBXrr:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSXrr:
  // Note: MSUB Xd,Xn,Xm,Xi -> Xd = Xi - XnxXm, not Xd=XnxXm - Xi.
  case AArch64::SUBXri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}

// Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate(unsigned Opc) {
  return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
}
static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                              unsigned MulOpc, unsigned ZeroReg) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineInstr *MI = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    MI = MRI.getUniqueVRegDef(MO.getReg());
  // And it needs to be in the trace (otherwise, it won't have a depth).
  if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != MulOpc)
    return false;

  assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
         MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
         MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");

  // The third input reg must be zero.
  if (MI->getOperand(3).getReg() != ZeroReg)
    return false;

  // Must only be used by the user we combine with.
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

  return true;
}
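
// Note: the single-use check above is what keeps the combine profitable: if
// the MUL result had other users, the MUL would have to remain live anyway,
// and folding it into a MADD/MSUB would add an instruction rather than
// remove one.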
// TODO: There are many more machine instruction opcodes to match:
//       1. Other data types (integer, vectors)
//       2. Other math / logic operations (xor, or)
//       3. Other forms of the same operation (intrinsics and other variants)
bool AArch64InstrInfo::isAssociativeAndCommutative(
    const MachineInstr &Inst) const {
  switch (Inst.getOpcode()) {
  case AArch64::FADDDrr:
  case AArch64::FADDSrr:
  case AArch64::FADDv2f32:
  case AArch64::FADDv2f64:
  case AArch64::FADDv4f32:
  case AArch64::FMULDrr:
  case AArch64::FMULSrr:
  case AArch64::FMULX32:
  case AArch64::FMULX64:
  case AArch64::FMULXv2f32:
  case AArch64::FMULXv2f64:
  case AArch64::FMULXv4f32:
  case AArch64::FMULv2f32:
  case AArch64::FMULv2f64:
  case AArch64::FMULv4f32:
    return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
  default:
    break;
  }
  return false;
}
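
// Note: the FP opcodes above are only reported as reassociable under
// UnsafeFPMath, since reassociating floating-point adds and multiplies can
// change rounding and therefore the computed result.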
/// Find instructions that can be turned into madd.
static bool getMaddPatterns(MachineInstr &Root,
                            SmallVectorImpl<MachineCombinerPattern> &Patterns) {
  unsigned Opc = Root.getOpcode();
  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  if (!isCombineInstrCandidate(Opc))
    return false;
  if (isCombineInstrSettingFlag(Opc)) {
    int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
    // When NZCV is live bail out.
    if (Cmp_NZCV == -1)
      return false;
    unsigned NewOpc = convertFlagSettingOpcode(&Root);
    // When opcode can't change bail out.
    // CHECKME: do we miss any cases for opcode conversion?
    if (NewOpc == Opc)
      return false;
    Opc = NewOpc;
  }
  switch (Opc) {
  default:
    break;
  case AArch64::ADDWrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "ADDWrr does not have register operands");
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBWrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
      Found = true;
    }
    break;
  case AArch64::ADDXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
      Found = true;
    }
    break;
  }
  return Found;
}
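
// Illustrative example of the OP1 patterns above: given
//   %3 = MADDWrrr %1, %2, %wzr   ; plain MUL (accumulator is WZR)
//   %4 = ADDWrr %3, %0
// MULADDW_OP1 lets the combiner replace the pair with
//   %4 = MADDWrrr %1, %2, %0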
/// Return true when there is potentially a faster code sequence for an
/// instruction chain ending in \p Root. All potential patterns are listed in
/// the \p Pattern vector. Pattern should be sorted in priority order since the
/// pattern evaluator stops checking as soon as it finds a faster sequence.
bool AArch64InstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  if (getMaddPatterns(Root, Patterns))
    return true;

  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
}
/// genMadd - Generate madd instruction and combine mul and add.
/// Example:
///  MUL I=A,B,0
///  ADD R,I,C
///  ==> MADD R,A,B,C
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
static MachineInstr *genMadd(MachineFunction &MF, MachineRegisterInfo &MRI,
                             const TargetInstrInfo *TII, MachineInstr &Root,
                             SmallVectorImpl<MachineInstr *> &InsInstrs,
                             unsigned IdxMulOpd, unsigned MaddOpc,
                             const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();
  unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
  bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
    MRI.constrainRegClass(SrcReg2, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(SrcReg2, getKillRegState(Src2IsKill));
  // Insert the MADD.
  InsInstrs.push_back(MIB);
  return MUL;
}
/// genMaddR - Generate madd instruction and combine mul and add using
/// an extra virtual register
/// Example - an ADD intermediate needs to be stored in a register:
///   MUL I=A,B,0
///   ADD R,I,Imm
///   ==> ORR  V, ZR, Imm
///   ==> MADD R,A,B,V
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
                              const TargetInstrInfo *TII, MachineInstr &Root,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              unsigned IdxMulOpd, unsigned MaddOpc,
                              unsigned VR, const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(VR))
    MRI.constrainRegClass(VR, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(VR);
  // Insert the MADD.
  InsInstrs.push_back(MIB);
  return MUL;
}
/// When getMachineCombinerPatterns() finds potential patterns,
/// this function generates the instructions that could replace the
/// original code sequence
void AArch64InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  MachineInstr *MUL;
  const TargetRegisterClass *RC;
  unsigned Opc;
  switch (Pattern) {
  default:
    // Reassociate instructions.
    TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
                                                DelInstrs, InstrIdxForVirtReg);
    return;
  case MachineCombinerPattern::MULADDW_OP1:
  case MachineCombinerPattern::MULADDX_OP1:
    // MUL I=A,B,0
    // ADD R,I,C
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::MULADDW_OP2:
  case MachineCombinerPattern::MULADDX_OP2:
    // MUL I=A,B,0
    // ADD R,C,I
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MULADDWI_OP1:
  case MachineCombinerPattern::MULADDXI_OP1: {
    // MUL I=A,B,0
    // ADD R,I,Imm
    // ==> ORR  V, ZR, Imm
    // ==> MADD R,A,B,V
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    uint64_t Imm = Root.getOperand(2).getImm();

    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  case MachineCombinerPattern::MULSUBW_OP1:
  case MachineCombinerPattern::MULSUBX_OP1: {
    // MUL I=A,B,0
    // SUB R,I, C
    // ==> SUB  V, 0, C
    // ==> MADD R,A,B,V // = -C + A*B
    // --- Create(MADD);
    const TargetRegisterClass *SubRC;
    unsigned SubOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
      SubOpc = AArch64::SUBWrr;
      SubRC = &AArch64::GPR32spRegClass;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      SubOpc = AArch64::SUBXrr;
      SubRC = &AArch64::GPR64spRegClass;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(SubRC);
    // SUB NewVR, 0, C
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
            .addReg(ZeroReg)
            .addOperand(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    break;
  }
  case MachineCombinerPattern::MULSUBW_OP2:
  case MachineCombinerPattern::MULSUBX_OP2:
    // MUL I=A,B,0
    // SUB R,C,I
    // ==> MSUB R,A,B,C (computes C - A*B)
    // --- Create(MSUB);
    if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
      Opc = AArch64::MSUBWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MSUBXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MULSUBWI_OP1:
  case MachineCombinerPattern::MULSUBXI_OP1: {
    // MUL I=A,B,0
    // SUB R,I, Imm
    // ==> ORR  V, ZR, -Imm
    // ==> MADD R,A,B,V // = -Imm + A*B
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    int Imm = Root.getOperand(2).getImm();
    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = -Imm << (64 - BitSize) >> (64 - BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  } // end switch (Pattern)
  // Record MUL and ADD/SUB for deletion.
  DelInstrs.push_back(MUL);
  DelInstrs.push_back(&Root);
}
/// \brief Replace csinc-branch sequence by simple conditional branch
///
/// Examples:
/// 1.
///   csinc w9, wzr, wzr, <condition code>
///   tbnz  w9, #0, 0x44
///     to
///   b.<inverted condition code>
///
/// 2.
///   csinc w9, wzr, wzr, <condition code>
///   tbz   w9, #0, 0x44
///     to
///   b.<condition code>
///
/// \param MI Conditional Branch
/// \return True when the simple conditional branch is generated
///
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
  bool IsNegativeBranch = false;
  bool IsTestAndBranch = false;
  unsigned TargetBBInMI = 0;
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    return false;
  case AArch64::CBZW:
  case AArch64::CBZX:
    TargetBBInMI = 1;
    break;
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    TargetBBInMI = 1;
    IsNegativeBranch = true;
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
    TargetBBInMI = 2;
    IsTestAndBranch = true;
    break;
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    TargetBBInMI = 2;
    IsNegativeBranch = true;
    IsTestAndBranch = true;
    break;
  }
  // So we increment a zero register and test for bits other
  // than bit 0? Conservatively bail out in case the verifier
  // missed this case.
  if (IsTestAndBranch && MI->getOperand(1).getImm())
    return false;

  // Find Definition.
  assert(MI->getParent() && "Incomplete machine instruction\n");
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  unsigned VReg = MI->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(VReg);

  // Look for CSINC.
  if (!(DefMI->getOpcode() == AArch64::CSINCWr &&
        DefMI->getOperand(1).getReg() == AArch64::WZR &&
        DefMI->getOperand(2).getReg() == AArch64::WZR) &&
      !(DefMI->getOpcode() == AArch64::CSINCXr &&
        DefMI->getOperand(1).getReg() == AArch64::XZR &&
        DefMI->getOperand(2).getReg() == AArch64::XZR))
    return false;

  if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
    return false;

  AArch64CC::CondCode CC =
      (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
  bool CheckOnlyCCWrites = true;
  // Convert only when the condition code is not modified between
  // the CSINC and the branch. The CC may be used by other
  // instructions in between.
  if (modifiesConditionCode(DefMI, MI, CheckOnlyCCWrites, &getRegisterInfo()))
    return false;

  MachineBasicBlock &RefToMBB = *MBB;
  MachineBasicBlock *TBB = MI->getOperand(TargetBBInMI).getMBB();
  DebugLoc DL = MI->getDebugLoc();
  if (IsNegativeBranch)
    CC = AArch64CC::getInvertedCondCode(CC);
  BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
  MI->eraseFromParent();
  return true;
}
std::pair<unsigned, unsigned>
AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = AArch64II::MO_FRAGMENT;
  return std::make_pair(TF & Mask, TF & ~Mask);
}
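
// Note: MO_FRAGMENT masks the mutually exclusive "which piece of the symbol"
// flags (page, pageoff, g0-g3, hi12), so the pair returned above is (direct
// fragment flags, remaining bitmask flags such as GOT/NC/TLS).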
ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace AArch64II;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PAGE, "aarch64-page"},
      {MO_PAGEOFF, "aarch64-pageoff"},
      {MO_G3, "aarch64-g3"},
      {MO_G2, "aarch64-g2"},
      {MO_G1, "aarch64-g1"},
      {MO_G0, "aarch64-g0"},
      {MO_HI12, "aarch64-hi12"}};
  return makeArrayRef(TargetFlags);
}
ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace AArch64II;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_GOT, "aarch64-got"},
      {MO_NC, "aarch64-nc"},
      {MO_TLS, "aarch64-tls"},
      {MO_CONSTPOOL, "aarch64-constant-pool"}};
  return makeArrayRef(TargetFlags);
}