//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "aarch64-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
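
// Note: the tblgen-erated parts of the selector are spliced in from
// AArch64GenGlobalISel.inc in several pieces: here a predicate bitset sized
// to the subtarget features, and further below the member declarations and
// the generated selectImpl() matcher itself.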

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  ComplexRendererFns selectArithImmed(MachineOperand &Root) const;

  ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
                                            unsigned Size) const;

  ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
                                           unsigned Size) const;
  template <int Width>
  ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }
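
  // These renderer helpers are bound by name from the tblgen-erated patterns
  // (ComplexPattern equivalents); Width is the access size in bits, so
  // Width / 8 gives the byte scale that the *ui addressing modes expect.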

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - its operands are not all in the same register bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the aarch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical register operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
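      // There is no scalar FP bitwise OR on AArch64, so a 64-bit G_OR whose
      // operands were assigned to the FPR bank is mapped to the 8x8 vector
      // ORR, which computes the same bitwise result on the low 64 bits.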
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  }
  return GenericOpc;
}

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {

  unsigned DstReg = I.getOperand(0).getReg();
  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");
    return true;
  }

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);

  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert(
      (DstSize == SrcSize ||
       // Copies are a means to set up initial types, so the number of
       // bits may not exactly match.
       (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
        DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI)) ||
       // Copies are a means to move bits around; as long as we stay
       // on the same register class, that's fine. Otherwise, that
       // means we need some SUBREG_TO_REG or AND & co.
       (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
      "Copy with different width?!");
  assert((DstSize <= 64 || RegBank.getID() == AArch64::FPRRegBankID) &&
         "GPRs cannot get more than 64-bit width values");

  const TargetRegisterClass *RC = nullptr;
  if (RegBank.getID() == AArch64::FPRRegBankID) {
    if (DstSize <= 16)
      RC = &AArch64::FPR16RegClass;
    else if (DstSize <= 32)
      RC = &AArch64::FPR32RegClass;
    else if (DstSize <= 64)
      RC = &AArch64::FPR64RegClass;
    else if (DstSize <= 128)
      RC = &AArch64::FPR128RegClass;
    else {
      DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
      return false;
    }
  } else {
    assert(RegBank.getID() == AArch64::GPRRegBankID &&
           "Bitcast for the flags?");
    RC = DstSize <= 32 ? &AArch64::GPR32allRegClass
                       : &AArch64::GPR64allRegClass;
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return true;
}

static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  }
}

static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}
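
// Some IR FP predicates have no single AArch64 condition code. For those,
// CondCode2 is set to a second condition and the caller must OR the two
// CSINC results together; e.g. FCMP_ONE becomes MI || GT and FCMP_UEQ
// becomes EQ || VS (FCMP sets V when the operands are unordered).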
static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

bool AArch64InstructionSelector::selectCompareBranch(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
  MachineInstr *CCMI = MRI.getVRegDef(CondReg);
  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
    CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
  if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
    return false;

  unsigned LHS = CCMI->getOperand(2).getReg();
  unsigned RHS = CCMI->getOperand(3).getReg();
  if (!getConstantVRegVal(RHS, MRI))
    std::swap(RHS, LHS);

  const auto RHSImm = getConstantVRegVal(RHS, MRI);
  if (!RHSImm || *RHSImm != 0)
    return false;

  const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
  if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
    return false;

  const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
  unsigned CBOpc = 0;
  if (CmpWidth <= 32)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
  else if (CmpWidth == 64)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
  else
    return false;

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
                 .addUse(LHS)
                 .addMBB(DestMBB);
  constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectVaStartAAPCS(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  return false;
}

bool AArch64InstructionSelector::selectVaStartDarwin(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  unsigned ListReg = I.getOperand(0).getReg();

  unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
          .addDef(ArgsAddrReg)
          .addFrameIndex(FuncInfo->getVarArgsStackIndex())
          .addImm(0)
          .addImm(0);

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);

  MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
            .addUse(ArgsAddrReg)
            .addUse(ListReg)
            .addImm(0)
            .addMemOperand(*I.memoperands_begin());

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::select(MachineInstr &I,
                                        CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  // G_PHI requires same handling as PHI
  if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
      const unsigned DefReg = I.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const TargetRegisterClass *DefRC = nullptr;
      if (TargetRegisterInfo::isPhysicalRegister(DefReg)) {
        DefRC = TRI.getRegClass(DefReg);
      } else {
        const RegClassOrRegBank &RegClassOrBank =
            MRI.getRegClassOrRegBank(DefReg);

        DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
        if (!DefRC) {
          if (!DefTy.isValid()) {
            DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
            return false;
          }
          const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
          DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
          if (!DefRC) {
            DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
            return false;
          }
        }
      }

      I.setDesc(TII.get(TargetOpcode::PHI));

      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    DEBUG(dbgs() << "Generic instruction has unexpected implicit operands\n");
    return false;
  }

  if (selectImpl(I, CoverageInfo))
    return true;

  LLT Ty =
      I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};

  switch (Opcode) {
  case TargetOpcode::G_BRCOND: {
    if (Ty.getSizeInBits() > 32) {
      // We shouldn't need this on AArch64, but it would be implemented as an
      // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
      // bit being tested is < 32.
      DEBUG(dbgs() << "G_BRCOND has type: " << Ty
                   << ", expected at most 32-bits");
      return false;
    }

    const unsigned CondReg = I.getOperand(0).getReg();
    MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

    if (selectCompareBranch(I, MF, MRI))
      return true;
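
    // Otherwise, fall back to testing bit 0 of the boolean condition
    // register: TBNZW branches to the destination when that bit is set.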
    auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
                   .addUse(CondReg)
                   .addImm(/*bit offset=*/0)
                   .addMBB(DestMBB);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
  }

  case TargetOpcode::G_BRINDIRECT: {
    I.setDesc(TII.get(AArch64::BR));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_CONSTANT: {
    const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;

    const LLT s32 = LLT::scalar(32);
    const LLT s64 = LLT::scalar(64);
    const LLT p0 = LLT::pointer(0, 64);

    const unsigned DefReg = I.getOperand(0).getReg();
    const LLT DefTy = MRI.getType(DefReg);
    const unsigned DefSize = DefTy.getSizeInBits();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    // FIXME: Redundant check, but even less readable when factored out.
    if (isFP) {
      if (Ty != s32 && Ty != s64) {
        DEBUG(dbgs() << "Unable to materialize FP " << Ty
                     << " constant, expected: " << s32 << " or " << s64
                     << '\n');
        return false;
      }

      if (RB.getID() != AArch64::FPRRegBankID) {
        DEBUG(dbgs() << "Unable to materialize FP " << Ty
                     << " constant on bank: " << RB << ", expected: FPR\n");
        return false;
      }

      // The case when we have 0.0 is covered by tablegen. Reject it here so we
      // can be sure tablegen works correctly and isn't rescued by this code.
      if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0))
        return false;
    } else {
      // s32 and s64 are covered by tablegen.
      if (Ty != p0 && Ty != s32 && Ty != s64) {
        DEBUG(dbgs() << "Unable to materialize integer " << Ty
                     << " constant, expected: " << s32 << ", " << s64 << ", or "
                     << p0 << '\n');
        return false;
      }

      if (RB.getID() != AArch64::GPRRegBankID) {
        DEBUG(dbgs() << "Unable to materialize integer " << Ty
                     << " constant on bank: " << RB << ", expected: GPR\n");
        return false;
      }
    }

    const unsigned MovOpc =
        DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;

    I.setDesc(TII.get(MovOpc));

    if (isFP) {
      const TargetRegisterClass &GPRRC =
          DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
      const TargetRegisterClass &FPRRC =
          DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;

      const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
      MachineOperand &RegOp = I.getOperand(0);
      RegOp.setReg(DefGPRReg);

      BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
              TII.get(AArch64::COPY))
          .addDef(DefReg)
          .addUse(DefGPRReg);

      if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
        DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
        return false;
      }

      MachineOperand &ImmOp = I.getOperand(1);
      // FIXME: Is going through int64_t always correct?
      ImmOp.ChangeToImmediate(
          ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
    } else if (I.getOperand(1).isCImm()) {
      uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
      I.getOperand(1).ChangeToImmediate(Val);
    } else if (I.getOperand(1).isImm()) {
      uint64_t Val = I.getOperand(1).getImm();
      I.getOperand(1).ChangeToImmediate(Val);
    } else
      return false;

    constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_EXTRACT: {
    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    // Larger extracts are vectors, same-size extracts should be something else
    // by now (either split up or simplified to a COPY).
    if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(AArch64::UBFMXri));
    MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
                                      Ty.getSizeInBits() - 1);

    unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
            TII.get(AArch64::COPY))
        .addDef(I.getOperand(0).getReg())
        .addUse(DstReg, 0, AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(0).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(0).setReg(DstReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_INSERT: {
    LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
    // Larger inserts are vectors, same-size ones should be something else by
    // now (split up or turned into COPYs).
    if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(AArch64::BFMXri));
    unsigned LSB = I.getOperand(3).getImm();
    unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    I.getOperand(3).setImm((64 - LSB) % 64);
    MachineInstrBuilder(MF, I).addImm(Width - 1);

    unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
            TII.get(AArch64::SUBREG_TO_REG))
        .addDef(SrcReg)
        .addImm(0)
        .addUse(I.getOperand(2).getReg())
        .addImm(AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(2).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(2).setReg(SrcReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FRAME_INDEX: {
    // allocas and G_FRAME_INDEX are only supported in addrspace(0).
    if (Ty != LLT::pointer(0, 64)) {
      DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
                   << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }
    I.setDesc(TII.get(AArch64::ADDXri));

    // MOs for a #0 shifted immediate.
    I.addOperand(MachineOperand::CreateImm(0));
    I.addOperand(MachineOperand::CreateImm(0));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_GLOBAL_VALUE: {
    auto GV = I.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // FIXME: we don't support TLS yet.
      return false;
    }
    unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
    if (OpFlags & AArch64II::MO_GOT) {
      I.setDesc(TII.get(AArch64::LOADgot));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else if (TM.getCodeModel() == CodeModel::Large) {
      // Materialize the global using movz/movk instructions.
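      // Roughly, the emitted sequence builds the 64-bit address in four
      // 16-bit chunks, e.g.:
      //   movz x0, #:abs_g0_nc:sym
      //   movk x0, #:abs_g1_nc:sym
      //   movk x0, #:abs_g2_nc:sym
      //   movk x0, #:abs_g3:sym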
      unsigned MovZDstReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      auto InsertPt = std::next(I.getIterator());
      auto MovZ =
          BuildMI(MBB, InsertPt, I.getDebugLoc(), TII.get(AArch64::MOVZXi))
              .addDef(MovZDstReg);
      MovZ->addOperand(MF, I.getOperand(1));
      MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
                                         AArch64II::MO_NC);
      MovZ->addOperand(MF, MachineOperand::CreateImm(0));
      constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);

      auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags,
                           unsigned Offset, unsigned ForceDstReg) {
        unsigned DstReg =
            ForceDstReg ? ForceDstReg
                        : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
        auto MovI = BuildMI(MBB, InsertPt, MovZ->getDebugLoc(),
                            TII.get(AArch64::MOVKXi))
                        .addDef(DstReg)
                        .addUse(SrcReg);
        MovI->addOperand(MF, MachineOperand::CreateGA(
                                 GV, MovZ->getOperand(1).getOffset(), Flags));
        MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
        constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
        return DstReg;
      };

      unsigned DstReg = BuildMovK(MovZ->getOperand(0).getReg(),
                                  AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
      DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
      BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
      I.eraseFromParent();
      return true;
    } else {
      I.setDesc(TII.get(AArch64::MOVaddr));
      I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
      MachineInstrBuilder MIB(MF, I);
      MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
                           OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    LLT MemTy = Ty;
    LLT PtrTy = MRI.getType(I.getOperand(1).getReg());

    if (PtrTy != LLT::pointer(0, 64)) {
      DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
                   << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }

    auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    // FIXME: PR36018: Volatile loads in some cases are incorrectly selected by
    // folding with an extend. Until we have a G_SEXTLOAD solution bail out if
    // we hit a volatile load here.
    if (Opcode == TargetOpcode::G_LOAD && MemOp.isVolatile())
      return false;

    const unsigned PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
    // Sanity-check the pointer register.
    assert(PtrRB.getID() == AArch64::GPRRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(MRI.getType(PtrReg).isPointer() &&
           "Load/Store pointer operand isn't a pointer");
#endif

    const unsigned ValReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);

    const unsigned NewOpc =
        selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemTy.getSizeInBits());
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));

    uint64_t Offset = 0;
    auto *PtrMI = MRI.getVRegDef(PtrReg);

    // Try to fold a GEP into our unsigned immediate addressing mode.
    if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
      if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
        int64_t Imm = *COff;
        const unsigned Size = MemTy.getSizeInBits() / 8;
        const unsigned Scale = Log2_32(Size);
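        // For illustration: an 8-byte access has Scale == 3, so a byte
        // offset of 24 folds to immediate 3 in LDRXui/STRXui. The offset
        // must be size-aligned and fit the unsigned scaled 12-bit field.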
        if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
          unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
          I.getOperand(1).setReg(Ptr2Reg);
          PtrMI = MRI.getVRegDef(Ptr2Reg);
          Offset = Imm / Size;
        }
      }
    }

    // If we haven't folded anything into our addressing mode yet, try to fold
    // a frame index into the base+offset.
    if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
      I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());

    I.addOperand(MachineOperand::CreateImm(Offset));

    // If we're storing a 0, use WZR/XZR.
    if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
      if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
        if (I.getOpcode() == AArch64::STRWui)
          I.getOperand(0).setReg(AArch64::WZR);
        else if (I.getOpcode() == AArch64::STRXui)
          I.getOperand(0).setReg(AArch64::XZR);
      }
    }

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
      return false;
    }

    if (Ty != LLT::scalar(64)) {
      DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
                   << ", expected: " << LLT::scalar(64) << '\n');
      return false;
    }

    unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
                                                             : AArch64::UMULHrr;
    I.setDesc(TII.get(NewOpc));

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:

  case TargetOpcode::G_OR:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_GEP: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned OpSize = Ty.getSizeInBits();

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));
    // FIXME: Should the type be always reset in setDesc?

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_PTR_MASK: {
    uint64_t Align = I.getOperand(2).getImm();
    if (Align >= 64 || Align == 0)
      return false;

    uint64_t Mask = ~((1ULL << Align) - 1);
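    // For illustration: Align == 4 clears the low four bits, i.e. Mask ==
    // 0xfffffffffffffff0, which is encodable as an ANDXri logical immediate.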
    I.setDesc(TII.get(AArch64::ANDXri));
    I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());

    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

    if (DstRB.getID() != SrcRB.getID()) {
      DEBUG(dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
      return false;
    }

    if (DstRB.getID() == AArch64::GPRRegBankID) {
      const TargetRegisterClass *DstRC =
          getRegClassForTypeOnBank(DstTy, DstRB, RBI);
      if (!DstRC)
        return false;

      const TargetRegisterClass *SrcRC =
          getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
      if (!SrcRC)
        return false;

      if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
          !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
        DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      if (DstRC == SrcRC) {
        // Nothing to be done
      } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
                 SrcTy == LLT::scalar(64)) {
        llvm_unreachable("TableGen can import this case");
        return false;
      } else if (DstRC == &AArch64::GPR32RegClass &&
                 SrcRC == &AArch64::GPR64RegClass) {
        I.getOperand(1).setSubReg(AArch64::sub_32);
      } else {
        DEBUG(dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      I.setDesc(TII.get(TargetOpcode::COPY));
      return true;
    } else if (DstRB.getID() == AArch64::FPRRegBankID) {
      if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
        I.setDesc(TII.get(AArch64::XTNv4i16));
        constrainSelectedInstRegOperands(I, TII, TRI, RBI);
        return true;
      }
    }

    return false;
  }

  case TargetOpcode::G_ANYEXT: {
    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
    if (RBDst.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst << ", expected: GPR\n");
      return false;
    }

    const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
    if (RBSrc.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc << ", expected: GPR\n");
      return false;
    }

    const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

    if (DstSize == 0) {
      DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
      return false;
    }

    if (DstSize != 64 && DstSize > 32) {
      DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
                   << ", expected: 32 or 64\n");
      return false;
    }
    // At this point G_ANYEXT is just like a plain COPY, but we need
    // to explicitly form the 64-bit value if any.
    if (DstSize > 32) {
      unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(ExtSrc)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);
      I.getOperand(1).setReg(ExtSrc);
    }
    return selectCopy(I, TII, MRI, TRI, RBI);
  }

  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    unsigned Opcode = I.getOpcode();
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const bool isSigned = Opcode == TargetOpcode::G_SEXT;
    const unsigned DefReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
                   << ", expected: GPR\n");
      return false;
    }

    MachineInstr *ExtI;
    if (DstTy == LLT::scalar(64)) {
      // FIXME: Can we avoid manually doing this?
      if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
        DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
                     << " operand\n");
        return false;
      }

      const unsigned SrcXReg =
          MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(SrcXReg)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);
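
      // The 32-bit source now lives in the low half of a 64-bit register;
      // the SBFMXri/UBFMXri emitted below extends bits [SrcSize-1:0] of it
      // into the full X register.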
      const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcXReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
      const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else {
      return false;
    }

    constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
    if (NewOpc == Opcode)
      return false;

    I.setDesc(TII.get(NewOpc));
    constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    return true;
  }

  case TargetOpcode::G_INTTOPTR:
    // The importer is currently unable to import pointer types since they
    // didn't exist in SelectionDAG.
    return selectCopy(I, TII, MRI, TRI, RBI);

  case TargetOpcode::G_BITCAST:
    // Imported SelectionDAG rules can handle every bitcast except those that
    // bitcast from a type to the same type. Ideally, these shouldn't occur
    // but we might not run an optimizer that deletes them.
    if (MRI.getType(I.getOperand(0).getReg()) ==
        MRI.getType(I.getOperand(1).getReg()))
      return selectCopy(I, TII, MRI, TRI, RBI);
    return false;

  case TargetOpcode::G_SELECT: {
    if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
      DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
                   << ", expected: " << LLT::scalar(1) << '\n');
      return false;
    }

    const unsigned CondReg = I.getOperand(1).getReg();
    const unsigned TReg = I.getOperand(2).getReg();
    const unsigned FReg = I.getOperand(3).getReg();

    unsigned CSelOpc = 0;
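
    // A sketch of the lowering: ANDS wzr, %cond, #1 sets NZCV from bit 0 of
    // the i1 condition, then CSEL %dst, %true, %false, ne picks the result.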
    if (Ty == LLT::scalar(32)) {
      CSelOpc = AArch64::CSELWr;
    } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
      CSelOpc = AArch64::CSELXr;
    } else {
      return false;
    }

    MachineInstr &TstMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
             .addDef(AArch64::WZR)
             .addUse(CondReg)
             .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));

    MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
                                .addDef(I.getOperand(0).getReg())
                                .addUse(TReg)
                                .addUse(FReg)
                                .addImm(AArch64CC::NE);

    constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_ICMP: {
    if (Ty != LLT::scalar(32)) {
      DEBUG(dbgs() << "G_ICMP result has type: " << Ty
                   << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    unsigned ZReg = 0;

    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::SUBSWrr;
      ZReg = AArch64::WZR;
    } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
      CmpOpc = AArch64::SUBSXrr;
      ZReg = AArch64::XZR;
    } else {
      return false;
    }

    // CSINC increments the result by one when the condition code is false.
    // Therefore, we have to invert the predicate to get an increment by 1 when
    // the predicate is true.
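    // For example, for eq: SUBS wzr, %lhs, %rhs; CSINC %dst, wzr, wzr, ne
    // yields 1 when the operands are equal (NE fails, so wzr + 1) and 0
    // otherwise.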
    const AArch64CC::CondCode invCC =
        changeICMPPredToAArch64CC(CmpInst::getInversePredicate(
            (CmpInst::Predicate)I.getOperand(1).getPredicate()));

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addDef(ZReg)
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(I.getOperand(0).getReg())
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(invCC);

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_FCMP: {
    if (Ty != LLT::scalar(32)) {
      DEBUG(dbgs() << "G_FCMP result has type: " << Ty
                   << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::FCMPSrr;
    } else if (CmpTy == LLT::scalar(64)) {
      CmpOpc = AArch64::FCMPDrr;
    } else {
      return false;
    }

    AArch64CC::CondCode CC1, CC2;
    changeFCMPPredToAArch64CC(
        (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    const unsigned DefReg = I.getOperand(0).getReg();
    unsigned Def1Reg = DefReg;
    if (CC2 != AArch64CC::AL)
      Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(Def1Reg)
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(getInvertedCondCode(CC1));

    if (CC2 != AArch64CC::AL) {
      unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
      MachineInstr &CSet2MI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
               .addDef(Def2Reg)
               .addUse(AArch64::WZR)
               .addUse(AArch64::WZR)
               .addImm(getInvertedCondCode(CC2));
      MachineInstr &OrMI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
               .addDef(DefReg)
               .addUse(Def1Reg)
               .addUse(Def2Reg);
      constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
      constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
    }

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_VASTART:
    return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
                                : selectVaStartAAPCS(I, MF, MRI);
  case TargetOpcode::G_IMPLICIT_DEF:
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return the
/// immediate and the shifter operand via the returned renderer functions.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
  MachineInstr &MI = *Root.getParent();
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  uint64_t Immed;
  if (Root.isImm())
    Immed = Root.getImm();
  else if (Root.isCImm())
    Immed = Root.getCImm()->getZExtValue();
  else if (Root.isReg()) {
    MachineInstr *Def = MRI.getVRegDef(Root.getReg());
    if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
      return None;
    MachineOperand &Op1 = Def->getOperand(1);
    if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
      return None;
    Immed = Op1.getCImm()->getZExtValue();
  } else
    return None;

  unsigned ShiftAmt;
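
  // ADD/SUB immediates are a 12-bit value optionally shifted left by 12:
  // e.g. 0x123 -> (imm 0x123, LSL #0), 0x123000 -> (imm 0x123, LSL #12),
  // while 0x123456 fits neither form and is rejected below.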
  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return None;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
  }};
}

/// Select a "register plus unscaled signed 9-bit immediate" address. This
/// should only match when there is an offset that is not valid for a scaled
/// immediate addressing mode. The "Size" argument is the size in bytes of the
/// memory reference, which is needed here to know what is valid for a scaled
/// immediate addressing mode.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
                                                   unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  if (!isBaseWithConstantOffset(Root, MRI))
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  MachineOperand &OffImm = RootDef->getOperand(2);
  if (!OffImm.isReg())
    return None;
  MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
  if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
    return None;

  int64_t RHSC;
  MachineOperand &RHSOp1 = RHS->getOperand(1);
  if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
    return None;
  RHSC = RHSOp1.getCImm()->getSExtValue();

  // If the offset is valid as a scaled immediate, don't match here.
  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
    return None;
  if (RHSC >= -256 && RHSC < 256) {
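    // RHSC is within the signed 9-bit offset range of the unscaled
    // load/store forms (LDUR/STUR), so render the base plus raw offset.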
    MachineOperand &Base = RootDef->getOperand(1);
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
    }};
  }
  return None;
}

/// Select a "register plus scaled unsigned 12-bit immediate" address. The
/// "Size" argument is the size in bytes of the memory reference, which
/// determines the scale.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
                                                  unsigned Size) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  if (!Root.isReg())
    return None;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (!RootDef)
    return None;

  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
    if (LHSDef && RHSDef) {
      int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
          return {{
              [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
              [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
          }};

        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
        }};
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (selectAddrModeUnscaled(Root, Size).hasValue())
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
  }};
}

namespace llvm {
InstructionSelector *
createAArch64InstructionSelector(const AArch64TargetMachine &TM,
                                 AArch64Subtarget &Subtarget,
                                 AArch64RegisterBankInfo &RBI) {
  return new AArch64InstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm